Posted to common-commits@hadoop.apache.org by in...@apache.org on 2016/10/03 22:58:14 UTC

[01/57] [abbrv] hadoop git commit: HADOOP-13544. JDiff reports unnecessarily show unannotated APIs and cause confusion while our javadocs only show annotated and public APIs. (vinodkv via wangda)

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-10467 382dff747 -> 736d33cdd
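
For reference, the behavior change in this patch comes from swapping the JDiff
doclet (visible in the regenerated command line below): per the commit summary,
ExcludePrivateAnnotationsJDiffDoclet still listed unannotated public classes,
while IncludePublicAnnotationsJDiffDoclet emits only classes explicitly marked
with Hadoop's audience annotations, matching what the javadocs publish. A
minimal, hypothetical sketch of the distinction (each class in its own source
file; PublicApiExample and UnannotatedServerClass are invented names, whereas
RMNMSecurityInfoClass is one of the real classes this diff drops):

    import org.apache.hadoop.classification.InterfaceAudience;
    import org.apache.hadoop.classification.InterfaceStability;

    // Explicitly annotated @Public: the new doclet keeps this class
    // in the generated JDiff XML.
    @InterfaceAudience.Public
    @InterfaceStability.Stable
    public class PublicApiExample {
      public void doWork() { }
    }

    // Carries no @InterfaceAudience annotation at all: previously it still
    // appeared in the JDiff report despite being server-internal; the new
    // doclet omits it, which is why entries such as
    // org.apache.hadoop.yarn.server.RMNMSecurityInfoClass vanish below.
    public class UnannotatedServerClass {
      public void internalWork() { }
    }
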


http://git-wip-us.apache.org/repos/asf/hadoop/blob/875062b5/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.7.2.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.7.2.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.7.2.xml
index 1a1d88b..385a613 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.7.2.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.7.2.xml
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
 <!-- Generated by the JDiff Javadoc doclet -->
 <!-- (http://www.jdiff.org) -->
-<!-- on Thu May 12 17:47:55 PDT 2016 -->
+<!-- on Wed Aug 24 13:55:20 PDT 2016 -->
 
 <api
   xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
@@ -9,291 +9,16 @@
   name="hadoop-yarn-server-common 2.7.2"
   jdversion="1.0.9">
 
-<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/jdiff.jar -verbose -classpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/classes:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/hadoop-common-2.7.2.jar:/Users/vinodkv/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/vinodkv/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/vi
 nodkv/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/vinodkv/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/vinodkv/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/vinodkv/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/vinodkv/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/Users/vinodkv/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/vinodkv/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.ja
 r:/Users/vinodkv/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/vinodkv/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/vinodkv/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/vinodkv/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/vinodkv/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/vinodkv/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/vinodkv/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/vinodkv/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanut
 ils/1.7.0/commons-beanutils-1.7.0.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/vinodkv/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/vinodkv/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/vinodkv/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/vinodkv/.m2/re
 pository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/vinodkv/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/vinodkv/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/vinodkv/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/vinodkv/.m2/
 repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/vinodkv/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.7.2.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-2.7.2.jar:/Users/vinodkv/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/vinodkv/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/vinodkv/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/vinodkv/.m2/repository/com/google/inject/exten
 sions/guice-servlet/3.0/guice-servlet-3.0.jar:/Users/vinodkv/.m2/repository/com/google/inject/guice/3.0/guice-3.0.jar:/Users/vinodkv/.m2/repository/javax/inject/javax.inject/1/javax.inject-1.jar:/Users/vinodkv/.m2/repository/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/Users/vinodkv/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/vinodkv/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.7.0_45.jdk/Contents/Home/lib/tools.jar:/Users/vinodkv/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/vinodkv/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/vinodkv/.m2/repository/io/netty/netty/3.6.2.Final/netty-3
 .6.2.Final.jar:/Users/vinodkv/.m2/repository/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar -sourcepath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java -apidir /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/site/jdiff/xml -apiname hadoop-yarn-server-common 2.7.2 -->
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/jdiff.jar -verbose -classpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/classes:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/hadoop-common-2.7.2.jar:/Users/vinodkv/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/vinodkv/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/vin
 odkv/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/vinodkv/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/vinodkv/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/vinodkv/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/vinodkv/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/Users/vinodkv/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/vinodkv/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar
 :/Users/vinodkv/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/vinodkv/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/vinodkv/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/vinodkv/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/vinodkv/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/vinodkv/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/vinodkv/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/vinodkv/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanuti
 ls/1.7.0/commons-beanutils-1.7.0.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/vinodkv/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/vinodkv/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/vinodkv/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/vinodkv/.m2/rep
 ository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/vinodkv/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/vinodkv/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/vinodkv/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/vinodkv/.m2/r
 epository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/vinodkv/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.7.2.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-2.7.2.jar:/Users/vinodkv/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/vinodkv/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/vinodkv/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/vinodkv/.m2/repository/com/google/inject/extens
 ions/guice-servlet/3.0/guice-servlet-3.0.jar:/Users/vinodkv/.m2/repository/com/google/inject/guice/3.0/guice-3.0.jar:/Users/vinodkv/.m2/repository/javax/inject/javax.inject/1/javax.inject-1.jar:/Users/vinodkv/.m2/repository/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/Users/vinodkv/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/vinodkv/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_40.jdk/Contents/Home/lib/tools.jar:/Users/vinodkv/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/vinodkv/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/vinodkv/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.
 6.2.Final.jar:/Users/vinodkv/.m2/repository/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar -sourcepath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/jdiff.jar -apidir /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/site/jdiff/xml -apiname hadoop-yarn-server-common 2.7.2 -->
 <package name="org.apache.hadoop.yarn.server">
-  <!-- start class org.apache.hadoop.yarn.server.RMNMSecurityInfoClass -->
-  <class name="RMNMSecurityInfoClass" extends="org.apache.hadoop.security.SecurityInfo"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="RMNMSecurityInfoClass"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="getKerberosInfo" return="org.apache.hadoop.security.KerberosInfo"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="protocol" type="java.lang.Class"/>
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-    </method>
-    <method name="getTokenInfo" return="org.apache.hadoop.security.token.TokenInfo"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="protocol" type="java.lang.Class"/>
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.RMNMSecurityInfoClass -->
 </package>
 <package name="org.apache.hadoop.yarn.server.api">
-  <!-- start interface org.apache.hadoop.yarn.server.api.ResourceManagerConstants -->
-  <interface name="ResourceManagerConstants"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <field name="RM_INVALID_IDENTIFIER" type="long"
-      transient="false" volatile="false"
-      static="true" final="true" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[This states the invalid identifier of Resource Manager. This is used as a
- default value for initializing RM identifier. Currently, RM is using time
- stamp as RM identifier.]]>
-      </doc>
-    </field>
-  </interface>
-  <!-- end interface org.apache.hadoop.yarn.server.api.ResourceManagerConstants -->
-  <!-- start interface org.apache.hadoop.yarn.server.api.ResourceTracker -->
-  <interface name="ResourceTracker"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="registerNodeManager" return="org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="request" type="org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="nodeHeartbeat" return="org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="request" type="org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-  </interface>
-  <!-- end interface org.apache.hadoop.yarn.server.api.ResourceTracker -->
-  <!-- start interface org.apache.hadoop.yarn.server.api.ResourceTrackerPB -->
-  <interface name="ResourceTrackerPB"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.yarn.proto.ResourceTracker.ResourceTrackerService.BlockingInterface"/>
-  </interface>
-  <!-- end interface org.apache.hadoop.yarn.server.api.ResourceTrackerPB -->
-  <!-- start interface org.apache.hadoop.yarn.server.api.SCMUploaderProtocolPB -->
-  <interface name="SCMUploaderProtocolPB"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.yarn.proto.SCMUploaderProtocol.SCMUploaderProtocolService.BlockingInterface"/>
-  </interface>
-  <!-- end interface org.apache.hadoop.yarn.server.api.SCMUploaderProtocolPB -->
-  <!-- start class org.apache.hadoop.yarn.server.api.ServerRMProxy -->
-  <class name="ServerRMProxy" extends="org.apache.hadoop.yarn.client.RMProxy"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="createRMProxy" return="T"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="configuration" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="protocol" type="java.lang.Class"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Create a proxy to the ResourceManager for the specified protocol.
- @param configuration Configuration with all the required information.
- @param protocol Server protocol for which proxy is being requested.
- @param <T> Type of proxy.
- @return Proxy to the ResourceManager for the specified server protocol.
- @throws IOException]]>
-      </doc>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.api.ServerRMProxy -->
 </package>
 <package name="org.apache.hadoop.yarn.server.api.impl.pb.client">
-  <!-- start class org.apache.hadoop.yarn.server.api.impl.pb.client.ResourceTrackerPBClientImpl -->
-  <class name="ResourceTrackerPBClientImpl" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.yarn.server.api.ResourceTracker"/>
-    <implements name="java.io.Closeable"/>
-    <constructor name="ResourceTrackerPBClientImpl" type="long, java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </constructor>
-    <method name="close"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="registerNodeManager" return="org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="request" type="org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="nodeHeartbeat" return="org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="request" type="org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.api.impl.pb.client.ResourceTrackerPBClientImpl -->
-  <!-- start class org.apache.hadoop.yarn.server.api.impl.pb.client.SCMUploaderProtocolPBClientImpl -->
-  <class name="SCMUploaderProtocolPBClientImpl" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.yarn.server.api.SCMUploaderProtocol"/>
-    <implements name="java.io.Closeable"/>
-    <constructor name="SCMUploaderProtocolPBClientImpl" type="long, java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </constructor>
-    <method name="close"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="notify" return="org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderNotifyResponse"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="request" type="org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderNotifyRequest"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="canUpload" return="org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderCanUploadResponse"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="request" type="org.apache.hadoop.yarn.server.api.protocolrecords.SCMUploaderCanUploadRequest"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.api.impl.pb.client.SCMUploaderProtocolPBClientImpl -->
 </package>
 <package name="org.apache.hadoop.yarn.server.api.impl.pb.service">
-  <!-- start class org.apache.hadoop.yarn.server.api.impl.pb.service.ResourceTrackerPBServiceImpl -->
-  <class name="ResourceTrackerPBServiceImpl" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.yarn.server.api.ResourceTrackerPB"/>
-    <constructor name="ResourceTrackerPBServiceImpl" type="org.apache.hadoop.yarn.server.api.ResourceTracker"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="registerNodeManager" return="org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerResponseProto"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="controller" type="com.google.protobuf.RpcController"/>
-      <param name="proto" type="org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerRequestProto"/>
-      <exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
-    </method>
-    <method name="nodeHeartbeat" return="org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatResponseProto"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="controller" type="com.google.protobuf.RpcController"/>
-      <param name="proto" type="org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProto"/>
-      <exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.api.impl.pb.service.ResourceTrackerPBServiceImpl -->
-  <!-- start class org.apache.hadoop.yarn.server.api.impl.pb.service.SCMUploaderProtocolPBServiceImpl -->
-  <class name="SCMUploaderProtocolPBServiceImpl" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.yarn.server.api.SCMUploaderProtocolPB"/>
-    <constructor name="SCMUploaderProtocolPBServiceImpl" type="org.apache.hadoop.yarn.server.api.SCMUploaderProtocol"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="notify" return="org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SCMUploaderNotifyResponseProto"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="controller" type="com.google.protobuf.RpcController"/>
-      <param name="proto" type="org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SCMUploaderNotifyRequestProto"/>
-      <exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
-    </method>
-    <method name="canUpload" return="org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SCMUploaderCanUploadResponseProto"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="controller" type="com.google.protobuf.RpcController"/>
-      <param name="proto" type="org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SCMUploaderCanUploadRequestProto"/>
-      <exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.api.impl.pb.service.SCMUploaderProtocolPBServiceImpl -->
 </package>
 <package name="org.apache.hadoop.yarn.server.api.records">
-  <!-- start interface org.apache.hadoop.yarn.server.api.records.MasterKey -->
-  <interface name="MasterKey"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="getKeyId" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setKeyId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="keyId" type="int"/>
-    </method>
-    <method name="getBytes" return="java.nio.ByteBuffer"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setBytes"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="bytes" type="java.nio.ByteBuffer"/>
-    </method>
-  </interface>
-  <!-- end interface org.apache.hadoop.yarn.server.api.records.MasterKey -->
-  <!-- start class org.apache.hadoop.yarn.server.api.records.NodeAction -->
-  <class name="NodeAction" extends="java.lang.Enum"
-    abstract="false"
-    static="false" final="true" visibility="public"
-    deprecated="not deprecated">
-    <method name="values" return="org.apache.hadoop.yarn.server.api.records.NodeAction[]"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="valueOf" return="org.apache.hadoop.yarn.server.api.records.NodeAction"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
-    </method>
-    <doc>
-    <![CDATA[The NodeManager is instructed to perform the given action.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.api.records.NodeAction -->
   <!-- start class org.apache.hadoop.yarn.server.api.records.NodeHealthStatus -->
   <class name="NodeHealthStatus" extends="java.lang.Object"
     abstract="true"
@@ -348,411 +73,20 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.yarn.server.api.records.NodeHealthStatus -->
-  <!-- start class org.apache.hadoop.yarn.server.api.records.NodeStatus -->
-  <class name="NodeStatus" extends="java.lang.Object"
-    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="NodeStatus"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="newInstance" return="org.apache.hadoop.yarn.server.api.records.NodeStatus"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
-      <param name="responseId" type="int"/>
-      <param name="containerStatuses" type="java.util.List"/>
-      <param name="keepAliveApplications" type="java.util.List"/>
-      <param name="nodeHealthStatus" type="org.apache.hadoop.yarn.server.api.records.NodeHealthStatus"/>
-    </method>
-    <method name="getNodeId" return="org.apache.hadoop.yarn.api.records.NodeId"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getResponseId" return="int"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getContainersStatuses" return="java.util.List"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setContainersStatuses"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="containersStatuses" type="java.util.List"/>
-    </method>
-    <method name="getKeepAliveApplications" return="java.util.List"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setKeepAliveApplications"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="appIds" type="java.util.List"/>
-    </method>
-    <method name="getNodeHealthStatus" return="org.apache.hadoop.yarn.server.api.records.NodeHealthStatus"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setNodeHealthStatus"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="healthStatus" type="org.apache.hadoop.yarn.server.api.records.NodeHealthStatus"/>
-    </method>
-    <method name="setNodeId"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
-    </method>
-    <method name="setResponseId"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="responseId" type="int"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.api.records.NodeStatus -->
 </package>
 <package name="org.apache.hadoop.yarn.server.api.records.impl.pb">
-  <!-- start class org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl -->
-  <class name="MasterKeyPBImpl" extends="org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.yarn.server.api.records.MasterKey"/>
-    <constructor name="MasterKeyPBImpl"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <constructor name="MasterKeyPBImpl" type="org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto"
-      abstract="false" native="false" synchronized="true"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getKeyId" return="int"
-      abstract="false" native="false" synchronized="true"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setKeyId"
-      abstract="false" native="false" synchronized="true"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="id" type="int"/>
-    </method>
-    <method name="getBytes" return="java.nio.ByteBuffer"
-      abstract="false" native="false" synchronized="true"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setBytes"
-      abstract="false" native="false" synchronized="true"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="bytes" type="java.nio.ByteBuffer"/>
-    </method>
-    <method name="hashCode" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="equals" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="obj" type="java.lang.Object"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl -->
-  <!-- start class org.apache.hadoop.yarn.server.api.records.impl.pb.NodeHealthStatusPBImpl -->
-  <class name="NodeHealthStatusPBImpl" extends="org.apache.hadoop.yarn.server.api.records.NodeHealthStatus"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="NodeHealthStatusPBImpl"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <constructor name="NodeHealthStatusPBImpl" type="org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeHealthStatusProto"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeHealthStatusProto"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="hashCode" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="equals" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="other" type="java.lang.Object"/>
-    </method>
-    <method name="toString" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getIsNodeHealthy" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setIsNodeHealthy"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="isNodeHealthy" type="boolean"/>
-    </method>
-    <method name="getHealthReport" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setHealthReport"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="healthReport" type="java.lang.String"/>
-    </method>
-    <method name="getLastHealthReportTime" return="long"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setLastHealthReportTime"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="lastHealthReport" type="long"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.api.records.impl.pb.NodeHealthStatusPBImpl -->
-  <!-- start class org.apache.hadoop.yarn.server.api.records.impl.pb.NodeStatusPBImpl -->
-  <class name="NodeStatusPBImpl" extends="org.apache.hadoop.yarn.server.api.records.NodeStatus"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="NodeStatusPBImpl"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <constructor name="NodeStatusPBImpl" type="org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProto"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProto"
-      abstract="false" native="false" synchronized="true"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="hashCode" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="equals" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="other" type="java.lang.Object"/>
-    </method>
-    <method name="getResponseId" return="int"
-      abstract="false" native="false" synchronized="true"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setResponseId"
-      abstract="false" native="false" synchronized="true"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="responseId" type="int"/>
-    </method>
-    <method name="getNodeId" return="org.apache.hadoop.yarn.api.records.NodeId"
-      abstract="false" native="false" synchronized="true"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setNodeId"
-      abstract="false" native="false" synchronized="true"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
-    </method>
-    <method name="getContainersStatuses" return="java.util.List"
-      abstract="false" native="false" synchronized="true"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setContainersStatuses"
-      abstract="false" native="false" synchronized="true"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="containers" type="java.util.List"/>
-    </method>
-    <method name="getKeepAliveApplications" return="java.util.List"
-      abstract="false" native="false" synchronized="true"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setKeepAliveApplications"
-      abstract="false" native="false" synchronized="true"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="appIds" type="java.util.List"/>
-    </method>
-    <method name="getNodeHealthStatus" return="org.apache.hadoop.yarn.server.api.records.NodeHealthStatus"
-      abstract="false" native="false" synchronized="true"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setNodeHealthStatus"
-      abstract="false" native="false" synchronized="true"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="healthStatus" type="org.apache.hadoop.yarn.server.api.records.NodeHealthStatus"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.api.records.impl.pb.NodeStatusPBImpl -->
 </package>
-<package name="org.apache.hadoop.yarn.server.metrics">
-</package>
-<package name="org.apache.hadoop.yarn.server.records">
-</package>
-<package name="org.apache.hadoop.yarn.server.records.impl.pb">
-  <!-- start class org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl -->
-  <class name="VersionPBImpl" extends="org.apache.hadoop.yarn.server.records.Version"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="VersionPBImpl"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <constructor name="VersionPBImpl" type="org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getMajorVersion" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setMajorVersion"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="major" type="int"/>
-    </method>
-    <method name="getMinorVersion" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setMinorVersion"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="minor" type="int"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl -->
+<package name="org.apache.hadoop.yarn.server.metrics">
+</package>
+<package name="org.apache.hadoop.yarn.server.records">
+</package>
+<package name="org.apache.hadoop.yarn.server.records.impl.pb">
 </package>
 <package name="org.apache.hadoop.yarn.server.security.http">
-  <!-- start class org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilterInitializer -->
-  <class name="RMAuthenticationFilterInitializer" extends="org.apache.hadoop.http.FilterInitializer"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="RMAuthenticationFilterInitializer"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="createFilterConfig" return="java.util.Map"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-    </method>
-    <method name="initFilter"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="container" type="org.apache.hadoop.http.FilterContainer"/>
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilterInitializer -->
 </package>
 <package name="org.apache.hadoop.yarn.server.sharedcache">
 </package>
 <package name="org.apache.hadoop.yarn.server.utils">
-  <!-- start class org.apache.hadoop.yarn.server.utils.BuilderUtils.ApplicationIdComparator -->
-  <class name="BuilderUtils.ApplicationIdComparator" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="java.util.Comparator"/>
-    <implements name="java.io.Serializable"/>
-    <constructor name="BuilderUtils.ApplicationIdComparator"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="compare" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="a1" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
-      <param name="a2" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.utils.BuilderUtils.ApplicationIdComparator -->
-  <!-- start class org.apache.hadoop.yarn.server.utils.BuilderUtils.ContainerIdComparator -->
-  <class name="BuilderUtils.ContainerIdComparator" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="java.util.Comparator"/>
-    <implements name="java.io.Serializable"/>
-    <constructor name="BuilderUtils.ContainerIdComparator"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="compare" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="c1" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
-      <param name="c2" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.utils.BuilderUtils.ContainerIdComparator -->
   <!-- start class org.apache.hadoop.yarn.server.utils.LeveldbIterator -->
   <class name="LeveldbIterator" extends="java.lang.Object"
     abstract="false"
@@ -890,384 +224,8 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.yarn.server.utils.LeveldbIterator -->
-  <!-- start class org.apache.hadoop.yarn.server.utils.Lock -->
-  <class name="Lock"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="java.lang.annotation.Annotation"/>
-    <doc>
-    <![CDATA[Annotation to document locking order.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.utils.Lock -->
-  <!-- start class org.apache.hadoop.yarn.server.utils.Lock.NoLock -->
-  <class name="Lock.NoLock" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="Lock.NoLock"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.utils.Lock.NoLock -->
-  <!-- start class org.apache.hadoop.yarn.server.utils.YarnServerBuilderUtils -->
-  <class name="YarnServerBuilderUtils" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="YarnServerBuilderUtils"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="newNodeHeartbeatResponse" return="org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="responseId" type="int"/>
-      <param name="action" type="org.apache.hadoop.yarn.server.api.records.NodeAction"/>
-      <param name="containersToCleanUp" type="java.util.List"/>
-      <param name="applicationsToCleanUp" type="java.util.List"/>
-      <param name="containerTokenMasterKey" type="org.apache.hadoop.yarn.server.api.records.MasterKey"/>
-      <param name="nmTokenMasterKey" type="org.apache.hadoop.yarn.server.api.records.MasterKey"/>
-      <param name="nextHeartbeatInterval" type="long"/>
-    </method>
-    <doc>
-    <![CDATA[Server Builder utilities to construct various objects.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.utils.YarnServerBuilderUtils -->
 </package>
 <package name="org.apache.hadoop.yarn.server.webapp">
-  <!-- start class org.apache.hadoop.yarn.server.webapp.AppAttemptBlock -->
-  <class name="AppAttemptBlock" extends="org.apache.hadoop.yarn.webapp.view.HtmlBlock"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="AppAttemptBlock" type="org.apache.hadoop.yarn.api.ApplicationBaseProtocol, org.apache.hadoop.yarn.webapp.View.ViewContext"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="render"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="html" type="org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block"/>
-    </method>
-    <method name="generateOverview"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="appAttemptReport" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"/>
-      <param name="containers" type="java.util.Collection"/>
-      <param name="appAttempt" type="org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo"/>
-      <param name="node" type="java.lang.String"/>
-    </method>
-    <method name="hasAMContainer" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
-      <param name="containers" type="java.util.Collection"/>
-    </method>
-    <method name="createAttemptHeadRoomTable"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="html" type="org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block"/>
-    </method>
-    <method name="createTablesForAttemptMetrics"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="html" type="org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block"/>
-    </method>
-    <field name="appBaseProt" type="org.apache.hadoop.yarn.api.ApplicationBaseProtocol"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-    <field name="appAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.webapp.AppAttemptBlock -->
-  <!-- start class org.apache.hadoop.yarn.server.webapp.AppBlock -->
-  <class name="AppBlock" extends="org.apache.hadoop.yarn.webapp.view.HtmlBlock"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="AppBlock" type="org.apache.hadoop.yarn.api.ApplicationBaseProtocol, org.apache.hadoop.yarn.webapp.View.ViewContext, org.apache.hadoop.conf.Configuration"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="render"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="html" type="org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block"/>
-    </method>
-    <method name="generateApplicationTable"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="html" type="org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block"/>
-      <param name="callerUGI" type="org.apache.hadoop.security.UserGroupInformation"/>
-      <param name="attempts" type="java.util.Collection"/>
-    </method>
-    <method name="createApplicationMetricsTable"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="html" type="org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block"/>
-    </method>
-    <field name="appBaseProt" type="org.apache.hadoop.yarn.api.ApplicationBaseProtocol"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-    <field name="conf" type="org.apache.hadoop.conf.Configuration"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-    <field name="appID" type="org.apache.hadoop.yarn.api.records.ApplicationId"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.webapp.AppBlock -->
-  <!-- start class org.apache.hadoop.yarn.server.webapp.AppsBlock -->
-  <class name="AppsBlock" extends="org.apache.hadoop.yarn.webapp.view.HtmlBlock"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="AppsBlock" type="org.apache.hadoop.yarn.api.ApplicationBaseProtocol, org.apache.hadoop.yarn.webapp.View.ViewContext"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="fetchData"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="render"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="html" type="org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block"/>
-    </method>
-    <method name="renderData"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="html" type="org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block"/>
-    </method>
-    <field name="appBaseProt" type="org.apache.hadoop.yarn.api.ApplicationBaseProtocol"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-    <field name="reqAppStates" type="java.util.EnumSet"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-    <field name="callerUGI" type="org.apache.hadoop.security.UserGroupInformation"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-    <field name="appReports" type="java.util.Collection"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.webapp.AppsBlock -->
-  <!-- start class org.apache.hadoop.yarn.server.webapp.ContainerBlock -->
-  <class name="ContainerBlock" extends="org.apache.hadoop.yarn.webapp.view.HtmlBlock"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="ContainerBlock" type="org.apache.hadoop.yarn.api.ApplicationBaseProtocol, org.apache.hadoop.yarn.webapp.View.ViewContext"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="render"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="html" type="org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block"/>
-    </method>
-    <field name="appBaseProt" type="org.apache.hadoop.yarn.api.ApplicationBaseProtocol"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.webapp.ContainerBlock -->
-  <!-- start class org.apache.hadoop.yarn.server.webapp.WebPageUtils -->
-  <class name="WebPageUtils" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="WebPageUtils"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="appsTableInit" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="appsTableInit" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="isFairSchedulerPage" type="boolean"/>
-    </method>
-    <method name="attemptsTableInit" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="containersTableInit" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.webapp.WebPageUtils -->
-  <!-- start class org.apache.hadoop.yarn.server.webapp.WebServices -->
-  <class name="WebServices" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="WebServices" type="org.apache.hadoop.yarn.api.ApplicationBaseProtocol"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="getApps" return="org.apache.hadoop.yarn.server.webapp.dao.AppsInfo"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="req" type="javax.servlet.http.HttpServletRequest"/>
-      <param name="res" type="javax.servlet.http.HttpServletResponse"/>
-      <param name="stateQuery" type="java.lang.String"/>
-      <param name="statesQuery" type="java.util.Set"/>
-      <param name="finalStatusQuery" type="java.lang.String"/>
-      <param name="userQuery" type="java.lang.String"/>
-      <param name="queueQuery" type="java.lang.String"/>
-      <param name="count" type="java.lang.String"/>
-      <param name="startedBegin" type="java.lang.String"/>
-      <param name="startedEnd" type="java.lang.String"/>
-      <param name="finishBegin" type="java.lang.String"/>
-      <param name="finishEnd" type="java.lang.String"/>
-      <param name="applicationTypes" type="java.util.Set"/>
-    </method>
-    <method name="getApp" return="org.apache.hadoop.yarn.server.webapp.dao.AppInfo"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="req" type="javax.servlet.http.HttpServletRequest"/>
-      <param name="res" type="javax.servlet.http.HttpServletResponse"/>
-      <param name="appId" type="java.lang.String"/>
-    </method>
-    <method name="getAppAttempts" return="org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="req" type="javax.servlet.http.HttpServletRequest"/>
-      <param name="res" type="javax.servlet.http.HttpServletResponse"/>
-      <param name="appId" type="java.lang.String"/>
-    </method>
-    <method name="getAppAttempt" return="org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="req" type="javax.servlet.http.HttpServletRequest"/>
-      <param name="res" type="javax.servlet.http.HttpServletResponse"/>
-      <param name="appId" type="java.lang.String"/>
-      <param name="appAttemptId" type="java.lang.String"/>
-    </method>
-    <method name="getContainers" return="org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="req" type="javax.servlet.http.HttpServletRequest"/>
-      <param name="res" type="javax.servlet.http.HttpServletResponse"/>
-      <param name="appId" type="java.lang.String"/>
-      <param name="appAttemptId" type="java.lang.String"/>
-    </method>
-    <method name="getContainer" return="org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="req" type="javax.servlet.http.HttpServletRequest"/>
-      <param name="res" type="javax.servlet.http.HttpServletResponse"/>
-      <param name="appId" type="java.lang.String"/>
-      <param name="appAttemptId" type="java.lang.String"/>
-      <param name="containerId" type="java.lang.String"/>
-    </method>
-    <method name="init"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="response" type="javax.servlet.http.HttpServletResponse"/>
-    </method>
-    <method name="parseQueries" return="java.util.Set"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="queries" type="java.util.Set"/>
-      <param name="isState" type="boolean"/>
-    </method>
-    <method name="parseApplicationId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="appId" type="java.lang.String"/>
-    </method>
-    <method name="parseApplicationAttemptId" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="appAttemptId" type="java.lang.String"/>
-    </method>
-    <method name="parseContainerId" return="org.apache.hadoop.yarn.api.records.ContainerId"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="containerId" type="java.lang.String"/>
-    </method>
-    <method name="validateIds"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
-      <param name="appAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
-      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
-    </method>
-    <method name="getUser" return="org.apache.hadoop.security.UserGroupInformation"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="req" type="javax.servlet.http.HttpServletRequest"/>
-    </method>
-    <field name="appBaseProt" type="org.apache.hadoop.yarn.api.ApplicationBaseProtocol"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.webapp.WebServices -->
 </package>
 <package name="org.apache.hadoop.yarn.server.webapp.dao">
   <!-- start class org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo -->

http://git-wip-us.apache.org/repos/asf/hadoop/blob/875062b5/hadoop-yarn-project/hadoop-yarn/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml
index 3353e33..a41b928 100644
--- a/hadoop-yarn-project/hadoop-yarn/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/pom.xml
@@ -186,7 +186,7 @@
                     <mkdir dir="${project.build.directory}/site/jdiff/xml"/>
 
                     <javadoc maxmemory="${jdiff.javadoc.maxmemory}" verbose="yes">
-                      <doclet name="org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet"
+                      <doclet name="org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet"
                               path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar">
                         <param name="-apidir" value="${project.build.directory}/site/jdiff/xml"/>
                         <param name="-apiname" value="${project.name} ${project.version}"/>
@@ -201,7 +201,7 @@
                       destdir="${project.build.directory}/site/jdiff/xml"
                       sourceFiles="${dev-support.relative.dir}/jdiff/Null.java"
                              maxmemory="${jdiff.javadoc.maxmemory}">
-                      <doclet name="org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet"
+                      <doclet name="org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet"
                               path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar:${project.build.directory}/xerces.jar">
                         <param name="-oldapi" value="${project.name} ${jdiff.stable.api}"/>
                         <param name="-newapi" value="${project.name} ${project.version}"/>




[19/57] [abbrv] hadoop git commit: HADOOP-11780. Prevent IPC reader thread death. Contributed by Daryn Sharp.

Posted by in...@apache.org.
HADOOP-11780. Prevent IPC reader thread death. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e19b37ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e19b37ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e19b37ea

Branch: refs/heads/HDFS-10467
Commit: e19b37ead23805c7ed45bdcbfa7fdc8898cde7b2
Parents: 9b0fd01
Author: Kihwal Lee <ki...@apache.org>
Authored: Wed Sep 28 08:24:21 2016 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Wed Sep 28 08:24:21 2016 -0500

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/ipc/Server.java | 25 +++++++++++++++++---
 1 file changed, 22 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e19b37ea/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 531d574..f509d71 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -110,6 +110,7 @@ import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
@@ -956,10 +957,16 @@ public abstract class Server {
             while (iter.hasNext()) {
               key = iter.next();
               iter.remove();
-              if (key.isValid()) {
+              try {
                 if (key.isReadable()) {
                   doRead(key);
                 }
+              } catch (CancelledKeyException cke) {
+                // something else closed the connection, ex. responder or
+                // the listener doing an idle scan.  ignore it and let them
+                // clean up.
+                LOG.info(Thread.currentThread().getName() +
+                    ": connection aborted from " + key.attachment());
               }
               key = null;
             }
@@ -969,6 +976,9 @@ public abstract class Server {
             }
           } catch (IOException ex) {
             LOG.error("Error in Reader", ex);
+          } catch (Throwable re) {
+            LOG.fatal("Bug in read selector!", re);
+            ExitUtil.terminate(1, "Bug in read selector!");
           }
         }
       }
@@ -1187,8 +1197,17 @@ public abstract class Server {
             SelectionKey key = iter.next();
             iter.remove();
             try {
-              if (key.isValid() && key.isWritable()) {
-                  doAsyncWrite(key);
+              if (key.isWritable()) {
+                doAsyncWrite(key);
+              }
+            } catch (CancelledKeyException cke) {
+              // something else closed the connection, ex. reader or the
+              // listener doing an idle scan.  ignore it and let them clean
+              // up
+              RpcCall call = (RpcCall)key.attachment();
+              if (call != null) {
+                LOG.info(Thread.currentThread().getName() +
+                    ": connection aborted from " + call.connection);
               }
             } catch (IOException e) {
               LOG.info(Thread.currentThread().getName() + ": doAsyncWrite threw exception " + e);
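
The shape of the fix above is the interesting part: a per-key key.isValid() pre-check is inherently racy, because another thread (the responder, or the listener's idle scan) can cancel the key between the check and the I/O call, so the patch catches CancelledKeyException per key instead, and adds a last-ditch Throwable handler so an unexpected bug terminates the daemon via ExitUtil rather than silently killing the reader thread. A standalone sketch of the same pattern against plain java.nio (handleKey is a placeholder, not a Hadoop API):

import java.io.IOException;
import java.nio.channels.CancelledKeyException;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.util.Iterator;

public class HardenedSelectorLoop {

  static void run(Selector selector) {
    while (true) {
      try {
        selector.select();
        Iterator<SelectionKey> iter = selector.selectedKeys().iterator();
        while (iter.hasNext()) {
          SelectionKey key = iter.next();
          iter.remove();
          try {
            // No key.isValid() pre-check: another thread may cancel the
            // key at any moment, so handle the exception instead.
            if (key.isReadable()) {
              handleKey(key);
            }
          } catch (CancelledKeyException cke) {
            // Something else closed the connection; let it clean up.
          }
        }
      } catch (IOException ioe) {
        // Transient I/O trouble: log and keep the loop alive.
        System.err.println("Error in selector loop: " + ioe);
      } catch (Throwable t) {
        // A genuine bug must not silently kill this thread and strand
        // open connections; fail fast (the patch uses ExitUtil here).
        t.printStackTrace();
        Runtime.getRuntime().exit(1);
      }
    }
  }

  private static void handleKey(SelectionKey key) throws IOException {
    // placeholder for the real per-connection work (doRead/doAsyncWrite)
  }
}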




[43/57] [abbrv] hadoop git commit: HADOOP-12974. Create a CachingGetSpaceUsed implementation that uses df. Contributed by Elliott Clark.

Posted by in...@apache.org.
HADOOP-12974. Create a CachingGetSpaceUsed implementation that uses df. Contributed by Elliott Clark.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/57aec2b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/57aec2b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/57aec2b4

Branch: refs/heads/HDFS-10467
Commit: 57aec2b46b0e46b73a1f49927e30e2c41138d535
Parents: 7fad122
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Fri Sep 30 12:58:37 2016 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Fri Sep 30 12:58:37 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/fs/DFCachingGetSpaceUsed.java | 48 +++++++++++++
 .../src/main/java/org/apache/hadoop/fs/DU.java  |  8 +--
 .../hadoop/fs/TestDFCachingGetSpaceUsed.java    | 75 ++++++++++++++++++++
 3 files changed, 126 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/57aec2b4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DFCachingGetSpaceUsed.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DFCachingGetSpaceUsed.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DFCachingGetSpaceUsed.java
new file mode 100644
index 0000000..6e8cd46
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DFCachingGetSpaceUsed.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.IOException;
+
+/**
+ * Fast but inaccurate class to tell how much space HDFS is using.
+ * This class assumes that the entire mount is used for
+ * HDFS and that no two HDFS data dirs are on the same disk.
+ *
+ * To use it, set fs.getspaceused.classname
+ * to org.apache.hadoop.fs.DFCachingGetSpaceUsed in your core-site.xml.
+ *
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Evolving
+public class DFCachingGetSpaceUsed extends CachingGetSpaceUsed {
+  private final DF df;
+
+  public DFCachingGetSpaceUsed(Builder builder) throws IOException {
+    super(builder);
+    this.df = new DF(builder.getPath(), builder.getInterval());
+  }
+
+  @Override
+  protected void refresh() {
+    this.used.set(df.getUsed());
+  }
+}
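
For anyone wanting to exercise the new class directly, the builder path used by the new test further down also works outside of tests; a minimal sketch, assuming a writable local directory on the mount you want to measure:

import java.io.File;
import org.apache.hadoop.fs.CachingGetSpaceUsed;
import org.apache.hadoop.fs.DFCachingGetSpaceUsed;
import org.apache.hadoop.fs.GetSpaceUsed;

public class DfUsageSketch {
  public static void main(String[] args) throws Exception {
    GetSpaceUsed used = new CachingGetSpaceUsed.Builder()
        .setPath(new File("/tmp"))            // directory on the tracked mount
        .setInterval(60_000L)                 // refresh roughly once a minute
        .setKlass(DFCachingGetSpaceUsed.class)
        .build();
    System.out.println("bytes used: " + used.getUsed());
    ((DFCachingGetSpaceUsed) used).close();   // stop the background refresh
  }
}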

http://git-wip-us.apache.org/repos/asf/hadoop/blob/57aec2b4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
index 20e8202..b64a19d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
@@ -31,12 +31,13 @@ import java.io.IOException;
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
 public class DU extends CachingGetSpaceUsed {
-  private DUShell duShell;
+  private final DUShell duShell;
 
   @VisibleForTesting
-  public DU(File path, long interval, long jitter, long initialUsed)
+  public DU(File path, long interval, long jitter, long initialUsed)
       throws IOException {
     super(path, interval, jitter, initialUsed);
+    this.duShell = new DUShell();
   }
 
   public DU(CachingGetSpaceUsed.Builder builder) throws IOException {
@@ -48,9 +49,6 @@ public class DU extends CachingGetSpaceUsed {
 
   @Override
   protected synchronized void refresh() {
-    if (duShell == null) {
-      duShell = new DUShell();
-    }
     try {
       duShell.startRefresh();
     } catch (IOException ioe) {
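
The DU cleanup above is a small idiom worth noting: constructing the shell helper eagerly and making the field final removes the lazy null check from the synchronized refresh() path, and a final field is also safely published to other threads once the constructor returns. A generic before/after sketch (FooShell is a placeholder, not a Hadoop class):

// Before: lazy init forces a null check on every refresh() call.
class LazyInit {
  private FooShell shell;                    // mutable, possibly null
  synchronized void refresh() {
    if (shell == null) {
      shell = new FooShell();
    }
    shell.run();
  }
}

// After: eager init; the final field is never null and is safely
// published to other threads when the constructor completes.
class EagerInit {
  private final FooShell shell = new FooShell();
  synchronized void refresh() {
    shell.run();
  }
}

class FooShell {                             // placeholder helper
  void run() { /* shell out to du/df here */ }
}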

http://git-wip-us.apache.org/repos/asf/hadoop/blob/57aec2b4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFCachingGetSpaceUsed.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFCachingGetSpaceUsed.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFCachingGetSpaceUsed.java
new file mode 100644
index 0000000..3def5d5
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFCachingGetSpaceUsed.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import org.apache.commons.lang.RandomStringUtils;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+
+
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test to make sure the df-backed implementation can be built and run.
+ */
+public class TestDFCachingGetSpaceUsed {
+  final static private File DF_DIR = GenericTestUtils.getTestDir("testdfspace");
+  public static final int FILE_SIZE = 1024;
+
+  @Before
+  public void setUp() {
+    FileUtil.fullyDelete(DF_DIR);
+    assertTrue(DF_DIR.mkdirs());
+  }
+
+  @After
+  public void tearDown() throws IOException {
+    FileUtil.fullyDelete(DF_DIR);
+  }
+
+  @Test
+  public void testCanBuildRun() throws Exception {
+    File file = writeFile("testCanBuild");
+
+    GetSpaceUsed instance = new CachingGetSpaceUsed.Builder()
+        .setPath(file)
+        .setInterval(50060)
+        .setKlass(DFCachingGetSpaceUsed.class)
+        .build();
+    assertTrue(instance instanceof DFCachingGetSpaceUsed);
+    assertTrue(instance.getUsed() >= FILE_SIZE - 20);
+    ((DFCachingGetSpaceUsed) instance).close();
+  }
+
+  private File writeFile(String fileName) throws IOException {
+    File f = new File(DF_DIR, fileName);
+    assertTrue(f.createNewFile());
+    RandomAccessFile randomAccessFile = new RandomAccessFile(f, "rws");
+    randomAccessFile.writeUTF(RandomStringUtils.randomAlphabetic(FILE_SIZE));
+    randomAccessFile.getFD().sync();
+    randomAccessFile.close();
+    return f;
+  }
+
+}
\ No newline at end of file




[02/57] [abbrv] hadoop git commit: HADOOP-13544. JDiff reports unnecessarily show unannotated APIs and cause confusion while our javadocs only show annotated and public APIs. (vinodkv via wangda)

Posted by in...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/875062b5/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.7.2.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.7.2.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.7.2.xml
index f877336..eec6dfa 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.7.2.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.7.2.xml
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
 <!-- Generated by the JDiff Javadoc doclet -->
 <!-- (http://www.jdiff.org) -->
-<!-- on Thu May 12 17:47:43 PDT 2016 -->
+<!-- on Wed Aug 24 13:55:05 PDT 2016 -->
 
 <api
   xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
@@ -9,7 +9,7 @@
   name="hadoop-yarn-common 2.7.2"
   jdversion="1.0.9">
 
-<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/jdiff.jar -verbose -classpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/classes:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/hadoop-common-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/vinodkv/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/vinodkv/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/vinodkv/.m2/repository/commons-net/commons-net/3.1/commons-
 net-3.1.jar:/Users/vinodkv/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/vinodkv/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/vinodkv/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/vinodkv/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/vinodkv/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/vinodkv/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils
 -core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/Users/vinodkv/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/vinodkv/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/vinodkv/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/vinodkv/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-util/1.0
 .0-M20/api-util-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/vinodkv/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/vinodkv/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/vinodkv/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/vinodkv/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/vinodkv/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.7.2.jar:/Users/vinodkv/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/vinodkv/.m2/repo
 sitory/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/vinodkv/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/vinodkv/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/vinodkv/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/vinodkv/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/vinodkv/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1
 .9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/vinodkv/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/vinodkv/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/vinodkv/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.7.0_45.jdk/Contents/Home/lib/tools.jar:/Users/vinodkv/.m2/repository/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/Users/vinodkv/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/vinodkv/.m2/repository/commons-io/commons-io/2.4/commons
 -io-2.4.jar:/Users/vinodkv/.m2/repository/com/google/inject/guice/3.0/guice-3.0.jar:/Users/vinodkv/.m2/repository/javax/inject/javax.inject/1/javax.inject-1.jar:/Users/vinodkv/.m2/repository/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/vinodkv/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/vinodkv/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/Users/vinodkv/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar -sourcepath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java -apidir /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hado
 op-yarn-project/hadoop-yarn/hadoop-yarn-common/target/site/jdiff/xml -apiname hadoop-yarn-common 2.7.2 -->
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/jdiff.jar -verbose -classpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/classes:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/hadoop-common-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/vinodkv/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/vinodkv/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/vinodkv/.m2/repository/commons-net/commons-net/3.1/commons-n
 et-3.1.jar:/Users/vinodkv/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/vinodkv/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/vinodkv/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/vinodkv/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/vinodkv/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/vinodkv/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils-
 core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/Users/vinodkv/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/vinodkv/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/vinodkv/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/vinodkv/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-util/1.0.
 0-M20/api-util-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/vinodkv/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/vinodkv/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/vinodkv/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/vinodkv/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/vinodkv/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.7.2.jar:/Users/vinodkv/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/vinodkv/.m2/repos
 itory/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/vinodkv/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/vinodkv/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/vinodkv/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/vinodkv/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/vinodkv/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.
 9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/vinodkv/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/vinodkv/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/vinodkv/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_40.jdk/Contents/Home/lib/tools.jar:/Users/vinodkv/.m2/repository/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/Users/vinodkv/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/vinodkv/.m2/repository/commons-io/commons-io/2.4/commons-
 io-2.4.jar:/Users/vinodkv/.m2/repository/com/google/inject/guice/3.0/guice-3.0.jar:/Users/vinodkv/.m2/repository/javax/inject/javax.inject/1/javax.inject-1.jar:/Users/vinodkv/.m2/repository/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/vinodkv/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/vinodkv/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/Users/vinodkv/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar -sourcepath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDi
 ffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/jdiff.jar -apidir /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/site/jdiff/xml -apiname hadoop-yarn-common 2.7.2 -->
 <package name="org.apache.hadoop.yarn">
   <!-- start class org.apache.hadoop.yarn.ContainerLogAppender -->
   <class name="ContainerLogAppender" extends="org.apache.log4j.FileAppender"
@@ -268,44 +268,6 @@
     </method>
   </class>
   <!-- end class org.apache.hadoop.yarn.client.NMProxy -->
-  <!-- start class org.apache.hadoop.yarn.client.RMHAServiceTarget -->
-  <class name="RMHAServiceTarget" extends="org.apache.hadoop.ha.HAServiceTarget"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="RMHAServiceTarget" type="org.apache.hadoop.yarn.conf.YarnConfiguration"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </constructor>
-    <method name="getAddress" return="java.net.InetSocketAddress"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getZKFCAddress" return="java.net.InetSocketAddress"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getFencer" return="org.apache.hadoop.ha.NodeFencer"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="checkFencingConfigured"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="BadFencingConfigurationException" type="org.apache.hadoop.ha.BadFencingConfigurationException"/>
-    </method>
-    <method name="isAutoFailoverEnabled" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.client.RMHAServiceTarget -->
   <!-- start class org.apache.hadoop.yarn.client.RMProxy -->
   <class name="RMProxy" extends="java.lang.Object"
     abstract="false"
@@ -629,12 +591,12 @@
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
     <method name="getEventHandler" return="org.apache.hadoop.yarn.event.EventHandler"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </method>
     <method name="register"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="eventType" type="java.lang.Class"/>
@@ -661,17 +623,17 @@
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
     <method name="getType" return="TYPE"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </method>
     <method name="getTimestamp" return="long"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </method>
     <method name="toString" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </method>
@@ -685,7 +647,7 @@
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
     <method name="handle"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="event" type="T"/>
@@ -720,15 +682,15 @@
     static="true" final="false" visibility="public"
     deprecated="not deprecated">
     <implements name="org.apache.hadoop.io.Writable"/>
-    <constructor name="AggregatedLogFormat.LogKey"
+    <constructor name="LogKey"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </constructor>
-    <constructor name="AggregatedLogFormat.LogKey" type="org.apache.hadoop.yarn.api.records.ContainerId"
+    <constructor name="LogKey" type="org.apache.hadoop.yarn.api.records.ContainerId"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </constructor>
-    <constructor name="AggregatedLogFormat.LogKey" type="java.lang.String"
+    <constructor name="LogKey" type="java.lang.String"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </constructor>
@@ -755,7 +717,7 @@
     abstract="false"
     static="true" final="false" visibility="public"
     deprecated="not deprecated">
-    <constructor name="AggregatedLogFormat.LogReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path"
+    <constructor name="LogReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <exception name="IOException" type="java.io.IOException"/>
@@ -788,842 +750,92 @@
     <method name="next" return="java.io.DataInputStream"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="key" type="org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Read the next key and return the value-stream.
-
- @param key
- @return the valueStream if there are more keys or null otherwise.
- @throws IOException]]>
-      </doc>
-    </method>
-    <method name="readAcontainerLogs"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="valueStream" type="java.io.DataInputStream"/>
-      <param name="writer" type="java.io.Writer"/>
-      <param name="logUploadedTime" type="long"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Writes all logs for a single container to the provided writer.
- @param valueStream
- @param writer
- @param logUploadedTime
- @throws IOException]]>
-      </doc>
-    </method>
-    <method name="readAcontainerLogs"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="valueStream" type="java.io.DataInputStream"/>
-      <param name="writer" type="java.io.Writer"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Writes all logs for a single container to the provided writer.
- @param valueStream
- @param writer
- @throws IOException]]>
-      </doc>
-    </method>
-    <method name="readAContainerLogsForALogType"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="valueStream" type="java.io.DataInputStream"/>
-      <param name="out" type="java.io.PrintStream"/>
-      <param name="logUploadedTime" type="long"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Keep calling this till you get a {@link EOFException} for getting logs of
- all types for a single container.
-
- @param valueStream
- @param out
- @param logUploadedTime
- @throws IOException]]>
-      </doc>
-    </method>
-    <method name="readAContainerLogsForALogType"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="valueStream" type="java.io.DataInputStream"/>
-      <param name="out" type="java.io.PrintStream"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Keep calling this till you get a {@link EOFException} for getting logs of
- all types for a single container.
-
- @param valueStream
- @param out
- @throws IOException]]>
-      </doc>
-    </method>
-    <method name="close"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader -->
-  <!-- start class org.apache.hadoop.yarn.logaggregation.LogCLIHelpers -->
-  <class name="LogCLIHelpers" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.conf.Configurable"/>
-    <constructor name="LogCLIHelpers"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="setConf"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-    </method>
-    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.logaggregation.LogCLIHelpers -->
-</package>
-<package name="org.apache.hadoop.yarn.nodelabels">
-  <!-- start class org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager -->
-  <class name="CommonNodeLabelsManager" extends="org.apache.hadoop.service.AbstractService"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="CommonNodeLabelsManager"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="handleStoreEvent"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="event" type="org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEvent"/>
-    </method>
-    <method name="initDispatcher"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-    </method>
-    <method name="serviceInit"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <exception name="Exception" type="java.lang.Exception"/>
-    </method>
-    <method name="initNodeLabelStore"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <exception name="Exception" type="java.lang.Exception"/>
-    </method>
-    <method name="startDispatcher"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </method>
-    <method name="serviceStart"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <exception name="Exception" type="java.lang.Exception"/>
-    </method>
-    <method name="stopDispatcher"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </method>
-    <method name="serviceStop"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <exception name="Exception" type="java.lang.Exception"/>
-    </method>
-    <method name="addToCluserNodeLabels"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="labels" type="java.util.Set"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Add multiple node labels to repository
-
- @param labels
-          new node labels added]]>
-      </doc>
-    </method>
-    <method name="checkAddLabelsToNode"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="addedLabelsToNode" type="java.util.Map"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="addLabelsToNode"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="addedLabelsToNode" type="java.util.Map"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[add more labels to nodes
-
- @param addedLabelsToNode node {@literal ->} labels map]]>
-      </doc>
-    </method>
-    <method name="checkRemoveFromClusterNodeLabels"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="labelsToRemove" type="java.util.Collection"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="internalRemoveFromClusterNodeLabels"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="labelsToRemove" type="java.util.Collection"/>
-    </method>
-    <method name="removeFromClusterNodeLabels"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="labelsToRemove" type="java.util.Collection"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Remove multiple node labels from repository
-
- @param labelsToRemove
-          node labels to remove
- @throws IOException]]>
-      </doc>
-    </method>
-    <method name="checkRemoveLabelsFromNode"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="removeLabelsFromNode" type="java.util.Map"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="removeNodeFromLabels"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="node" type="org.apache.hadoop.yarn.api.records.NodeId"/>
-      <param name="labels" type="java.util.Set"/>
-    </method>
-    <method name="internalUpdateLabelsOnNodes"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="nodeToLabels" type="java.util.Map"/>
-      <param name="op" type="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.NodeLabelUpdateOperation"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="removeLabelsFromNode"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="removeLabelsFromNode" type="java.util.Map"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[remove labels from nodes; labels being removed must be contained by these
- nodes
-
- @param removeLabelsFromNode node {@literal ->} labels map]]>
-      </doc>
-    </method>
-    <method name="checkReplaceLabelsOnNode"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="replaceLabelsToNode" type="java.util.Map"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="replaceLabelsOnNode"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="replaceLabelsToNode" type="java.util.Map"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[replace labels on nodes
-
- @param replaceLabelsToNode node {@literal ->} labels map]]>
-      </doc>
-    </method>
-    <method name="getNodeLabels" return="java.util.Map"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Get mapping of nodes to labels
-
- @return nodes to labels map]]>
-      </doc>
-    </method>
-    <method name="getLabelsToNodes" return="java.util.Map"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Get mapping of labels to nodes for all the labels.
-
- @return labels to nodes map]]>
-      </doc>
-    </method>
-    <method name="getLabelsToNodes" return="java.util.Map"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="labels" type="java.util.Set"/>
-      <doc>
-      <![CDATA[Get mapping of labels to nodes for specified set of labels.
-
- @param labels set of labels for which labels to nodes mapping will be
-        returned.
- @return labels to nodes map]]>
-      </doc>
-    </method>
-    <method name="getClusterNodeLabels" return="java.util.Set"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Get existing valid labels in repository
-
- @return existing valid labels in repository]]>
-      </doc>
-    </method>
-    <method name="normalizeLabel" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="label" type="java.lang.String"/>
-    </method>
-    <method name="getNMInNodeSet" return="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Node"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
-    </method>
-    <method name="getNMInNodeSet" return="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Node"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
-      <param name="map" type="java.util.Map"/>
-    </method>
-    <method name="getNMInNodeSet" return="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Node"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
-      <param name="map" type="java.util.Map"/>
-      <param name="checkRunning" type="boolean"/>
-    </method>
-    <method name="getLabelsByNode" return="java.util.Set"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
-    </method>
-    <method name="getLabelsByNode" return="java.util.Set"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
-      <param name="map" type="java.util.Map"/>
-    </method>
-    <method name="createNodeIfNonExisted"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="createHostIfNonExisted"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="hostName" type="java.lang.String"/>
-    </method>
-    <method name="normalizeNodeIdToLabels" return="java.util.Map"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="nodeIdToLabels" type="java.util.Map"/>
-    </method>
-    <field name="LOG" type="org.apache.commons.logging.Log"
-      transient="false" volatile="false"
-      static="true" final="true" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-    <field name="EMPTY_STRING_SET" type="java.util.Set"
-      transient="false" volatile="false"
-      static="true" final="true" visibility="public"
-      deprecated="not deprecated">
-    </field>
-    <field name="ANY" type="java.lang.String"
-      transient="false" volatile="false"
-      static="true" final="true" visibility="public"
-      deprecated="not deprecated">
-    </field>
-    <field name="ACCESS_ANY_LABEL_SET" type="java.util.Set"
-      transient="false" volatile="false"
-      static="true" final="true" visibility="public"
-      deprecated="not deprecated">
-    </field>
-    <field name="WILDCARD_PORT" type="int"
-      transient="false" volatile="false"
-      static="true" final="true" visibility="public"
-      deprecated="not deprecated">
-    </field>
-    <field name="NODE_LABELS_NOT_ENABLED_ERR" type="java.lang.String"
-      transient="false" volatile="false"
-      static="true" final="true" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Error messages]]>
-      </doc>
-    </field>
-    <field name="NO_LABEL" type="java.lang.String"
-      transient="false" volatile="false"
-      static="true" final="true" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[If a user doesn't specify the label of a queue or node, it belongs to
- DEFAULT_LABEL]]>
-      </doc>
-    </field>
-    <field name="dispatcher" type="org.apache.hadoop.yarn.event.Dispatcher"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-    <field name="labelCollections" type="java.util.concurrent.ConcurrentMap"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-    <field name="nodeCollections" type="java.util.concurrent.ConcurrentMap"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-    <field name="readLock" type="java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock"
-      transient="false" volatile="false"
-      static="false" final="true" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-    <field name="writeLock" type="java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock"
-      transient="false" volatile="false"
-      static="false" final="true" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-    <field name="store" type="org.apache.hadoop.yarn.nodelabels.NodeLabelsStore"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager -->
-  <!-- start class org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Host -->
-  <class name="CommonNodeLabelsManager.Host" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="protected"
-    deprecated="not deprecated">
-    <constructor name="CommonNodeLabelsManager.Host"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="copy" return="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Host"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <field name="labels" type="java.util.Set"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </field>
-    <field name="nms" type="java.util.Map"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </field>
-    <doc>
-    <![CDATA[A <code>Host</code> can have multiple <code>Node</code>s]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Host -->
-  <!-- start class org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Node -->
-  <class name="CommonNodeLabelsManager.Node" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="protected"
-    deprecated="not deprecated">
-    <constructor name="CommonNodeLabelsManager.Node" type="org.apache.hadoop.yarn.api.records.NodeId"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="copy" return="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Node"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <field name="labels" type="java.util.Set"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </field>
-    <field name="resource" type="org.apache.hadoop.yarn.api.records.Resource"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </field>
-    <field name="running" type="boolean"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </field>
-    <field name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </field>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager.Node -->
-  <!-- start class org.apache.hadoop.yarn.nodelabels.FileSystemNodeLabelsStore -->
-  <class name="FileSystemNodeLabelsStore" extends="org.apache.hadoop.yarn.nodelabels.NodeLabelsStore"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="FileSystemNodeLabelsStore" type="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="init"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <exception name="Exception" type="java.lang.Exception"/>
-    </method>
-    <method name="close"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="updateNodeToLabelsMappings"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="nodeToLabels" type="java.util.Map"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="storeNewClusterNodeLabels"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="labels" type="java.util.Set"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="removeClusterNodeLabels"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="labels" type="java.util.Collection"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="recover"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <field name="LOG" type="org.apache.commons.logging.Log"
-      transient="false" volatile="false"
-      static="true" final="true" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-    <field name="DEFAULT_DIR_NAME" type="java.lang.String"
-      transient="false" volatile="false"
-      static="true" final="true" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-    <field name="MIRROR_FILENAME" type="java.lang.String"
-      transient="false" volatile="false"
-      static="true" final="true" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-    <field name="EDITLOG_FILENAME" type="java.lang.String"
-      transient="false" volatile="false"
-      static="true" final="true" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.nodelabels.FileSystemNodeLabelsStore -->
-  <!-- start class org.apache.hadoop.yarn.nodelabels.FileSystemNodeLabelsStore.SerializedLogType -->
-  <class name="FileSystemNodeLabelsStore.SerializedLogType" extends="java.lang.Enum"
-    abstract="false"
-    static="true" final="true" visibility="protected"
-    deprecated="not deprecated">
-    <method name="values" return="org.apache.hadoop.yarn.nodelabels.FileSystemNodeLabelsStore.SerializedLogType[]"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="valueOf" return="org.apache.hadoop.yarn.nodelabels.FileSystemNodeLabelsStore.SerializedLogType"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.nodelabels.FileSystemNodeLabelsStore.SerializedLogType -->
-  <!-- start class org.apache.hadoop.yarn.nodelabels.NodeLabel -->
-  <class name="NodeLabel" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="java.lang.Comparable"/>
-    <constructor name="NodeLabel" type="java.lang.String"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <constructor name="NodeLabel" type="java.lang.String, org.apache.hadoop.yarn.api.records.Resource, int"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="addNodeId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="node" type="org.apache.hadoop.yarn.api.records.NodeId"/>
-    </method>
-    <method name="removeNodeId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="node" type="org.apache.hadoop.yarn.api.records.NodeId"/>
-    </method>
-    <method name="getAssociatedNodeIds" return="java.util.Set"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="addNode"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="nodeRes" type="org.apache.hadoop.yarn.api.records.Resource"/>
-    </method>
-    <method name="removeNode"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="nodeRes" type="org.apache.hadoop.yarn.api.records.Resource"/>
-    </method>
-    <method name="getResource" return="org.apache.hadoop.yarn.api.records.Resource"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getNumActiveNMs" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getLabelName" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getCopy" return="org.apache.hadoop.yarn.nodelabels.NodeLabel"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="compareTo" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="o" type="org.apache.hadoop.yarn.nodelabels.NodeLabel"/>
-    </method>
-    <method name="equals" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="obj" type="java.lang.Object"/>
-    </method>
-    <method name="hashCode" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.nodelabels.NodeLabel -->
-  <!-- start class org.apache.hadoop.yarn.nodelabels.NodeLabelsStore -->
-  <class name="NodeLabelsStore" extends="java.lang.Object"
-    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="java.io.Closeable"/>
-    <constructor name="NodeLabelsStore" type="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="updateNodeToLabelsMappings"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="nodeToLabels" type="java.util.Map"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Store node {@literal ->} label]]>
-      </doc>
-    </method>
-    <method name="storeNewClusterNodeLabels"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="label" type="java.util.Set"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Store new labels]]>
-      </doc>
-    </method>
-    <method name="removeClusterNodeLabels"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="labels" type="java.util.Collection"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Remove labels]]>
-      </doc>
-    </method>
-    <method name="recover"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Recover labels and node to labels mappings from store]]>
-      </doc>
-    </method>
-    <method name="init"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <exception name="Exception" type="java.lang.Exception"/>
-    </method>
-    <method name="getNodeLabelsManager" return="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <field name="mgr" type="org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager"
-      transient="false" volatile="false"
-      static="false" final="true" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.nodelabels.NodeLabelsStore -->
-</package>
-<package name="org.apache.hadoop.yarn.nodelabels.event">
-  <!-- start class org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEvent -->
-  <class name="NodeLabelsStoreEvent" extends="org.apache.hadoop.yarn.event.AbstractEvent"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="NodeLabelsStoreEvent" type="org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEventType"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEvent -->
-  <!-- start class org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEventType -->
-  <class name="NodeLabelsStoreEventType" extends="java.lang.Enum"
-    abstract="false"
-    static="false" final="true" visibility="public"
-    deprecated="not deprecated">
-    <method name="values" return="org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEventType[]"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Read the next key and return the value-stream.
+
+ @param key
+ @return the valueStream if there are more keys or null otherwise.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readAcontainerLogs"
       abstract="false" native="false" synchronized="false"
       static="true" final="false" visibility="public"
       deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="writer" type="java.io.Writer"/>
+      <param name="logUploadedTime" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Writes all logs for a single container to the provided writer.
+ @param valueStream
+ @param writer
+ @param logUploadedTime
+ @throws IOException]]>
+      </doc>
     </method>
-    <method name="valueOf" return="org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEventType"
+    <method name="readAcontainerLogs"
       abstract="false" native="false" synchronized="false"
       static="true" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="writer" type="java.io.Writer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Writes all logs for a single container to the provided writer.
+ @param valueStream
+ @param writer
+ @throws IOException]]>
+      </doc>
     </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEventType -->
-  <!-- start class org.apache.hadoop.yarn.nodelabels.event.RemoveClusterNodeLabels -->
-  <class name="RemoveClusterNodeLabels" extends="org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEvent"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="RemoveClusterNodeLabels" type="java.util.Collection"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="getLabels" return="java.util.Collection"
+    <method name="readAContainerLogsForALogType"
       abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
+      static="true" final="false" visibility="public"
       deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="out" type="java.io.PrintStream"/>
+      <param name="logUploadedTime" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Keep calling this until you get an {@link EOFException} while reading logs of
+ all types for a single container.
+
+ @param valueStream
+ @param out
+ @param logUploadedTime
+ @throws IOException]]>
+      </doc>
     </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.nodelabels.event.RemoveClusterNodeLabels -->
-  <!-- start class org.apache.hadoop.yarn.nodelabels.event.StoreNewClusterNodeLabels -->
-  <class name="StoreNewClusterNodeLabels" extends="org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEvent"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="StoreNewClusterNodeLabels" type="java.util.Set"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="getLabels" return="java.util.Set"
+    <method name="readAContainerLogsForALogType"
       abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
+      static="true" final="false" visibility="public"
       deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="out" type="java.io.PrintStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Keep calling this until you get an {@link EOFException} while reading logs of
+ all types for a single container.
+
+ @param valueStream
+ @param out
+ @throws IOException]]>
+      </doc>
     </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.nodelabels.event.StoreNewClusterNodeLabels -->
-  <!-- start class org.apache.hadoop.yarn.nodelabels.event.UpdateNodeToLabelsMappingsEvent -->
-  <class name="UpdateNodeToLabelsMappingsEvent" extends="org.apache.hadoop.yarn.nodelabels.event.NodeLabelsStoreEvent"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="UpdateNodeToLabelsMappingsEvent" type="java.util.Map"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="getNodeToLabels" return="java.util.Map"
+    <method name="close"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </method>
   </class>
-  <!-- end class org.apache.hadoop.yarn.nodelabels.event.UpdateNodeToLabelsMappingsEvent -->
+  <!-- end class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader -->
+</package>
+<package name="org.apache.hadoop.yarn.nodelabels">
+</package>
+<package name="org.apache.hadoop.yarn.nodelabels.event">
 </package>
 <package name="org.apache.hadoop.yarn.security">
   <!-- start class org.apache.hadoop.yarn.security.AMRMTokenIdentifier -->
@@ -1981,43 +1193,6 @@
     </field>
   </class>
   <!-- end class org.apache.hadoop.yarn.security.NMTokenIdentifier -->
-  <!-- start class org.apache.hadoop.yarn.security.NMTokenSelector -->
-  <class name="NMTokenSelector" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.security.token.TokenSelector"/>
-    <constructor name="NMTokenSelector"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="selectToken" return="org.apache.hadoop.security.token.Token"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="service" type="org.apache.hadoop.io.Text"/>
-      <param name="tokens" type="java.util.Collection"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.security.NMTokenSelector -->
-  <!-- start class org.apache.hadoop.yarn.security.PrivilegedEntity.EntityType -->
-  <class name="PrivilegedEntity.EntityType" extends="java.lang.Enum"
-    abstract="false"
-    static="true" final="true" visibility="public"
-    deprecated="not deprecated">
-    <method name="values" return="org.apache.hadoop.yarn.security.PrivilegedEntity.EntityType[]"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="valueOf" return="org.apache.hadoop.yarn.security.PrivilegedEntity.EntityType"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.security.PrivilegedEntity.EntityType -->
   <!-- start class org.apache.hadoop.yarn.security.SchedulerSecurityInfo -->
   <class name="SchedulerSecurityInfo" extends="org.apache.hadoop.security.SecurityInfo"
     abstract="false"
@@ -2252,25 +1427,6 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager -->
-  <!-- start class org.apache.hadoop.yarn.security.client.ClientToAMTokenSelector -->
-  <class name="ClientToAMTokenSelector" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.security.token.TokenSelector"/>
-    <constructor name="ClientToAMTokenSelector"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="selectToken" return="org.apache.hadoop.security.token.Token"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="service" type="org.apache.hadoop.io.Text"/>
-      <param name="tokens" type="java.util.Collection"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.security.client.ClientToAMTokenSelector -->
   <!-- start class org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier -->
   <class name="RMDelegationTokenIdentifier" extends="org.apache.hadoop.yarn.security.client.YARNDelegationTokenIdentifier"
     abstract="false"
@@ -2306,48 +1462,6 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier -->
-  <!-- start class org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier.Renewer -->
-  <class name="RMDelegationTokenIdentifier.Renewer" extends="org.apache.hadoop.security.token.TokenRenewer"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="RMDelegationTokenIdentifier.Renewer"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="handleKind" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="kind" type="org.apache.hadoop.io.Text"/>
-    </method>
-    <method name="isManaged" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="token" type="org.apache.hadoop.security.token.Token"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="renew" return="long"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="token" type="org.apache.hadoop.security.token.Token"/>
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="cancel"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="token" type="org.apache.hadoop.security.token.Token"/>
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier.Renewer -->
   <!-- start class org.apache.hadoop.yarn.security.client.RMDelegationTokenSelector -->
   <class name="RMDelegationTokenSelector" extends="java.lang.Object"
     abstract="false"
@@ -2422,55 +1536,8 @@
 <package name="org.apache.hadoop.yarn.server.api">
 </package>
 <package name="org.apache.hadoop.yarn.server.api.impl.pb.client">
-  <!-- start class org.apache.hadoop.yarn.server.api.impl.pb.client.SCMAdminProtocolPBClientImpl -->
-  <class name="SCMAdminProtocolPBClientImpl" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.yarn.server.api.SCMAdminProtocol"/>
-    <implements name="java.io.Closeable"/>
-    <constructor name="SCMAdminProtocolPBClientImpl" type="long, java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </constructor>
-    <method name="close"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="runCleanerTask" return="org.apache.hadoop.yarn.server.api.protocolrecords.RunSharedCacheCleanerTaskResponse"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="request" type="org.apache.hadoop.yarn.server.api.protocolrecords.RunSharedCacheCleanerTaskRequest"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.api.impl.pb.client.SCMAdminProtocolPBClientImpl -->
 </package>
 <package name="org.apache.hadoop.yarn.server.api.impl.pb.service">
-  <!-- start class org.apache.hadoop.yarn.server.api.impl.pb.service.SCMAdminProtocolPBServiceImpl -->
-  <class name="SCMAdminProtocolPBServiceImpl" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.yarn.server.api.SCMAdminProtocolPB"/>
-    <constructor name="SCMAdminProtocolPBServiceImpl" type="org.apache.hadoop.yarn.server.api.SCMAdminProtocol"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="runCleanerTask" return="org.apache.hadoop.yarn.proto.YarnServiceProtos.RunSharedCacheCleanerTaskResponseProto"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="controller" type="com.google.protobuf.RpcController"/>
-      <param name="proto" type="org.apache.hadoop.yarn.proto.YarnServiceProtos.RunSharedCacheCleanerTaskRequestProto"/>
-      <exception name="ServiceException" type="com.google.protobuf.ServiceException"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.server.api.impl.pb.service.SCMAdminProtocolPBServiceImpl -->
 </package>
 <package name="org.apache.hadoop.yarn.sharedcache">
   <!-- start interface org.apache.hadoop.yarn.sharedcache.SharedCacheChecksum -->
@@ -2478,7 +1545,7 @@
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
     <method name="computeChecksum" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="in" type="java.io.InputStream"/>
@@ -2519,57 +1586,6 @@
   <!-- end class org.apache.hadoop.yarn.sharedcache.SharedCacheChecksumFactory -->
 </package>
 <package name="org.apache.hadoop.yarn.state">
-  <!-- start class org.apache.hadoop.yarn.state.Graph.Edge -->
-  <class name="Graph.Edge" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="Graph.Edge" type="org.apache.hadoop.yarn.state.Graph.Node, org.apache.hadoop.yarn.state.Graph.Node, java.lang.String"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="sameAs" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="rhs" type="org.apache.hadoop.yarn.state.Graph.Edge"/>
-    </method>
-    <method name="combine" return="org.apache.hadoop.yarn.state.Graph.Edge"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="rhs" type="org.apache.hadoop.yarn.state.Graph.Edge"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.state.Graph.Edge -->
-  <!-- start class org.apache.hadoop.yarn.state.Graph.Node -->
-  <class name="Graph.Node" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="Graph.Node" type="java.lang.String"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="getParent" return="org.apache.hadoop.yarn.state.Graph"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="addEdge" return="org.apache.hadoop.yarn.state.Graph.Node"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="to" type="org.apache.hadoop.yarn.state.Graph.Node"/>
-      <param name="info" type="java.lang.String"/>
-    </method>
-    <method name="getUniqueId" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.state.Graph.Node -->
   <!-- start class org.apache.hadoop.yarn.state.InvalidStateTransitonException -->
   <class name="InvalidStateTransitonException" extends="org.apache.hadoop.yarn.exceptions.YarnRuntimeException"
     abstract="false"
@@ -2596,7 +1612,7 @@
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
     <method name="transition" return="STATE"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="operand" type="OPERAND"/>
@@ -2622,7 +1638,7 @@
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
     <method name="transition"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="operand" type="OPERAND"/>
@@ -2646,12 +1662,12 @@
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
     <method name="getCurrentState" return="STATE"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </method>
     <method name="doTransition" return="STATE"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="eventType" type="EVENTTYPE"/>
@@ -2941,43 +1957,12 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.yarn.util.ApplicationClassLoader -->
-  <!-- start class org.apache.hadoop.yarn.util.AuxiliaryServiceHelper -->
-  <class name="AuxiliaryServiceHelper" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="AuxiliaryServiceHelper"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="getServiceDataFromEnv" return="java.nio.ByteBuffer"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="serviceName" type="java.lang.String"/>
-      <param name="env" type="java.util.Map"/>
-    </method>
-    <method name="setServiceDataIntoEnv"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="serviceName" type="java.lang.String"/>
-      <param name="metaData" type="java.nio.ByteBuffer"/>
-      <param name="env" type="java.util.Map"/>
-    </method>
-    <field name="NM_AUX_SERVICE" type="java.lang.String"
-      transient="false" volatile="false"
-      static="true" final="true" visibility="public"
-      deprecated="not deprecated">
-    </field>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.util.AuxiliaryServiceHelper -->
   <!-- start interface org.apache.hadoop.yarn.util.Clock -->
   <interface name="Clock"    abstract="true"
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
     <method name="getTime" return="long"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </method>


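For context, the AggregatedLogFormat.LogReader methods whose entries move around in this diff are typically driven as a two-level loop. A minimal sketch, with the reader construction elided and the inner loop shaped by the "keep calling until EOFException" javadoc:

    import java.io.DataInputStream;
    import java.io.EOFException;
    import java.io.IOException;
    import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
    import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader;

    public class DumpAggregatedLogs {
      /** Prints every log type of every container behind the given reader. */
      static void dumpAll(LogReader reader) throws IOException {
        LogKey key = new LogKey();
        DataInputStream valueStream = reader.next(key);  // null once keys run out
        while (valueStream != null) {
          System.out.println("Container: " + key);
          try {
            while (true) {  // loop until EOFException, per the javadoc
              LogReader.readAContainerLogsForALogType(valueStream, System.out);
            }
          } catch (EOFException eof) {
            // end of this container's log types
          }
          key = new LogKey();
          valueStream = reader.next(key);
        }
      }
    }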


[17/57] [abbrv] hadoop git commit: YARN-5662. Provide an option to enable ContainerMonitor. Contributed by Jian He.

Posted by in...@apache.org.
YARN-5662. Provide an option to enable ContainerMonitor. Contributed by Jian He.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc2656f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc2656f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc2656f0

Branch: refs/heads/HDFS-10467
Commit: bc2656f09f857fdbc39da6b060cee453d2dec4dc
Parents: 03f519a
Author: Varun Vasudev <vv...@apache.org>
Authored: Wed Sep 28 15:18:18 2016 +0530
Committer: Varun Vasudev <vv...@apache.org>
Committed: Wed Sep 28 15:18:18 2016 +0530

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     |  6 ++-
 .../records/impl/pb/ContainerStatusPBImpl.java  |  2 +
 .../src/main/resources/yarn-default.xml         |  6 +++
 .../monitor/ContainersMonitorImpl.java          | 31 ++++++++--------
 .../monitor/TestContainersMonitor.java          | 39 ++++++++++++++++++++
 5 files changed, 68 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc2656f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 86e8a95..f3009a1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1106,7 +1106,7 @@ public class YarnConfiguration extends Configuration {
   public static final String NM_VMEM_PMEM_RATIO =
     NM_PREFIX + "vmem-pmem-ratio";
   public static final float DEFAULT_NM_VMEM_PMEM_RATIO = 2.1f;
-  
+
   /** Number of Virtual CPU Cores which can be allocated for containers.*/
   public static final String NM_VCORES = NM_PREFIX + "resource.cpu-vcores";
   public static final int DEFAULT_NM_VCORES = 8;
@@ -1259,6 +1259,10 @@ public class YarnConfiguration extends Configuration {
       NM_PREFIX + "resource-monitor.interval-ms";
   public static final int DEFAULT_NM_RESOURCE_MON_INTERVAL_MS = 3000;
 
+  public static final String NM_CONTAINER_MONITOR_ENABLED =
+      NM_PREFIX + "container-monitor.enabled";
+  public static final boolean DEFAULT_NM_CONTAINER_MONITOR_ENABLED = true;
+
   /** How often to monitor containers.*/
   public final static String NM_CONTAINER_MON_INTERVAL_MS =
     NM_PREFIX + "container-monitor.interval-ms";
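The two constants added above are read back through the standard Configuration lookup; a minimal sketch (the class name is invented, the lookup mirrors the ContainersMonitorImpl change later in this commit):

    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class MonitorFlagProbe {
      public static void main(String[] args) {
        YarnConfiguration conf = new YarnConfiguration();
        // Falls back to DEFAULT_NM_CONTAINER_MONITOR_ENABLED (true) when unset.
        boolean enabled = conf.getBoolean(
            YarnConfiguration.NM_CONTAINER_MONITOR_ENABLED,
            YarnConfiguration.DEFAULT_NM_CONTAINER_MONITOR_ENABLED);
        System.out.println("ContainersMonitor enabled: " + enabled);
      }
    }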

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc2656f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerStatusPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerStatusPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerStatusPBImpl.java
index 7ec6619..219cf02 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerStatusPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerStatusPBImpl.java
@@ -95,6 +95,8 @@ public class ContainerStatusPBImpl extends ContainerStatus {
     sb.append("Capability: ").append(getCapability()).append(", ");
     sb.append("Diagnostics: ").append(getDiagnostics()).append(", ");
     sb.append("ExitStatus: ").append(getExitStatus()).append(", ");
+    sb.append("IP: ").append(getIPs()).append(", ");
+    sb.append("Host: ").append(getHost());
     sb.append("]");
     return sb.toString();
   }
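The two new appends extend the tail of ContainerStatus#toString with the container's IPs and host. A standalone sketch of that tail, with invented values and the string literals taken from the hunk above:

    public class StatusTailSketch {
      public static void main(String[] args) {
        StringBuilder sb = new StringBuilder("[... ");
        sb.append("ExitStatus: ").append(-1000).append(", ");
        sb.append("IP: ").append(java.util.Arrays.asList("10.17.0.5")).append(", ");
        sb.append("Host: ").append("nm42.example.com");
        sb.append("]");
        // Prints: [... ExitStatus: -1000, IP: [10.17.0.5], Host: nm42.example.com]
        System.out.println(sb);
      }
    }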

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc2656f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 423b78b..d6c33a2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1365,6 +1365,12 @@
   </property>
 
   <property>
+    <description>Enable container monitor</description>
+    <name>yarn.nodemanager.container-monitor.enabled</name>
+    <value>true</value>
+  </property>
+
+  <property>
     <description>How often to monitor containers. If not set, the value for
     yarn.nodemanager.resource-monitor.interval-ms will be used.</description>
     <name>yarn.nodemanager.container-monitor.interval-ms</name>
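
Operators would normally flip this switch via yarn-site.xml; as a minimal programmatic sketch, assuming a test or client-side context, the monitor can be disabled like so:

    // Same key as the yarn-default.xml entry above
    YarnConfiguration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.NM_CONTAINER_MONITOR_ENABLED, false);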

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc2656f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
index c456bde..a04a914 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
@@ -171,7 +171,7 @@ public class ContainersMonitorImpl extends AbstractService implements
     LOG.info("Physical memory check enabled: " + pmemCheckEnabled);
     LOG.info("Virtual memory check enabled: " + vmemCheckEnabled);
 
-    containersMonitorEnabled = isEnabled();
+    containersMonitorEnabled = isContainerMonitorEnabled();
     LOG.info("ContainersMonitor enabled: " + containersMonitorEnabled);
 
     nodeCpuPercentageForYARN =
@@ -204,23 +204,24 @@ public class ContainersMonitorImpl extends AbstractService implements
     super.serviceInit(conf);
   }
 
-  private boolean isEnabled() {
+  private boolean isContainerMonitorEnabled() {
+    return conf.getBoolean(YarnConfiguration.NM_CONTAINER_MONITOR_ENABLED,
+        YarnConfiguration.DEFAULT_NM_CONTAINER_MONITOR_ENABLED);
+  }
+
+  private boolean isResourceCalculatorAvailable() {
     if (resourceCalculatorPlugin == null) {
-            LOG.info("ResourceCalculatorPlugin is unavailable on this system. "
-                + this.getClass().getName() + " is disabled.");
-            return false;
-    }
-    if (ResourceCalculatorProcessTree.getResourceCalculatorProcessTree("0", processTreeClass, conf) == null) {
-        LOG.info("ResourceCalculatorProcessTree is unavailable on this system. "
-                + this.getClass().getName() + " is disabled.");
-            return false;
+      LOG.info("ResourceCalculatorPlugin is unavailable on this system. " + this
+          .getClass().getName() + " is disabled.");
+      return false;
     }
-    if (!(isPmemCheckEnabled() || isVmemCheckEnabled())) {
-      LOG.info("Neither virtual-memory nor physical-memory monitoring is " +
-          "needed. Not running the monitor-thread");
+    if (ResourceCalculatorProcessTree
+        .getResourceCalculatorProcessTree("0", processTreeClass, conf)
+        == null) {
+      LOG.info("ResourceCalculatorProcessTree is unavailable on this system. "
+          + this.getClass().getName() + " is disabled.");
       return false;
     }
-
     return true;
   }
 
@@ -462,7 +463,7 @@ public class ContainersMonitorImpl extends AbstractService implements
             }
             // End of initializing any uninitialized processTrees
 
-            if (pId == null) {
+            if (pId == null || !isResourceCalculatorAvailable()) {
               continue; // processTree cannot be tracked
             }
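
Net effect of the refactoring: whether the monitoring thread runs is now decided solely by yarn.nodemanager.container-monitor.enabled, while the availability of the ResourceCalculatorPlugin and ResourceCalculatorProcessTree is checked per container inside the monitoring loop. Previously, isEnabled() also returned false when neither the physical- nor the virtual-memory check was enabled, which kept the monitor thread from running at all.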
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc2656f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
index 1b4e3b7..0f1c6f5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
@@ -29,15 +29,18 @@ import java.io.FileReader;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.regex.Pattern;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
@@ -181,6 +184,42 @@ public class TestContainersMonitor extends BaseContainerManagerTest {
     }
   }
 
+  // Test that even if VMEM_PMEM_CHECK is not enabled, container monitor will
+  // run.
+  @Test
+  public void testContainerMonitor() throws Exception {
+    conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);
+    conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
+    containerManager.start();
+    ContainerLaunchContext context =
+        recordFactory.newRecordInstance(ContainerLaunchContext.class);
+    context.setCommands(Arrays.asList("sleep 6"));
+    ContainerId cId = createContainerId(1705);
+
+    // start the container
+    StartContainerRequest scRequest = StartContainerRequest.newInstance(context,
+        createContainerToken(cId, DUMMY_RM_IDENTIFIER, this.context.getNodeId(),
+            user, this.context.getContainerTokenSecretManager()));
+    StartContainersRequest allRequests =
+        StartContainersRequest.newInstance(Arrays.asList(scRequest));
+    containerManager.startContainers(allRequests);
+    BaseContainerManagerTest
+        .waitForContainerState(containerManager, cId, ContainerState.RUNNING);
+    Thread.sleep(2000);
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      public Boolean get() {
+        try {
+          return containerManager.getContainerStatuses(
+              GetContainerStatusesRequest.newInstance(Arrays.asList(cId)))
+              .getContainerStatuses().get(0).getHost() != null;
+        } catch (Exception e) {
+          return false;
+        }
+      }
+
+    }, 300, 10000);
+  }
+
   @Test
   public void testContainerKillOnMemoryOverflow() throws IOException,
       InterruptedException, YarnException {
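
The new test relies on GenericTestUtils.waitFor(Supplier<Boolean>, checkEveryMillis, waitForMillis): it polls every 300 ms, for up to 10 seconds, until the reported ContainerStatus carries a non-null host, which presumably only happens once the monitor thread has run. Since both memory checks are switched off, a passing test shows the monitor no longer depends on them.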




[03/57] [abbrv] hadoop git commit: HADOOP-13544. JDiff reports unnecessarily show unannotated APIs and cause confusion while our javadocs only show annotated and public APIs. (vinodkv via wangda)

Posted by in...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/875062b5/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.7.2.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.7.2.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.7.2.xml
index 158528d..f822ebb 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.7.2.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.7.2.xml
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
 <!-- Generated by the JDiff Javadoc doclet -->
 <!-- (http://www.jdiff.org) -->
-<!-- on Thu May 12 17:48:36 PDT 2016 -->
+<!-- on Wed Aug 24 13:55:59 PDT 2016 -->
 
 <api
   xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
@@ -9,44 +9,8 @@
   name="hadoop-yarn-client 2.7.2"
   jdversion="1.0.9">
 
-<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/jdiff.jar -verbose -classpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/classes:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/hadoop-common-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/vinodkv/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/vinodkv/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/vinodkv/.m2/repository/commons-codec/commons-codec/1.4/comm
 ons-codec-1.4.jar:/Users/vinodkv/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/vinodkv/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/vinodkv/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/Users/vinodkv/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/vinodkv/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/vinodkv/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/vin
 odkv/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/vinodkv/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/vinodkv/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/vinodkv/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.1
 3/jackson-core-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/vinodkv/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/vinodkv/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/vinodkv/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/vinodkv/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.
 0-M20/api-asn1-api-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/vinodkv/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/vinodkv/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/vinodkv/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/vinodkv/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/vinodkv/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/vinodkv/.m2/repository/com/google/guava/guava/11.0.2/guav
 a-11.0.2.jar:/Users/vinodkv/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/vinodkv/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/vinodkv/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/vinodkv/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.7.0_45.jdk/Contents/Home/lib/tools.jar:/Users/vinodkv/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.7.2.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-2.7.2.jar:/Users/vinodkv/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/
 Users/vinodkv/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/vinodkv/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/vinodkv/.m2/repository/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/Users/vinodkv/.m2/repository/com/google/inject/guice/3.0/guice-3.0.jar:/Users/vinodkv/.m2/repository/javax/inject/javax.inject/1/javax.inject-1.jar:/Users/vinodkv/.m2/repository/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar -sourcepath /Users/vinodkv/Workspace/eclipse-workspace/apache-g
 it/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java -apidir /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/site/jdiff/xml -apiname hadoop-yarn-client 2.7.2 -->
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/jdiff.jar -verbose -classpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/classes:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/hadoop-common-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/vinodkv/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/vinodkv/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/vinodkv/.m2/repository/commons-codec/commons-codec/1.4/commo
 ns-codec-1.4.jar:/Users/vinodkv/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/vinodkv/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/vinodkv/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/Users/vinodkv/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/vinodkv/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/vinodkv/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/vino
 dkv/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/vinodkv/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/vinodkv/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/vinodkv/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13
 /jackson-core-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/vinodkv/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/vinodkv/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/vinodkv/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/vinodkv/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0
 -M20/api-asn1-api-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/vinodkv/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/vinodkv/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/vinodkv/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/vinodkv/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/vinodkv/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/vinodkv/.m2/repository/com/google/guava/guava/11.0.2/guava
 -11.0.2.jar:/Users/vinodkv/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/vinodkv/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/vinodkv/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/vinodkv/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_40.jdk/Contents/Home/lib/tools.jar:/Users/vinodkv/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.7.2.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-2.7.2.jar:/Users/vinodkv/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/U
 sers/vinodkv/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/vinodkv/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/vinodkv/.m2/repository/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/Users/vinodkv/.m2/repository/com/google/inject/guice/3.0/guice-3.0.jar:/Users/vinodkv/.m2/repository/javax/inject/javax.inject/1/javax.inject-1.jar:/Users/vinodkv/.m2/repository/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar -sourcepath /Users/vinodkv/Workspace/eclipse-workspace/apache-gi
 t/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/jdiff.jar -apidir /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/site/jdiff/xml -apiname hadoop-yarn-client 2.7.2 -->
 <package name="org.apache.hadoop.yarn.client">
-  <!-- start class org.apache.hadoop.yarn.client.SCMAdmin -->
-  <class name="SCMAdmin" extends="org.apache.hadoop.conf.Configured"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.util.Tool"/>
-    <constructor name="SCMAdmin"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <constructor name="SCMAdmin" type="org.apache.hadoop.conf.Configuration"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="createSCMAdminProtocol" return="org.apache.hadoop.yarn.server.api.SCMAdminProtocol"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="run" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="args" type="java.lang.String[]"/>
-      <exception name="Exception" type="java.lang.Exception"/>
-    </method>
-    <method name="main"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="args" type="java.lang.String[]"/>
-      <exception name="Exception" type="java.lang.Exception"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.client.SCMAdmin -->
 </package>
 <package name="org.apache.hadoop.yarn.client.api">
   <!-- start class org.apache.hadoop.yarn.client.api.AHSClient -->
@@ -472,166 +436,6 @@
     </method>
   </class>
   <!-- end class org.apache.hadoop.yarn.client.api.AMRMClient -->
-  <!-- start class org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest -->
-  <class name="AMRMClient.ContainerRequest" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="AMRMClient.ContainerRequest" type="org.apache.hadoop.yarn.api.records.Resource, java.lang.String[], java.lang.String[], org.apache.hadoop.yarn.api.records.Priority"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Instantiates a {@link ContainerRequest} with the given constraints and
- locality relaxation enabled.
-
- @param capability
-          The {@link Resource} to be requested for each container.
- @param nodes
-          Any hosts to request that the containers are placed on.
- @param racks
-          Any racks to request that the containers are placed on. The
-          racks corresponding to any hosts requested will be automatically
-          added to this list.
- @param priority
-          The priority at which to request the containers. Higher
-          priorities have lower numerical values.]]>
-      </doc>
-    </constructor>
-    <constructor name="AMRMClient.ContainerRequest" type="org.apache.hadoop.yarn.api.records.Resource, java.lang.String[], java.lang.String[], org.apache.hadoop.yarn.api.records.Priority, boolean"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Instantiates a {@link ContainerRequest} with the given constraints.
-
- @param capability
-          The {@link Resource} to be requested for each container.
- @param nodes
-          Any hosts to request that the containers are placed on.
- @param racks
-          Any racks to request that the containers are placed on. The
-          racks corresponding to any hosts requested will be automatically
-          added to this list.
- @param priority
-          The priority at which to request the containers. Higher
-          priorities have lower numerical values.
- @param relaxLocality
-          If true, containers for this request may be assigned on hosts
-          and racks other than the ones explicitly requested.]]>
-      </doc>
-    </constructor>
-    <constructor name="AMRMClient.ContainerRequest" type="org.apache.hadoop.yarn.api.records.Resource, java.lang.String[], java.lang.String[], org.apache.hadoop.yarn.api.records.Priority, boolean, java.lang.String"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Instantiates a {@link ContainerRequest} with the given constraints.
-
- @param capability
-          The {@link Resource} to be requested for each container.
- @param nodes
-          Any hosts to request that the containers are placed on.
- @param racks
-          Any racks to request that the containers are placed on. The
-          racks corresponding to any hosts requested will be automatically
-          added to this list.
- @param priority
-          The priority at which to request the containers. Higher
-          priorities have lower numerical values.
- @param relaxLocality
-          If true, containers for this request may be assigned on hosts
-          and racks other than the ones explicitly requested.
- @param nodeLabelsExpression
-          Set node labels to allocate resource, now we only support
-          asking for only a single node label]]>
-      </doc>
-    </constructor>
-    <method name="getCapability" return="org.apache.hadoop.yarn.api.records.Resource"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getNodes" return="java.util.List"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getRacks" return="java.util.List"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getPriority" return="org.apache.hadoop.yarn.api.records.Priority"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getRelaxLocality" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getNodeLabelExpression" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="toString" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <doc>
-    <![CDATA[Object to represent a single container request for resources. Scheduler
- documentation should be consulted for the specifics of how the parameters
- are honored.
-
- By default, YARN schedulers try to allocate containers at the requested
- locations but they may relax the constraints in order to expedite meeting
- allocations limits. They first relax the constraint to the same rack as the
- requested node and then to anywhere in the cluster. The relaxLocality flag
- may be used to disable locality relaxation and request containers at only
- specific locations. The following conditions apply.
- <ul>
- <li>Within a priority, all container requests must have the same value for
- locality relaxation. Either enabled or disabled.</li>
- <li>If locality relaxation is disabled, then across requests, locations at
- different network levels may not be specified. E.g. its invalid to make a
- request for a specific node and another request for a specific rack.</li>
- <li>If locality relaxation is disabled, then only within the same request,
- a node and its rack may be specified together. This allows for a specific
- rack with a preference for a specific node within that rack.</li>
- <li></li>
- </ul>
- To re-enable locality relaxation at a given priority, all pending requests
- with locality relaxation disabled must be first removed. Then they can be
- added back with locality relaxation enabled.
-
- All getters return immutable values.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest -->
-  <!-- start class org.apache.hadoop.yarn.client.api.InvalidContainerRequestException -->
-  <class name="InvalidContainerRequestException" extends="org.apache.hadoop.yarn.exceptions.YarnRuntimeException"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="InvalidContainerRequestException" type="java.lang.Throwable"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <constructor name="InvalidContainerRequestException" type="java.lang.String"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <constructor name="InvalidContainerRequestException" type="java.lang.String, java.lang.Throwable"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <doc>
-    <![CDATA[Thrown when an arguments are combined to construct a
- <code>AMRMClient.ContainerRequest</code> in an invalid way.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.client.api.InvalidContainerRequestException -->
   <!-- start class org.apache.hadoop.yarn.client.api.NMClient -->
   <class name="NMClient" extends="org.apache.hadoop.service.AbstractService"
     abstract="true"
@@ -1978,72 +1782,6 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.yarn.client.api.async.AMRMClientAsync -->
-  <!-- start interface org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler -->
-  <interface name="AMRMClientAsync.CallbackHandler"    abstract="true"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="onContainersCompleted"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="statuses" type="java.util.List"/>
-      <doc>
-      <![CDATA[Called when the ResourceManager responds to a heartbeat with completed
- containers. If the response contains both completed containers and
- allocated containers, this will be called before containersAllocated.]]>
-      </doc>
-    </method>
-    <method name="onContainersAllocated"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="containers" type="java.util.List"/>
-      <doc>
-      <![CDATA[Called when the ResourceManager responds to a heartbeat with allocated
- containers. If the response containers both completed containers and
- allocated containers, this will be called after containersCompleted.]]>
-      </doc>
-    </method>
-    <method name="onShutdownRequest"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Called when the ResourceManager wants the ApplicationMaster to shutdown
- for being out of sync etc. The ApplicationMaster should not unregister
- with the RM unless the ApplicationMaster wants to be the last attempt.]]>
-      </doc>
-    </method>
-    <method name="onNodesUpdated"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="updatedNodes" type="java.util.List"/>
-      <doc>
-      <![CDATA[Called when nodes tracked by the ResourceManager have changed in health,
- availability etc.]]>
-      </doc>
-    </method>
-    <method name="getProgress" return="float"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="onError"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="e" type="java.lang.Throwable"/>
-      <doc>
-      <![CDATA[Called when error comes from RM communications as well as from errors in
- the callback itself from the app. Calling
- stop() is the recommended action.
-
- @param e]]>
-      </doc>
-    </method>
-  </interface>
-  <!-- end interface org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler -->
   <!-- start class org.apache.hadoop.yarn.client.api.async.NMClientAsync -->
   <class name="NMClientAsync" extends="org.apache.hadoop.service.AbstractService"
     abstract="true"
@@ -2187,357 +1925,10 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.yarn.client.api.async.NMClientAsync -->
-  <!-- start interface org.apache.hadoop.yarn.client.api.async.NMClientAsync.CallbackHandler -->
-  <interface name="NMClientAsync.CallbackHandler"    abstract="true"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="onContainerStarted"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
-      <param name="allServiceResponse" type="java.util.Map"/>
-      <doc>
-      <![CDATA[The API is called when <code>NodeManager</code> responds to indicate its
- acceptance of the starting container request
- @param containerId the Id of the container
- @param allServiceResponse a Map between the auxiliary service names and
-                           their outputs]]>
-      </doc>
-    </method>
-    <method name="onContainerStatusReceived"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
-      <param name="containerStatus" type="org.apache.hadoop.yarn.api.records.ContainerStatus"/>
-      <doc>
-      <![CDATA[The API is called when <code>NodeManager</code> responds with the status
- of the container
- @param containerId the Id of the container
- @param containerStatus the status of the container]]>
-      </doc>
-    </method>
-    <method name="onContainerStopped"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
-      <doc>
-      <![CDATA[The API is called when <code>NodeManager</code> responds to indicate the
- container is stopped.
- @param containerId the Id of the container]]>
-      </doc>
-    </method>
-    <method name="onStartContainerError"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
-      <param name="t" type="java.lang.Throwable"/>
-      <doc>
-      <![CDATA[The API is called when an exception is raised in the process of
- starting a container
-
- @param containerId the Id of the container
- @param t the raised exception]]>
-      </doc>
-    </method>
-    <method name="onGetContainerStatusError"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
-      <param name="t" type="java.lang.Throwable"/>
-      <doc>
-      <![CDATA[The API is called when an exception is raised in the process of
- querying the status of a container
-
- @param containerId the Id of the container
- @param t the raised exception]]>
-      </doc>
-    </method>
-    <method name="onStopContainerError"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
-      <param name="t" type="java.lang.Throwable"/>
-      <doc>
-      <![CDATA[The API is called when an exception is raised in the process of
- stopping a container
-
- @param containerId the Id of the container
- @param t the raised exception]]>
-      </doc>
-    </method>
-    <doc>
-    <![CDATA[<p>
- The callback interface needs to be implemented by {@link NMClientAsync}
- users. The APIs are called when responses from <code>NodeManager</code> are
- available.
- </p>
-
- <p>
- Once a callback happens, the users can chose to act on it in blocking or
- non-blocking manner. If the action on callback is done in a blocking
- manner, some of the threads performing requests on NodeManagers may get
- blocked depending on how many threads in the pool are busy.
- </p>
-
- <p>
- The implementation of the callback function should not throw the
- unexpected exception. Otherwise, {@link NMClientAsync} will just
- catch, log and then ignore it.
- </p>]]>
-    </doc>
-  </interface>
-  <!-- end interface org.apache.hadoop.yarn.client.api.async.NMClientAsync.CallbackHandler -->
 </package>
 <package name="org.apache.hadoop.yarn.client.api.async.impl">
-  <!-- start class org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.ContainerEvent -->
-  <class name="NMClientAsyncImpl.ContainerEvent" extends="org.apache.hadoop.yarn.event.AbstractEvent"
-    abstract="false"
-    static="true" final="false" visibility="protected"
-    deprecated="not deprecated">
-    <constructor name="NMClientAsyncImpl.ContainerEvent" type="org.apache.hadoop.yarn.api.records.ContainerId, org.apache.hadoop.yarn.api.records.NodeId, org.apache.hadoop.yarn.api.records.Token, org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.ContainerEventType"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="getContainerId" return="org.apache.hadoop.yarn.api.records.ContainerId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getNodeId" return="org.apache.hadoop.yarn.api.records.NodeId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getContainerToken" return="org.apache.hadoop.yarn.api.records.Token"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.ContainerEvent -->
-  <!-- start class org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.ContainerEventProcessor -->
-  <class name="NMClientAsyncImpl.ContainerEventProcessor" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="protected"
-    deprecated="not deprecated">
-    <implements name="java.lang.Runnable"/>
-    <constructor name="NMClientAsyncImpl.ContainerEventProcessor" type="org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.ContainerEvent"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="run"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <field name="event" type="org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.ContainerEvent"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.ContainerEventProcessor -->
-  <!-- start class org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.ContainerEventType -->
-  <class name="NMClientAsyncImpl.ContainerEventType" extends="java.lang.Enum"
-    abstract="false"
-    static="true" final="true" visibility="protected"
-    deprecated="not deprecated">
-    <method name="values" return="org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.ContainerEventType[]"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="valueOf" return="org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.ContainerEventType"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
-    </method>
-    <doc>
-    <![CDATA[The type of the event of interacting with a container]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.ContainerEventType -->
-  <!-- start class org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.ContainerState -->
-  <class name="NMClientAsyncImpl.ContainerState" extends="java.lang.Enum"
-    abstract="false"
-    static="true" final="true" visibility="protected"
-    deprecated="not deprecated">
-    <method name="values" return="org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.ContainerState[]"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="valueOf" return="org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.ContainerState"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.ContainerState -->
-  <!-- start class org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.StartContainerEvent -->
-  <class name="NMClientAsyncImpl.StartContainerEvent" extends="org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.ContainerEvent"
-    abstract="false"
-    static="true" final="false" visibility="protected"
-    deprecated="not deprecated">
-    <constructor name="NMClientAsyncImpl.StartContainerEvent" type="org.apache.hadoop.yarn.api.records.Container, org.apache.hadoop.yarn.api.records.ContainerLaunchContext"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="getContainer" return="org.apache.hadoop.yarn.api.records.Container"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getContainerLaunchContext" return="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.StartContainerEvent -->
-  <!-- start class org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.StatefulContainer -->
-  <class name="NMClientAsyncImpl.StatefulContainer" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="protected"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.yarn.event.EventHandler"/>
-    <constructor name="NMClientAsyncImpl.StatefulContainer" type="org.apache.hadoop.yarn.client.api.async.NMClientAsync, org.apache.hadoop.yarn.api.records.ContainerId"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="handle"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="event" type="org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.ContainerEvent"/>
-    </method>
-    <method name="getContainerId" return="org.apache.hadoop.yarn.api.records.ContainerId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getState" return="org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.ContainerState"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <field name="stateMachineFactory" type="org.apache.hadoop.yarn.state.StateMachineFactory"
-      transient="false" volatile="false"
-      static="true" final="true" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.StatefulContainer -->
-  <!-- start class org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.StatefulContainer.OutOfOrderTransition -->
-  <class name="NMClientAsyncImpl.StatefulContainer.OutOfOrderTransition" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="protected"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.yarn.state.SingleArcTransition"/>
-    <constructor name="NMClientAsyncImpl.StatefulContainer.OutOfOrderTransition"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="transition"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="container" type="org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.StatefulContainer"/>
-      <param name="event" type="org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.ContainerEvent"/>
-    </method>
-    <field name="STOP_BEFORE_START_ERROR_MSG" type="java.lang.String"
-      transient="false" volatile="false"
-      static="true" final="true" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.StatefulContainer.OutOfOrderTransition -->
-  <!-- start class org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.StatefulContainer.StartContainerTransition -->
-  <class name="NMClientAsyncImpl.StatefulContainer.StartContainerTransition" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="protected"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.yarn.state.MultipleArcTransition"/>
-    <constructor name="NMClientAsyncImpl.StatefulContainer.StartContainerTransition"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="transition" return="org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.ContainerState"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="container" type="org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.StatefulContainer"/>
-      <param name="event" type="org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.ContainerEvent"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.StatefulContainer.StartContainerTransition -->
-  <!-- start class org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.StatefulContainer.StopContainerTransition -->
-  <class name="NMClientAsyncImpl.StatefulContainer.StopContainerTransition" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="protected"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.yarn.state.MultipleArcTransition"/>
-    <constructor name="NMClientAsyncImpl.StatefulContainer.StopContainerTransition"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="transition" return="org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.ContainerState"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="container" type="org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.StatefulContainer"/>
-      <param name="event" type="org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.ContainerEvent"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.StatefulContainer.StopContainerTransition -->
 </package>
 <package name="org.apache.hadoop.yarn.client.api.impl">
-  <!-- start class org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy.ContainerManagementProtocolProxyData -->
-  <class name="ContainerManagementProtocolProxy.ContainerManagementProtocolProxyData" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="ContainerManagementProtocolProxy.ContainerManagementProtocolProxyData" type="org.apache.hadoop.yarn.ipc.YarnRPC, java.lang.String, org.apache.hadoop.yarn.api.records.ContainerId, org.apache.hadoop.yarn.api.records.Token"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
-    </constructor>
-    <method name="getContainerManagementProtocol" return="org.apache.hadoop.yarn.api.ContainerManagementProtocol"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy.ContainerManagementProtocolProxyData -->
-  <!-- start class org.apache.hadoop.yarn.client.api.impl.NMClientImpl.StartedContainer -->
-  <class name="NMClientImpl.StartedContainer" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="protected"
-    deprecated="not deprecated">
-    <constructor name="NMClientImpl.StartedContainer" type="org.apache.hadoop.yarn.api.records.ContainerId, org.apache.hadoop.yarn.api.records.NodeId, org.apache.hadoop.yarn.api.records.Token"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="getContainerId" return="org.apache.hadoop.yarn.api.records.ContainerId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getNodeId" return="org.apache.hadoop.yarn.api.records.NodeId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.client.api.impl.NMClientImpl.StartedContainer -->
 </package>
 <package name="org.apache.hadoop.yarn.client.cli">
   <!-- start class org.apache.hadoop.yarn.client.cli.LogsCLI -->
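
The regenerated XML reflects the doclet switch visible in the command-line comment above: ExcludePrivateAnnotationsJDiffDoclet is replaced by IncludePublicAnnotationsJDiffDoclet, so only types annotated as public API are emitted. That is why entries such as SCMAdmin, AMRMClient.ContainerRequest, the AMRMClientAsync.CallbackHandler and NMClientAsync.CallbackHandler interfaces, and the NMClientAsyncImpl helper classes disappear from the 2.7.2 report: they are implementation-side or unannotated types that the generated javadocs likewise do not show.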




[52/57] [abbrv] hadoop git commit: YARN-5672. FairScheduler: Wrong queue name in log when adding application. (Wilfred Spiegelenburg via kasha)

Posted by in...@apache.org.
YARN-5672. FairScheduler: Wrong queue name in log when adding application. (Wilfred Spiegelenburg via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0da54e88
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0da54e88
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0da54e88

Branch: refs/heads/HDFS-10467
Commit: 0da54e8848764c71a31473516d23ada582013f8c
Parents: 6e130c3
Author: Karthik Kambatla <ka...@apache.org>
Authored: Mon Oct 3 06:03:46 2016 -0700
Committer: Karthik Kambatla <ka...@apache.org>
Committed: Mon Oct 3 06:03:46 2016 -0700

----------------------------------------------------------------------
 .../server/resourcemanager/scheduler/fair/FairScheduler.java  | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0da54e88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 310f2f9..920052f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -668,11 +668,12 @@ public class FairScheduler extends
     queue.getMetrics().submitApp(user);
 
     LOG.info("Accepted application " + applicationId + " from user: " + user
-        + ", in queue: " + queueName + ", currently num of applications: "
-        + applications.size());
+        + ", in queue: " + queue.getName()
+        + ", currently num of applications: " + applications.size());
     if (isAppRecovering) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug(applicationId + " is recovering. Skip notifying APP_ACCEPTED");
+        LOG.debug(applicationId
+            + " is recovering. Skip notifying APP_ACCEPTED");
       }
     } else {
       rmContext.getDispatcher().getEventHandler()
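
For context on the fix above: in the Fair Scheduler, the queue name a client submits with and the name of the queue the application is finally placed in often differ, both because queue placement rules may rewrite the target and because resolved queue names are fully qualified under "root.". Logging the submitted queueName could therefore name a queue the application never joined, which is why the patch logs queue.getName() instead. A minimal sketch of the mismatch, using a hypothetical resolver rather than the real placement code:

    // Hypothetical sketch of queue-name resolution; not the actual
    // FairScheduler placement logic.
    public class QueuePlacementSketch {

      // Assume a user-based placement rule is configured: submissions
      // to "default" are rerouted to a per-user queue under root.
      static String resolveQueue(String requestedQueue, String user) {
        if ("default".equals(requestedQueue)) {
          return "root." + user;
        }
        // Even without rewriting, resolved names are fully qualified.
        return "root." + requestedQueue;
      }

      public static void main(String[] args) {
        String requested = "default";
        String resolved = resolveQueue(requested, "wilfred");
        // Logging "requested" would claim queue "default", while the
        // application actually runs in "root.wilfred" -- hence the
        // switch to queue.getName() in the log message.
        System.out.println("requested=" + requested + ", resolved=" + resolved);
      }
    }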




[10/57] [abbrv] hadoop git commit: HADOOP-13544. JDiff reports unnecessarily show unannotated APIs and cause confusion while our javadocs only show annotated and public APIs. (vinodkv via wangda)

Posted by in...@apache.org.
HADOOP-13544. JDiff reports unnecessarily show unannotated APIs and cause confusion while our javadocs only show annotated and public APIs. (vinodkv via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/875062b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/875062b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/875062b5

Branch: refs/heads/HDFS-10467
Commit: 875062b5bc789158290bf93dadc71b5328ca4fee
Parents: 8ae4729
Author: Wangda Tan <wa...@apache.org>
Authored: Tue Sep 27 11:26:45 2016 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Tue Sep 27 11:26:45 2016 -0700

----------------------------------------------------------------------
 .../IncludePublicAnnotationsJDiffDoclet.java    |    64 +
 .../jdiff/Apache_Hadoop_Common_2.7.2.xml        | 41149 ++++++-----------
 .../jdiff/Apache_Hadoop_HDFS_2.7.2.xml          | 21704 +--------
 .../Apache_Hadoop_MapReduce_Common_2.7.2.xml    |   727 +-
 .../Apache_Hadoop_MapReduce_Core_2.7.2.xml      |  7059 +--
 .../Apache_Hadoop_MapReduce_JobClient_2.7.2.xml |   962 +-
 .../hadoop-mapreduce-client/pom.xml             |     4 +-
 hadoop-project-dist/pom.xml                     |     4 +-
 .../jdiff/Apache_Hadoop_YARN_API_2.7.2.xml      |   530 +-
 .../jdiff/Apache_Hadoop_YARN_Client_2.7.2.xml   |   613 +-
 .../jdiff/Apache_Hadoop_YARN_Common_2.7.2.xml   |  1185 +-
 .../Apache_Hadoop_YARN_Server_Common_2.7.2.xml  |  1056 +-
 hadoop-yarn-project/hadoop-yarn/pom.xml         |     4 +-
 13 files changed, 16499 insertions(+), 58562 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/875062b5/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/IncludePublicAnnotationsJDiffDoclet.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/IncludePublicAnnotationsJDiffDoclet.java b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/IncludePublicAnnotationsJDiffDoclet.java
new file mode 100644
index 0000000..91b3a9d
--- /dev/null
+++ b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/IncludePublicAnnotationsJDiffDoclet.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.classification.tools;
+
+import com.sun.javadoc.DocErrorReporter;
+import com.sun.javadoc.LanguageVersion;
+import com.sun.javadoc.RootDoc;
+
+import jdiff.JDiff;
+
+/**
+ * A <a href="http://java.sun.com/javase/6/docs/jdk/api/javadoc/doclet/">Doclet</a>
+ * that only includes class-level elements that are annotated with
+ * {@link org.apache.hadoop.classification.InterfaceAudience.Public}.
+ * Class-level elements with no annotation are excluded.
+ * In addition, all elements that are annotated with
+ * {@link org.apache.hadoop.classification.InterfaceAudience.Private} or
+ * {@link org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate}
+ * are also excluded.
+ * It delegates to the JDiff Doclet, and takes the same options.
+ */
+public class IncludePublicAnnotationsJDiffDoclet {
+
+  public static LanguageVersion languageVersion() {
+    return LanguageVersion.JAVA_1_5;
+  }
+
+  public static boolean start(RootDoc root) {
+    System.out.println(
+        IncludePublicAnnotationsJDiffDoclet.class.getSimpleName());
+    RootDocProcessor.treatUnannotatedClassesAsPrivate = true;
+    return JDiff.start(RootDocProcessor.process(root));
+  }
+
+  public static int optionLength(String option) {
+    Integer length = StabilityOptions.optionLength(option);
+    if (length != null) {
+      return length;
+    }
+    return JDiff.optionLength(option);
+  }
+
+  public static boolean validOptions(String[][] options,
+      DocErrorReporter reporter) {
+    StabilityOptions.validOptions(options, reporter);
+    String[][] filteredOptions = StabilityOptions.filterOptions(options);
+    return JDiff.validOptions(filteredOptions, reporter);
+  }
+}
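
To make the filtering rules in the Javadoc above concrete: with RootDocProcessor.treatUnannotatedClassesAsPrivate set, only class-level elements explicitly annotated @InterfaceAudience.Public reach JDiff. A minimal sketch with hypothetical class names, assuming hadoop-annotations is on the classpath:

    import org.apache.hadoop.classification.InterfaceAudience;

    // Hypothetical API classes; the comments state how the doclet treats each.
    @InterfaceAudience.Public
    class StablePublicApi {}      // kept: explicitly annotated Public

    class UnannotatedHelper {}    // dropped: unannotated, now treated as private

    @InterfaceAudience.Private
    class InternalImpl {}         // dropped: explicitly Private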




[42/57] [abbrv] hadoop git commit: HDFS-10907. Fix Erasure Coding documentation. Contributed by Manoj Govindassamy.

Posted by in...@apache.org.
HDFS-10907. Fix Erasure Coding documentation. Contributed by Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7fad1221
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7fad1221
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7fad1221

Branch: refs/heads/HDFS-10467
Commit: 7fad1221d6f35e84b320fab82174525c067ad521
Parents: ee33a02
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Fri Sep 30 12:51:27 2016 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Fri Sep 30 12:51:27 2016 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md  | 2 +-
 .../hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md             | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fad1221/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 9f9fba5..e923b86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -463,7 +463,7 @@ Runs the diskbalancer CLI. See [HDFS Diskbalancer](./HDFSDiskbalancer.html) for
 Usage:
 
        hdfs erasurecode [generic options]
-         [-setPolicy [-s <policyName>] <path>]
+         [-setPolicy [-p <policyName>] <path>]
          [-getPolicy <path>]
          [-listPolicies]
          [-usage [cmd ...]]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fad1221/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 76c1b3a..18b3a25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -125,7 +125,7 @@ Below are the details about each command.
 
       `path`: A directory in HDFS. This is a mandatory parameter. Setting a policy only affects newly created files, and does not affect existing files.
 
-      `policyName`: The ErasureCoding policy to be used for files under this directory. This is an optional parameter, specified using ‘-s’ flag. If no policy is specified, the system default ErasureCodingPolicy will be used.
+      `policyName`: The ErasureCoding policy to be used for files under this directory. This is an optional parameter, specified using ‘-p’ flag. If no policy is specified, the system default ErasureCodingPolicy will be used.
 
  *  `[-getPolicy <path>]`
 
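
With this fix, both the command summary in HDFSCommands.md (first hunk above) and this parameter description advertise -p, matching the flag the CLI actually parses; an invocation would take the form `hdfs erasurecode -setPolicy -p <policyName> <path>`, with both arguments left as placeholders here.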




[08/57] [abbrv] hadoop git commit: HADOOP-13544. JDiff reports unnecessarily show unannotated APIs and cause confusion while our javadocs only show annotated and public APIs. (vinodkv via wangda)

Posted by in...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/875062b5/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.7.2.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.7.2.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.7.2.xml
index 028ba2d..87a8f36 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.7.2.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.7.2.xml
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
 <!-- Generated by the JDiff Javadoc doclet -->
 <!-- (http://www.jdiff.org) -->
-<!-- on Thu Aug 18 16:02:32 PDT 2016 -->
+<!-- on Wed Aug 24 13:54:04 PDT 2016 -->
 
 <api
   xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
@@ -9,7 +9,7 @@
   name="Apache Hadoop HDFS 2.7.2"
   jdversion="1.0.9">
 
-<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet -docletpath /Users/wtan/project/github/hadoop-common-trunk/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/Users/wtan/project/github/hadoop-common-trunk/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -verbose -classpath /Users/wtan/project/github/hadoop-common-trunk/hadoop-hdfs-project/hadoop-hdfs/target/classes:/Users/wtan/project/github/hadoop-common-trunk/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_92.jdk/Contents/Home/lib/tools.jar:/Users/wtan/project/github/hadoop-common-trunk/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/wtan/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/wtan/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/wtan/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/http
 core-4.2.5.jar:/Users/wtan/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/wtan/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/wtan/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/wtan/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/wtan/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/wtan/.m2/repository/jline/jline/0.9.94/jline-0.9.94.jar:/Users/wtan/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/wtan/project/github/hadoop-common-trunk/hadoop-common-project/hadoop-common/target/hadoop-common-2.7.2.jar:/Users/wtan/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/wtan/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/
 wtan/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/wtan/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/Users/wtan/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/wtan/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/wtan/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/wtan/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/wtan/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/wtan/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/wtan/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/wtan/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/wtan/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/wtan/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/wtan/.m2/repos
 itory/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/wtan/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/wtan/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/wtan/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/wtan/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/wtan/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/wtan/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/wtan/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/wtan/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/wtan/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/wtan/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/wtan/.m2/repository/org/apache/curator/curat
 or-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/wtan/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/wtan/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/wtan/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/wtan/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/wtan/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/wtan/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/wtan/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/wtan/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/wtan/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/wtan/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/wtan/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/wtan/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/wtan/.m2/repository/commons-lang/common
 s-lang/2.6/commons-lang-2.6.jar:/Users/wtan/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/wtan/.m2/repository/commons-daemon/commons-daemon/1.0.13/commons-daemon-1.0.13.jar:/Users/wtan/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/wtan/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/wtan/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/wtan/.m2/repository/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/Users/wtan/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/wtan/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/wtan/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/wtan/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/wtan/.m2/repository/io/netty/netty-all/4.0.23.Final/netty-all-4.0.23.Final.jar:/Users/wtan/.m2/repository/xerces/xercesIm
 pl/2.9.1/xercesImpl-2.9.1.jar:/Users/wtan/.m2/repository/xml-apis/xml-apis/1.3.04/xml-apis-1.3.04.jar:/Users/wtan/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/wtan/.m2/repository/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar -sourcepath /Users/wtan/project/github/hadoop-common-trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java -doclet org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet -docletpath /Users/wtan/project/github/hadoop-common-trunk/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/Users/wtan/project/github/hadoop-common-trunk/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -apidir /Users/wtan/project/github/hadoop-common-trunk/hadoop-hdfs-project/hadoop-hdfs/target/site/jdiff/xml -apiname Apache Hadoop HDFS 2.7.2 -->
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -verbose -classpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-hdfs-project/hadoop-hdfs/target/classes:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_40.jdk/Contents/Home/lib/tools.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.j
 ar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/vinodkv/.m2/repository/jline/jline/0.9.94/jline-0.9.94.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/hadoop-common-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-mat
 h3-3.1.1.jar:/Users/vinodkv/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/vinodkv/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/vinodkv/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/Users/vinodkv/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/vinodkv/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/vinodkv/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/vinodkv/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/vinodkv/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/vinodkv/.m2/repository/org/cod
 ehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/vinodkv/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/vinodkv/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/vinodkv/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/vinodkv/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/vinodkv/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/vinodkv/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/vinodkv/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/vinodkv/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/vinodkv/.m2/reposito
 ry/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/vinodkv/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/vinodkv/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/vinodkv/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/vinodkv/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/vinodkv/.m2/repository/commons-cli/commons-cli/1.
 2/commons-cli-1.2.jar:/Users/vinodkv/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/vinodkv/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/vinodkv/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/vinodkv/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/vinodkv/.m2/repository/commons-daemon/commons-daemon/1.0.13/commons-daemon-1.0.13.jar:/Users/vinodkv/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/vinodkv/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/vinodkv/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/vinod
 kv/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/vinodkv/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/vinodkv/.m2/repository/io/netty/netty-all/4.0.23.Final/netty-all-4.0.23.Final.jar:/Users/vinodkv/.m2/repository/xerces/xercesImpl/2.9.1/xercesImpl-2.9.1.jar:/Users/vinodkv/.m2/repository/xml-apis/xml-apis/1.3.04/xml-apis-1.3.04.jar:/Users/vinodkv/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/vinodkv/.m2/repository/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar -sourcepath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-hdfs-project/hadoop-hdfs/src/main/java -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-hdfs-project/
 hadoop-hdfs/target/jdiff.jar -apidir /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-hdfs-project/hadoop-hdfs/target/site/jdiff/xml -apiname Apache Hadoop HDFS 2.7.2 -->
 <package name="org.apache.hadoop.fs">
   <!-- start class org.apache.hadoop.fs.BlockStorageLocation -->
   <class name="BlockStorageLocation" extends="org.apache.hadoop.fs.BlockLocation"
@@ -126,354 +126,8 @@
     </doc>
   </interface>
   <!-- end interface org.apache.hadoop.fs.VolumeId -->
-  <!-- start class org.apache.hadoop.fs.XAttr.Builder -->
-  <class name="XAttr.Builder" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="Builder"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="setNameSpace" return="org.apache.hadoop.fs.XAttr.Builder"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="ns" type="org.apache.hadoop.fs.XAttr.NameSpace"/>
-    </method>
-    <method name="setName" return="org.apache.hadoop.fs.XAttr.Builder"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
-    </method>
-    <method name="setValue" return="org.apache.hadoop.fs.XAttr.Builder"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="value" type="byte[]"/>
-    </method>
-    <method name="build" return="org.apache.hadoop.fs.XAttr"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.fs.XAttr.Builder -->
-  <!-- start class org.apache.hadoop.fs.XAttr.NameSpace -->
-  <class name="XAttr.NameSpace" extends="java.lang.Enum"
-    abstract="false"
-    static="true" final="true" visibility="public"
-    deprecated="not deprecated">
-    <method name="values" return="org.apache.hadoop.fs.XAttr.NameSpace[]"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="valueOf" return="org.apache.hadoop.fs.XAttr.NameSpace"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.fs.XAttr.NameSpace -->
 </package>
 <package name="org.apache.hadoop.hdfs">
-  <!-- start interface org.apache.hadoop.hdfs.BlockReader -->
-  <interface name="BlockReader"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.fs.ByteBufferReadable"/>
-    <method name="read" return="int"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="buf" type="byte[]"/>
-      <param name="off" type="int"/>
-      <param name="len" type="int"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="skip" return="long"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="n" type="long"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Skip the given number of bytes]]>
-      </doc>
-    </method>
-    <method name="available" return="int"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Returns an estimate of the number of bytes that can be read
- (or skipped over) from this input stream without performing
- network I/O.
- This may return more than what is actually present in the block.]]>
-      </doc>
-    </method>
-    <method name="close"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Close the block reader.
-
- @throws IOException]]>
-      </doc>
-    </method>
-    <method name="readFully"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="buf" type="byte[]"/>
-      <param name="readOffset" type="int"/>
-      <param name="amtToRead" type="int"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Read exactly the given amount of data, throwing an exception
- if EOF is reached before that amount]]>
-      </doc>
-    </method>
-    <method name="readAll" return="int"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="buf" type="byte[]"/>
-      <param name="offset" type="int"/>
-      <param name="len" type="int"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Similar to {@link #readFully(byte[], int, int)} except that it will
- not throw an exception on EOF. However, it differs from the simple
- {@link #read(byte[], int, int)} call in that it is guaranteed to
- read the data if it is available. In other words, if this call
- does not throw an exception, then either the buffer has been
- filled or the next call will return EOF.]]>
-      </doc>
-    </method>
-    <method name="isLocal" return="boolean"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[@return              true only if this is a local read.]]>
-      </doc>
-    </method>
-    <method name="isShortCircuit" return="boolean"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[@return              true only if this is a short-circuit read.
-                      All short-circuit reads are also local.]]>
-      </doc>
-    </method>
-    <method name="getClientMmap" return="org.apache.hadoop.hdfs.shortcircuit.ClientMmap"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="opts" type="java.util.EnumSet"/>
-      <doc>
-      <![CDATA[Get a ClientMmap object for this BlockReader.
-
- @param opts          The read options to use.
- @return              The ClientMmap object, or null if mmap is not
-                      supported.]]>
-      </doc>
-    </method>
-    <doc>
-    <![CDATA[A BlockReader is responsible for reading a single block
- from a single datanode.]]>
-    </doc>
-  </interface>
-  <!-- end interface org.apache.hadoop.hdfs.BlockReader -->
-  <!-- start class org.apache.hadoop.hdfs.BlockReaderFactory.BlockReaderPeer -->
-  <class name="BlockReaderFactory.BlockReaderPeer" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-  </class>
-  <!-- end class org.apache.hadoop.hdfs.BlockReaderFactory.BlockReaderPeer -->
-  <!-- start class org.apache.hadoop.hdfs.BlockReaderFactory.FailureInjector -->
-  <class name="BlockReaderFactory.FailureInjector" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="FailureInjector"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="injectRequestFileDescriptorsFailure"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getSupportsReceiptVerification" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.hdfs.BlockReaderFactory.FailureInjector -->
-  <!-- start class org.apache.hadoop.hdfs.CorruptFileBlockIterator -->
-  <class name="CorruptFileBlockIterator" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.fs.RemoteIterator"/>
-    <constructor name="CorruptFileBlockIterator" type="org.apache.hadoop.hdfs.DFSClient, org.apache.hadoop.fs.Path"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </constructor>
-    <method name="getCallsMade" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[@return the number of calls made to the DFSClient.
- This is for debugging and testing purposes.]]>
-      </doc>
-    </method>
-    <method name="hasNext" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="next" return="org.apache.hadoop.fs.Path"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <doc>
-    <![CDATA[Provides an iterator interface for listCorruptFileBlocks.
- This class is used by DistributedFileSystem and Hdfs.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.hdfs.CorruptFileBlockIterator -->
-  <!-- start class org.apache.hadoop.hdfs.DFSClient.Conf -->
-  <class name="DFSClient.Conf" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="Conf" type="org.apache.hadoop.conf.Configuration"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="isUseLegacyBlockReaderLocal" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getDomainSocketPath" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="isShortCircuitLocalReads" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="isDomainSocketDataTraffic" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <field name="brfFailureInjector" type="org.apache.hadoop.hdfs.BlockReaderFactory.FailureInjector"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </field>
-    <doc>
-    <![CDATA[DFSClient configuration]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.hdfs.DFSClient.Conf -->
-  <!-- start class org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream -->
-  <class name="DFSClient.DFSDataInputStream" extends="org.apache.hadoop.hdfs.client.HdfsDataInputStream"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="use {@link HdfsDataInputStream} instead.">
-    <constructor name="DFSDataInputStream" type="org.apache.hadoop.hdfs.DFSInputStream"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </constructor>
-    <doc>
-    <![CDATA[@deprecated use {@link HdfsDataInputStream} instead.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream -->
-  <!-- start class org.apache.hadoop.hdfs.DFSHedgedReadMetrics -->
-  <class name="DFSHedgedReadMetrics" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="DFSHedgedReadMetrics"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="incHedgedReadOps"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="incHedgedReadOpsInCurThread"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="incHedgedReadWins"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getHedgedReadOps" return="long"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getHedgedReadOpsInCurThread" return="long"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getHedgedReadWins" return="long"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <field name="hedgedReadOps" type="java.util.concurrent.atomic.AtomicLong"
-      transient="false" volatile="false"
-      static="false" final="true" visibility="public"
-      deprecated="not deprecated">
-    </field>
-    <field name="hedgedReadOpsWin" type="java.util.concurrent.atomic.AtomicLong"
-      transient="false" volatile="false"
-      static="false" final="true" visibility="public"
-      deprecated="not deprecated">
-    </field>
-    <field name="hedgedReadOpsInCurThread" type="java.util.concurrent.atomic.AtomicLong"
-      transient="false" volatile="false"
-      static="false" final="true" visibility="public"
-      deprecated="not deprecated">
-    </field>
-    <doc>
-    <![CDATA[The client-side metrics for hedged read feature.
- This class has a number of metrics variables that are publicly accessible,
- we can grab them from client side, like HBase.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.hdfs.DFSHedgedReadMetrics -->
   <!-- start class org.apache.hadoop.hdfs.DFSInotifyEventInputStream -->
   <class name="DFSInotifyEventInputStream" extends="java.lang.Object"
     abstract="false"
@@ -571,80 +225,55 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.hdfs.DFSInotifyEventInputStream -->
-  <!-- start class org.apache.hadoop.hdfs.DFSInputStream.ReadStatistics -->
-  <class name="DFSInputStream.ReadStatistics" extends="java.lang.Object"
+  <!-- start class org.apache.hadoop.hdfs.UnknownCipherSuiteException -->
+  <class name="UnknownCipherSuiteException" extends="java.io.IOException"
     abstract="false"
-    static="true" final="false" visibility="public"
+    static="false" final="false" visibility="public"
     deprecated="not deprecated">
-    <constructor name="ReadStatistics"
+    <constructor name="UnknownCipherSuiteException" type="java.lang.String"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </constructor>
-    <constructor name="ReadStatistics" type="org.apache.hadoop.hdfs.DFSInputStream.ReadStatistics"
+    <doc>
+    <![CDATA[Thrown when an unknown cipher suite is encountered.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.UnknownCipherSuiteException -->
+  <!-- start class org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException -->
+  <class name="UnknownCryptoProtocolVersionException" extends="java.io.IOException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="UnknownCryptoProtocolVersionException"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </constructor>
-    <method name="getTotalBytesRead" return="long"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[@return The total bytes read.  This will always be at least as
- high as the other numbers, since it includes all of them.]]>
-      </doc>
-    </method>
-    <method name="getTotalLocalBytesRead" return="long"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[@return The total local bytes read.  This will always be at least
- as high as totalShortCircuitBytesRead, since all short-circuit
- reads are also local.]]>
-      </doc>
-    </method>
-    <method name="getTotalShortCircuitBytesRead" return="long"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[@return The total short-circuit local bytes read.]]>
-      </doc>
-    </method>
-    <method name="getTotalZeroCopyBytesRead" return="long"
-      abstract="false" native="false" synchronized="false"
+    <constructor name="UnknownCryptoProtocolVersionException" type="java.lang.String"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <doc>
-      <![CDATA[@return The total number of zero-copy bytes read.]]>
-      </doc>
-    </method>
-    <method name="getRemoteBytesRead" return="long"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[@return The total number of bytes read which were not local.]]>
-      </doc>
-    </method>
+    </constructor>
   </class>
-  <!-- end class org.apache.hadoop.hdfs.DFSInputStream.ReadStatistics -->
-  <!-- start class org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress -->
-  <class name="DFSUtil.ConfiguredNNAddress" extends="java.lang.Object"
+  <!-- end class org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException -->
+  <doc>
+  <![CDATA[<p>A distributed implementation of {@link
+org.apache.hadoop.fs.FileSystem}.  This is loosely modelled after
+Google's <a href="http://research.google.com/archive/gfs.html">GFS</a>.</p>
+
+<p>The most important difference is that unlike GFS, Hadoop DFS files
+have strictly one writer at any one time.  Bytes are always appended
+to the end of the writer's stream.  There is no notion of "record appends"
+or "mutations" that are then checked or reordered.  Writers simply emit
+a byte stream.  That byte stream is guaranteed to be stored in the
+order written.</p>]]>
+  </doc>
+</package>
+<package name="org.apache.hadoop.hdfs.client">
+  <!-- start class org.apache.hadoop.hdfs.client.BlockReportOptions -->
+  <class name="BlockReportOptions" extends="java.lang.Object"
     abstract="false"
-    static="true" final="false" visibility="public"
+    static="false" final="true" visibility="public"
     deprecated="not deprecated">
-    <method name="getNameserviceId" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getNamenodeId" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getAddress" return="java.net.InetSocketAddress"
+    <method name="isIncremental" return="boolean"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
@@ -655,20851 +284,1830 @@
       deprecated="not deprecated">
     </method>
     <doc>
-    <![CDATA[Represent one of the NameNodes configured in the cluster.]]>
+    <![CDATA[Options that can be specified when manually triggering a block report.]]>
     </doc>
   </class>
-  <!-- end class org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress -->
-  <!-- start class org.apache.hadoop.hdfs.ExtendedBlockId -->
-  <class name="ExtendedBlockId" extends="java.lang.Object"
+  <!-- end class org.apache.hadoop.hdfs.client.BlockReportOptions -->
+  <!-- start class org.apache.hadoop.hdfs.client.HdfsAdmin -->
+  <class name="HdfsAdmin" extends="java.lang.Object"
     abstract="false"
-    static="false" final="true" visibility="public"
+    static="false" final="false" visibility="public"
     deprecated="not deprecated">
-    <constructor name="ExtendedBlockId" type="long, java.lang.String"
+    <constructor name="HdfsAdmin" type="java.net.URI, org.apache.hadoop.conf.Configuration"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a new HdfsAdmin client.
+
+ @param uri the unique URI of the HDFS file system to administer
+ @param conf configuration
+ @throws IOException in the event the file system could not be created]]>
+      </doc>
     </constructor>
-    <method name="fromExtendedBlock" return="org.apache.hadoop.hdfs.ExtendedBlockId"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="block" type="org.apache.hadoop.hdfs.protocol.ExtendedBlock"/>
-    </method>
-    <method name="getBlockId" return="long"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getBlockPoolId" return="java.lang.String"
+    <method name="setQuota"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="quota" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set the namespace quota (count of files, directories, and sym links) for a
+ directory.
+
+ @param src the path to set the quota for
+ @param quota the value to set for the quota
+ @throws IOException in the event of error]]>
+      </doc>
     </method>
-    <method name="equals" return="boolean"
+    <method name="clearQuota"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="o" type="java.lang.Object"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Clear the namespace quota (count of files, directories and sym links) for a
+ directory.
+
+ @param src the path to clear the quota of
+ @throws IOException in the event of error]]>
+      </doc>
     </method>
-    <method name="hashCode" return="int"
+    <method name="setSpaceQuota"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="spaceQuota" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set the storage space quota (size of files) for a directory. Note that
+ directories and sym links do not occupy storage space.
+
+ @param src the path to set the space quota of
+ @param spaceQuota the value to set for the space quota
+ @throws IOException in the event of error]]>
+      </doc>
     </method>
-    <method name="toString" return="java.lang.String"
+    <method name="clearSpaceQuota"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-    </method>
-    <doc>
-    <![CDATA[An immutable key which identifies a block.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.hdfs.ExtendedBlockId -->
-  <!-- start class org.apache.hadoop.hdfs.HAUtil -->
-  <class name="HAUtil" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="isHAEnabled" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nsId" type="java.lang.String"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Returns true if HA for namenode is configured for the given nameservice
+      <![CDATA[Clear the storage space quota (size of files) for a directory. Note that
+ directories and sym links do not occupy storage space.
 
- @param conf Configuration
- @param nsId nameservice, or null if no federated NS is configured
- @return true if HA is configured in the configuration; else false.]]>
+ @param src the path to clear the space quota of
+ @throws IOException in the event of error]]>
       </doc>
     </method>
-    <method name="usesSharedEditsDir" return="boolean"
+    <method name="setQuotaByStorageType"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="type" type="org.apache.hadoop.fs.StorageType"/>
+      <param name="quota" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Returns true if HA is using a shared edits directory.
+      <![CDATA[Set the quota by storage type for a directory. Note that
+ directories and sym links do not occupy storage type quota.
 
- @param conf Configuration
- @return true if HA config is using a shared edits dir, false otherwise.]]>
+ @param src the target directory to set the quota by storage type
+ @param type the storage type to set for quota by storage type
+ @param quota the value to set for quota by storage type
+ @throws IOException in the event of error]]>
       </doc>
     </method>
-    <method name="getNameNodeId" return="java.lang.String"
+    <method name="clearQuotaByStorageType"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nsId" type="java.lang.String"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="type" type="org.apache.hadoop.fs.StorageType"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Get the namenode Id by matching the {@code addressKey}
- with the the address of the local node.
-
- If {@link DFSConfigKeys#DFS_HA_NAMENODE_ID_KEY} is not specifically
- configured, this method determines the namenode Id by matching the local
- node's address with the configured addresses. When a match is found, it
- returns the namenode Id from the corresponding configuration key.
+      <![CDATA[Clear the space quota by storage type for a directory. Note that
+ directories and sym links do not occupy storage type quota.
 
- @param conf Configuration
- @return namenode Id on success, null on failure.
- @throws HadoopIllegalArgumentException on error]]>
+ @param src the target directory to clear the quota by storage type
+ @param type the storage type to clear for quota by storage type
+ @throws IOException in the event of error]]>
       </doc>
     </method>
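
As an illustration of the HdfsAdmin quota calls documented above, here is a
minimal, self-contained sketch; the NameNode URI, the /data path, the SSD
storage type, and the 10 GB figure are assumptions for the example, not
values taken from this file:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.StorageType;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;

    public class QuotaSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical cluster URI; substitute your own NameNode address.
        HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"), conf);
        Path dir = new Path("/data");

        // Cap SSD usage under /data at 10 GB.
        admin.setQuotaByStorageType(dir, StorageType.SSD, 10L * 1024 * 1024 * 1024);

        // Remove the per-storage-type cap again.
        admin.clearQuotaByStorageType(dir, StorageType.SSD);

        // Clearing the overall space quota works the same way.
        admin.clearSpaceQuota(dir);
      }
    }
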
-    <method name="getNameNodeIdFromAddress" return="java.lang.String"
+    <method name="allowSnapshot"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="address" type="java.net.InetSocketAddress"/>
-      <param name="keys" type="java.lang.String[]"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Similar to
- {@link DFSUtil#getNameServiceIdFromAddress(Configuration,
- InetSocketAddress, String...)}]]>
+      <![CDATA[Allow snapshot on a directory.
+ @param path The path of the directory where snapshots will be taken.]]>
       </doc>
     </method>
-    <method name="getNameNodeIdOfOtherNode" return="java.lang.String"
+    <method name="disallowSnapshot"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nsId" type="java.lang.String"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Get the NN ID of the other node in an HA setup.
-
- @param conf the configuration of this node
- @return the NN ID of the other node in this nameservice]]>
+      <![CDATA[Disallow snapshot on a directory.
+ @param path The path of the snapshottable directory.]]>
       </doc>
     </method>
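
A similarly small sketch of the snapshot toggles documented above, reusing
the admin handle from the quota sketch; the path is an illustrative
assumption:

    Path project = new Path("/user/alice/project");
    admin.allowSnapshot(project);     // mark the directory snapshottable
    // Snapshots can now be taken, e.g. via FileSystem#createSnapshot.
    admin.disallowSnapshot(project);  // rejected while snapshots still exist
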
-    <method name="getConfForOtherNode" return="org.apache.hadoop.conf.Configuration"
+    <method name="addCacheDirective" return="long"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="myConf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="info" type="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo"/>
+      <param name="flags" type="java.util.EnumSet"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Given the configuration for this node, return a Configuration object for
- the other node in an HA setup.
+      <![CDATA[Add a new CacheDirectiveInfo.
 
- @param myConf the configuration of this node
- @return the configuration of the other node in an HA setup]]>
+ @param info Information about a directive to add.
+ @param flags {@link CacheFlag}s to use for this operation.
+ @return the ID of the directive that was created.
+ @throws IOException if the directive could not be added]]>
       </doc>
     </method>
-    <method name="shouldAllowStandbyReads" return="boolean"
+    <method name="modifyCacheDirective"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="info" type="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo"/>
+      <param name="flags" type="java.util.EnumSet"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[This is used only by tests at the moment.
- @return true if the NN should allow read operations while in standby mode.]]>
+      <![CDATA[Modify a CacheDirective.
+
+ @param info Information about the directive to modify. You must set the ID
+          to indicate which CacheDirective you want to modify.
+ @param flags {@link CacheFlag}s to use for this operation.
+ @throws IOException if the directive could not be modified]]>
       </doc>
     </method>
-    <method name="setAllowStandbyReads"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="val" type="boolean"/>
-    </method>
-    <method name="isLogicalUri" return="boolean"
+    <method name="removeCacheDirective"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nameNodeUri" type="java.net.URI"/>
+      <param name="id" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[@return true if the given nameNodeUri appears to be a logical URI.]]>
+      <![CDATA[Remove a CacheDirective.
+
+ @param id identifier of the CacheDirectiveInfo to remove
+ @throws IOException if the directive could not be removed]]>
       </doc>
     </method>
-    <method name="isClientFailoverConfigured" return="boolean"
+    <method name="listCacheDirectives" return="org.apache.hadoop.fs.RemoteIterator"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nameNodeUri" type="java.net.URI"/>
+      <param name="filter" type="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Check whether the client has a failover proxy provider configured
- for the namenode/nameservice.
+      <![CDATA[List cache directives. Incrementally fetches results from the server.
 
- @param conf Configuration
- @param nameNodeUri The URI of namenode
- @return true if failover is configured.]]>
+ @param filter Filter parameters to use when listing the directives, null to
+               list all directives visible to us.
+ @return A RemoteIterator which returns CacheDirectiveInfo objects.]]>
       </doc>
     </method>
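
The cache-directive methods above compose as follows; a hedged sketch that
assumes a pool named "hot-pool" already exists (see the cache-pool sketch
further down) and reuses the admin handle from earlier:

    import java.util.EnumSet;
    import org.apache.hadoop.fs.CacheFlag;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
    import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

    // Cache /data/hot in the assumed pool "hot-pool" with 2 cached replicas.
    CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
        .setPath(new Path("/data/hot"))
        .setPool("hot-pool")
        .setReplication((short) 2)
        .build();
    long id = admin.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));

    // Raise the cache replication; FORCE skips pool resource checking.
    admin.modifyCacheDirective(new CacheDirectiveInfo.Builder()
        .setId(id)
        .setReplication((short) 3)
        .build(), EnumSet.of(CacheFlag.FORCE));

    // A null filter lists every directive visible to the caller.
    RemoteIterator<CacheDirectiveEntry> it = admin.listCacheDirectives(null);
    while (it.hasNext()) {
      System.out.println(it.next().getInfo());
    }
    admin.removeCacheDirective(id);
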
-    <method name="useLogicalUri" return="boolean"
+    <method name="addCachePool"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nameNodeUri" type="java.net.URI"/>
+      <param name="info" type="org.apache.hadoop.hdfs.protocol.CachePoolInfo"/>
       <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Check whether logical URI is needed for the namenode and
- the corresponding failover proxy provider in the config.
+      <![CDATA[Add a cache pool.
 
- @param conf Configuration
- @param nameNodeUri The URI of namenode
- @return true if logical URI is needed. false, if not needed.
- @throws IOException most likely due to misconfiguration.]]>
+ @param info
+          The request to add a cache pool.
+ @throws IOException
+          If the request could not be completed.]]>
       </doc>
     </method>
-    <method name="getServiceUriFromToken" return="java.net.URI"
+    <method name="modifyCachePool"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="scheme" type="java.lang.String"/>
-      <param name="token" type="org.apache.hadoop.security.token.Token"/>
+      <param name="info" type="org.apache.hadoop.hdfs.protocol.CachePoolInfo"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Parse the file system URI out of the provided token.]]>
+      <![CDATA[Modify an existing cache pool.
+
+ @param info
+          The request to modify a cache pool.
+ @throws IOException
+          If the request could not be completed.]]>
       </doc>
     </method>
-    <method name="buildTokenServiceForLogicalUri" return="org.apache.hadoop.io.Text"
+    <method name="removeCachePool"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="uri" type="java.net.URI"/>
-      <param name="scheme" type="java.lang.String"/>
+      <param name="poolName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Get the service name used in the delegation token for the given logical
- HA service.
- @param uri the logical URI of the cluster
- @param scheme the scheme of the corresponding FileSystem
- @return the service name]]>
+      <![CDATA[Remove a cache pool.
+
+ @param poolName
+          Name of the cache pool to remove.
+ @throws IOException
+          if the cache pool did not exist, or could not be removed.]]>
       </doc>
     </method>
-    <method name="isTokenForLogicalUri" return="boolean"
+    <method name="listCachePools" return="org.apache.hadoop.fs.RemoteIterator"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="token" type="org.apache.hadoop.security.token.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[@return true if this token corresponds to a logical nameservice
- rather than a specific namenode.]]>
+      <![CDATA[List all cache pools.
+
+ @return A remote iterator from which you can get CachePoolEntry objects.
+          Requests will be made as needed.
+ @throws IOException
+          If there was an error listing cache pools.]]>
       </doc>
     </method>
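
The cache-pool calls above pair with the directive sketch earlier; pool
name, mode, and byte limit here are illustrative assumptions:

    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
    import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

    // Create the pool assumed by the directive sketch above.
    CachePoolInfo pool = new CachePoolInfo("hot-pool")
        .setMode(new FsPermission((short) 0755))
        .setLimit(64L * 1024 * 1024);   // cap cached bytes in this pool
    admin.addCachePool(pool);

    RemoteIterator<CachePoolEntry> pools = admin.listCachePools();
    while (pools.hasNext()) {
      System.out.println(pools.next().getInfo().getPoolName());
    }
    admin.removeCachePool("hot-pool");
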
-    <method name="buildTokenServicePrefixForLogicalUri" return="java.lang.String"
+    <method name="createEncryptionZone"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="scheme" type="java.lang.String"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="keyName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <doc>
+      <![CDATA[Create an encryption zone rooted at an empty existing directory, using the
+ specified encryption key. An encryption zone has an associated encryption
+ key used when reading and writing files within the zone.
+
+ @param path    The path of the root of the encryption zone. Must refer to
+                an empty, existing directory.
+ @param keyName Name of key available at the KeyProvider.
+ @throws IOException            if there was a general IO exception
+ @throws AccessControlException if the caller does not have access to path
+ @throws FileNotFoundException  if the path does not exist]]>
+      </doc>
     </method>
-    <method name="cloneDelegationTokenForLogicalUri"
+    <method name="getEncryptionZoneForPath" return="org.apache.hadoop.hdfs.protocol.EncryptionZone"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
-      <param name="haUri" type="java.net.URI"/>
-      <param name="nnAddrs" type="java.util.Collection"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
       <doc>
-      <![CDATA[Locate a delegation token associated with the given HA cluster URI, and if
- one is found, clone it to also represent the underlying namenode address.
- @param ugi the UGI to modify
- @param haUri the logical URI for the cluster
- @param nnAddrs collection of NNs in the cluster to which the token
- applies]]>
+      <![CDATA[Get the encryption zone for a given file or directory.
+
+ @param path The path to get the ez for.
+
+ @return The EncryptionZone, or null if the path is not in an ez.
+ @throws IOException            if there was a general IO exception
+ @throws AccessControlException if the caller does not have access to path
+ @throws FileNotFoundException  if the path does not exist]]>
       </doc>
     </method>
-    <method name="getAddressOfActive" return="java.net.InetSocketAddress"
+    <method name="listEncryptionZones" return="org.apache.hadoop.fs.RemoteIterator"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
       <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Get the internet address of the currently-active NN. This should rarely be
- used, since callers of this method who connect directly to the NN using the
- resulting InetSocketAddress will not be able to connect to the active NN if
- a failover were to occur after this method has been called.
-
- @param fs the file system to get the active address of.
- @return the internet address of the currently-active NN.
- @throws IOException if an error occurs while resolving the active NN.]]>
+      <![CDATA[Returns a RemoteIterator which can be used to list the encryption zones
+ in HDFS. For large numbers of encryption zones, the iterator will fetch
+ the list of zones in a number of small batches.
+ <p/>
+ Since the list is fetched in batches, it does not represent a
+ consistent snapshot of the entire list of encryption zones.
+ <p/>
+ This method can only be called by HDFS superusers.]]>
       </doc>
     </method>
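
Putting the three encryption-zone methods above together; the key name and
paths are assumptions for the sketch, and it reuses the admin handle from
earlier:

    import org.apache.hadoop.hdfs.protocol.EncryptionZone;

    // Assumes a key named "key1" already exists at the cluster's KeyProvider
    // and that /secure is an empty, existing directory.
    admin.createEncryptionZone(new Path("/secure"), "key1");

    // Assumes /secure/file exists; per the javadoc above, a missing path
    // raises FileNotFoundException.
    EncryptionZone ez = admin.getEncryptionZoneForPath(new Path("/secure/file"));
    if (ez != null) {
      System.out.println("zone " + ez.getPath() + " uses key " + ez.getKeyName());
    }

    // Superuser-only; fetched lazily in batches, so not a consistent snapshot.
    RemoteIterator<EncryptionZone> zones = admin.listEncryptionZones();
    while (zones.hasNext()) {
      System.out.println(zones.next().getPath());
    }
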
-    <method name="getProxiesForAllNameNodesInNameservice" return="java.util.List"
+    <method name="getInotifyEventStream" return="org.apache.hadoop.hdfs.DFSInotifyEventInputStream"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nsId" type="java.lang.String"/>
       <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Get an RPC proxy for each NN in an HA nameservice. Used when a given RPC
- call should be made on every NN in an HA nameservice, not just the active.
+      <![CDATA[Exposes a stream of namesystem events. Only events occurring after the
+ stream is created are available.
+ See {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream}
+ for information on stream usage.
+ See {@link org.apache.hadoop.hdfs.inotify.Event}
+ for information on the available events.
+ <p/>
+ Inotify users may want to tune the following HDFS parameters to
+ ensure that enough extra HDFS edits are saved to support inotify clients
+ that fall behind the current state of the namespace while reading events.
+ The default parameter values should generally be reasonable. If edits are
+ deleted before their corresponding events can be read, clients will see a
+ {@link org.apache.hadoop.hdfs.inotify.MissingEventsException} on
+ {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream} method calls.
 
- @param conf configuration
- @param nsId the nameservice to get all of the proxies for.
- @return a list of RPC proxies for each NN in the nameservice.
- @throws IOException in the event of error.]]>
+ It should generally be sufficient to tune these parameters:
+ dfs.namenode.num.extra.edits.retained
+ dfs.namenode.max.extra.edits.segments.retained
+
+ Parameters that affect the number of created segments and the number of
+ edits that are considered necessary (i.e. they do not count towards the
+ dfs.namenode.num.extra.edits.retained quota):
+ dfs.namenode.checkpoint.period
+ dfs.namenode.checkpoint.txns
+ dfs.namenode.num.checkpoints.retained
+ dfs.ha.log-roll.period
+ <p/>
+ It is recommended that local journaling be configured
+ (dfs.namenode.edits.dir) for inotify (in addition to a shared journal)
+ so that edit transfers from the shared journal can be avoided.
+
+ @throws IOException If there was an error obtaining the stream.]]>
       </doc>
     </method>
-    <method name="getProxiesForAllNameNodesInNameservice" return="java.util.List"
+    <method name="getInotifyEventStream" return="org.apache.hadoop.hdfs.DFSInotifyEventInputStream"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nsId" type="java.lang.String"/>
-      <param name="xface" type="java.lang.Class"/>
+      <param name="lastReadTxid" type="long"/>
       <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Get an RPC proxy for each NN in an HA nameservice. Used when a given RPC
- call should be made on every NN in an HA nameservice, not just the active.
-
- @param conf configuration
- @param nsId the nameservice to get all of the proxies for.
- @param xface the protocol class.
- @return a list of RPC proxies for each NN in the nameservice.
- @throws IOException in the event of error.]]>
+      <![CDATA[A version of {@link HdfsAdmin#getInotifyEventStream()} meant for advanced
+ users who are aware of HDFS edits up to lastReadTxid (e.g. because they
+ have access to an FSImage inclusive of lastReadTxid) and only want to read
+ events after this point.]]>
       </doc>
     </method>
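
A sketch of tailing the inotify stream described above, reusing the admin
handle from earlier; the event handling shown is illustrative:

    import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
    import org.apache.hadoop.hdfs.inotify.Event;
    import org.apache.hadoop.hdfs.inotify.EventBatch;

    // take() blocks until events arrive; it can throw IOException,
    // InterruptedException, or MissingEventsException (handling elided).
    DFSInotifyEventInputStream stream = admin.getInotifyEventStream();
    while (true) {
      EventBatch batch = stream.take();
      for (Event event : batch.getEvents()) {
        if (event.getEventType() == Event.EventType.CREATE) {
          System.out.println("created: " + ((Event.CreateEvent) event).getPath());
        }
      }
    }
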
-    <method name="isAtLeastOneActive" return="boolean"
+    <method name="setStoragePolicy"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="namenodes" type="java.util.List"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="policyName" type="java.lang.String"/>
       <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Used to ensure that at least one of the given HA NNs is currently in the
- active state.
+      <![CDATA[Set the specified storage policy on the given source path.
 
- @param namenodes list of RPC proxies for each NN to check.
- @return true if at least one NN is active, false if all are in the standby state.
- @throws IOException in the event of error.]]>
+ @param src The source path referring to either a directory or a file.
+ @param policyName The name of the storage policy.]]>
       </doc>
     </method>
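
For example, continuing with the same admin handle:

    // ONE_SSD is one of the built-in policy names; any policy configured on
    // the cluster may be used instead.
    admin.setStoragePolicy(new Path("/data/warm"), "ONE_SSD");
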
+    <doc>
+    <![CDATA[The public API for performing administrative functions on HDFS. Those writing
+ applications against HDFS should prefer this interface to directly accessing
+ functionality in DistributedFileSystem or DFSClient.
+
+ Note that this is distinct from the similarly-named {@link DFSAdmin}, which
+ is a class that provides the functionality for the CLI `hdfs dfsadmin ...'
+ commands.]]>
+    </doc>
   </class>
-  <!-- end class org.apache.hadoop.hdfs.HAUtil -->
-  <!-- start class org.apache.hadoop.hdfs.KeyProviderCache -->
-  <class name="KeyProviderCache" extends="java.lang.Object"
+  <!-- end class org.apache.hadoop.hdfs.client.HdfsAdmin -->
+  <!-- start class org.apache.hadoop.hdfs.client.HdfsDataInputStream -->
+  <class name="HdfsDataInputStream" extends="org.apache.hadoop.fs.FSDataInputStream"
     abstract="false"
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
-    <constructor name="KeyProviderCache" type="long"
+    <constructor name="HdfsDataInputStream" type="org.apache.hadoop.hdfs.DFSInputStream"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
     </constructor>
-    <method name="get" return="org.apache.hadoop.crypto.key.KeyProvider"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-    </method>
-    <method name="setKeyProvider"
-      abstract="false" native="false" synchronized="false"
+    <constructor name="HdfsDataInputStream" type="org.apache.hadoop.crypto.CryptoInputStream"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="keyProvider" type="org.apache.hadoop.crypto.key.KeyProvider"/>
       <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <field name="LOG" type="org.apache.commons.logging.Log"
-      transient="false" volatile="false"
-      static="true" final="true" visibility="public"
-      deprecated="not deprecated">
-    </field>
-  </class>
-  <!-- end class org.apache.hadoop.hdfs.KeyProviderCache -->
-  <!-- start class org.apache.hadoop.hdfs.NameNodeProxies -->
-  <class name="NameNodeProxies" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="NameNodeProxies"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
     </constructor>
-    <method name="createProxy" return="org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo"
+    <method name="getWrappedStream" return="java.io.InputStream"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nameNodeUri" type="java.net.URI"/>
-      <param name="xface" type="java.lang.Class"/>
-      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Creates the namenode proxy with the passed protocol. This will handle
- creation of either HA- or non-HA-enabled proxy objects, depending upon
- if the provided URI is a configured logical URI.
+      <![CDATA[Get a reference to the wrapped input stream. We always want to return the
+ actual underlying InputStream, even when we're using a CryptoStream. e.g.
+ in the delegated methods below.
 
- @param conf the configuration containing the required IPC
-        properties, client failover configurations, etc.
- @param nameNodeUri the URI pointing either to a specific NameNode
-        or to a logical nameservice.
- @param xface the IPC interface which should be created
- @return an object containing both the proxy and the associated
-         delegation token service it corresponds to
- @throws IOException if there is an error creating the proxy]]>
+ @return the underlying input stream]]>
       </doc>
     </method>
-    <method name="createProxy" return="org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo"
+    <method name="getCurrentDatanode" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nameNodeUri" type="java.net.URI"/>
-      <param name="xface" type="java.lang.Class"/>
-      <param name="fallbackToSimpleAuth" type="java.util.concurrent.atomic.AtomicBoolean"/>
-      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Creates the namenode proxy with the passed protocol. This will handle
- creation of either HA- or non-HA-enabled proxy objects, depending upon
- if the provided URI is a configured logical URI.
-
- @param conf the configuration containing the required IPC
-        properties, client failover configurations, etc.
- @param nameNodeUri the URI pointing either to a specific NameNode
-        or to a logical nameservice.
- @param xface the IPC interface which should be created
- @param fallbackToSimpleAuth set to true or false during calls to indicate if
-   a secure client falls back to simple auth
- @return an object containing both the proxy and the associated
-         delegation token service it corresponds to
- @throws IOException if there is an error creating the proxy]]>
+      <![CDATA[Get the datanode from which the stream is currently reading.]]>
       </doc>
     </method>
-    <method name="createProxyWithLossyRetryHandler" return="org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo"
+    <method name="getCurrentBlock" return="org.apache.hadoop.hdfs.protocol.ExtendedBlock"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="config" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nameNodeUri" type="java.net.URI"/>
-      <param name="xface" type="java.lang.Class"/>
-      <param name="numResponseToDrop" type="int"/>
-      <param name="fallbackToSimpleAuth" type="java.util.concurrent.atomic.AtomicBoolean"/>
-      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Generate a dummy namenode proxy instance that utilizes our hacked
- {@link LossyRetryInvocationHandler}. Proxy instance generated using this
- method will proactively drop RPC responses. Currently this method only
- supports HA setup. Null will be returned if the given configuration is not
- for HA.
-
- @param config the configuration containing the required IPC
-        properties, client failover configurations, etc.
- @param nameNodeUri the URI pointing either to a specific NameNode
-        or to a logical nameservice.
- @param xface the IPC interface which should be created
- @param numResponseToDrop The number of responses to drop for each RPC call
- @param fallbackToSimpleAuth set to true or false during calls to indicate if
-   a secure client falls back to simple auth
- @return an object containing both the proxy and the associated
-         delegation token service it corresponds to. Will return null if the
-         given configuration does not support HA.
- @throws IOException if there is an error creating the proxy]]>
+      <![CDATA[Get the block containing the target position.]]>
       </doc>
     </method>
-    <method name="createNonHAProxy" return="org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo"
+    <method name="getAllBlocks" return="java.util.List"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nnAddr" type="java.net.InetSocketAddress"/>
-      <param name="xface" type="java.lang.Class"/>
-      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
-      <param name="withRetries" type="boolean"/>
       <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Creates an explicitly non-HA-enabled proxy object. Most of the time you
- don't want to use this, and should instead use {@link NameNodeProxies#createProxy}.
-
- @param conf the configuration object
- @param nnAddr address of the remote NN to connect to
- @param xface the IPC interface which should be created
- @param ugi the user who is making the calls on the proxy object
- @param withRetries certain interfaces have a non-standard retry policy
- @return an object containing both the proxy and the associated
-         delegation token service it corresponds to
- @throws IOException]]>
+      <![CDATA[Get the collection of blocks that has already been located.]]>
       </doc>
     </method>
-    <method name="createNonHAProxy" return="org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo"
+    <method name="getVisibleLength" return="long"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nnAddr" type="java.net.InetSocketAddress"/>
-      <param name="xface" type="java.lang.Class"/>
-      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
-      <param name="withRetries" type="boolean"/>
-      <param name="fallbackToSimpleAuth" type="java.util.concurrent.atomic.AtomicBoolean"/>
       <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Creates an explicitly non-HA-enabled proxy object. Most of the time you
- don't want to use this, and should instead use {@link NameNodeProxies#createProxy}.
+      <![CDATA[Get the visible length of the file. It will include the length of the last
+ block even if it is in the UnderConstruction state.
 
- @param conf the configuration object
- @param nnAddr address of the remote NN to connect to
- @param xface the IPC interface which should be created
- @param ugi the user who is making the calls on the proxy object
- @param withRetries certain interfaces have a non-standard retry policy
- @param fallbackToSimpleAuth - set to true or false during this method to
-   indicate if a secure client falls back to simple auth
- @return an object containing both the proxy and the associated
-         delegation token service it corresponds to
- @throws IOException]]>
+ @return The visible length of the file.]]>
       </doc>
     </method>
-    <method name="getFailoverProxyProviderClass" return="java.lang.Class"
+    <method name="getReadStatistics" return="org.apache.hadoop.hdfs.DFSInputStream.ReadStatistics"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nameNodeUri" type="java.net.URI"/>
-      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Gets the configured Failover proxy provider's class]]>
+      <![CDATA[Get statistics about the reads which this DFSInputStream has done.
+ Note that because HdfsDataInputStream is buffered, these stats may
+ be higher than you would expect just by adding up the number of
+ bytes read through HdfsDataInputStream.]]>
       </doc>
     </method>
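
A sketch of pulling the read statistics described above; the cast assumes
the FileSystem is a DistributedFileSystem, and the URI and path are
illustrative:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSInputStream.ReadStatistics;
    import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf);
    // On a DistributedFileSystem, open() hands back an HdfsDataInputStream.
    try (HdfsDataInputStream in =
             (HdfsDataInputStream) fs.open(new Path("/data/file"))) {
      byte[] buf = new byte[4096];
      while (in.read(buf) > 0) {
        // consume the stream
      }
      ReadStatistics stats = in.getReadStatistics();
      System.out.println("total bytes read:    " + stats.getTotalBytesRead());
      System.out.println("short-circuit bytes: "
          + stats.getTotalShortCircuitBytesRead());
    }
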
-    <method name="createFailoverProxyProvider" return="org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider"
+    <method name="clearReadStatistics"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nameNodeUri" type="java.net.URI"/>
-      <param name="xface" type="java.lang.Class"/>
-      <param name="checkPort" type="boolean"/>
-      <param name="fallbackToSimpleAuth" type="java.util.concurrent.atomic.AtomicBoolean"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Creates the Failover proxy provider instance]]>
-      </doc>
     </method>
     <doc>
-    <![CDATA[Create proxy objects to communicate with a remote NN. All remote access to an
- NN should be funneled through this class. Most of the time you'll want to use
- {@link NameNodeProxies#createProxy(Configuration, URI, Class)}, which will
- create either an HA- or non-HA-enabled client proxy as appropriate.]]>
+    <![CDATA[The Hdfs implementation of {@link FSDataInputStream}.]]>
     </doc>
   </class>
-  <!-- end class org.apache.hadoop.hdfs.NameNodeProxies -->
-  <!-- start class org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo -->
-  <class name="NameNodeProxies.ProxyAndInfo" extends="java.lang.Object"
+  <!-- end class org.apache.hadoop.hdfs.client.HdfsDataInputStream -->
+  <!-- start class org.apache.hadoop.hdfs.client.HdfsDataOutputStream -->
+  <class name="HdfsDataOutputStream" extends="org.apache.hadoop.fs.FSDataOutputStream"
     abstract="false"
-    static="true" final="false" visibility="public"
+    static="false" final="false" visibility="public"
     deprecated="not deprecated">
-    <constructor name="ProxyAndInfo" type="PROXYTYPE, org.apache.hadoop.io.Text, java.net.InetSocketAddress"
+    <constructor name="HdfsDataOutputStream" type="org.apache.hadoop.hdfs.DFSOutputStream, org.apache.hadoop.fs.FileSystem.Statistics, long"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
     </constructor>
-    <method name="getProxy" return="PROXYTYPE"
-      abstract="false" native="false" synchronized="false"
+    <constructor name="HdfsDataOutputStream" type="org.apache.hadoop.hdfs.DFSOutputStream, org.apache.hadoop.fs.FileSystem.Statistics"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-    </method>
-    <method name="getDelegationTokenService" return="org.apache.hadoop.io.Text"
-      abstract="false" native="false" synchronized="false"
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <constructor name="HdfsDataOutputStream" type="org.apache.hadoop.crypto.CryptoOutputStream, org.apache.hadoop.fs.FileSystem.Statistics, long"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-    </method>
-    <method name="getAddress" return="java.net.InetSocketAddress"
-      abstract="false" native="false" synchronized="false"
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <constructor name="HdfsDataOutputStream" type="org.apache.hadoop.crypto.CryptoOutputStream, org.apache.hadoop.fs.FileSystem.Statistics"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-    </method>
-    <doc>
-    <![CDATA[Wrapper for a client proxy as well as its associated service ID.
- This is simply used as a tuple-like return type for
- {@link NameNodeProxies#createProxy} and
- {@link NameNodeProxies#createNonHAProxy}.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo -->
-  <!-- start interface org.apache.hadoop.hdfs.RemotePeerFactory -->
-  <interface name="RemotePeerFactory"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="newConnectedPeer" return="org.apache.hadoop.hdfs.net.Peer"
-      abstract="true" native="false" synchronized="false"
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="getCurrentBlockReplication" return="int"
+      abstract="false" native="false" synchronized="true"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="addr" type="java.net.InetSocketAddress"/>
-      <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
-      <param name="datanodeId" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
       <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[@param addr          The address to connect to.
- @param blockToken    Token used during optional SASL negotiation
- @param datanodeId    ID of destination DataNode
- @return              A new Peer connected to the address.
+      <![CDATA[Get the actual number of replicas of the current block.
+
+ This can be different from the designated replication factor of the file
+ because the namenode does not maintain replication for the blocks which are
+ currently being written to. Depending on the configuration, the client may
+ continue to write to a block even if a few datanodes in the write pipeline
+ have failed, or the client may add new datanodes once a datanode has
+ failed.
 
- @throws IOException  If there was an error connecting or creating
-                      the remote socket, encrypted stream, etc.]]>
+ @return the number of valid replicas of the current block]]>
       </doc>
     </method>
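
A hedged sketch of observing the live pipeline width described above; it
assumes fs is a DistributedFileSystem handle as in the earlier read sketch:

    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

    // The pipeline width can shrink below the file's replication factor
    // while datanodes fail and are replaced mid-write.
    HdfsDataOutputStream out =
        (HdfsDataOutputStream) fs.create(new Path("/data/out"));
    out.write(new byte[]{1, 2, 3});
    System.out.println("live pipeline replicas: "
        + out.getCurrentBlockReplication());
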
-  </interface>
-  <!-- end interface org.apache.hadoop.hdfs.RemotePeerFactory -->
-  <!-- start class org.apache.hadoop.hdfs.UnknownCipherSuiteException -->
-  <class name="UnknownCipherSuiteException" extends="java.io.IOException"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="UnknownCipherSuiteException" type="java.lang.String"
+    <method name="hsync"
+      abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-    </constructor>
+      <param name="syncFlags" type="java.util.EnumSet"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Sync buffered data to DataNodes (flush to disk devices).
+
+ @param syncFlags
+          Indicate the detailed semantic and actions of the hsync.
+ @throws IOException
+ @see FSDataOutputStream#hsync()]]>
+      </doc>
+    </method>
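
Continuing the write sketch above, the flag-taking hsync looks like this:

    import java.util.EnumSet;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

    // Durably flush to the datanodes' disks and also make the NameNode
    // update the file's visible length.
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    out.close();
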
     <doc>
-    <![CDATA[Thrown when an unknown cipher suite is encountered.]]>
+    <![CDATA[The Hdfs implementation of {@link FSDataOutputStream}.]]>
     </doc>
   </class>
-  <!-- end class org.apache.hadoop.hdfs.UnknownCipherSuiteException -->
-  <!-- start class org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException -->
-  <class name="UnknownCryptoProtocolVersionException" extends="java.io.IOException"
+  <!-- end class org.apache.hadoop.hdfs.client.HdfsDataOutputStream -->
+  <!-- start class org.apache.hadoop.hdfs.client.HdfsUtils -->
+  <class name="HdfsUtils" extends="java.lang.Object"
     abstract="false"
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
-    <constructor name="UnknownCryptoProtocolVersionException"
+    <constructor name="HdfsUtils"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </constructor>
-    <constructor name="UnknownCryptoProtocolVersionException" type="java.lang.String"
+    <method name="isHealthy" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+      <doc>
+      <![CDATA[Is the HDFS healthy?
+ HDFS is considered healthy if it is up and not in safemode.
+
+ @param uri the HDFS URI.  Note that the URI path is ignored.
+ @return true if HDFS is healthy; false, otherwise.]]>
+      </doc>
+    </method>
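
For example, with an illustrative NameNode URI:

    import java.net.URI;
    import org.apache.hadoop.hdfs.client.HdfsUtils;

    // Cheap liveness probe before doing real work; any path in the URI is ignored.
    if (HdfsUtils.isHealthy(URI.create("hdfs://namenode:8020"))) {
      System.out.println("HDFS is up and out of safemode");
    }
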
+    <doc>
+    <![CDATA[The public utility API for HDFS.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.client.HdfsUtils -->
+</package>
+<package name="org.apache.hadoop.hdfs.inotify">
+  <!-- start class org.apache.hadoop.hdfs.inotify.Event -->
+  <class name="Event" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="Event" type="org.apache.hadoop.hdfs.inotify.Event.EventType"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </constructor>
+    <method name="getEventType" return="org.apache.hadoop.hdfs.inotify.Event.EventType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Events sent by the inotify system. Note that no events are necessarily sent
+ when a file is opened for read (although a MetadataUpdateEvent will be sent
+ if the atime is updated).]]>
+    </doc>
   </class>
-  <!-- end class org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException -->
-  <doc>
-  <![CDATA[<p>A distributed implementation of {@link
-org.apache.hadoop.fs.FileSystem}.  This is loosely modelled after
-Google's <a href="http://research.google.com/archive/gfs.html">GFS</a>.</p>
-
-<p>The most important difference is that unlike GFS, Hadoop DFS files
-have strictly one writer at any one time.  Bytes are always appended
-to the end of the writer's stream.  There is no notion of "record appends"
-or "mutations" that are then checked or reordered.  Writers simply emit
-a byte stream.  That byte stream is guaranteed to be stored in the
-order written.</p>]]>
-  </doc>
-</package>
-<package name="org.apache.hadoop.hdfs.client">
-  <!-- start class org.apache.hadoop.hdfs.client.BlockReportOptions -->
-  <class name="BlockReportOptions" extends="java.lang.Object"
+  <!-- end class org.apache.hadoop.hdfs.inotify.Event -->
+  <!-- start class org.apache.hadoop.hdfs.inotify.EventBatch -->
+  <class name="EventBatch" extends="java.lang.Object"
     abstract="false"
-    static="false" final="true" visibility="public"
+    static="false" final="false" visibility="public"
     deprecated="not deprecated">
-    <method name="isIncremental" return="boolean"
+    <constructor name="EventBatch" type="long, org.apache.hadoop.hdfs.inotify.Event[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getTxid" return="long"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </method>
-    <method name="toString" return="java.lang.String"
+    <method name="getEvents" return="org.apache.hadoop.hdfs.inotify.Event[]"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </method>
     <doc>
-    <![CDATA[Options that can be specified when manually triggering a block report.]]>
+    <![CDATA[A batch of events that all happened on the same transaction ID.]]>
     </doc>
   </class>
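
EventBatch txids are what the lastReadTxid variant of getInotifyEventStream
consumes; a sketch of resuming a reader, where the checkpoint helpers are
hypothetical and the admin handle is reused from earlier:

    // readCheckpointedTxid/saveCheckpointedTxid are hypothetical helpers that
    // persist the last txid this reader has fully processed.
    long lastSeenTxid = readCheckpointedTxid();
    DFSInotifyEventInputStream stream = admin.getInotifyEventStream(lastSeenTxid);
    EventBatch batch = stream.poll();   // non-blocking; null when nothing new
    if (batch != null) {
      saveCheckpointedTxid(batch.getTxid());
    }
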
-  <!-- end class org.apache.hadoop.hdfs.client.BlockReportOptions -->
-  <!-- start class org.apache.hadoop.hdfs.client.BlockReportOptions.Factory -->
-  <class name="BlockReportOptions.Factory" extends="java.lang.Object"
+  <!-- end class org.apache.hadoop.hdfs.inotify.EventBatch -->
+  <!-- start class org.apache.hadoop.hdfs.inotify.MissingEventsException -->
+  <class name="MissingEventsException" extends="java.lang.Exception"
     abstract="false"
-    static="true" final="false" visibility="public"
+    static="false" final="false" visibility="public"
     deprecated="not deprecated">
-    <constructor name="Factory"
+    <constructor name="MissingEventsException"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="MissingEventsException" type="long, long"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </constructor>
-    <method name="setIncremental" return="org.apache.hadoop.hdfs.client.BlockRe

<TRUNCATED>



[56/57] [abbrv] hadoop git commit: HDFS-10690. Optimize insertion/removal of replica in ShortCircuitCache. Contributed by Fenghua Hu.

Posted by in...@apache.org.
HDFS-10690. Optimize insertion/removal of replica in ShortCircuitCache. Contributed by Fenghua Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/607705c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/607705c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/607705c4

Branch: refs/heads/HDFS-10467
Commit: 607705c488fa5263d851cee578a2d319e6e52ecd
Parents: de7a0a9
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Mon Oct 3 10:53:21 2016 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon Oct 3 10:53:21 2016 -0700

----------------------------------------------------------------------
 .../hdfs/shortcircuit/ShortCircuitCache.java    | 88 ++++++++++++--------
 .../hadoop/fs/TestEnhancedByteBufferAccess.java | 17 ++--
 .../shortcircuit/TestShortCircuitCache.java     |  9 +-
 3 files changed, 69 insertions(+), 45 deletions(-)
----------------------------------------------------------------------
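
The change swaps the two TreeMaps (keyed by insertion time) for
commons-collections LinkedMaps, so finding the eldest evictable replica
becomes an O(1) firstKey() lookup instead of a ceilingEntry() tree search.
A standalone sketch of the access pattern the new code relies on, with
illustrative keys and string values standing in for the real replica
objects:

    import java.util.NoSuchElementException;
    import org.apache.commons.collections.map.LinkedMap;

    // A LinkedMap preserves insertion order, so the eldest entry is firstKey().
    LinkedMap evictable = new LinkedMap();
    evictable.put(1L, "replica-a");
    evictable.put(2L, "replica-b");

    while (true) {
      Object eldestKey;
      try {
        eldestKey = evictable.firstKey();   // O(1); throws once the map is empty
      } catch (NoSuchElementException e) {
        break;                              // nothing left to evict
      }
      String replica = (String) evictable.remove(eldestKey);
      System.out.println("evicting " + replica + " inserted at t=" + eldestKey);
    }
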


http://git-wip-us.apache.org/repos/asf/hadoop/blob/607705c4/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
index 62ade70..bd02a97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
@@ -26,13 +26,14 @@ import java.nio.MappedByteBuffer;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.TreeMap;
+import java.util.NoSuchElementException;
 import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantLock;
 
+import org.apache.commons.collections.map.LinkedMap;
 import org.apache.commons.lang.mutable.MutableBoolean;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.ExtendedBlockId;
@@ -107,16 +108,20 @@ public class ShortCircuitCache implements Closeable {
 
         int numDemoted = demoteOldEvictableMmaped(curMs);
         int numPurged = 0;
-        Long evictionTimeNs = (long) 0;
+        Long evictionTimeNs;
         while (true) {
-          Entry<Long, ShortCircuitReplica> entry =
-              evictable.ceilingEntry(evictionTimeNs);
-          if (entry == null) break;
-          evictionTimeNs = entry.getKey();
+          Object eldestKey;
+          try {
+            eldestKey = evictable.firstKey();
+          } catch (NoSuchElementException e) {
+            break;
+          }
+          evictionTimeNs = (Long)eldestKey;
           long evictionTimeMs =
               TimeUnit.MILLISECONDS.convert(evictionTimeNs, TimeUnit.NANOSECONDS);
           if (evictionTimeMs + maxNonMmappedEvictableLifespanMs >= curMs) break;
-          ShortCircuitReplica replica = entry.getValue();
+          ShortCircuitReplica replica = (ShortCircuitReplica)evictable.get(
+              eldestKey);
           if (LOG.isTraceEnabled()) {
             LOG.trace("CacheCleaner: purging " + replica + ": " +
                 StringUtils.getStackTrace(Thread.currentThread()));
@@ -263,11 +268,11 @@ public class ShortCircuitCache implements Closeable {
   private CacheCleaner cacheCleaner;
 
   /**
-   * Tree of evictable elements.
+   * LinkedMap of evictable elements.
    *
    * Maps (unique) insertion time in nanoseconds to the element.
    */
-  private final TreeMap<Long, ShortCircuitReplica> evictable = new TreeMap<>();
+  private final LinkedMap evictable = new LinkedMap();
 
   /**
    * Maximum total size of the cache, including both mmapped and
@@ -281,12 +286,11 @@ public class ShortCircuitCache implements Closeable {
   private long maxNonMmappedEvictableLifespanMs;
 
   /**
-   * Tree of mmaped evictable elements.
+   * LinkedMap of mmaped evictable elements.
    *
    * Maps (unique) insertion time in nanoseconds to the element.
    */
-  private final TreeMap<Long, ShortCircuitReplica> evictableMmapped =
-      new TreeMap<>();
+  private final LinkedMap evictableMmapped = new LinkedMap();
 
   /**
    * Maximum number of mmaped evictable elements.
@@ -482,13 +486,16 @@ public class ShortCircuitCache implements Closeable {
   private int demoteOldEvictableMmaped(long now) {
     int numDemoted = 0;
     boolean needMoreSpace = false;
-    Long evictionTimeNs = (long) 0;
+    Long evictionTimeNs;
 
     while (true) {
-      Entry<Long, ShortCircuitReplica> entry =
-          evictableMmapped.ceilingEntry(evictionTimeNs);
-      if (entry == null) break;
-      evictionTimeNs = entry.getKey();
+      Object eldestKey;
+      try {
+        eldestKey = evictableMmapped.firstKey();
+      } catch (NoSuchElementException e) {
+        break;
+      }
+      evictionTimeNs = (Long)eldestKey;
       long evictionTimeMs =
           TimeUnit.MILLISECONDS.convert(evictionTimeNs, TimeUnit.NANOSECONDS);
       if (evictionTimeMs + maxEvictableMmapedLifespanMs >= now) {
@@ -497,7 +504,8 @@ public class ShortCircuitCache implements Closeable {
         }
         needMoreSpace = true;
       }
-      ShortCircuitReplica replica = entry.getValue();
+      ShortCircuitReplica replica = (ShortCircuitReplica)evictableMmapped.get(
+          eldestKey);
       if (LOG.isTraceEnabled()) {
         String rationale = needMoreSpace ? "because we need more space" :
             "because it's too old";
@@ -527,10 +535,15 @@ public class ShortCircuitCache implements Closeable {
         return;
       }
       ShortCircuitReplica replica;
-      if (evictableSize == 0) {
-        replica = evictableMmapped.firstEntry().getValue();
-      } else {
-        replica = evictable.firstEntry().getValue();
+      try {
+        if (evictableSize == 0) {
+          replica = (ShortCircuitReplica)evictableMmapped.get(evictableMmapped
+              .firstKey());
+        } else {
+          replica = (ShortCircuitReplica)evictable.get(evictable.firstKey());
+        }
+      } catch (NoSuchElementException e) {
+        break;
       }
       if (LOG.isTraceEnabled()) {
         LOG.trace(this + ": trimEvictionMaps is purging " + replica +
@@ -573,10 +586,11 @@ public class ShortCircuitCache implements Closeable {
    * @param map       The map to remove it from.
    */
   private void removeEvictable(ShortCircuitReplica replica,
-      TreeMap<Long, ShortCircuitReplica> map) {
+      LinkedMap map) {
     Long evictableTimeNs = replica.getEvictableTimeNs();
     Preconditions.checkNotNull(evictableTimeNs);
-    ShortCircuitReplica removed = map.remove(evictableTimeNs);
+    ShortCircuitReplica removed = (ShortCircuitReplica)map.remove(
+        evictableTimeNs);
     Preconditions.checkState(removed == replica,
         "failed to make %s unevictable", replica);
     replica.setEvictableTimeNs(null);
@@ -593,7 +607,7 @@ public class ShortCircuitCache implements Closeable {
    * @param map              The map to insert it into.
    */
   private void insertEvictable(Long evictionTimeNs,
-      ShortCircuitReplica replica, TreeMap<Long, ShortCircuitReplica> map) {
+      ShortCircuitReplica replica, LinkedMap map) {
     while (map.containsKey(evictionTimeNs)) {
       evictionTimeNs++;
     }
@@ -861,14 +875,22 @@ public class ShortCircuitCache implements Closeable {
       IOUtilsClient.cleanup(LOG, cacheCleaner);
       // Purge all replicas.
       while (true) {
-        Entry<Long, ShortCircuitReplica> entry = evictable.firstEntry();
-        if (entry == null) break;
-        purge(entry.getValue());
+        Object eldestKey;
+        try {
+          eldestKey = evictable.firstKey();
+        } catch (NoSuchElementException e) {
+          break;
+        }
+        purge((ShortCircuitReplica)evictable.get(eldestKey));
       }
       while (true) {
-        Entry<Long, ShortCircuitReplica> entry = evictableMmapped.firstEntry();
-        if (entry == null) break;
-        purge(entry.getValue());
+        Object eldestKey;
+        try {
+          eldestKey = evictableMmapped.firstKey();
+        } catch (NoSuchElementException e) {
+          break;
+        }
+        purge((ShortCircuitReplica)evictableMmapped.get(eldestKey));
       }
     } finally {
       lock.unlock();
@@ -909,8 +931,8 @@ public class ShortCircuitCache implements Closeable {
     void visit(int numOutstandingMmaps,
         Map<ExtendedBlockId, ShortCircuitReplica> replicas,
         Map<ExtendedBlockId, InvalidToken> failedLoads,
-        Map<Long, ShortCircuitReplica> evictable,
-        Map<Long, ShortCircuitReplica> evictableMmapped);
+        LinkedMap evictable,
+        LinkedMap evictableMmapped);
   }
 
   @VisibleForTesting // ONLY for testing

http://git-wip-us.apache.org/repos/asf/hadoop/blob/607705c4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
index 0ccc07a..9cd46c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
@@ -34,6 +34,7 @@ import java.util.Map;
 import java.util.Random;
 import java.util.concurrent.TimeoutException;
 
+import org.apache.commons.collections.map.LinkedMap;
 import org.apache.commons.lang.SystemUtils;
 import org.apache.commons.lang.mutable.MutableBoolean;
 import org.apache.commons.logging.Log;
@@ -307,8 +308,8 @@ public class TestEnhancedByteBufferAccess {
     public void visit(int numOutstandingMmaps,
         Map<ExtendedBlockId, ShortCircuitReplica> replicas,
         Map<ExtendedBlockId, InvalidToken> failedLoads,
-        Map<Long, ShortCircuitReplica> evictable,
-        Map<Long, ShortCircuitReplica> evictableMmapped) {
+        LinkedMap evictable,
+        LinkedMap evictableMmapped) {
       if (expectedNumOutstandingMmaps >= 0) {
         Assert.assertEquals(expectedNumOutstandingMmaps, numOutstandingMmaps);
       }
@@ -373,8 +374,8 @@ public class TestEnhancedByteBufferAccess {
       public void visit(int numOutstandingMmaps,
           Map<ExtendedBlockId, ShortCircuitReplica> replicas,
           Map<ExtendedBlockId, InvalidToken> failedLoads, 
-          Map<Long, ShortCircuitReplica> evictable,
-          Map<Long, ShortCircuitReplica> evictableMmapped) {
+          LinkedMap evictable,
+          LinkedMap evictableMmapped) {
         ShortCircuitReplica replica = replicas.get(
             new ExtendedBlockId(firstBlock.getBlockId(), firstBlock.getBlockPoolId()));
         Assert.assertNotNull(replica);
@@ -410,8 +411,8 @@ public class TestEnhancedByteBufferAccess {
           public void visit(int numOutstandingMmaps,
               Map<ExtendedBlockId, ShortCircuitReplica> replicas,
               Map<ExtendedBlockId, InvalidToken> failedLoads,
-              Map<Long, ShortCircuitReplica> evictable,
-              Map<Long, ShortCircuitReplica> evictableMmapped) {
+              LinkedMap evictable,
+              LinkedMap evictableMmapped) {
             finished.setValue(evictableMmapped.isEmpty());
           }
         });
@@ -685,8 +686,8 @@ public class TestEnhancedByteBufferAccess {
           public void visit(int numOutstandingMmaps,
               Map<ExtendedBlockId, ShortCircuitReplica> replicas,
               Map<ExtendedBlockId, InvalidToken> failedLoads,
-              Map<Long, ShortCircuitReplica> evictable,
-              Map<Long, ShortCircuitReplica> evictableMmapped) {
+              LinkedMap evictable,
+              LinkedMap evictableMmapped) {
             Assert.assertEquals(expectedOutstandingMmaps, numOutstandingMmaps);
             ShortCircuitReplica replica =
                 replicas.get(ExtendedBlockId.fromExtendedBlock(block));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/607705c4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
index ac14438..8e217c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
@@ -34,6 +34,7 @@ import java.util.Iterator;
 import java.util.Map;
 import java.util.concurrent.TimeoutException;
 
+import org.apache.commons.collections.map.LinkedMap;
 import org.apache.commons.lang.mutable.MutableBoolean;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -502,8 +503,8 @@ public class TestShortCircuitCache {
       public void visit(int numOutstandingMmaps,
           Map<ExtendedBlockId, ShortCircuitReplica> replicas,
           Map<ExtendedBlockId, InvalidToken> failedLoads,
-          Map<Long, ShortCircuitReplica> evictable,
-          Map<Long, ShortCircuitReplica> evictableMmapped) {
+          LinkedMap evictable,
+          LinkedMap evictableMmapped) {
         ShortCircuitReplica replica = replicas.get(
             ExtendedBlockId.fromExtendedBlock(block));
         Assert.assertNotNull(replica);
@@ -518,8 +519,8 @@ public class TestShortCircuitCache {
       public void visit(int numOutstandingMmaps,
           Map<ExtendedBlockId, ShortCircuitReplica> replicas,
           Map<ExtendedBlockId, InvalidToken> failedLoads,
-          Map<Long, ShortCircuitReplica> evictable,
-          Map<Long, ShortCircuitReplica> evictableMmapped) {
+          LinkedMap evictable,
+          LinkedMap evictableMmapped) {
         ShortCircuitReplica replica = replicas.get(
             ExtendedBlockId.fromExtendedBlock(block));
         Assert.assertNotNull(replica);




[37/57] [abbrv] hadoop git commit: HDFS-10850. getEZForPath should NOT throw FNF. Contributed by Andrew Wang.

Posted by in...@apache.org.
HDFS-10850. getEZForPath should NOT throw FNF. Contributed by Andrew Wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0670149c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0670149c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0670149c

Branch: refs/heads/HDFS-10467
Commit: 0670149c88852cd7c4d6774bff06c7c588558739
Parents: 82c55dc
Author: Kihwal Lee <ki...@apache.org>
Authored: Fri Sep 30 08:44:18 2016 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Fri Sep 30 08:44:18 2016 -0500

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  4 +-
 .../apache/hadoop/hdfs/client/HdfsAdmin.java    |  7 +--
 .../server/namenode/FSDirEncryptionZoneOp.java  |  4 --
 .../apache/hadoop/hdfs/TestEncryptionZones.java | 59 +++++++-------------
 4 files changed, 24 insertions(+), 50 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0670149c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 4c2a967..93c0ff0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2599,8 +2599,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     try (TraceScope ignored = newPathTraceScope("getEZForPath", src)) {
       return namenode.getEZForPath(src);
     } catch (RemoteException re) {
-      throw re.unwrapRemoteException(FileNotFoundException.class,
-          AccessControlException.class, UnresolvedPathException.class);
+      throw re.unwrapRemoteException(AccessControlException.class,
+          UnresolvedPathException.class);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0670149c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
index 946b79d..bac2809 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
@@ -328,14 +328,13 @@ public class HdfsAdmin {
    * Get the path of the encryption zone for a given file or directory.
    *
    * @param path The path to get the ez for.
-   *
-   * @return The EncryptionZone of the ez, or null if path is not in an ez.
+   * @return An EncryptionZone, or null if path does not exist or is not in an
+   * ez.
    * @throws IOException            if there was a general IO exception
    * @throws AccessControlException if the caller does not have access to path
-   * @throws FileNotFoundException  if the path does not exist
    */
   public EncryptionZone getEncryptionZoneForPath(Path path)
-    throws IOException, AccessControlException, FileNotFoundException {
+      throws IOException, AccessControlException {
     return dfs.getEZForPath(path);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0670149c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
index 7501fc3..5457f08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_FILE_ENCRYPTION_INFO;
 
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.security.GeneralSecurityException;
 import java.security.PrivilegedExceptionAction;
@@ -184,9 +183,6 @@ final class FSDirEncryptionZoneOp {
     fsd.readLock();
     try {
       iip = fsd.resolvePath(pc, srcArg);
-      if (iip.getLastINode() == null) {
-        throw new FileNotFoundException("Path not found: " + iip.getPath());
-      }
       if (fsd.isPermissionEnabled()) {
         fsd.checkPathAccess(pc, iip, FsAction.READ);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0670149c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index 9168ca6..18a0800 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -97,6 +97,7 @@ import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 
+import static org.junit.Assert.assertNotNull;
 import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.anyLong;
 import static org.mockito.Matchers.anyObject;
@@ -604,13 +605,8 @@ public class TestEncryptionZones {
           assertExceptionContains("Permission denied:", e);
         }
 
-        try {
-          userAdmin.getEncryptionZoneForPath(nonexistent);
-          fail("FileNotFoundException should be thrown for a non-existent"
-              + " file path");
-        } catch (FileNotFoundException e) {
-          assertExceptionContains("Path not found: " + nonexistent, e);
-        }
+        assertNull("expected null for nonexistent path",
+            userAdmin.getEncryptionZoneForPath(nonexistent));
 
         // Check operation with non-ez paths
         assertNull("expected null for non-ez path",
@@ -638,20 +634,10 @@ public class TestEncryptionZones {
         assertEquals("expected ez path", allPath.toString(),
             userAdmin.getEncryptionZoneForPath(
                 new Path(snapshottedAllPath)).getPath().toString());
-        try {
-          userAdmin.getEncryptionZoneForPath(allPathFile);
-          fail("FileNotFoundException should be thrown for a non-existent"
-              + " file path");
-        } catch (FileNotFoundException e) {
-          assertExceptionContains("Path not found: " + allPathFile, e);
-        }
-        try {
-          userAdmin.getEncryptionZoneForPath(allPath);
-          fail("FileNotFoundException should be thrown for a non-existent"
-              + " file path");
-        } catch (FileNotFoundException e) {
-          assertExceptionContains("Path not found: " + allPath, e);
-        }
+        assertNull("expected null for deleted file path",
+            userAdmin.getEncryptionZoneForPath(allPathFile));
+        assertNull("expected null for deleted directory path",
+            userAdmin.getEncryptionZoneForPath(allPath));
         return null;
       }
     });
@@ -1498,25 +1484,18 @@ public class TestEncryptionZones {
   }
 
   @Test(timeout = 60000)
-  public void testGetEncryptionZoneOnANonExistentZoneFile() throws Exception {
-    final Path ez = new Path("/ez");
-    fs.mkdirs(ez);
-    dfsAdmin.createEncryptionZone(ez, TEST_KEY, NO_TRASH);
-    Path zoneFile = new Path(ez, "file");
-    try {
-      fs.getEZForPath(zoneFile);
-      fail("FileNotFoundException should be thrown for a non-existent"
-          + " file path");
-    } catch (FileNotFoundException e) {
-      assertExceptionContains("Path not found: " + zoneFile, e);
-    }
-    try {
-      dfsAdmin.getEncryptionZoneForPath(zoneFile);
-      fail("FileNotFoundException should be thrown for a non-existent"
-          + " file path");
-    } catch (FileNotFoundException e) {
-      assertExceptionContains("Path not found: " + zoneFile, e);
-    }
+  public void testGetEncryptionZoneOnANonExistentPaths() throws Exception {
+    final Path ezPath = new Path("/ez");
+    fs.mkdirs(ezPath);
+    dfsAdmin.createEncryptionZone(ezPath, TEST_KEY, NO_TRASH);
+    Path zoneFile = new Path(ezPath, "file");
+    EncryptionZone ez = fs.getEZForPath(zoneFile);
+    assertNotNull("Expected EZ for non-existent path in EZ", ez);
+    ez = dfsAdmin.getEncryptionZoneForPath(zoneFile);
+    assertNotNull("Expected EZ for non-existent path in EZ", ez);
+    ez = dfsAdmin.getEncryptionZoneForPath(
+        new Path("/does/not/exist"));
+    assertNull("Expected null for non-existent path not in EZ", ez);
   }
 
   @Test(timeout = 120000)


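The practical upshot of this change for client code: a null return from getEncryptionZoneForPath() now covers both "path missing" and "path exists but is not in an encryption zone", so callers check for null rather than catching FileNotFoundException. A hedged sketch under that contract (the path is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;

public class EzLookupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
    // No FileNotFoundException after this patch: null means the path does
    // not exist or is not inside an encryption zone.
    EncryptionZone ez = admin.getEncryptionZoneForPath(new Path("/user/alice/data"));
    if (ez == null) {
      System.out.println("not in an encryption zone (or path does not exist)");
    } else {
      System.out.println("encryption zone root: " + ez.getPath());
    }
  }
}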


[45/57] [abbrv] hadoop git commit: YARN-5693. Reduce loglevel to Debug in ContainerManagementProtocolProxy and AMRMClientImpl (yufeigu via rkanter)

Posted by in...@apache.org.
YARN-5693. Reduce loglevel to Debug in ContainerManagementProtocolProxy and AMRMClientImpl (yufeigu via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2549ee9d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2549ee9d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2549ee9d

Branch: refs/heads/HDFS-10467
Commit: 2549ee9d4c4ddd3ebccdebb8623df30c0a8f27d2
Parents: 434c5ea
Author: Robert Kanter <rk...@apache.org>
Authored: Fri Sep 30 13:30:55 2016 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Fri Sep 30 13:30:55 2016 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/client/api/impl/AMRMClientImpl.java       | 10 ++++++----
 .../client/api/impl/ContainerManagementProtocolProxy.java |  7 +++++--
 2 files changed, 11 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2549ee9d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
index 6f6bb85..3221661 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
@@ -455,10 +455,12 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
   protected void populateNMTokens(List<NMToken> nmTokens) {
     for (NMToken token : nmTokens) {
       String nodeId = token.getNodeId().toString();
-      if (getNMTokenCache().containsToken(nodeId)) {
-        LOG.info("Replacing token for : " + nodeId);
-      } else {
-        LOG.info("Received new token for : " + nodeId);
+      if (LOG.isDebugEnabled()) {
+        if (getNMTokenCache().containsToken(nodeId)) {
+          LOG.debug("Replacing token for : " + nodeId);
+        } else {
+          LOG.debug("Received new token for : " + nodeId);
+        }
       }
       getNMTokenCache().setToken(nodeId, token.getToken());
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2549ee9d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
index b2bce22..c619e8a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
@@ -78,8 +78,11 @@ public class ContainerManagementProtocolProxy {
           YarnConfiguration.NM_CLIENT_MAX_NM_PROXIES
               + " (" + maxConnectedNMs + ") can not be less than 0.");
     }
-    LOG.info(YarnConfiguration.NM_CLIENT_MAX_NM_PROXIES + " : "
-        + maxConnectedNMs);
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(YarnConfiguration.NM_CLIENT_MAX_NM_PROXIES + " : " +
+          maxConnectedNMs);
+    }
 
     if (maxConnectedNMs > 0) {
       cmProxy =


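The pattern applied in both files is the standard guarded-debug idiom for commons-logging: test isDebugEnabled() first so the string concatenation is skipped entirely when debug logging is off. A distilled sketch:

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class GuardedDebugSketch {
  private static final Log LOG = LogFactory.getLog(GuardedDebugSketch.class);

  void onToken(String nodeId, boolean alreadyCached) {
    // Concatenation happens only when debug is enabled, which is the point
    // of demoting these messages from info to debug.
    if (LOG.isDebugEnabled()) {
      LOG.debug((alreadyCached ? "Replacing" : "Received new")
          + " token for : " + nodeId);
    }
  }
}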


[39/57] [abbrv] hadoop git commit: HADOOP-13671. Fix ClassFormatException in trunk build. Contributed by Kihwal Lee.

Posted by in...@apache.org.
HADOOP-13671. Fix ClassFormatException in trunk build. Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6afcf36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6afcf36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6afcf36

Branch: refs/heads/HDFS-10467
Commit: d6afcf364c3d5a8bdb7dbd99626fab70fe6dc47a
Parents: a0730aa
Author: Kihwal Lee <ki...@apache.org>
Authored: Fri Sep 30 13:26:28 2016 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Fri Sep 30 13:26:28 2016 -0500

----------------------------------------------------------------------
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6afcf36/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index d9a01a0..26ac16c 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -114,7 +114,7 @@
     <maven-jar-plugin.version>2.5</maven-jar-plugin.version>
     <maven-war-plugin.version>2.4</maven-war-plugin.version>
     <maven-source-plugin.version>2.3</maven-source-plugin.version>
-    <maven-project-info-reports-plugin.version>2.7</maven-project-info-reports-plugin.version>
+    <maven-project-info-reports-plugin.version>2.9</maven-project-info-reports-plugin.version>
     <maven-pdf-plugin.version>1.2</maven-pdf-plugin.version>
     <maven-remote-resources-plugin.version>1.5</maven-remote-resources-plugin.version>
     <build-helper-maven-plugin.version>1.9</build-helper-maven-plugin.version>




[32/57] [abbrv] hadoop git commit: HADOOP-13164 Optimize S3AFileSystem::deleteUnnecessaryFakeDirectories. Contributed by Rajesh Balamohan.

Posted by in...@apache.org.
HADOOP-13164 Optimize S3AFileSystem::deleteUnnecessaryFakeDirectories. Contributed by Rajesh Balamohan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee0c722d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee0c722d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee0c722d

Branch: refs/heads/HDFS-10467
Commit: ee0c722dc8fb81ec902cd1da5958ce5adb0ab08f
Parents: a1b8251
Author: Steve Loughran <st...@apache.org>
Authored: Thu Sep 29 16:59:33 2016 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Thu Sep 29 17:01:00 2016 +0100

----------------------------------------------------------------------
 .../fs/contract/AbstractFSContractTestBase.java |  2 +-
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 65 +++++++--------
 .../hadoop/fs/s3a/S3AInstrumentation.java       | 10 +++
 .../org/apache/hadoop/fs/s3a/Statistic.java     |  4 +
 .../fs/s3a/ITestS3AFileOperationCost.java       | 85 ++++++++++++++++++++
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  | 13 ++-
 6 files changed, 144 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee0c722d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java
index baea968..b2e68f5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java
@@ -359,7 +359,7 @@ public abstract class AbstractFSContractTestBase extends Assert
     assertEquals(text + " wrong read result " + result, -1, result);
   }
 
-  boolean rename(Path src, Path dst) throws IOException {
+  protected boolean rename(Path src, Path dst) throws IOException {
     return getFileSystem().rename(src, dst);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee0c722d/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index dffef15..e3b2c63 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -668,7 +668,7 @@ public class S3AFileSystem extends FileSystem {
           copyFile(summary.getKey(), newDstKey, summary.getSize());
 
           if (keysToDelete.size() == MAX_ENTRIES_TO_DELETE) {
-            removeKeys(keysToDelete, true);
+            removeKeys(keysToDelete, true, false);
           }
         }
 
@@ -676,7 +676,7 @@ public class S3AFileSystem extends FileSystem {
           objects = continueListObjects(objects);
         } else {
           if (!keysToDelete.isEmpty()) {
-            removeKeys(keysToDelete, false);
+            removeKeys(keysToDelete, false, false);
           }
           break;
         }
@@ -924,17 +924,25 @@ public class S3AFileSystem extends FileSystem {
    * @param keysToDelete collection of keys to delete on the s3-backend
    * @param clearKeys clears the keysToDelete-list after processing the list
    *            when set to true
+   * @param deleteFakeDir indicates whether this call is deleting fake dirs
    */
   private void removeKeys(List<DeleteObjectsRequest.KeyVersion> keysToDelete,
-          boolean clearKeys) throws AmazonClientException {
+      boolean clearKeys, boolean deleteFakeDir) throws AmazonClientException {
+    if (keysToDelete.isEmpty()) {
+      // no keys
+      return;
+    }
     if (enableMultiObjectsDelete) {
       deleteObjects(new DeleteObjectsRequest(bucket).withKeys(keysToDelete));
-      instrumentation.fileDeleted(keysToDelete.size());
     } else {
       for (DeleteObjectsRequest.KeyVersion keyVersion : keysToDelete) {
         deleteObject(keyVersion.getKey());
       }
+    }
+    if (!deleteFakeDir) {
       instrumentation.fileDeleted(keysToDelete.size());
+    } else {
+      instrumentation.fakeDirsDeleted(keysToDelete.size());
     }
     if (clearKeys) {
       keysToDelete.clear();
@@ -1017,7 +1025,7 @@ public class S3AFileSystem extends FileSystem {
             LOG.debug("Got object to delete {}", summary.getKey());
 
             if (keys.size() == MAX_ENTRIES_TO_DELETE) {
-              removeKeys(keys, true);
+              removeKeys(keys, true, false);
             }
           }
 
@@ -1025,7 +1033,7 @@ public class S3AFileSystem extends FileSystem {
             objects = continueListObjects(objects);
           } else {
             if (!keys.isEmpty()) {
-              removeKeys(keys, false);
+              removeKeys(keys, false, false);
             }
             break;
           }
@@ -1504,37 +1512,30 @@ public class S3AFileSystem extends FileSystem {
   /**
    * Delete mock parent directories which are no longer needed.
    * This code swallows IO exceptions encountered
-   * @param f path
-   */
-  private void deleteUnnecessaryFakeDirectories(Path f) {
-    while (true) {
-      String key = "";
-      try {
-        key = pathToKey(f);
-        if (key.isEmpty()) {
-          break;
-        }
-
-        S3AFileStatus status = getFileStatus(f);
-
-        if (status.isDirectory() && status.isEmptyDirectory()) {
-          LOG.debug("Deleting fake directory {}/", key);
-          deleteObject(key + "/");
+   * @param path path
+   */
+  private void deleteUnnecessaryFakeDirectories(Path path) {
+    List<DeleteObjectsRequest.KeyVersion> keysToRemove = new ArrayList<>();
+    while (!path.isRoot()) {
+      String key = pathToKey(path);
+      key = (key.endsWith("/")) ? key : (key + "/");
+      keysToRemove.add(new DeleteObjectsRequest.KeyVersion(key));
+      path = path.getParent();
+    }
+    try {
+      removeKeys(keysToRemove, false, true);
+    } catch(AmazonClientException e) {
+      instrumentation.errorIgnored();
+      if (LOG.isDebugEnabled()) {
+        StringBuilder sb = new StringBuilder();
+        for(DeleteObjectsRequest.KeyVersion kv : keysToRemove) {
+          sb.append(kv.getKey()).append(",");
         }
-      } catch (IOException | AmazonClientException e) {
-        LOG.debug("While deleting key {} ", key, e);
-        instrumentation.errorIgnored();
+        LOG.debug("While deleting keys {} ", sb.toString(), e);
       }
-
-      if (f.isRoot()) {
-        break;
-      }
-
-      f = f.getParent();
     }
   }
 
-
   private void createFakeDirectory(final String objectName)
       throws AmazonClientException, AmazonServiceException,
       InterruptedIOException {

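The rewritten deleteUnnecessaryFakeDirectories() replaces the old one-getFileStatus-plus-one-delete-per-ancestor loop with a single walk that collects every ancestor's directory-marker key and hands them to removeKeys() as one batched request. A hedged sketch of just the key-collection walk; pathToKey() is private to S3AFileSystem, so toUri().getPath() stands in for it here:

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.fs.Path;

public class FakeDirKeysSketch {
  static List<String> ancestorDirKeys(Path path) {
    List<String> keys = new ArrayList<>();
    while (!path.isRoot()) {
      String key = path.toUri().getPath().substring(1); // drop the leading '/'
      keys.add(key.endsWith("/") ? key : key + "/");    // markers end in '/'
      path = path.getParent();
    }
    return keys;
  }

  public static void main(String[] args) {
    // Prints [src/1/2/, src/1/, src/] -- one delete request instead of three.
    System.out.println(ancestorDirKeys(new Path("/src/1/2")));
  }
}
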
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee0c722d/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
index b4c4063..26b5b51 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
@@ -75,6 +75,7 @@ public class S3AInstrumentation {
   private final MutableCounterLong numberOfFilesCopied;
   private final MutableCounterLong bytesOfFilesCopied;
   private final MutableCounterLong numberOfFilesDeleted;
+  private final MutableCounterLong numberOfFakeDirectoryDeletes;
   private final MutableCounterLong numberOfDirectoriesCreated;
   private final MutableCounterLong numberOfDirectoriesDeleted;
   private final Map<String, MutableCounterLong> streamMetrics =
@@ -135,6 +136,7 @@ public class S3AInstrumentation {
     numberOfFilesCopied = counter(FILES_COPIED);
     bytesOfFilesCopied = counter(FILES_COPIED_BYTES);
     numberOfFilesDeleted = counter(FILES_DELETED);
+    numberOfFakeDirectoryDeletes = counter(FAKE_DIRECTORIES_DELETED);
     numberOfDirectoriesCreated = counter(DIRECTORIES_CREATED);
     numberOfDirectoriesDeleted = counter(DIRECTORIES_DELETED);
     ignoredErrors = counter(IGNORED_ERRORS);
@@ -296,6 +298,14 @@ public class S3AInstrumentation {
   }
 
   /**
+   * Indicate that a fake directory delete request was made.
+   * @param count number of directory entries included in the delete request.
+   */
+  public void fakeDirsDeleted(int count) {
+    numberOfFakeDirectoryDeletes.incr(count);
+  }
+
+  /**
    * Indicate that S3A created a directory.
    */
   public void directoryCreated() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee0c722d/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
index cbc34d6..d84a355 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Statistic.java
@@ -42,6 +42,10 @@ public enum Statistic {
       "Total number of files created through the object store."),
   FILES_DELETED("files_deleted",
       "Total number of files deleted from the object store."),
+  FAKE_DIRECTORIES_CREATED("fake_directories_created",
+      "Total number of fake directory entries created in the object store."),
+  FAKE_DIRECTORIES_DELETED("fake_directories_deleted",
+      "Total number of fake directory deletes submitted to object store."),
   IGNORED_ERRORS("ignored_errors", "Errors caught and ignored"),
   INVOCATION_COPY_FROM_LOCAL_FILE(CommonStatisticNames.OP_COPY_FROM_LOCAL_FILE,
       "Calls of copyFromLocalFile()"),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee0c722d/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java
index 2a6ba0c..f19ea95 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java
@@ -188,4 +188,89 @@ public class ITestS3AFileOperationCost extends AbstractFSContractTestBase {
       tmpFile.delete();
     }
   }
+
+  private void reset(MetricDiff... diffs) {
+    for (MetricDiff diff : diffs) {
+      diff.reset();
+    }
+  }
+
+  @Test
+  public void testFakeDirectoryDeletion() throws Throwable {
+    describe("Verify whether create file works after renaming a file. "
+        + "In S3, rename deletes any fake directories as a part of "
+        + "clean up activity");
+    S3AFileSystem fs = getFileSystem();
+    Path srcBaseDir = path("src");
+    mkdirs(srcBaseDir);
+    MetricDiff deleteRequests =
+        new MetricDiff(fs, Statistic.OBJECT_DELETE_REQUESTS);
+    MetricDiff directoriesDeleted =
+        new MetricDiff(fs, Statistic.DIRECTORIES_DELETED);
+    MetricDiff fakeDirectoriesDeleted =
+        new MetricDiff(fs, Statistic.FAKE_DIRECTORIES_DELETED);
+    MetricDiff directoriesCreated =
+        new MetricDiff(fs, Statistic.DIRECTORIES_CREATED);
+
+    Path srcDir = new Path(srcBaseDir, "1/2/3/4/5/6");
+    Path srcFilePath = new Path(srcDir, "source.txt");
+    int srcDirDepth = directoriesInPath(srcDir);
+    // one dir created, one removed
+    mkdirs(srcDir);
+    String state = "after mkdir(srcDir)";
+    directoriesCreated.assertDiffEquals(state, 1);
+/*  TODO: uncomment once HADOOP-13222 is in
+    deleteRequests.assertDiffEquals(state, 1);
+    directoriesDeleted.assertDiffEquals(state, 0);
+    fakeDirectoriesDeleted.assertDiffEquals(state, srcDirDepth);
+*/
+    reset(deleteRequests, directoriesCreated, directoriesDeleted,
+        fakeDirectoriesDeleted);
+
+    // creating a file should trigger demise of the src dir
+    touch(fs, srcFilePath);
+    state = "after touch(fs, srcFilePath)";
+    deleteRequests.assertDiffEquals(state, 1);
+    directoriesCreated.assertDiffEquals(state, 0);
+    directoriesDeleted.assertDiffEquals(state, 0);
+    fakeDirectoriesDeleted.assertDiffEquals(state, srcDirDepth);
+
+    reset(deleteRequests, directoriesCreated, directoriesDeleted,
+        fakeDirectoriesDeleted);
+
+    Path destBaseDir = path("dest");
+    Path destDir = new Path(destBaseDir, "1/2/3/4/5/6");
+    Path destFilePath = new Path(destDir, "dest.txt");
+    mkdirs(destDir);
+    state = "after mkdir(destDir)";
+
+    int destDirDepth = directoriesInPath(destDir);
+    directoriesCreated.assertDiffEquals(state, 1);
+/*  TODO: uncomment once HADOOP-13222 is in
+    deleteRequests.assertDiffEquals(state,1);
+    directoriesDeleted.assertDiffEquals(state,0);
+    fakeDirectoriesDeleted.assertDiffEquals(state,destDirDepth);
+*/
+    reset(deleteRequests, directoriesCreated, directoriesDeleted,
+        fakeDirectoriesDeleted);
+
+    fs.rename(srcFilePath, destFilePath);
+    state = "after rename(srcFilePath, destFilePath)";
+    directoriesCreated.assertDiffEquals(state, 1);
+    // one for the renamed file, one for the parent
+    deleteRequests.assertDiffEquals(state, 2);
+    directoriesDeleted.assertDiffEquals(state, 0);
+    fakeDirectoriesDeleted.assertDiffEquals(state, destDirDepth);
+
+    reset(deleteRequests, directoriesCreated, directoriesDeleted,
+        fakeDirectoriesDeleted);
+
+    assertIsFile(destFilePath);
+    assertIsDirectory(srcDir);
+  }
+
+  private int directoriesInPath(Path path) {
+    return path.isRoot() ? 0 : 1 + directoriesInPath(path.getParent());
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee0c722d/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
index e45db48..95f6d4b 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
@@ -298,14 +298,23 @@ public class S3ATestUtils {
 
     /**
      * Assert that the value of {@link #diff()} matches that expected.
+     * @param message message to print; metric name is appended
      * @param expected expected value.
      */
-    public void assertDiffEquals(long expected) {
-      Assert.assertEquals("Count of " + this,
+    public void assertDiffEquals(String message, long expected) {
+      Assert.assertEquals(message + ": " + statistic.getSymbol(),
           expected, diff());
     }
 
     /**
+     * Assert that the value of {@link #diff()} matches that expected.
+     * @param expected expected value.
+     */
+    public void assertDiffEquals(long expected) {
+      assertDiffEquals("Count of " + this, expected);
+    }
+
+    /**
      * Assert that the value of {@link #diff()} matches that of another
      * instance.
      * @param that the other metric diff instance.




[36/57] [abbrv] hadoop git commit: HADOOP-13640. Fix findbugs warning in VersionInfoMojo.java. Contributed by Yuanbo Liu.

Posted by in...@apache.org.
HADOOP-13640. Fix findbugs warning in VersionInfoMojo.java. Contributed by Yuanbo Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82c55dcb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82c55dcb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82c55dcb

Branch: refs/heads/HDFS-10467
Commit: 82c55dcbc8e3d5314aae9f8f600c660759213e45
Parents: 10be459
Author: Akira Ajisaka <aa...@apache.org>
Authored: Fri Sep 30 18:17:30 2016 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Fri Sep 30 18:17:30 2016 +0900

----------------------------------------------------------------------
 .../apache/hadoop/maven/plugin/versioninfo/VersionInfoMojo.java    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82c55dcb/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/versioninfo/VersionInfoMojo.java
----------------------------------------------------------------------
diff --git a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/versioninfo/VersionInfoMojo.java b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/versioninfo/VersionInfoMojo.java
index cd2651b..f6faea0 100644
--- a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/versioninfo/VersionInfoMojo.java
+++ b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/versioninfo/VersionInfoMojo.java
@@ -160,7 +160,7 @@ public class VersionInfoMojo extends AbstractMojo {
       if (index > -1) {
         res[0] = path.substring(0, index - 1);
         int branchIndex = index + "branches".length() + 1;
-        index = path.indexOf("/", branchIndex);
+        index = path.indexOf('/', branchIndex);
         if (index > -1) {
           res[1] = path.substring(branchIndex, index);
         } else {


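For reference, the findbugs complaint is about calling String.indexOf with a single-character String where the char overload does the same job without the substring-search machinery. A tiny sketch of the fixed form:

public class IndexOfCharSketch {
  public static void main(String[] args) {
    String path = "/repos/asf/hadoop/branches/branch-2/src";
    int branchIndex = path.indexOf("branches") + "branches".length() + 1;
    int index = path.indexOf('/', branchIndex); // char overload, as in the fix
    System.out.println(path.substring(branchIndex, index)); // prints "branch-2"
  }
}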


[33/57] [abbrv] hadoop git commit: HADOOP-13537. Support external calls in the RPC call queue. Contributed by Daryn Sharp.

Posted by in...@apache.org.
HADOOP-13537. Support external calls in the RPC call queue. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/236ac773
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/236ac773
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/236ac773

Branch: refs/heads/HDFS-10467
Commit: 236ac773c964fa21d6d5f1496023cd61818dd3b1
Parents: ee0c722
Author: Kihwal Lee <ki...@apache.org>
Authored: Thu Sep 29 13:27:30 2016 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Thu Sep 29 13:27:30 2016 -0500

----------------------------------------------------------------------
 .../dev-support/findbugsExcludeFile.xml         |  5 ++
 .../org/apache/hadoop/ipc/ExternalCall.java     | 91 ++++++++++++++++++++
 .../main/java/org/apache/hadoop/ipc/Server.java | 63 +++++++++-----
 .../java/org/apache/hadoop/ipc/TestRPC.java     | 85 ++++++++++++++++++
 4 files changed, 221 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/236ac773/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
index ec7c396..bded4b99 100644
--- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
@@ -405,4 +405,9 @@
     <Bug pattern="NP_NULL_PARAM_DEREF"/>
   </Match>
 
+  <Match>
+    <Class name="org.apache.hadoop.ipc.ExternalCall"/>
+    <Filed name="done"/>
+    <Bug pattern="JLM_JSR166_UTILCONCURRENT_MONITORENTER"/>
+  </Match>
 </FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/236ac773/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java
new file mode 100644
index 0000000..9b4cbcf
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.hadoop.ipc.Server.Call;
+import org.apache.hadoop.security.UserGroupInformation;
+
+public abstract class ExternalCall<T> extends Call {
+  private final PrivilegedExceptionAction<T> action;
+  private final AtomicBoolean done = new AtomicBoolean();
+  private T result;
+  private Throwable error;
+
+  public ExternalCall(PrivilegedExceptionAction<T> action) {
+    this.action = action;
+  }
+
+  public abstract UserGroupInformation getRemoteUser();
+
+  public final T get() throws IOException, InterruptedException {
+    waitForCompletion();
+    if (error != null) {
+      if (error instanceof IOException) {
+        throw (IOException)error;
+      } else {
+        throw new IOException(error);
+      }
+    }
+    return result;
+  }
+
+  // wait for response to be triggered to support postponed calls
+  private void waitForCompletion() throws InterruptedException {
+    synchronized(done) {
+      while (!done.get()) {
+        try {
+          done.wait();
+        } catch (InterruptedException ie) {
+          if (Thread.interrupted()) {
+            throw ie;
+          }
+        }
+      }
+    }
+  }
+
+  boolean isDone() {
+    return done.get();
+  }
+
+  // invoked by ipc handler
+  @Override
+  public final Void run() throws IOException {
+    try {
+      result = action.run();
+      sendResponse();
+    } catch (Throwable t) {
+      abortResponse(t);
+    }
+    return null;
+  }
+
+  @Override
+  final void doResponse(Throwable t) {
+    synchronized(done) {
+      error = t;
+      done.set(true);
+      done.notify();
+    }
+  }
+}
\ No newline at end of file

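ExternalCall's blocking get() rests on a hand-rolled completion latch: the AtomicBoolean done serves both as the flag and as the monitor object, which is exactly what the findbugs exclusion added above (JLM_JSR166_UTILCONCURRENT_MONITORENTER) suppresses. The pattern, distilled:

import java.util.concurrent.atomic.AtomicBoolean;

public class CompletionLatchSketch {
  private final AtomicBoolean done = new AtomicBoolean();

  // Called by the IPC handler thread once the response is ready.
  void markDone() {
    synchronized (done) {
      done.set(true);
      done.notify();
    }
  }

  // Called by the submitting thread; returns only after markDone().
  void awaitDone() throws InterruptedException {
    synchronized (done) {
      while (!done.get()) {
        done.wait();
      }
    }
  }
}
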
http://git-wip-us.apache.org/repos/asf/hadoop/blob/236ac773/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index f509d71..1c7e76a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -384,6 +384,11 @@ public abstract class Server {
     return (call != null) ? call.getRemoteUser() : null;
   }
 
+  public static String getProtocol() {
+    Call call = CurCall.get();
+    return (call != null) ? call.getProtocol() : null;
+  }
+
   /** Return true if the invocation was through an RPC.
    */
   public static boolean isRpcInvocation() {
@@ -672,6 +677,11 @@ public abstract class Server {
     private int priorityLevel;
     // the priority level assigned by scheduler, 0 by default
 
+    Call() {
+      this(RpcConstants.INVALID_CALL_ID, RpcConstants.INVALID_RETRY_COUNT,
+        RPC.RpcKind.RPC_BUILTIN, RpcConstants.DUMMY_CLIENT_ID);
+    }
+
     Call(Call call) {
       this(call.callId, call.retryCount, call.rpcKind, call.clientId,
           call.traceScope, call.callerContext);
@@ -703,6 +713,7 @@ public abstract class Server {
       return "Call#" + callId + " Retry#" + retryCount;
     }
 
+    @Override
     public Void run() throws Exception {
       return null;
     }
@@ -718,6 +729,10 @@ public abstract class Server {
       return (addr != null) ? addr.getHostAddress() : null;
     }
 
+    public String getProtocol() {
+      return null;
+    }
+
     /**
      * Allow a IPC response to be postponed instead of sent immediately
      * after the handler returns from the proxy method.  The intended use
@@ -800,6 +815,11 @@ public abstract class Server {
     }
 
     @Override
+    public String getProtocol() {
+      return "rpc";
+    }
+
+    @Override
     public UserGroupInformation getRemoteUser() {
       return connection.user;
     }
@@ -2333,33 +2353,15 @@ public abstract class Server {
       // Save the priority level assignment by the scheduler
       call.setPriorityLevel(callQueue.getPriorityLevel(call));
 
-      if (callQueue.isClientBackoffEnabled()) {
-        // if RPC queue is full, we will ask the RPC client to back off by
-        // throwing RetriableException. Whether RPC client will honor
-        // RetriableException and retry depends on client ipc retry policy.
-        // For example, FailoverOnNetworkExceptionRetry handles
-        // RetriableException.
-        queueRequestOrAskClientToBackOff(call);
-      } else {
-        callQueue.put(call);              // queue the call; maybe blocked here
+      try {
+        queueCall(call);
+      } catch (IOException ioe) {
+        throw new WrappedRpcServerException(
+            RpcErrorCodeProto.ERROR_RPC_SERVER, ioe);
       }
       incRpcCount();  // Increment the rpc count
     }
 
-    private void queueRequestOrAskClientToBackOff(Call call)
-        throws WrappedRpcServerException, InterruptedException {
-      // If rpc scheduler indicates back off based on performance
-      // degradation such as response time or rpc queue is full,
-      // we will ask the client to back off.
-      if (callQueue.shouldBackOff(call) || !callQueue.offer(call)) {
-        rpcMetrics.incrClientBackoff();
-        RetriableException retriableException =
-            new RetriableException("Server is too busy.");
-        throw new WrappedRpcServerExceptionSuppressed(
-            RpcErrorCodeProto.ERROR_RPC_SERVER, retriableException);
-      }
-    }
-
     /**
      * Establish RPC connection setup by negotiating SASL if required, then
      * reading and authorizing the connection header
@@ -2487,6 +2489,21 @@ public abstract class Server {
     }
   }
 
+  public void queueCall(Call call) throws IOException, InterruptedException {
+    if (!callQueue.isClientBackoffEnabled()) {
+      callQueue.put(call); // queue the call; maybe blocked here
+    } else if (callQueue.shouldBackOff(call) || !callQueue.offer(call)) {
+      // If rpc scheduler indicates back off based on performance degradation
+      // such as response time or rpc queue is full, we will ask the client
+      // to back off by throwing RetriableException. Whether the client will
+      // honor RetriableException and retry depends on the client and its policy.
+      // For example, IPC clients using FailoverOnNetworkExceptionRetry handle
+      // RetriableException.
+      rpcMetrics.incrClientBackoff();
+      throw new RetriableException("Server is too busy.");
+    }
+  }
+
   /** Handles queued calls . */
   private class Handler extends Thread {
     public Handler(int instanceNumber) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/236ac773/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
index ff6b25e..92d9183 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
@@ -64,6 +64,7 @@ import java.net.ConnectException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.SocketTimeoutException;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -926,6 +927,90 @@ public class TestRPC extends TestRpcBase {
     }
   }
 
+  @Test(timeout=30000)
+  public void testExternalCall() throws Exception {
+    final UserGroupInformation ugi = UserGroupInformation
+        .createUserForTesting("user123", new String[0]);
+    final IOException expectedIOE = new IOException("boom");
+
+    // use 1 handler so the callq can be plugged
+    final Server server = setupTestServer(conf, 1);
+    try {
+      final AtomicBoolean result = new AtomicBoolean();
+
+      ExternalCall<String> remoteUserCall = newExtCall(ugi,
+          new PrivilegedExceptionAction<String>() {
+            @Override
+            public String run() throws Exception {
+              return UserGroupInformation.getCurrentUser().getUserName();
+            }
+          });
+
+      ExternalCall<String> exceptionCall = newExtCall(ugi,
+          new PrivilegedExceptionAction<String>() {
+            @Override
+            public String run() throws Exception {
+              throw expectedIOE;
+            }
+          });
+
+      final CountDownLatch latch = new CountDownLatch(1);
+      final CyclicBarrier barrier = new CyclicBarrier(2);
+
+      ExternalCall<Void> barrierCall = newExtCall(ugi,
+          new PrivilegedExceptionAction<Void>() {
+            @Override
+            public Void run() throws Exception {
+              // notify we are in a handler and then wait to keep the callq
+              // plugged up
+              latch.countDown();
+              barrier.await();
+              return null;
+            }
+          });
+
+      server.queueCall(barrierCall);
+      server.queueCall(exceptionCall);
+      server.queueCall(remoteUserCall);
+
+      // wait for barrier call to enter the handler, check that the other 2
+      // calls are actually queued
+      latch.await();
+      assertEquals(2, server.getCallQueueLen());
+
+      // unplug the callq
+      barrier.await();
+      barrierCall.get();
+
+      // verify correct ugi is used
+      String answer = remoteUserCall.get();
+      assertEquals(ugi.getUserName(), answer);
+
+      try {
+        exceptionCall.get();
+        fail("didn't throw");
+      } catch (IOException ioe) {
+        assertEquals(expectedIOE.getMessage(), ioe.getMessage());
+      }
+    } finally {
+      server.stop();
+    }
+  }
+
+  private <T> ExternalCall<T> newExtCall(UserGroupInformation ugi,
+      PrivilegedExceptionAction<T> callable) {
+    return new ExternalCall<T>(callable) {
+      @Override
+      public String getProtocol() {
+        return "test";
+      }
+      @Override
+      public UserGroupInformation getRemoteUser() {
+        return ugi;
+      }
+    };
+  }
+
   @Test
   public void testRpcMetrics() throws Exception {
     Server server;




[18/57] [abbrv] hadoop git commit: YARN-5599. Publish AM launch command to ATS (Rohith Sharma K S via Varun Saxena)

Posted by in...@apache.org.
YARN-5599. Publish AM launch command to ATS (Rohith Sharma K S via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b0fd01d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b0fd01d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b0fd01d

Branch: refs/heads/HDFS-10467
Commit: 9b0fd01d2ee002ac4c30c2862e18ca8f1626fa8d
Parents: bc2656f
Author: Varun Saxena <va...@apache.org>
Authored: Wed Sep 28 16:10:10 2016 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Sep 28 16:10:10 2016 +0530

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     | 12 -----------
 .../src/main/resources/yarn-default.xml         | 13 ------------
 .../metrics/ApplicationMetricsConstants.java    |  3 +++
 .../resourcemanager/amlauncher/AMLauncher.java  | 21 --------------------
 .../metrics/TimelineServiceV1Publisher.java     |  6 ++++++
 .../metrics/TimelineServiceV2Publisher.java     |  5 +++++
 .../metrics/TestSystemMetricsPublisher.java     | 18 +++++++++++++++++
 .../TestSystemMetricsPublisherForV2.java        |  9 +++++++++
 8 files changed, 41 insertions(+), 46 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b0fd01d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index f3009a1..1421873 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -534,18 +534,6 @@ public class YarnConfiguration extends Configuration {
   public static final int
       DEFAULT_RM_SYSTEM_METRICS_PUBLISHER_DISPATCHER_POOL_SIZE = 10;
 
-  /**
-   * The {@code AMLauncher.createAMContainerLaunchContext()} method will log the
-   * command being executed to the RM log if this property is true. Commands
-   * may contain sensitive information, such as application or service
-   * passwords, making logging the commands a security risk. In cases where
-   * the cluster may be running applications with such commands, this property
-   * should be set to false. Commands are only logged at the debug level.
-   */
-  public static final String RM_AMLAUNCHER_LOG_COMMAND =
-      RM_PREFIX + "amlauncher.log.command";
-  public static final boolean DEFAULT_RM_AMLAUNCHER_LOG_COMMAND = false;
-
   //RM delegation token related keys
   public static final String RM_DELEGATION_KEY_UPDATE_INTERVAL_KEY =
     RM_PREFIX + "delegation.key.update-interval";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b0fd01d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index d6c33a2..965b575 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -299,19 +299,6 @@
   </property>
 
   <property>
-    <description>
-      The resource manager will log all commands being executed to the RM log
-      if this property is true. Commands may contain sensitive information,
-      such as application or service passwords, making logging the commands a
-      security risk. In cases where the cluster may be running applications with
-      such commands this property should be set to false. Commands are only
-      logged at the debug level.
-    </description>
-    <name>yarn.resourcemanager.amlauncher.log.command</name>
-    <value>false</value>
-  </property>
-
-  <property>
     <description>The class to use as the resource scheduler.</description>
     <name>yarn.resourcemanager.scheduler.class</name>
     <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b0fd01d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
index d06b7cb..1774208 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
@@ -98,4 +98,7 @@ public class ApplicationMetricsConstants {
 
   public static final String AM_NODE_LABEL_EXPRESSION =
       "YARN_AM_NODE_LABEL_EXPRESSION";
+
+  public static final String AM_CONTAINER_LAUNCH_COMMAND =
+      "YARN_AM_CONTAINER_LAUNCH_COMMAND";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b0fd01d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
index 181463a..d33360b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
@@ -66,7 +66,6 @@ import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Joiner;
 
 /**
  * The launch of the AM itself.
@@ -82,7 +81,6 @@ public class AMLauncher implements Runnable {
   private final AMLauncherEventType eventType;
   private final RMContext rmContext;
   private final Container masterContainer;
-  private final boolean logCommandLine;
 
   @SuppressWarnings("rawtypes")
   private final EventHandler handler;
@@ -95,9 +93,6 @@ public class AMLauncher implements Runnable {
     this.rmContext = rmContext;
     this.handler = rmContext.getDispatcher().getEventHandler();
     this.masterContainer = application.getMasterContainer();
-    this.logCommandLine =
-        conf.getBoolean(YarnConfiguration.RM_AMLAUNCHER_LOG_COMMAND,
-          YarnConfiguration.DEFAULT_RM_AMLAUNCHER_LOG_COMMAND);
   }
 
   private void connect() throws IOException {
@@ -194,22 +189,6 @@ public class AMLauncher implements Runnable {
     ContainerLaunchContext container =
         applicationMasterContext.getAMContainerSpec();
 
-    if (LOG.isDebugEnabled()) {
-      StringBuilder message = new StringBuilder("Command to launch container ");
-
-      message.append(containerID).append(" : ");
-
-      if (logCommandLine) {
-        message.append(Joiner.on(",").join(container.getCommands()));
-      } else {
-        message.append("<REDACTED> -- Set ");
-        message.append(YarnConfiguration.RM_AMLAUNCHER_LOG_COMMAND);
-        message.append(" to true to reenable command logging");
-      }
-
-      LOG.debug(message.toString());
-    }
-
     // Populate the current queue name in the environment variable.
     setupQueueNameEnv(container, applicationMasterContext);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b0fd01d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
index 7f4ed33..ffbc747 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
@@ -103,6 +104,11 @@ public class TimelineServiceV1Publisher extends AbstractSystemMetricsPublisher {
       }
     }
 
+    ContainerLaunchContext amContainerSpec =
+        app.getApplicationSubmissionContext().getAMContainerSpec();
+    entityInfo.put(ApplicationMetricsConstants.AM_CONTAINER_LAUNCH_COMMAND,
+        amContainerSpec.getCommands());
+
     entity.setOtherInfo(entityInfo);
     TimelineEvent tEvent = new TimelineEvent();
     tEvent.setEventType(ApplicationMetricsConstants.CREATED_EVENT_TYPE);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b0fd01d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
index a248199..1485b91 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV2Publisher.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationAttemptEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.ContainerEntity;
@@ -128,6 +129,10 @@ public class TimelineServiceV2Publisher extends AbstractSystemMetricsPublisher {
             app.getCallerContext().getSignature());
       }
     }
+    ContainerLaunchContext amContainerSpec =
+        app.getApplicationSubmissionContext().getAMContainerSpec();
+    entityInfo.put(ApplicationMetricsConstants.AM_CONTAINER_LAUNCH_COMMAND,
+        amContainerSpec.getCommands());
 
     entity.setInfo(entityInfo);
     TimelineEvent tEvent = new TimelineEvent();

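With both publisher changes in place, the AM launch command travels with the application entity: the v1 publisher stores it in the entity's otherInfo map (entity.setOtherInfo above) and the v2 publisher in its info map (entity.setInfo above), in each case as the List<String> returned by ContainerLaunchContext.getCommands(). A hedged consumer-side sketch for the v2 shape (how the entity is fetched from the timeline reader is assumed and elided):

import java.util.List;

import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;

final class LaunchCommandReader {
  // Sketch: read the new key back out of a fetched v2 application entity.
  static void printLaunchCommand(TimelineEntity entity) {
    // Same value as ApplicationMetricsConstants.AM_CONTAINER_LAUNCH_COMMAND.
    Object value = entity.getInfo().get("YARN_AM_CONTAINER_LAUNCH_COMMAND");
    if (value instanceof List) {
      for (Object cmd : (List<?>) value) {
        System.out.println("AM launch command: " + cmd);
      }
    }
  }
}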
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b0fd01d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
index 1e279d5..386932d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
@@ -22,6 +22,7 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 import java.util.Collection;
+import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.Map;
@@ -33,6 +34,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -118,6 +120,11 @@ public class TestSystemMetricsPublisher {
         when(asc.getUnmanagedAM()).thenReturn(false);
         when(asc.getPriority()).thenReturn(Priority.newInstance(1));
         when(asc.getNodeLabelExpression()).thenReturn("high-cpu");
+        ContainerLaunchContext containerLaunchContext =
+            mock(ContainerLaunchContext.class);
+        when(containerLaunchContext.getCommands())
+            .thenReturn(Collections.singletonList("java -Xmx1024m"));
+        when(asc.getAMContainerSpec()).thenReturn(containerLaunchContext);
         when(app.getApplicationSubmissionContext()).thenReturn(asc);
         metricsPublisher.appUpdated(app, 4L);
       } else {
@@ -197,6 +204,12 @@ public class TestSystemMetricsPublisher {
         Assert.assertEquals("uers1,user2",
             entity.getOtherInfo().get(
                 ApplicationMetricsConstants.APP_VIEW_ACLS_ENTITY_INFO));
+
+        Assert.assertEquals(
+            app.getApplicationSubmissionContext().getAMContainerSpec()
+                .getCommands(),
+            entity.getOtherInfo()
+                .get(ApplicationMetricsConstants.AM_CONTAINER_LAUNCH_COMMAND));
       } else {
         Assert.assertEquals(
             "",
@@ -492,6 +505,11 @@ public class TestSystemMetricsPublisher {
     when(asc.getUnmanagedAM()).thenReturn(false);
     when(asc.getPriority()).thenReturn(Priority.newInstance(10));
     when(asc.getNodeLabelExpression()).thenReturn("high-cpu");
+    ContainerLaunchContext containerLaunchContext =
+        mock(ContainerLaunchContext.class);
+    when(containerLaunchContext.getCommands())
+        .thenReturn(Collections.singletonList("java -Xmx1024m"));
+    when(asc.getAMContainerSpec()).thenReturn(containerLaunchContext);
     when(app.getApplicationSubmissionContext()).thenReturn(asc);
     when(app.getAppNodeLabelExpression()).thenCallRealMethod();
     ResourceRequest amReq = mock(ResourceRequest.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b0fd01d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
index 3ea4714..13aa806 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -354,6 +355,14 @@ public class TestSystemMetricsPublisherForV2 {
         mock(ApplicationSubmissionContext.class);
     when(appSubmissionContext.getPriority())
         .thenReturn(Priority.newInstance(0));
+
+    ContainerLaunchContext containerLaunchContext =
+        mock(ContainerLaunchContext.class);
+    when(containerLaunchContext.getCommands())
+        .thenReturn(Collections.singletonList("java -Xmx1024m"));
+    when(appSubmissionContext.getAMContainerSpec())
+        .thenReturn(containerLaunchContext);
+
     when(app.getApplicationSubmissionContext())
         .thenReturn(appSubmissionContext);
     return app;




[54/57] [abbrv] hadoop git commit: HDFS-10940. Reduce performance penalty of block caching when not used. Contributed by Daryn Sharp.

Posted by in...@apache.org.
HDFS-10940. Reduce performance penalty of block caching when not used. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74420843
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74420843
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74420843

Branch: refs/heads/HDFS-10467
Commit: 744208431f7365bf054e6b773b86af2583001e1d
Parents: 9002062
Author: Kihwal Lee <ki...@apache.org>
Authored: Mon Oct 3 11:27:23 2016 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Mon Oct 3 11:27:23 2016 -0500

----------------------------------------------------------------------
 .../hdfs/server/blockmanagement/BlockManager.java  | 10 +++++++++-
 .../hadoop/hdfs/server/namenode/CacheManager.java  | 12 +++++++++++-
 .../server/namenode/FSDirStatAndListingOp.java     | 17 +----------------
 .../hdfs/server/namenode/TestCacheDirectives.java  | 10 ++++++++++
 4 files changed, 31 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74420843/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 886984a..9b426bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -103,6 +103,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.util.FoldedTreeSet;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.server.namenode.CacheManager;
 
 import static org.apache.hadoop.hdfs.util.StripedBlockUtil.getInternalBlockLength;
 
@@ -1145,9 +1146,16 @@ public class BlockManager implements BlockStatsMXBean {
             fileSizeExcludeBlocksUnderConstruction, mode);
         isComplete = true;
       }
-      return new LocatedBlocks(fileSizeExcludeBlocksUnderConstruction,
+      LocatedBlocks locations = new LocatedBlocks(
+          fileSizeExcludeBlocksUnderConstruction,
           isFileUnderConstruction, locatedblocks, lastlb, isComplete, feInfo,
           ecPolicy);
+      // Set caching information for the located blocks.
+      CacheManager cm = namesystem.getCacheManager();
+      if (cm != null) {
+        cm.setCachedLocations(locations);
+      }
+      return locations;
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74420843/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
index 366dd9b..24bf751 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
@@ -902,7 +903,16 @@ public final class CacheManager {
     return new BatchedListEntries<CachePoolEntry>(results, false);
   }
 
-  public void setCachedLocations(LocatedBlock block) {
+  public void setCachedLocations(LocatedBlocks locations) {
+    // don't attempt lookups if there are no cached blocks
+    if (cachedBlocks.size() > 0) {
+      for (LocatedBlock lb : locations.getLocatedBlocks()) {
+        setCachedLocations(lb);
+      }
+    }
+  }
+
+  private void setCachedLocations(LocatedBlock block) {
     CachedBlock cachedBlock =
         new CachedBlock(block.getBlock().getBlockId(),
             (short)0, false);

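The saving comes from hoisting one emptiness test above the per-block loop: with no cache directives in use, cachedBlocks is empty and setCachedLocations(LocatedBlocks) returns before touching any block. The same idiom in a minimal, hedged form (the generic names below are illustrative stand-ins, not HDFS types):

import java.util.List;
import java.util.Map;
import java.util.function.Function;

final class BulkAnnotator {
  // Sketch of the short-circuit this commit applies: hoist a single
  // emptiness check above the loop so the common "caching unused" case
  // performs no per-element lookups.
  static <T, K, V> void annotateAll(List<T> items, Map<K, V> cache,
      Function<T, K> keyOf) {
    if (cache.isEmpty()) {
      return; // nothing cached anywhere: zero lookups
    }
    for (T item : items) {
      V meta = cache.get(keyOf.apply(item));
      if (meta != null) {
        // ... attach meta to item, as setCachedLocations(LocatedBlock) does
      }
    }
  }
}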
http://git-wip-us.apache.org/repos/asf/hadoop/blob/74420843/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 4876fb1..f56d83d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -156,7 +155,6 @@ class FSDirStatAndListingOp {
         "Negative offset is not supported. File: " + src);
     Preconditions.checkArgument(length >= 0,
         "Negative length is not supported. File: " + src);
-    CacheManager cm = fsd.getFSNamesystem().getCacheManager();
     BlockManager bm = fsd.getBlockManager();
     fsd.readLock();
     try {
@@ -190,11 +188,6 @@ class FSDirStatAndListingOp {
           inode.getBlocks(iip.getPathSnapshotId()), fileSize, isUc, offset,
           length, needBlockToken, iip.isSnapshot(), feInfo, ecPolicy);
 
-      // Set caching information for the located blocks.
-      for (LocatedBlock lb : blocks.getLocatedBlocks()) {
-        cm.setCachedLocations(lb);
-      }
-
       final long now = now();
       boolean updateAccessTime = fsd.isAccessTimeSupported()
           && !iip.isSnapshot()
@@ -461,7 +454,7 @@ class FSDirStatAndListingOp {
         node.asDirectory().getChildrenNum(snapshot) : 0;
 
     INodeAttributes nodeAttrs = fsd.getAttributes(iip);
-    HdfsFileStatus status = createFileStatus(
+    return createFileStatus(
         size,
         node.isDirectory(),
         replication,
@@ -479,14 +472,6 @@ class FSDirStatAndListingOp {
         storagePolicy,
         ecPolicy,
         loc);
-    // Set caching information for the located blocks.
-    if (loc != null) {
-      CacheManager cacheManager = fsd.getFSNamesystem().getCacheManager();
-      for (LocatedBlock lb: loc.getLocatedBlocks()) {
-        cacheManager.setCachedLocations(lb);
-      }
-    }
-    return status;
   }
 
   private static HdfsFileStatus createFileStatus(long length, boolean isdir,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74420843/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
index efb5cf8..658e4ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
@@ -72,6 +72,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolStats;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
@@ -89,6 +90,7 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 import com.google.common.base.Supplier;
 
@@ -1531,4 +1533,12 @@ public class TestCacheDirectives {
       DataNodeTestUtils.setCacheReportsDisabledForTests(cluster, false);
     }
   }
+
+  @Test
+  public void testNoLookupsWhenNotUsed() throws Exception {
+    CacheManager cm = cluster.getNamesystem().getCacheManager();
+    LocatedBlocks locations = Mockito.mock(LocatedBlocks.class);
+    cm.setCachedLocations(locations);
+    Mockito.verifyZeroInteractions(locations);
+  }
 }




[24/57] [abbrv] hadoop git commit: HDFS-10914. Move remnants of oah.hdfs.client to hadoop-hdfs-client.

Posted by in...@apache.org.
HDFS-10914. Move remnants of oah.hdfs.client to hadoop-hdfs-client.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/92e5e915
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/92e5e915
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/92e5e915

Branch: refs/heads/HDFS-10467
Commit: 92e5e9159850c01635091ea6ded0d8ee76691a9a
Parents: 5f34402
Author: Andrew Wang <wa...@apache.org>
Authored: Wed Sep 28 16:00:51 2016 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Wed Sep 28 16:01:03 2016 -0700

----------------------------------------------------------------------
 .../hdfs/client/CreateEncryptionZoneFlag.java   |  70 +++
 .../apache/hadoop/hdfs/client/HdfsAdmin.java    | 523 ++++++++++++++++++
 .../apache/hadoop/hdfs/client/HdfsUtils.java    |  86 +++
 .../apache/hadoop/hdfs/client/package-info.java |  27 +
 .../hdfs/client/CreateEncryptionZoneFlag.java   |  71 ---
 .../apache/hadoop/hdfs/client/HdfsAdmin.java    | 524 -------------------
 .../apache/hadoop/hdfs/client/HdfsUtils.java    |  86 ---
 .../apache/hadoop/hdfs/client/package-info.java |  27 -
 8 files changed, 706 insertions(+), 708 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/92e5e915/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java
new file mode 100644
index 0000000..ad4cea6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * CreateEncryptionZoneFlag is used in
+ * {@link HdfsAdmin#createEncryptionZone(Path, String, EnumSet)} to indicate
+ * what should be done when creating an encryption zone.
+ *
+ * Use CreateEncryptionZoneFlag as follows:
+ * <ol>
+ *   <li>PROVISION_TRASH - provision a trash directory for the encryption zone
+ *   to support soft delete.</li>
+ * </ol>
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public enum CreateEncryptionZoneFlag {
+
+  /**
+   * Do not provision a trash directory in the encryption zone.
+   *
+   * @see CreateEncryptionZoneFlag#NO_TRASH
+   */
+  NO_TRASH((short) 0x00),
+  /**
+   * Provision a trash directory .Trash/ in the
+   * encryption zone.
+   *
+   * @see CreateEncryptionZoneFlag#PROVISION_TRASH
+   */
+  PROVISION_TRASH((short) 0x01);
+
+  private final short mode;
+
+  CreateEncryptionZoneFlag(short mode) {
+    this.mode = mode;
+  }
+
+  public static CreateEncryptionZoneFlag valueOf(short mode) {
+    for (CreateEncryptionZoneFlag flag : CreateEncryptionZoneFlag.values()) {
+      if (flag.getMode() == mode) {
+        return flag;
+      }
+    }
+    return null;
+  }
+
+  public short getMode() {
+    return mode;
+  }
+}

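A quick hedged sketch of the enum's round-trip contract (getMode() and valueOf(short) are the methods defined above; the wrapper class is illustrative):

import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;

public class FlagRoundTrip {
  public static void main(String[] args) {
    // Encode a flag to its short mode and decode it back via valueOf(short).
    short mode = CreateEncryptionZoneFlag.PROVISION_TRASH.getMode(); // 0x01
    CreateEncryptionZoneFlag flag = CreateEncryptionZoneFlag.valueOf(mode);
    System.out.println(flag); // PROVISION_TRASH; valueOf returns null for
                              // modes that match no flag
  }
}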
http://git-wip-us.apache.org/repos/asf/hadoop/blob/92e5e915/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
new file mode 100644
index 0000000..946b79d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
@@ -0,0 +1,523 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URI;
+import java.util.Collection;
+import java.util.EnumSet;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockStoragePolicySpi;
+import org.apache.hadoop.fs.CacheFlag;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
+import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+
+/**
+ * The public API for performing administrative functions on HDFS. Those writing
+ * applications against HDFS should prefer this interface to directly accessing
+ * functionality in DistributedFileSystem or DFSClient.
+ *
+ * Note that this is distinct from the similarly-named DFSAdmin, which
+ * is a class that provides the functionality for the CLI `hdfs dfsadmin ...'
+ * commands.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class HdfsAdmin {
+
+  private DistributedFileSystem dfs;
+  private static final FsPermission TRASH_PERMISSION = new FsPermission(
+      FsAction.ALL, FsAction.ALL, FsAction.ALL, true);
+
+  /**
+   * Create a new HdfsAdmin client.
+   *
+   * @param uri the unique URI of the HDFS file system to administer
+   * @param conf configuration
+   * @throws IOException in the event the file system could not be created
+   */
+  public HdfsAdmin(URI uri, Configuration conf) throws IOException {
+    FileSystem fs = FileSystem.get(uri, conf);
+    if (!(fs instanceof DistributedFileSystem)) {
+      throw new IllegalArgumentException("'" + uri + "' is not an HDFS URI.");
+    } else {
+      dfs = (DistributedFileSystem)fs;
+    }
+  }
+
+  /**
+   * Set the namespace quota (count of files, directories, and sym links) for a
+   * directory.
+   *
+   * @param src the path to set the quota for
+   * @param quota the value to set for the quota
+   * @throws IOException in the event of error
+   */
+  public void setQuota(Path src, long quota) throws IOException {
+    dfs.setQuota(src, quota, HdfsConstants.QUOTA_DONT_SET);
+  }
+
+  /**
+   * Clear the namespace quota (count of files, directories and sym links) for a
+   * directory.
+   *
+   * @param src the path to clear the quota of
+   * @throws IOException in the event of error
+   */
+  public void clearQuota(Path src) throws IOException {
+    dfs.setQuota(src, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
+  }
+
+  /**
+   * Set the storage space quota (size of files) for a directory. Note that
+   * directories and sym links do not occupy storage space.
+   *
+   * @param src the path to set the space quota of
+   * @param spaceQuota the value to set for the space quota
+   * @throws IOException in the event of error
+   */
+  public void setSpaceQuota(Path src, long spaceQuota) throws IOException {
+    dfs.setQuota(src, HdfsConstants.QUOTA_DONT_SET, spaceQuota);
+  }
+
+  /**
+   * Clear the storage space quota (size of files) for a directory. Note that
+   * directories and sym links do not occupy storage space.
+   *
+   * @param src the path to clear the space quota of
+   * @throws IOException in the event of error
+   */
+  public void clearSpaceQuota(Path src) throws IOException {
+    dfs.setQuota(src, HdfsConstants.QUOTA_DONT_SET, HdfsConstants.QUOTA_RESET);
+  }
+
+  /**
+   * Set the quota by storage type for a directory. Note that
+   * directories and sym links do not occupy storage type quota.
+   *
+   * @param src the target directory to set the quota by storage type
+   * @param type the storage type to set for quota by storage type
+   * @param quota the value to set for quota by storage type
+   * @throws IOException in the event of error
+   */
+  public void setQuotaByStorageType(Path src, StorageType type, long quota)
+      throws IOException {
+    dfs.setQuotaByStorageType(src, type, quota);
+  }
+
+  /**
+   * Clear the space quota by storage type for a directory. Note that
+   * directories and sym links do not occupy storage type quota.
+   *
+   * @param src the target directory to clear the quota by storage type
+   * @param type the storage type to clear for quota by storage type
+   * @throws IOException in the event of error
+   */
+  public void clearQuotaByStorageType(Path src, StorageType type) throws IOException {
+    dfs.setQuotaByStorageType(src, type, HdfsConstants.QUOTA_RESET);
+  }
+
+  /**
+   * Allow snapshot on a directory.
+   * @param path The path of the directory where snapshots will be taken.
+   */
+  public void allowSnapshot(Path path) throws IOException {
+    dfs.allowSnapshot(path);
+  }
+
+  /**
+   * Disallow snapshot on a directory.
+   * @param path The path of the snapshottable directory.
+   */
+  public void disallowSnapshot(Path path) throws IOException {
+    dfs.disallowSnapshot(path);
+  }
+
+  /**
+   * Add a new CacheDirectiveInfo.
+   *
+   * @param info Information about a directive to add.
+   * @param flags {@link CacheFlag}s to use for this operation.
+   * @return the ID of the directive that was created.
+   * @throws IOException if the directive could not be added
+   */
+  public long addCacheDirective(CacheDirectiveInfo info,
+      EnumSet<CacheFlag> flags) throws IOException {
+    return dfs.addCacheDirective(info, flags);
+  }
+
+  /**
+   * Modify a CacheDirective.
+   *
+   * @param info Information about the directive to modify. You must set the ID
+   *          to indicate which CacheDirective you want to modify.
+   * @param flags {@link CacheFlag}s to use for this operation.
+   * @throws IOException if the directive could not be modified
+   */
+  public void modifyCacheDirective(CacheDirectiveInfo info,
+      EnumSet<CacheFlag> flags) throws IOException {
+    dfs.modifyCacheDirective(info, flags);
+  }
+
+  /**
+   * Remove a CacheDirective.
+   *
+   * @param id identifier of the CacheDirectiveInfo to remove
+   * @throws IOException if the directive could not be removed
+   */
+  public void removeCacheDirective(long id)
+      throws IOException {
+    dfs.removeCacheDirective(id);
+  }
+
+  /**
+   * List cache directives. Incrementally fetches results from the server.
+   *
+   * @param filter Filter parameters to use when listing the directives, null to
+   *               list all directives visible to us.
+   * @return A RemoteIterator which returns CacheDirectiveInfo objects.
+   */
+  public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
+      CacheDirectiveInfo filter) throws IOException {
+    return dfs.listCacheDirectives(filter);
+  }
+
+  /**
+   * Add a cache pool.
+   *
+   * @param info
+   *          The request to add a cache pool.
+   * @throws IOException
+   *          If the request could not be completed.
+   */
+  public void addCachePool(CachePoolInfo info) throws IOException {
+    dfs.addCachePool(info);
+  }
+
+  /**
+   * Modify an existing cache pool.
+   *
+   * @param info
+   *          The request to modify a cache pool.
+   * @throws IOException
+   *          If the request could not be completed.
+   */
+  public void modifyCachePool(CachePoolInfo info) throws IOException {
+    dfs.modifyCachePool(info);
+  }
+
+  /**
+   * Remove a cache pool.
+   *
+   * @param poolName
+   *          Name of the cache pool to remove.
+   * @throws IOException
+   *          if the cache pool did not exist, or could not be removed.
+   */
+  public void removeCachePool(String poolName) throws IOException {
+    dfs.removeCachePool(poolName);
+  }
+
+  /**
+   * List all cache pools.
+   *
+   * @return A remote iterator from which you can get CachePoolEntry objects.
+   *          Requests will be made as needed.
+   * @throws IOException
+   *          If there was an error listing cache pools.
+   */
+  public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
+    return dfs.listCachePools();
+  }
+
+  /**
+   * Create an encryption zone rooted at an empty existing directory, using the
+   * specified encryption key. An encryption zone has an associated encryption
+   * key used when reading and writing files within the zone.
+   *
+   * @param path    The path of the root of the encryption zone. Must refer to
+   *                an empty, existing directory.
+   * @param keyName Name of key available at the KeyProvider.
+   * @throws IOException            if there was a general IO exception
+   * @throws AccessControlException if the caller does not have access to path
+   * @throws FileNotFoundException  if the path does not exist
+   */
+  @Deprecated
+  public void createEncryptionZone(Path path, String keyName)
+      throws IOException, AccessControlException, FileNotFoundException {
+    dfs.createEncryptionZone(path, keyName);
+  }
+
+  /**
+   * Create an encryption zone rooted at an empty existing directory, using the
+   * specified encryption key. An encryption zone has an associated encryption
+   * key used when reading and writing files within the zone.
+   *
+   * Additional options, such as provisioning the trash directory, can be
+   * specified using {@link CreateEncryptionZoneFlag} flags.
+   *
+   * @param path    The path of the root of the encryption zone. Must refer to
+   *                an empty, existing directory.
+   * @param keyName Name of key available at the KeyProvider.
+   * @param flags   flags for this operation.
+   * @throws IOException            if there was a general IO exception
+   * @throws AccessControlException if the caller does not have access to path
+   * @throws FileNotFoundException  if the path does not exist
+   * @throws HadoopIllegalArgumentException if the flags are invalid
+   */
+  public void createEncryptionZone(Path path, String keyName,
+      EnumSet<CreateEncryptionZoneFlag> flags)
+      throws IOException, AccessControlException, FileNotFoundException,
+      HadoopIllegalArgumentException {
+    dfs.createEncryptionZone(path, keyName);
+    if (flags.contains(CreateEncryptionZoneFlag.PROVISION_TRASH)) {
+      if (flags.contains(CreateEncryptionZoneFlag.NO_TRASH)) {
+        throw new HadoopIllegalArgumentException(
+            "can not have both PROVISION_TRASH and NO_TRASH flags");
+      }
+      this.provisionEZTrash(path);
+    }
+  }
+
+  /**
+   * Provision a trash directory for a given encryption zone.
+   *
+   * @param path the root of the encryption zone
+   * @throws IOException if the trash directory cannot be created.
+   */
+  public void provisionEncryptionZoneTrash(Path path) throws IOException {
+    this.provisionEZTrash(path);
+  }
+
+  /**
+   * Get the path of the encryption zone for a given file or directory.
+   *
+   * @param path The path to get the ez for.
+   *
+   * @return The EncryptionZone of the ez, or null if path is not in an ez.
+   * @throws IOException            if there was a general IO exception
+   * @throws AccessControlException if the caller does not have access to path
+   * @throws FileNotFoundException  if the path does not exist
+   */
+  public EncryptionZone getEncryptionZoneForPath(Path path)
+    throws IOException, AccessControlException, FileNotFoundException {
+    return dfs.getEZForPath(path);
+  }
+
+  /**
+   * Returns a RemoteIterator which can be used to list the encryption zones
+   * in HDFS. For large numbers of encryption zones, the iterator will fetch
+   * the list of zones in a number of small batches.
+   * <p/>
+   * Since the list is fetched in batches, it does not represent a
+   * consistent snapshot of the entire list of encryption zones.
+   * <p/>
+   * This method can only be called by HDFS superusers.
+   */
+  public RemoteIterator<EncryptionZone> listEncryptionZones()
+      throws IOException {
+    return dfs.listEncryptionZones();
+  }
+
+  /**
+   * Exposes a stream of namesystem events. Only events occurring after the
+   * stream is created are available.
+   * See {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream}
+   * for information on stream usage.
+   * See {@link org.apache.hadoop.hdfs.inotify.Event}
+   * for information on the available events.
+   * <p/>
+   * Inotify users may want to tune the following HDFS parameters to
+   * ensure that enough extra HDFS edits are saved to support inotify clients
+   * that fall behind the current state of the namespace while reading events.
+   * The default parameter values should generally be reasonable. If edits are
+   * deleted before their corresponding events can be read, clients will see a
+   * {@link org.apache.hadoop.hdfs.inotify.MissingEventsException} on
+   * {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream} method calls.
+   *
+   * It should generally be sufficient to tune these parameters:
+   * dfs.namenode.num.extra.edits.retained
+   * dfs.namenode.max.extra.edits.segments.retained
+   *
+   * Parameters that affect the number of created segments and the number of
+   * edits that are considered necessary (i.e. do not count towards the
+   * dfs.namenode.num.extra.edits.retained quota):
+   * dfs.namenode.checkpoint.period
+   * dfs.namenode.checkpoint.txns
+   * dfs.namenode.num.checkpoints.retained
+   * dfs.ha.log-roll.period
+   * <p/>
+   * It is recommended that local journaling be configured
+   * (dfs.namenode.edits.dir) for inotify (in addition to a shared journal)
+   * so that edit transfers from the shared journal can be avoided.
+   *
+   * @throws IOException If there was an error obtaining the stream.
+   */
+  public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
+    return dfs.getInotifyEventStream();
+  }
+
+  /**
+   * A version of {@link HdfsAdmin#getInotifyEventStream()} meant for advanced
+   * users who are aware of HDFS edits up to lastReadTxid (e.g. because they
+   * have access to an FSImage inclusive of lastReadTxid) and only want to read
+   * events after this point.
+   */
+  public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid)
+      throws IOException {
+    return dfs.getInotifyEventStream(lastReadTxid);
+  }
+
+  /**
+   * Set the source path to the specified storage policy.
+   *
+   * @param src The source path referring to either a directory or a file.
+   * @param policyName The name of the storage policy.
+   */
+  public void setStoragePolicy(final Path src, final String policyName)
+      throws IOException {
+    dfs.setStoragePolicy(src, policyName);
+  }
+
+  /**
+   * Unset the storage policy set for a given file or directory.
+   *
+   * @param src file or directory path.
+   * @throws IOException
+   */
+  public void unsetStoragePolicy(final Path src) throws IOException {
+    dfs.unsetStoragePolicy(src);
+  }
+
+  /**
+   * Query the effective storage policy ID for the given file or directory.
+   *
+   * @param src file or directory path.
+   * @return storage policy for the given file or directory.
+   * @throws IOException
+   */
+  public BlockStoragePolicySpi getStoragePolicy(final Path src)
+      throws IOException {
+    return dfs.getStoragePolicy(src);
+  }
+
+  /**
+   * Retrieve all the storage policies supported by HDFS file system.
+   *
+   * @return all storage policies supported by HDFS file system.
+   * @throws IOException
+   */
+  public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
+      throws IOException {
+    return dfs.getAllStoragePolicies();
+  }
+
+  /**
+   * Set the source path to the specified erasure coding policy.
+   *
+   * @param path The source path referring to a directory.
+   * @param ecPolicy The erasure coding policy for the directory.
+   *                 If null, the default will be used.
+   * @throws IOException
+   */
+  public void setErasureCodingPolicy(final Path path,
+      final ErasureCodingPolicy ecPolicy) throws IOException {
+    dfs.setErasureCodingPolicy(path, ecPolicy);
+  }
+
+  /**
+   * Get the erasure coding policy information for the specified path.
+   *
+   * @param path file or directory path.
+   * @return the policy information if the file or directory at the path is
+   *          erasure coded, null otherwise.
+   * @throws IOException
+   */
+  public ErasureCodingPolicy getErasureCodingPolicy(final Path path)
+      throws IOException {
+    return dfs.getErasureCodingPolicy(path);
+  }
+
+  /**
+   * Get the Erasure coding policies supported.
+   *
+   * @throws IOException
+   */
+  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
+    return dfs.getClient().getErasureCodingPolicies();
+  }
+
+  private void provisionEZTrash(Path path) throws IOException {
+    // make sure the path is an EZ
+    EncryptionZone ez = dfs.getEZForPath(path);
+    if (ez == null) {
+      throw new IllegalArgumentException(path + " is not an encryption zone.");
+    }
+
+    String ezPath = ez.getPath();
+    if (!path.toString().equals(ezPath)) {
+      throw new IllegalArgumentException(path + " is not the root of an " +
+          "encryption zone. Do you mean " + ez.getPath() + "?");
+    }
+
+    // check if the trash directory exists
+
+    Path trashPath = new Path(ez.getPath(), FileSystem.TRASH_PREFIX);
+
+    if (dfs.exists(trashPath)) {
+      String errMessage = "Will not provision new trash directory for " +
+          "encryption zone " + ez.getPath() + ". Path already exists.";
+      FileStatus trashFileStatus = dfs.getFileStatus(trashPath);
+      if (!trashFileStatus.isDirectory()) {
+        errMessage += "\r\n" +
+            "Warning: " + trashPath.toString() + " is not a directory";
+      }
+      if (!trashFileStatus.getPermission().equals(TRASH_PERMISSION)) {
+        errMessage += "\r\n" +
+            "Warning: the permission of " +
+            trashPath.toString() + " is not " + TRASH_PERMISSION;
+      }
+      throw new IOException(errMessage);
+    }
+
+    // Update the permission bits
+    dfs.mkdir(trashPath, TRASH_PERMISSION);
+    dfs.setPermission(trashPath, TRASH_PERMISSION);
+  }
+
+}

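Pulling the relocated API together, a hedged usage sketch (the URI, quota values, key name, and paths are illustrative assumptions; the encryption key must already exist in the configured KeyProvider):

import java.net.URI;
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class HdfsAdminExample {
  public static void main(String[] args) throws Exception {
    // Illustrative endpoint; non-HDFS URIs are rejected by the constructor.
    HdfsAdmin admin =
        new HdfsAdmin(URI.create("hdfs://localhost:8020"), new Configuration());

    // Cap /projects at one million names and 10 TB of space (both illustrative).
    admin.setQuota(new Path("/projects"), 1000000L);
    admin.setSpaceQuota(new Path("/projects"), 10L * 1024 * 1024 * 1024 * 1024);

    // One call creates the zone and provisions its trash directory;
    // "myKey" is a hypothetical key name.
    admin.createEncryptionZone(new Path("/secure"), "myKey",
        EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH));
  }
}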
http://git-wip-us.apache.org/repos/asf/hadoop/blob/92e5e915/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java
new file mode 100644
index 0000000..3b77a3f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client;
+
+import java.io.IOException;
+import java.net.URI;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The public utility API for HDFS.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class HdfsUtils {
+  public static final Logger LOG = LoggerFactory.getLogger(HdfsUtils.class);
+
+  /**
+   * Is the HDFS healthy?
+   * HDFS is considered healthy if it is up and not in safemode.
+   *
+   * @param uri the HDFS URI.  Note that the URI path is ignored.
+   * @return true if HDFS is healthy; false, otherwise.
+   */
+  public static boolean isHealthy(URI uri) {
+    //check scheme
+    final String scheme = uri.getScheme();
+    if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(scheme)) {
+      throw new IllegalArgumentException("The scheme is not "
+          + HdfsConstants.HDFS_URI_SCHEME + ", uri=" + uri);
+    }
+
+    final Configuration conf = new Configuration();
+    //disable FileSystem cache
+    conf.setBoolean(String.format("fs.%s.impl.disable.cache", scheme), true);
+    //disable client retry for rpc connection and rpc calls
+    conf.setBoolean(HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, false);
+    conf.setInt(
+        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
+
+    DistributedFileSystem fs = null;
+    try {
+      fs = (DistributedFileSystem)FileSystem.get(uri, conf);
+      final boolean safemode = fs.setSafeMode(SafeModeAction.SAFEMODE_GET);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Is namenode in safemode? " + safemode + "; uri=" + uri);
+      }
+
+      fs.close();
+      fs = null;
+      return !safemode;
+    } catch(IOException e) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Got an exception for uri=" + uri, e);
+      }
+      return false;
+    } finally {
+      IOUtils.closeQuietly(fs);
+    }
+  }
+}

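A matching hedged probe built on the utility above (the URI is an illustrative assumption; as the Javadoc notes, the URI path is ignored):

import java.net.URI;

import org.apache.hadoop.hdfs.client.HdfsUtils;

public class HealthProbe {
  public static void main(String[] args) {
    // True only when the NameNode is reachable and out of safemode.
    boolean ok = HdfsUtils.isHealthy(URI.create("hdfs://localhost:8020/"));
    System.out.println("HDFS healthy: " + ok);
  }
}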
http://git-wip-us.apache.org/repos/asf/hadoop/blob/92e5e915/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/package-info.java
new file mode 100644
index 0000000..95eceb7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/package-info.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package provides the administrative APIs for HDFS.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+package org.apache.hadoop.hdfs.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92e5e915/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java
deleted file mode 100644
index ccf9193..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.client;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * CreateEncryptionZoneFlag is used in
- * {@link HdfsAdmin#createEncryptionZone(Path, String, EnumSet)} to indicate
- * what should be done when creating an encryption zone.
- *
- * Use CreateEncryptionZoneFlag as follows:
- * <ol>
- *   <li>PROVISION_TRASH - provision a trash directory for the encryption zone
- *   to support soft delete.</li>
- * </ol>
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public enum CreateEncryptionZoneFlag {
-
-  /**
-   * Do not provision a trash directory in the encryption zone.
-   *
-   * @see CreateEncryptionZoneFlag#PROVISION_TRASH
-   */
-  NO_TRASH((short) 0x00),
-  /**
-   * Provision a trash directory (.Trash/) in the
-   * encryption zone.
-   *
-   * @see CreateEncryptionZoneFlag#NO_TRASH
-   */
-  PROVISION_TRASH((short) 0x01);
-
-  private final short mode;
-
-  CreateEncryptionZoneFlag(short mode) {
-    this.mode = mode;
-  }
-
-  public static CreateEncryptionZoneFlag valueOf(short mode) {
-    for (CreateEncryptionZoneFlag flag : CreateEncryptionZoneFlag.values()) {
-      if (flag.getMode() == mode) {
-        return flag;
-      }
-    }
-    return null;
-  }
-
-  public short getMode() {
-    return mode;
-  }
-}
-
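
A minimal usage sketch (not part of this patch) of how the flag being moved to hadoop-hdfs-client here is meant to be used with HdfsAdmin#createEncryptionZone; both methods appear in the diffs in this message. The namenode URI, zone path, and key name "mykey" are hypothetical, and the key must already exist in the configured KeyProvider:

import java.net.URI;
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class CreateEzExample {
  public static void main(String[] args) throws Exception {
    HdfsAdmin admin =
        new HdfsAdmin(URI.create("hdfs://namenode:8020"), new Configuration());
    // Create the zone keyed by "mykey" and provision .Trash/ inside it so
    // that deletes within the zone can be soft deletes.
    admin.createEncryptionZone(new Path("/secure"), "mykey",
        EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH));
  }
}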

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92e5e915/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
deleted file mode 100644
index b9cf5fb..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
+++ /dev/null
@@ -1,524 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.client;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.URI;
-import java.util.Collection;
-import java.util.EnumSet;
-
-import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockStoragePolicySpi;
-import org.apache.hadoop.fs.CacheFlag;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
-import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
-import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
-import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
-import org.apache.hadoop.hdfs.protocol.EncryptionZone;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.hdfs.tools.DFSAdmin;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-
-/**
- * The public API for performing administrative functions on HDFS. Those writing
- * applications against HDFS should prefer this interface to directly accessing
- * functionality in DistributedFileSystem or DFSClient.
- * 
- * Note that this is distinct from the similarly-named {@link DFSAdmin}, which
- * is a class that provides the functionality for the CLI `hdfs dfsadmin ...'
- * commands.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public class HdfsAdmin {
-  
-  private DistributedFileSystem dfs;
-  private static final FsPermission TRASH_PERMISSION = new FsPermission(
-      FsAction.ALL, FsAction.ALL, FsAction.ALL, true);
-  
-  /**
-   * Create a new HdfsAdmin client.
-   * 
-   * @param uri the unique URI of the HDFS file system to administer
-   * @param conf configuration
-   * @throws IOException in the event the file system could not be created
-   */
-  public HdfsAdmin(URI uri, Configuration conf) throws IOException {
-    FileSystem fs = FileSystem.get(uri, conf);
-    if (!(fs instanceof DistributedFileSystem)) {
-      throw new IllegalArgumentException("'" + uri + "' is not an HDFS URI.");
-    } else {
-      dfs = (DistributedFileSystem)fs;
-    }
-  }
-  
-  /**
-   * Set the namespace quota (count of files, directories, and sym links) for a
-   * directory.
-   * 
-   * @param src the path to set the quota for
-   * @param quota the value to set for the quota
-   * @throws IOException in the event of error
-   */
-  public void setQuota(Path src, long quota) throws IOException {
-    dfs.setQuota(src, quota, HdfsConstants.QUOTA_DONT_SET);
-  }
-  
-  /**
-   * Clear the namespace quota (count of files, directories and sym links) for a
-   * directory.
-   * 
-   * @param src the path to clear the quota of
-   * @throws IOException in the event of error
-   */
-  public void clearQuota(Path src) throws IOException {
-    dfs.setQuota(src, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
-  }
-  
-  /**
-   * Set the storage space quota (size of files) for a directory. Note that
-   * directories and sym links do not occupy storage space.
-   * 
-   * @param src the path to set the space quota of
-   * @param spaceQuota the value to set for the space quota
-   * @throws IOException in the event of error
-   */
-  public void setSpaceQuota(Path src, long spaceQuota) throws IOException {
-    dfs.setQuota(src, HdfsConstants.QUOTA_DONT_SET, spaceQuota);
-  }
-  
-  /**
-   * Clear the storage space quota (size of files) for a directory. Note that
-   * directories and sym links do not occupy storage space.
-   * 
-   * @param src the path to clear the space quota of
-   * @throws IOException in the event of error
-   */
-  public void clearSpaceQuota(Path src) throws IOException {
-    dfs.setQuota(src, HdfsConstants.QUOTA_DONT_SET, HdfsConstants.QUOTA_RESET);
-  }
-
-  /**
-   * Set the quota by storage type for a directory. Note that
-   * directories and sym links do not occupy storage type quota.
-   *
-   * @param src the target directory to set the quota by storage type
-   * @param type the storage type to set for quota by storage type
-   * @param quota the value to set for quota by storage type
-   * @throws IOException in the event of error
-   */
-  public void setQuotaByStorageType(Path src, StorageType type, long quota)
-      throws IOException {
-    dfs.setQuotaByStorageType(src, type, quota);
-  }
-
-  /**
-   * Clear the space quota by storage type for a directory. Note that
-   * directories and sym links do not occupy storage type quota.
-   *
-   * @param src the target directory to clear the quota by storage type
-   * @param type the storage type to clear for quota by storage type
-   * @throws IOException in the event of error
-   */
-  public void clearQuotaByStorageType(Path src, StorageType type) throws IOException {
-    dfs.setQuotaByStorageType(src, type, HdfsConstants.QUOTA_RESET);
-  }
-  
-  /**
-   * Allow snapshot on a directory.
-   * @param path The path of the directory where snapshots will be taken.
-   */
-  public void allowSnapshot(Path path) throws IOException {
-    dfs.allowSnapshot(path);
-  }
-  
-  /**
-   * Disallow snapshot on a directory.
-   * @param path The path of the snapshottable directory.
-   */
-  public void disallowSnapshot(Path path) throws IOException {
-    dfs.disallowSnapshot(path);
-  }
-
-  /**
-   * Add a new CacheDirectiveInfo.
-   * 
-   * @param info Information about a directive to add.
-   * @param flags {@link CacheFlag}s to use for this operation.
-   * @return the ID of the directive that was created.
-   * @throws IOException if the directive could not be added
-   */
-  public long addCacheDirective(CacheDirectiveInfo info,
-      EnumSet<CacheFlag> flags) throws IOException {
-    return dfs.addCacheDirective(info, flags);
-  }
-  
-  /**
-   * Modify a CacheDirective.
-   * 
-   * @param info Information about the directive to modify. You must set the ID
-   *          to indicate which CacheDirective you want to modify.
-   * @param flags {@link CacheFlag}s to use for this operation.
-   * @throws IOException if the directive could not be modified
-   */
-  public void modifyCacheDirective(CacheDirectiveInfo info,
-      EnumSet<CacheFlag> flags) throws IOException {
-    dfs.modifyCacheDirective(info, flags);
-  }
-
-  /**
-   * Remove a CacheDirective.
-   * 
-   * @param id identifier of the CacheDirectiveInfo to remove
-   * @throws IOException if the directive could not be removed
-   */
-  public void removeCacheDirective(long id)
-      throws IOException {
-    dfs.removeCacheDirective(id);
-  }
-
-  /**
-   * List cache directives. Incrementally fetches results from the server.
-   * 
-   * @param filter Filter parameters to use when listing the directives, null to
-   *               list all directives visible to us.
-   * @return A RemoteIterator which returns CacheDirectiveEntry objects.
-   */
-  public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
-      CacheDirectiveInfo filter) throws IOException {
-    return dfs.listCacheDirectives(filter);
-  }
-
-  /**
-   * Add a cache pool.
-   *
-   * @param info
-   *          The request to add a cache pool.
-   * @throws IOException 
-   *          If the request could not be completed.
-   */
-  public void addCachePool(CachePoolInfo info) throws IOException {
-    dfs.addCachePool(info);
-  }
-
-  /**
-   * Modify an existing cache pool.
-   *
-   * @param info
-   *          The request to modify a cache pool.
-   * @throws IOException 
-   *          If the request could not be completed.
-   */
-  public void modifyCachePool(CachePoolInfo info) throws IOException {
-    dfs.modifyCachePool(info);
-  }
-    
-  /**
-   * Remove a cache pool.
-   *
-   * @param poolName
-   *          Name of the cache pool to remove.
-   * @throws IOException 
-   *          if the cache pool did not exist, or could not be removed.
-   */
-  public void removeCachePool(String poolName) throws IOException {
-    dfs.removeCachePool(poolName);
-  }
-
-  /**
-   * List all cache pools.
-   *
-   * @return A remote iterator from which you can get CachePoolEntry objects.
-   *          Requests will be made as needed.
-   * @throws IOException
-   *          If there was an error listing cache pools.
-   */
-  public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
-    return dfs.listCachePools();
-  }
-
-  /**
-   * Create an encryption zone rooted at an empty existing directory, using the
-   * specified encryption key. An encryption zone has an associated encryption
-   * key used when reading and writing files within the zone.
-   *
-   * @param path    The path of the root of the encryption zone. Must refer to
-   *                an empty, existing directory.
-   * @param keyName Name of key available at the KeyProvider.
-   * @throws IOException            if there was a general IO exception
-   * @throws AccessControlException if the caller does not have access to path
-   * @throws FileNotFoundException  if the path does not exist
-   */
-  @Deprecated
-  public void createEncryptionZone(Path path, String keyName)
-      throws IOException, AccessControlException, FileNotFoundException {
-    dfs.createEncryptionZone(path, keyName);
-  }
-
-  /**
-   * Create an encryption zone rooted at an empty existing directory, using the
-   * specified encryption key. An encryption zone has an associated encryption
-   * key used when reading and writing files within the zone.
-   *
-   * Additional options, such as provisioning the trash directory, can be
-   * specified using {@link CreateEncryptionZoneFlag} flags.
-   *
-   * @param path    The path of the root of the encryption zone. Must refer to
-   *                an empty, existing directory.
-   * @param keyName Name of key available at the KeyProvider.
-   * @param flags   flags for this operation.
-   * @throws IOException            if there was a general IO exception
-   * @throws AccessControlException if the caller does not have access to path
-   * @throws FileNotFoundException  if the path does not exist
-   * @throws HadoopIllegalArgumentException if the flags are invalid
-   */
-  public void createEncryptionZone(Path path, String keyName,
-      EnumSet<CreateEncryptionZoneFlag> flags)
-      throws IOException, AccessControlException, FileNotFoundException,
-      HadoopIllegalArgumentException {
-    dfs.createEncryptionZone(path, keyName);
-    if (flags.contains(CreateEncryptionZoneFlag.PROVISION_TRASH)) {
-      if (flags.contains(CreateEncryptionZoneFlag.NO_TRASH)) {
-        throw new HadoopIllegalArgumentException(
-            "cannot have both PROVISION_TRASH and NO_TRASH flags");
-      }
-      this.provisionEZTrash(path);
-    }
-  }
-
-  /**
-   * Provision a trash directory for a given encryption zone.
-   *
-   * @param path the root of the encryption zone
-   * @throws IOException if the trash directory cannot be created.
-   */
-  public void provisionEncryptionZoneTrash(Path path) throws IOException {
-    this.provisionEZTrash(path);
-  }
-
-  /**
-   * Get the path of the encryption zone for a given file or directory.
-   *
-   * @param path The path to get the encryption zone for.
-   *
-   * @return The EncryptionZone, or null if the path is not in one.
-   * @throws IOException            if there was a general IO exception
-   * @throws AccessControlException if the caller does not have access to path
-   * @throws FileNotFoundException  if the path does not exist
-   */
-  public EncryptionZone getEncryptionZoneForPath(Path path)
-    throws IOException, AccessControlException, FileNotFoundException {
-    return dfs.getEZForPath(path);
-  }
-
-  /**
-   * Returns a RemoteIterator which can be used to list the encryption zones
-   * in HDFS. For large numbers of encryption zones, the iterator will fetch
-   * the list of zones in a number of small batches.
-   * <p/>
-   * Since the list is fetched in batches, it does not represent a
-   * consistent snapshot of the entire list of encryption zones.
-   * <p/>
-   * This method can only be called by HDFS superusers.
-   */
-  public RemoteIterator<EncryptionZone> listEncryptionZones()
-      throws IOException {
-    return dfs.listEncryptionZones();
-  }
-
-  /**
-   * Exposes a stream of namesystem events. Only events occurring after the
-   * stream is created are available.
-   * See {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream}
-   * for information on stream usage.
-   * See {@link org.apache.hadoop.hdfs.inotify.Event}
-   * for information on the available events.
-   * <p/>
-   * Inotify users may want to tune the following HDFS parameters to
-   * ensure that enough extra HDFS edits are saved to support inotify clients
-   * that fall behind the current state of the namespace while reading events.
-   * The default parameter values should generally be reasonable. If edits are
-   * deleted before their corresponding events can be read, clients will see a
-   * {@link org.apache.hadoop.hdfs.inotify.MissingEventsException} on
-   * {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream} method calls.
-   *
-   * It should generally be sufficient to tune these parameters:
-   * dfs.namenode.num.extra.edits.retained
-   * dfs.namenode.max.extra.edits.segments.retained
-   *
-   * Parameters that affect the number of created segments and the number of
-   * edits that are considered necessary (i.e. do not count towards the
-   * dfs.namenode.num.extra.edits.retained quota):
-   * dfs.namenode.checkpoint.period
-   * dfs.namenode.checkpoint.txns
-   * dfs.namenode.num.checkpoints.retained
-   * dfs.ha.log-roll.period
-   * <p/>
-   * It is recommended that local journaling be configured
-   * (dfs.namenode.edits.dir) for inotify (in addition to a shared journal)
-   * so that edit transfers from the shared journal can be avoided.
-   *
-   * @throws IOException If there was an error obtaining the stream.
-   */
-  public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
-    return dfs.getInotifyEventStream();
-  }
-
-  /**
-   * A version of {@link HdfsAdmin#getInotifyEventStream()} meant for advanced
-   * users who are aware of HDFS edits up to lastReadTxid (e.g. because they
-   * have access to an FSImage inclusive of lastReadTxid) and only want to read
-   * events after this point.
-   */
-  public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid)
-      throws IOException {
-    return dfs.getInotifyEventStream(lastReadTxid);
-  }
-
-  /**
-   * Set the source path to the specified storage policy.
-   *
-   * @param src The source path referring to either a directory or a file.
-   * @param policyName The name of the storage policy.
-   */
-  public void setStoragePolicy(final Path src, final String policyName)
-      throws IOException {
-    dfs.setStoragePolicy(src, policyName);
-  }
-
-  /**
-   * Unset the storage policy set for a given file or directory.
-   *
-   * @param src file or directory path.
-   * @throws IOException
-   */
-  public void unsetStoragePolicy(final Path src) throws IOException {
-    dfs.unsetStoragePolicy(src);
-  }
-
-  /**
-   * Query the effective storage policy ID for the given file or directory.
-   *
-   * @param src file or directory path.
-   * @return storage policy for the given file or directory.
-   * @throws IOException
-   */
-  public BlockStoragePolicySpi getStoragePolicy(final Path src)
-      throws IOException {
-    return dfs.getStoragePolicy(src);
-  }
-
-  /**
-   * Retrieve all the storage policies supported by HDFS file system.
-   *
-   * @return all storage policies supported by HDFS file system.
-   * @throws IOException
-   */
-  public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
-      throws IOException {
-    return dfs.getAllStoragePolicies();
-  }
-
-  /**
-   * Set the source path to the specified erasure coding policy.
-   *
-   * @param path The source path referring to a directory.
-   * @param ecPolicy The erasure coding policy for the directory.
-   *                 If null, the default will be used.
-   * @throws IOException
-   */
-  public void setErasureCodingPolicy(final Path path,
-      final ErasureCodingPolicy ecPolicy) throws IOException {
-    dfs.setErasureCodingPolicy(path, ecPolicy);
-  }
-
-  /**
-   * Get the erasure coding policy information for the specified path.
-   *
-   * @param path the path to query
-   * @return the policy information if the file or directory at the path is
-   *          erasure coded; null otherwise.
-   * @throws IOException
-   */
-  public ErasureCodingPolicy getErasureCodingPolicy(final Path path)
-      throws IOException {
-    return dfs.getErasureCodingPolicy(path);
-  }
-
-  /**
-   * Get the supported erasure coding policies.
-   *
-   * @throws IOException
-   */
-  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
-    return dfs.getClient().getErasureCodingPolicies();
-  }
-
-  private void provisionEZTrash(Path path) throws IOException {
-    // make sure the path is an EZ
-    EncryptionZone ez = dfs.getEZForPath(path);
-    if (ez == null) {
-      throw new IllegalArgumentException(path + " is not an encryption zone.");
-    }
-
-    String ezPath = ez.getPath();
-    if (!path.toString().equals(ezPath)) {
-      throw new IllegalArgumentException(path + " is not the root of an " +
-          "encryption zone. Do you mean " + ez.getPath() + "?");
-    }
-
-    // check if the trash directory exists
-
-    Path trashPath = new Path(ez.getPath(), FileSystem.TRASH_PREFIX);
-
-    if (dfs.exists(trashPath)) {
-      String errMessage = "Will not provision new trash directory for " +
-          "encryption zone " + ez.getPath() + ". Path already exists.";
-      FileStatus trashFileStatus = dfs.getFileStatus(trashPath);
-      if (!trashFileStatus.isDirectory()) {
-        errMessage += "\r\n" +
-            "Warning: " + trashPath.toString() + " is not a directory";
-      }
-      if (!trashFileStatus.getPermission().equals(TRASH_PERMISSION)) {
-        errMessage += "\r\n" +
-            "Warning: the permission of " +
-            trashPath.toString() + " is not " + TRASH_PERMISSION;
-      }
-      throw new IOException(errMessage);
-    }
-
-    // Update the permission bits
-    dfs.mkdir(trashPath, TRASH_PERMISSION);
-    dfs.setPermission(trashPath, TRASH_PERMISSION);
-  }
-
-}
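
For context, a minimal sketch of the quota and snapshot portion of the HdfsAdmin API whose source moves to hadoop-hdfs-client in this commit; the namenode URI and directory path are hypothetical:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class QuotaExample {
  public static void main(String[] args) throws Exception {
    HdfsAdmin admin =
        new HdfsAdmin(URI.create("hdfs://namenode:8020"), new Configuration());
    Path dir = new Path("/user/alice");
    admin.setQuota(dir, 100000L);         // limit the name count to 100,000
    admin.setSpaceQuota(dir, 10L << 30);  // limit disk usage to 10 GiB
    admin.allowSnapshot(dir);             // permit snapshots of the directory
    admin.clearSpaceQuota(dir);           // later, drop the space limit again
  }
}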

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92e5e915/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java
deleted file mode 100644
index f87de97..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.client;
-
-import java.io.IOException;
-import java.net.URI;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.io.IOUtils;
-
-/**
- * The public utility API for HDFS.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public class HdfsUtils {
-  private static final Log LOG = LogFactory.getLog(HdfsUtils.class);
-
-  /**
-   * Is the HDFS healthy?
-   * HDFS is considered healthy if it is up and not in safemode.
-   *
-   * @param uri the HDFS URI.  Note that the URI path is ignored.
-   * @return true if HDFS is healthy; false otherwise.
-   */
-  public static boolean isHealthy(URI uri) {
-    //check scheme
-    final String scheme = uri.getScheme();
-    if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(scheme)) {
-      throw new IllegalArgumentException("The scheme is not "
-          + HdfsConstants.HDFS_URI_SCHEME + ", uri=" + uri);
-    }
-    
-    final Configuration conf = new Configuration();
-    //disable FileSystem cache
-    conf.setBoolean(String.format("fs.%s.impl.disable.cache", scheme), true);
-    //disable client retry for rpc connection and rpc calls
-    conf.setBoolean(HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, false);
-    conf.setInt(
-        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
-
-    DistributedFileSystem fs = null;
-    try {
-      fs = (DistributedFileSystem)FileSystem.get(uri, conf);
-      final boolean safemode = fs.setSafeMode(SafeModeAction.SAFEMODE_GET);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Is namenode in safemode? " + safemode + "; uri=" + uri);
-      }
-
-      fs.close();
-      fs = null;
-      return !safemode;
-    } catch(IOException e) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Got an exception for uri=" + uri, e);
-      }
-      return false;
-    } finally {
-      IOUtils.cleanup(LOG, fs);
-    }
-  }
-}
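
And a hypothetical caller of HdfsUtils.isHealthy (sketch; the URI is made up, and its path component is ignored as the javadoc above notes):

import java.net.URI;

import org.apache.hadoop.hdfs.client.HdfsUtils;

public class HealthCheck {
  public static void main(String[] args) {
    boolean healthy = HdfsUtils.isHealthy(URI.create("hdfs://namenode:8020"));
    // isHealthy() returns true only if the namenode is up and out of safemode.
    System.out.println(healthy ? "HDFS is healthy"
                               : "HDFS is down or in safemode");
  }
}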

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92e5e915/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/package-info.java
deleted file mode 100644
index 95eceb7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/package-info.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package provides the administrative APIs for HDFS.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-package org.apache.hadoop.hdfs.client;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file




[38/57] [abbrv] hadoop git commit: HDFS-10851. FSDirStatAndListingOp: stop passing path as string. Contributed by Daryn Sharp.

Posted by in...@apache.org.
HDFS-10851. FSDirStatAndListingOp: stop passing path as string. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a0730aa5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a0730aa5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a0730aa5

Branch: refs/heads/HDFS-10467
Commit: a0730aa5ced7666a8c92f9fb830b615f5f9f477a
Parents: 0670149
Author: Kihwal Lee <ki...@apache.org>
Authored: Fri Sep 30 13:03:24 2016 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Fri Sep 30 13:03:24 2016 -0500

----------------------------------------------------------------------
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java |   4 +-
 .../server/namenode/FSDirStatAndListingOp.java  | 256 +++++++------------
 .../hdfs/server/namenode/FSDirXAttrOp.java      |   6 +-
 .../hdfs/server/namenode/FSDirectory.java       |  17 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  18 +-
 .../server/namenode/INodeAttributeProvider.java |  10 +-
 .../hdfs/server/namenode/INodesInPath.java      |   2 +-
 .../server/namenode/TestSnapshotPathINodes.java |   6 +
 8 files changed, 126 insertions(+), 193 deletions(-)
----------------------------------------------------------------------
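
The shape of the change, reduced to a sketch (method names as they appear in the diffs below):

// Before: the resolved path was flattened back to a String, then re-parsed
// at each layer:
//   INodesInPath iip = fsd.resolvePath(pc, src);
//   src = iip.getPath();
//   ... fsd.getAttributes(src, inode.getLocalNameBytes(), inode, snapshotId);
//
// After: the INodesInPath from the single resolution is passed through:
//   INodesInPath iip = fsd.resolvePath(pc, src);
//   ... fsd.getAttributes(iip);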


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0730aa5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index 2153f02..afafd78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -152,7 +152,6 @@ class FSDirAclOp {
     fsd.readLock();
     try {
       INodesInPath iip = fsd.resolvePath(pc, src);
-      src = iip.getPath();
       // There is no real inode for the path ending in ".snapshot", so return a
       // non-null, unpopulated AclStatus.  This is similar to getFileInfo.
       if (iip.isDotSnapshotDir() && fsd.getINode4DotSnapshot(iip) != null) {
@@ -163,8 +162,7 @@ class FSDirAclOp {
       }
       INode inode = FSDirectory.resolveLastINode(iip);
       int snapshotId = iip.getPathSnapshotId();
-      List<AclEntry> acl = AclStorage.readINodeAcl(fsd.getAttributes(src,
-              inode.getLocalNameBytes(), inode, snapshotId));
+      List<AclEntry> acl = AclStorage.readINodeAcl(fsd.getAttributes(iip));
       FsPermission fsPermission = inode.getFsPermission(snapshotId);
       return new AclStatus.Builder()
           .owner(inode.getUserName()).group(inode.getGroupName())

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0730aa5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 5072d68..4876fb1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -53,15 +53,12 @@ import static org.apache.hadoop.util.Time.now;
 class FSDirStatAndListingOp {
   static DirectoryListing getListingInt(FSDirectory fsd, final String srcArg,
       byte[] startAfter, boolean needLocation) throws IOException {
-    String src = null;
-
     final INodesInPath iip;
     if (fsd.isPermissionEnabled()) {
       FSPermissionChecker pc = fsd.getPermissionChecker();
       iip = fsd.resolvePath(pc, srcArg);
-      src = iip.getPath();
     } else {
-      src = FSDirectory.resolvePath(srcArg, fsd);
+      String src = FSDirectory.resolvePath(srcArg, fsd);
       iip = fsd.getINodesInPath(src, true);
     }
 
@@ -92,7 +89,7 @@ class FSDirStatAndListingOp {
       }
       isSuperUser = pc.isSuperUser();
     }
-    return getListing(fsd, iip, src, startAfter, needLocation, isSuperUser);
+    return getListing(fsd, iip, startAfter, needLocation, isSuperUser);
   }
 
   /**
@@ -161,7 +158,6 @@ class FSDirStatAndListingOp {
         "Negative length is not supported. File: " + src);
     CacheManager cm = fsd.getFSNamesystem().getCacheManager();
     BlockManager bm = fsd.getBlockManager();
-    boolean isReservedName = FSDirectory.isReservedRawName(src);
     fsd.readLock();
     try {
       final INodesInPath iip = fsd.resolvePath(pc, src);
@@ -184,7 +180,7 @@ class FSDirStatAndListingOp {
         isUc = false;
       }
 
-      final FileEncryptionInfo feInfo = isReservedName ? null
+      final FileEncryptionInfo feInfo = iip.isRaw() ? null
           : FSDirEncryptionZoneOp.getFileEncryptionInfo(fsd, inode,
           iip.getPathSnapshotId(), iip);
       final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.
@@ -225,42 +221,39 @@ class FSDirStatAndListingOp {
    * @param fsd FSDirectory
    * @param iip the INodesInPath instance containing all the INodes along the
    *            path
-   * @param src the directory name
    * @param startAfter the name to start listing after
    * @param needLocation if block locations are returned
+   * @param includeStoragePolicy if storage policy is returned
    * @return a partial listing starting after startAfter
    */
   private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip,
-      String src, byte[] startAfter, boolean needLocation, boolean isSuperUser)
+      byte[] startAfter, boolean needLocation, boolean includeStoragePolicy)
       throws IOException {
-    String srcs = FSDirectory.normalizePath(src);
-    if (FSDirectory.isExactReservedName(srcs)) {
+    if (FSDirectory.isExactReservedName(iip.getPathComponents())) {
       return getReservedListing(fsd);
     }
 
     fsd.readLock();
     try {
-      if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
-        return getSnapshotsListing(fsd, srcs, startAfter);
+      if (iip.isDotSnapshotDir()) {
+        return getSnapshotsListing(fsd, iip, startAfter);
       }
       final int snapshot = iip.getPathSnapshotId();
       final INode targetNode = iip.getLastINode();
-      if (targetNode == null)
+      if (targetNode == null) {
         return null;
-      byte parentStoragePolicy = isSuperUser ?
-          targetNode.getStoragePolicyID() : HdfsConstants
-          .BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+      }
+
+      byte parentStoragePolicy = includeStoragePolicy
+          ? targetNode.getStoragePolicyID()
+          : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
 
       if (!targetNode.isDirectory()) {
         // return the file's status. note that the iip already includes the
         // target INode
-        INodeAttributes nodeAttrs = getINodeAttributes(
-            fsd, src, HdfsFileStatus.EMPTY_NAME, targetNode,
-            snapshot);
         return new DirectoryListing(
             new HdfsFileStatus[]{ createFileStatus(
-                fsd, HdfsFileStatus.EMPTY_NAME, nodeAttrs,
-                needLocation, parentStoragePolicy, iip)
+                fsd, iip, null, parentStoragePolicy, needLocation)
             }, 0);
       }
 
@@ -274,20 +267,15 @@ class FSDirStatAndListingOp {
       int listingCnt = 0;
       HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
       for (int i = 0; i < numOfListing && locationBudget > 0; i++) {
-        INode cur = contents.get(startChild+i);
-        byte curPolicy = isSuperUser && !cur.isSymlink()?
-            cur.getLocalStoragePolicyID():
-            HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
-        INodeAttributes nodeAttrs = getINodeAttributes(
-            fsd, src, cur.getLocalNameBytes(), cur,
-            snapshot);
-        final INodesInPath iipWithChild = INodesInPath.append(iip, cur,
-            cur.getLocalNameBytes());
-        listing[i] = createFileStatus(fsd, cur.getLocalNameBytes(), nodeAttrs,
-            needLocation, getStoragePolicyID(curPolicy, parentStoragePolicy),
-            iipWithChild);
+        INode child = contents.get(startChild+i);
+        byte childStoragePolicy = (includeStoragePolicy && !child.isSymlink())
+            ? getStoragePolicyID(child.getLocalStoragePolicyID(),
+                                 parentStoragePolicy)
+            : parentStoragePolicy;
+        listing[i] =
+            createFileStatus(fsd, iip, child, childStoragePolicy, needLocation);
         listingCnt++;
-        if (needLocation) {
+        if (listing[i] instanceof HdfsLocatedFileStatus) {
            // Once we hit lsLimit locations, stop.
             // This helps to prevent excessively large response payloads.
             // Approximate #locations with locatedBlockCount() * repl_factor
@@ -312,17 +300,16 @@ class FSDirStatAndListingOp {
    * Get a listing of all the snapshots of a snapshottable directory
    */
   private static DirectoryListing getSnapshotsListing(
-      FSDirectory fsd, String src, byte[] startAfter)
+      FSDirectory fsd, INodesInPath iip, byte[] startAfter)
       throws IOException {
     Preconditions.checkState(fsd.hasReadLock());
-    Preconditions.checkArgument(
-        src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
-        "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
-
-    final String dirPath = FSDirectory.normalizePath(src.substring(0,
-        src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));
-
-    final INode node = fsd.getINode(dirPath);
+    Preconditions.checkArgument(iip.isDotSnapshotDir(),
+        "%s does not end with %s",
+        iip.getPath(), HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
+    // drop off the null .snapshot component
+    iip = iip.getParentINodesInPath();
+    final String dirPath = iip.getPath();
+    final INode node = iip.getLastINode();
     final INodeDirectory dirNode = INodeDirectory.valueOf(node, dirPath);
     final DirectorySnapshottableFeature sf = dirNode.getDirectorySnapshottableFeature();
     if (sf == null) {
@@ -336,13 +323,8 @@ class FSDirStatAndListingOp {
     final HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
     for (int i = 0; i < numOfListing; i++) {
       Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
-      INodeAttributes nodeAttrs = getINodeAttributes(
-          fsd, src, sRoot.getLocalNameBytes(),
-          node, Snapshot.CURRENT_STATE_ID);
-      listing[i] = createFileStatus(
-          fsd, sRoot.getLocalNameBytes(), nodeAttrs,
-          HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
-          INodesInPath.fromINode(sRoot));
+      listing[i] = createFileStatus(fsd, iip, sRoot,
+          HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, false);
     }
     return new DirectoryListing(
         listing, snapshots.size() - skipSize - numOfListing);
@@ -360,7 +342,6 @@ class FSDirStatAndListingOp {
   /** Get the file info for a specific file.
    * @param fsd FSDirectory
    * @param iip The path to the file, the file is included
-   * @param isRawPath true if a /.reserved/raw pathname was passed by the user
    * @param includeStoragePolicy whether to include storage policy
    * @return object containing information regarding the file
    *         or null if file not found
@@ -373,15 +354,10 @@ class FSDirStatAndListingOp {
       if (node == null) {
         return null;
       }
-
-      byte policyId = includeStoragePolicy && !node.isSymlink() ?
-          node.getStoragePolicyID() :
-          HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
-      INodeAttributes nodeAttrs = getINodeAttributes(fsd, iip.getPath(),
-                                                     HdfsFileStatus.EMPTY_NAME,
-                                                     node, iip.getPathSnapshotId());
-      return createFileStatus(fsd, HdfsFileStatus.EMPTY_NAME, nodeAttrs,
-                              policyId, iip);
+      byte policy = (includeStoragePolicy && !node.isSymlink())
+          ? node.getStoragePolicyID()
+          : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+      return createFileStatus(fsd, iip, null, policy, false);
     } finally {
       fsd.readUnlock();
     }
@@ -408,48 +384,41 @@ class FSDirStatAndListingOp {
   }
 
   /**
-   * create an hdfs file status from an inode
+   * Create an HdfsFileStatus from an INodesInPath.
+   * @param fsd FSDirectory
+   * @param iip The INodesInPath containing the INodeFile and its ancestors
+   * @return HdfsFileStatus without locations or storage policy
+   */
+  static HdfsFileStatus createFileStatusForEditLog(
+      FSDirectory fsd, INodesInPath iip) throws IOException {
+    return createFileStatus(fsd, iip,
+        null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, false);
+  }
+
+  /**
+   * Create an HdfsFileStatus from an INodesInPath.
    *
    * @param fsd FSDirectory
-   * @param path the local name
+   * @param iip The INodesInPath containing the INodeFile and its ancestors.
+   * @param child the child INode when building a directory listing entry,
+   *              else null
+   * @param storagePolicy the storage policy for the path or its closest
+   *                      ancestor
    * @param needLocation if block locations need to be included or not
-   * @param isRawPath true if this is being called on behalf of a path in
-   *                  /.reserved/raw
-   * @param iip the INodesInPath containing the target INode and its ancestors
    * @return a file status
    * @throws java.io.IOException if any error occurs
    */
   private static HdfsFileStatus createFileStatus(
-      FSDirectory fsd, byte[] path, INodeAttributes nodeAttrs,
-      boolean needLocation, byte storagePolicy, INodesInPath iip)
-      throws IOException {
-    if (needLocation) {
-      return createLocatedFileStatus(fsd, path, nodeAttrs, storagePolicy, iip);
-    } else {
-      return createFileStatus(fsd, path, nodeAttrs, storagePolicy, iip);
+      FSDirectory fsd, INodesInPath iip, INode child, byte storagePolicy,
+      boolean needLocation) throws IOException {
+    assert fsd.hasReadLock();
+    // only directory listing sets the status name.
+    byte[] name = HdfsFileStatus.EMPTY_NAME;
+    if (child != null) {
+      name = child.getLocalNameBytes();
+      // have to do this for EC and EZ lookups...
+      iip = INodesInPath.append(iip, child, name);
     }
-  }
 
-  /**
-   * Create FileStatus for a given INodeFile.
-   * @param iip The INodesInPath containing the INodeFile and its ancestors
-   */
-  static HdfsFileStatus createFileStatusForEditLog(
-      FSDirectory fsd, String fullPath, byte[] path,
-      byte storagePolicy, int snapshot, boolean isRawPath,
-      INodesInPath iip) throws IOException {
-    INodeAttributes nodeAttrs = getINodeAttributes(
-        fsd, fullPath, path, iip.getLastINode(), snapshot);
-    return createFileStatus(fsd, path, nodeAttrs, storagePolicy, iip);
-  }
-
-  /**
-   * create file status for a given INode
-   * @param iip the INodesInPath containing the target INode and its ancestors
-   */
-  static HdfsFileStatus createFileStatus(
-      FSDirectory fsd, byte[] path, INodeAttributes nodeAttrs,
-      byte storagePolicy, INodesInPath iip) throws IOException {
     long size = 0;     // length is zero for directories
     short replication = 0;
     long blocksize = 0;
@@ -457,6 +426,7 @@ class FSDirStatAndListingOp {
     final INode node = iip.getLastINode();
     final int snapshot = iip.getPathSnapshotId();
     final boolean isRawPath = iip.isRaw();
+    LocatedBlocks loc = null;
 
     final FileEncryptionInfo feInfo = isRawPath ? null : FSDirEncryptionZoneOp
         .getFileEncryptionInfo(fsd, node, snapshot, iip);
@@ -471,6 +441,18 @@ class FSDirStatAndListingOp {
       blocksize = fileNode.getPreferredBlockSize();
       isEncrypted = (feInfo != null)
           || (isRawPath && FSDirEncryptionZoneOp.isInAnEZ(fsd, iip));
+      if (needLocation) {
+        final boolean inSnapshot = snapshot != Snapshot.CURRENT_STATE_ID;
+        final boolean isUc = !inSnapshot && fileNode.isUnderConstruction();
+        final long fileSize = !inSnapshot && isUc
+            ? fileNode.computeFileSizeNotIncludingLastUcBlock() : size;
+        loc = fsd.getBlockManager().createLocatedBlocks(
+            fileNode.getBlocks(snapshot), fileSize, isUc, 0L, size, false,
+            inSnapshot, feInfo, ecPolicy);
+        if (loc == null) {
+          loc = new LocatedBlocks();
+        }
+      }
     } else {
       isEncrypted = FSDirEncryptionZoneOp.isInAnEZ(fsd, iip);
     }
@@ -478,7 +460,8 @@ class FSDirStatAndListingOp {
     int childrenNum = node.isDirectory() ?
         node.asDirectory().getChildrenNum(snapshot) : 0;
 
-    return new HdfsFileStatus(
+    INodeAttributes nodeAttrs = fsd.getAttributes(iip);
+    HdfsFileStatus status = createFileStatus(
         size,
         node.isDirectory(),
         replication,
@@ -489,73 +472,13 @@ class FSDirStatAndListingOp {
         nodeAttrs.getUserName(),
         nodeAttrs.getGroupName(),
         node.isSymlink() ? node.asSymlink().getSymlink() : null,
-        path,
+        name,
         node.getId(),
         childrenNum,
         feInfo,
         storagePolicy,
-        ecPolicy);
-  }
-
-  private static INodeAttributes getINodeAttributes(
-      FSDirectory fsd, String fullPath, byte[] path, INode node, int snapshot) {
-    return fsd.getAttributes(fullPath, path, node, snapshot);
-  }
-
-  /**
-   * Create FileStatus with location info by file INode
-   * @param iip the INodesInPath containing the target INode and its ancestors
-   */
-  private static HdfsFileStatus createLocatedFileStatus(
-      FSDirectory fsd, byte[] path, INodeAttributes nodeAttrs,
-      byte storagePolicy, INodesInPath iip) throws IOException {
-    assert fsd.hasReadLock();
-    long size = 0; // length is zero for directories
-    short replication = 0;
-    long blocksize = 0;
-    LocatedBlocks loc = null;
-    final boolean isEncrypted;
-    final INode node = iip.getLastINode();
-    final int snapshot = iip.getPathSnapshotId();
-    final boolean isRawPath = iip.isRaw();
-
-    final FileEncryptionInfo feInfo = isRawPath ? null : FSDirEncryptionZoneOp
-        .getFileEncryptionInfo(fsd, node, snapshot, iip);
-    final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.getErasureCodingPolicy(
-        fsd.getFSNamesystem(), iip);
-    if (node.isFile()) {
-      final INodeFile fileNode = node.asFile();
-      size = fileNode.computeFileSize(snapshot);
-      replication = fileNode.getFileReplication(snapshot);
-      blocksize = fileNode.getPreferredBlockSize();
-
-      final boolean inSnapshot = snapshot != Snapshot.CURRENT_STATE_ID;
-      final boolean isUc = !inSnapshot && fileNode.isUnderConstruction();
-      final long fileSize = !inSnapshot && isUc ?
-          fileNode.computeFileSizeNotIncludingLastUcBlock() : size;
-
-      loc = fsd.getBlockManager().createLocatedBlocks(
-          fileNode.getBlocks(snapshot), fileSize, isUc, 0L, size, false,
-          inSnapshot, feInfo, ecPolicy);
-      if (loc == null) {
-        loc = new LocatedBlocks();
-      }
-      isEncrypted = (feInfo != null)
-          || (isRawPath && FSDirEncryptionZoneOp.isInAnEZ(fsd, iip));
-    } else {
-      isEncrypted = FSDirEncryptionZoneOp.isInAnEZ(fsd, iip);
-    }
-    int childrenNum = node.isDirectory() ?
-        node.asDirectory().getChildrenNum(snapshot) : 0;
-
-    HdfsLocatedFileStatus status =
-        new HdfsLocatedFileStatus(size, node.isDirectory(), replication,
-          blocksize, node.getModificationTime(snapshot),
-          node.getAccessTime(snapshot),
-          getPermissionForFileStatus(nodeAttrs, isEncrypted),
-          nodeAttrs.getUserName(), nodeAttrs.getGroupName(),
-          node.isSymlink() ? node.asSymlink().getSymlink() : null, path,
-          node.getId(), loc, childrenNum, feInfo, storagePolicy, ecPolicy);
+        ecPolicy,
+        loc);
     // Set caching information for the located blocks.
     if (loc != null) {
       CacheManager cacheManager = fsd.getFSNamesystem().getCacheManager();
@@ -566,6 +489,23 @@ class FSDirStatAndListingOp {
     return status;
   }
 
+  private static HdfsFileStatus createFileStatus(long length, boolean isdir,
+      int replication, long blocksize, long mtime,
+      long atime, FsPermission permission, String owner, String group,
+      byte[] symlink, byte[] path, long fileId, int childrenNum,
+      FileEncryptionInfo feInfo, byte storagePolicy,
+      ErasureCodingPolicy ecPolicy, LocatedBlocks locations) {
+    if (locations == null) {
+      return new HdfsFileStatus(length, isdir, replication, blocksize,
+          mtime, atime, permission, owner, group, symlink, path, fileId,
+          childrenNum, feInfo, storagePolicy, ecPolicy);
+    } else {
+      return new HdfsLocatedFileStatus(length, isdir, replication, blocksize,
+          mtime, atime, permission, owner, group, symlink, path, fileId,
+          locations, childrenNum, feInfo, storagePolicy, ecPolicy);
+    }
+  }
+
   /**
    * Returns an inode's FsPermission for use in an outbound FileStatus.  If the
    * inode has an ACL or is for an encrypted file/dir, then this method will

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0730aa5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 746fdb7..08016c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -429,11 +429,7 @@ class FSDirXAttrOp {
       throws IOException {
     fsd.readLock();
     try {
-      String src = iip.getPath();
-      INode inode = FSDirectory.resolveLastINode(iip);
-      int snapshotId = iip.getPathSnapshotId();
-      return XAttrStorage.readINodeXAttrs(fsd.getAttributes(src,
-              inode.getLocalNameBytes(), inode, snapshotId));
+      return XAttrStorage.readINodeXAttrs(fsd.getAttributes(iip));
     } finally {
       fsd.readUnlock();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0730aa5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 2c7a268..7db2106 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1807,14 +1807,19 @@ public class FSDirectory implements Closeable {
     inodeId.setCurrentValue(newValue);
   }
 
-  INodeAttributes getAttributes(String fullPath, byte[] path,
-      INode node, int snapshot) {
+  INodeAttributes getAttributes(INodesInPath iip)
+      throws FileNotFoundException {
+    INode node = FSDirectory.resolveLastINode(iip);
+    int snapshot = iip.getPathSnapshotId();
     INodeAttributes nodeAttrs = node.getSnapshotINode(snapshot);
     if (attributeProvider != null) {
-      fullPath = fullPath
-          + (fullPath.endsWith(Path.SEPARATOR) ? "" : Path.SEPARATOR)
-          + DFSUtil.bytes2String(path);
-      nodeAttrs = attributeProvider.getAttributes(fullPath, nodeAttrs);
+      // Permission checking sends the full components array, including the
+      // leading empty component for the root. However, file-status-related
+      // calls are expected to strip out the root component, per
+      // TestINodeAttributeProvider.
+      byte[][] components = iip.getPathComponents();
+      components = Arrays.copyOfRange(components, 1, components.length);
+      nodeAttrs = attributeProvider.getAttributes(components, nodeAttrs);
     }
     return nodeAttrs;
   }
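
To make the comment above concrete (illustrative path, not from the patch):

// For "/user/alice", iip.getPathComponents() yields
//   { "", "user", "alice" }   // empty first component = the root
// and after Arrays.copyOfRange(components, 1, components.length) the
// attribute provider sees
//   { "user", "alice" }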

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0730aa5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 25f5a4f..ee6206a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -383,10 +383,8 @@ public class FSEditLogLoader {
 
         // add the op into retry cache if necessary
         if (toAddRetryCache) {
-          HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatusForEditLog(
-              fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME,
-              HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
-              false, iip);
+          HdfsFileStatus stat =
+              FSDirStatAndListingOp.createFileStatusForEditLog(fsDir, iip);
           fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId,
               addCloseOp.rpcCallId, stat);
         }
@@ -402,10 +400,8 @@ public class FSEditLogLoader {
               false);
           // add the op into retry cache if necessary
           if (toAddRetryCache) {
-            HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatusForEditLog(
-                fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME,
-                HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
-                Snapshot.CURRENT_STATE_ID, false, iip);
+            HdfsFileStatus stat =
+                FSDirStatAndListingOp.createFileStatusForEditLog(fsDir, iip);
             fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId,
                 addCloseOp.rpcCallId, new LastBlockWithStatus(lb, stat));
           }
@@ -480,10 +476,8 @@ public class FSEditLogLoader {
             false, false);
         // add the op into retry cache if necessary
         if (toAddRetryCache) {
-          HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatusForEditLog(
-              fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME,
-              HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
-              Snapshot.CURRENT_STATE_ID, false, iip);
+          HdfsFileStatus stat =
+              FSDirStatAndListingOp.createFileStatusForEditLog(fsDir, iip);
           fsNamesys.addCacheEntryWithPayload(appendOp.rpcClientId,
               appendOp.rpcCallId, new LastBlockWithStatus(lb, stat));
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0730aa5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java
index 2e0775b..2f9bc37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java
@@ -17,13 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-
-import com.google.common.annotations.VisibleForTesting;
-
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -87,7 +80,7 @@ public abstract class INodeAttributeProvider {
    */
   public abstract void stop();
 
-  @VisibleForTesting
+  @Deprecated
   String[] getPathElements(String path) {
     path = path.trim();
     if (path.charAt(0) != Path.SEPARATOR_CHAR) {
@@ -115,6 +108,7 @@ public abstract class INodeAttributeProvider {
     return pathElements;
   }
 
+  @Deprecated
   public INodeAttributes getAttributes(String fullPath, INodeAttributes inode) {
     return getAttributes(getPathElements(fullPath), inode);
   }
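
In other words, provider implementations are expected to migrate off the String overload (sketch; "components" stands for the root-stripped byte[][] path shown in the FSDirectory hunk above):

// Deprecated, String-based lookup:
//   INodeAttributes attrs = provider.getAttributes("/user/alice", nodeAttrs);
// Preferred, component-based lookup:
//   INodeAttributes attrs = provider.getAttributes(components, nodeAttrs);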

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0730aa5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index 8f65ff8..04d3bda 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -399,7 +399,7 @@ public class INodesInPath {
    */
   private INodesInPath getAncestorINodesInPath(int length) {
     Preconditions.checkArgument(length >= 0 && length < inodes.length);
-    Preconditions.checkState(!isSnapshot());
+    Preconditions.checkState(isDotSnapshotDir() || !isSnapshot());
     final INode[] anodes = new INode[length];
     final byte[][] apath = new byte[length][];
     System.arraycopy(this.inodes, 0, anodes, 0, length);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0730aa5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
index 24ec1a2..3a318bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
@@ -166,6 +166,9 @@ public class TestSnapshotPathINodes {
     assertEquals(sub1.toString(), nodesInPath.getPath(2));
     assertEquals(file1.toString(), nodesInPath.getPath(3));
 
+    assertEquals(file1.getParent().toString(),
+                 nodesInPath.getParentINodesInPath().getPath());
+
     nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
     assertEquals(nodesInPath.length(), components.length);
     assertSnapshot(nodesInPath, false, null, -1);
@@ -212,6 +215,9 @@ public class TestSnapshotPathINodes {
     // The number of INodes returned should still be components.length
     // since we put a null in the inode array for ".snapshot"
     assertEquals(nodesInPath.length(), components.length);
+    // ensure parent inodes can strip the .snapshot
+    assertEquals(sub1.toString(),
+        nodesInPath.getParentINodesInPath().getPath());
 
     // No SnapshotRoot dir is included in the resolved inodes  
     assertSnapshot(nodesInPath, true, snapshot, -1);




[44/57] [abbrv] hadoop git commit: HDFS-10896. Move lock logging logic from FSNamesystem into FSNamesystemLock. Contributed by Erik Krogen.

Posted by in...@apache.org.
HDFS-10896. Move lock logging logic from FSNamesystem into FSNamesystemLock. Contributed by Erik Krogen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/434c5ea7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/434c5ea7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/434c5ea7

Branch: refs/heads/HDFS-10467
Commit: 434c5ea75dc3d87513e49290ac9999148ff5163c
Parents: 57aec2b
Author: Zhe Zhang <zh...@apache.org>
Authored: Fri Sep 30 13:15:59 2016 -0700
Committer: Zhe Zhang <zh...@apache.org>
Committed: Fri Sep 30 13:16:08 2016 -0700

----------------------------------------------------------------------
 .../hdfs/server/namenode/FSNamesystem.java      | 147 +--------
 .../hdfs/server/namenode/FSNamesystemLock.java  | 187 ++++++++++-
 .../hdfs/server/namenode/TestFSNamesystem.java  | 292 -----------------
 .../server/namenode/TestFSNamesystemLock.java   | 317 +++++++++++++++++++
 4 files changed, 496 insertions(+), 447 deletions(-)
----------------------------------------------------------------------
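
The gist of the change, sketched below as a minimal, self-contained
illustration (the class and names here are simplified stand-ins, not the
actual patch): hold-time measurement and report logging move out of the
caller and into the lock wrapper itself, so FSNamesystem shrinks to plain
delegation.

import java.util.concurrent.locks.ReentrantReadWriteLock;

// Illustrative stand-in for the pattern this commit applies: the lock
// wrapper measures hold time on the outermost acquire/release pair and
// logs when a configured threshold is exceeded. (The real class also
// covers read locks, warning suppression intervals, and a pluggable
// Timer; see the FSNamesystemLock diff below.)
class LoggingWriteLock {
  private final ReentrantReadWriteLock coarseLock =
      new ReentrantReadWriteLock(true);
  private final long reportThresholdMs;
  private long heldSinceMs; // set on the outermost acquire only

  LoggingWriteLock(long reportThresholdMs) {
    this.reportThresholdMs = reportThresholdMs;
  }

  void writeLock() {
    coarseLock.writeLock().lock();
    if (coarseLock.getWriteHoldCount() == 1) {
      heldSinceMs = System.currentTimeMillis();
    }
  }

  void writeUnlock() {
    final boolean outermost = coarseLock.getWriteHoldCount() == 1;
    final long heldMs = System.currentTimeMillis() - heldSinceMs;
    coarseLock.writeLock().unlock();
    if (outermost && heldMs >= reportThresholdMs) {
      System.err.println("write lock held for " + heldMs + " ms");
    }
  }
}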


http://git-wip-us.apache.org/repos/asf/hadoop/blob/434c5ea7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 4700263..1721b2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -71,12 +71,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_EXPIRYTIME_MILLIS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_EXPIRYTIME_MILLIS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_DEFAULT;
@@ -129,8 +123,6 @@ import java.util.TreeMap;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -284,7 +276,6 @@ import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Timer;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.log4j.Appender;
 import org.apache.log4j.AsyncAppender;
@@ -713,12 +704,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       LOG.info("Enabling async auditlog");
       enableAsyncAuditLog();
     }
-    boolean fair = conf.getBoolean("dfs.namenode.fslock.fair", true);
-    LOG.info("fsLock is fair:" + fair);
-    fsLock = new FSNamesystemLock(fair);
-    cond = fsLock.writeLock().newCondition();
+    fsLock = new FSNamesystemLock(conf);
+    cond = fsLock.newWriteLockCondition();
     cpLock = new ReentrantLock();
-    setTimer(new Timer());
 
     this.fsImage = fsImage;
     try {
@@ -827,17 +815,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
           DFS_NAMENODE_MAX_LOCK_HOLD_TO_RELEASE_LEASE_MS_KEY,
           DFS_NAMENODE_MAX_LOCK_HOLD_TO_RELEASE_LEASE_MS_DEFAULT);
 
-      this.writeLockReportingThreshold = conf.getLong(
-          DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY,
-          DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT);
-      this.readLockReportingThreshold = conf.getLong(
-          DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY,
-          DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT);
-
-      this.lockSuppressWarningInterval = conf.getTimeDuration(
-          DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY,
-          DFS_LOCK_SUPPRESS_WARNING_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
-
       // For testing purposes, allow the DT secret manager to be started regardless
       // of whether security is enabled.
       alwaysUseDelegationTokensForTests = conf.getBoolean(
@@ -1516,131 +1493,25 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return Util.stringCollectionAsURIs(dirNames);
   }
 
-  private final long lockSuppressWarningInterval;
-  /** Threshold (ms) for long holding write lock report. */
-  private final long writeLockReportingThreshold;
-  private int numWriteLockWarningsSuppressed = 0;
-  private long timeStampOfLastWriteLockReport = 0;
-  private long longestWriteLockHeldInterval = 0;
-  /** Last time stamp for write lock. Keep the longest one for multi-entrance.*/
-  private long writeLockHeldTimeStamp;
-  /** Threshold (ms) for long holding read lock report. */
-  private long readLockReportingThreshold;
-  private AtomicInteger numReadLockWarningsSuppressed = new AtomicInteger(0);
-  private AtomicLong timeStampOfLastReadLockReport = new AtomicLong(0);
-  private AtomicLong longestReadLockHeldInterval = new AtomicLong(0);
-  private Timer timer;
-  /**
-   * Last time stamp for read lock. Keep the longest one for
-   * multi-entrance. This is ThreadLocal since there could be
-   * many read locks held simultaneously.
-   */
-  private static ThreadLocal<Long> readLockHeldTimeStamp =
-      new ThreadLocal<Long>() {
-        @Override
-        public Long initialValue() {
-          return Long.MAX_VALUE;
-        }
-      };
-
   @Override
   public void readLock() {
-    this.fsLock.readLock().lock();
-    if (this.fsLock.getReadHoldCount() == 1) {
-      readLockHeldTimeStamp.set(timer.monotonicNow());
-    }
+    this.fsLock.readLock();
   }
   @Override
   public void readUnlock() {
-    final boolean needReport = this.fsLock.getReadHoldCount() == 1;
-    final long readLockInterval = timer.monotonicNow() -
-        readLockHeldTimeStamp.get();
-    if (needReport) {
-      readLockHeldTimeStamp.remove();
-    }
-
-    this.fsLock.readLock().unlock();
-
-    if (needReport && readLockInterval >= this.readLockReportingThreshold) {
-      long localLongestReadLock;
-      do {
-        localLongestReadLock = longestReadLockHeldInterval.get();
-      } while (localLongestReadLock - readLockInterval < 0
-          && !longestReadLockHeldInterval.compareAndSet(localLongestReadLock,
-                                                        readLockInterval));
-
-      long localTimeStampOfLastReadLockReport;
-      long now;
-      do {
-        now = timer.monotonicNow();
-        localTimeStampOfLastReadLockReport = timeStampOfLastReadLockReport
-            .get();
-        if (now - localTimeStampOfLastReadLockReport <
-            lockSuppressWarningInterval) {
-          numReadLockWarningsSuppressed.incrementAndGet();
-          return;
-        }
-      } while (!timeStampOfLastReadLockReport.compareAndSet(
-          localTimeStampOfLastReadLockReport, now));
-      int numSuppressedWarnings = numReadLockWarningsSuppressed.getAndSet(0);
-      long longestLockHeldInterval = longestReadLockHeldInterval.getAndSet(0);
-      LOG.info("FSNamesystem read lock held for " + readLockInterval +
-          " ms via\n" + StringUtils.getStackTrace(Thread.currentThread()) +
-          "\tNumber of suppressed read-lock reports: " +
-          numSuppressedWarnings + "\n\tLongest read-lock held interval: " +
-          longestLockHeldInterval);
-    }
+    this.fsLock.readUnlock();
   }
   @Override
   public void writeLock() {
-    this.fsLock.writeLock().lock();
-    if (fsLock.getWriteHoldCount() == 1) {
-      writeLockHeldTimeStamp = timer.monotonicNow();
-    }
+    this.fsLock.writeLock();
   }
   @Override
   public void writeLockInterruptibly() throws InterruptedException {
-    this.fsLock.writeLock().lockInterruptibly();
-    if (fsLock.getWriteHoldCount() == 1) {
-      writeLockHeldTimeStamp = timer.monotonicNow();
-    }
+    this.fsLock.writeLockInterruptibly();
   }
   @Override
   public void writeUnlock() {
-    final boolean needReport = fsLock.getWriteHoldCount() == 1 &&
-        fsLock.isWriteLockedByCurrentThread();
-    final long currentTime = timer.monotonicNow();
-    final long writeLockInterval = currentTime - writeLockHeldTimeStamp;
-
-    boolean logReport = false;
-    int numSuppressedWarnings = 0;
-    long longestLockHeldInterval = 0;
-    if (needReport && writeLockInterval >= this.writeLockReportingThreshold) {
-      if (writeLockInterval > longestWriteLockHeldInterval) {
-        longestWriteLockHeldInterval = writeLockInterval;
-      }
-      if (currentTime - timeStampOfLastWriteLockReport > this
-          .lockSuppressWarningInterval) {
-        logReport = true;
-        numSuppressedWarnings = numWriteLockWarningsSuppressed;
-        numWriteLockWarningsSuppressed = 0;
-        longestLockHeldInterval = longestWriteLockHeldInterval;
-        longestWriteLockHeldInterval = 0;
-        timeStampOfLastWriteLockReport = currentTime;
-      } else {
-        numWriteLockWarningsSuppressed++;
-      }
-    }
-
-    this.fsLock.writeLock().unlock();
-
-    if (logReport) {
-      LOG.info("FSNamesystem write lock held for " + writeLockInterval +
-          " ms via\n" + StringUtils.getStackTrace(Thread.currentThread()) +
-          "\tNumber of suppressed write-lock reports: " +
-          numSuppressedWarnings + "\n\tLongest write-lock held interval: " +
-              longestLockHeldInterval);
-    }
+    this.fsLock.writeUnlock();
   }
   @Override
   public boolean hasWriteLock() {
@@ -7173,9 +7044,5 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         .size();
   }
 
-  @VisibleForTesting
-  void setTimer(Timer newTimer) {
-    this.timer = newTimer;
-  }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/434c5ea7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
index d239796..043f569 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
@@ -19,33 +19,186 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Timer;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY;
 
 /**
- * Mimics a ReentrantReadWriteLock so more sophisticated locking capabilities
- * are possible.
+ * Mimics a ReentrantReadWriteLock but does not directly implement the interface
+ * so more sophisticated locking capabilities and logging/metrics are possible.
  */
-class FSNamesystemLock implements ReadWriteLock {
+class FSNamesystemLock {
   @VisibleForTesting
   protected ReentrantReadWriteLock coarseLock;
-  
-  FSNamesystemLock(boolean fair) {
+
+  private final Timer timer;
+
+  /**
+   * Log statements about long lock hold times will not be produced more
+   * frequently than this interval.
+   */
+  private final long lockSuppressWarningInterval;
+
+  /** Threshold (ms) for long holding write lock report. */
+  private final long writeLockReportingThreshold;
+  /** Last time stamp for write lock. Keep the longest one for multi-entrance.*/
+  private long writeLockHeldTimeStamp;
+  private int numWriteLockWarningsSuppressed = 0;
+  private long timeStampOfLastWriteLockReport = 0;
+  private long longestWriteLockHeldInterval = 0;
+
+  /** Threshold (ms) for long holding read lock report. */
+  private final long readLockReportingThreshold;
+  /**
+   * Last time stamp for read lock. Keep the longest one for
+   * multi-entrance. This is ThreadLocal since there could be
+   * many read locks held simultaneously.
+   */
+  private final ThreadLocal<Long> readLockHeldTimeStamp =
+      new ThreadLocal<Long>() {
+        @Override
+        public Long initialValue() {
+          return Long.MAX_VALUE;
+        }
+      };
+  private final AtomicInteger numReadLockWarningsSuppressed =
+      new AtomicInteger(0);
+  private final AtomicLong timeStampOfLastReadLockReport = new AtomicLong(0);
+  private final AtomicLong longestReadLockHeldInterval = new AtomicLong(0);
+
+  FSNamesystemLock(Configuration conf) {
+    this(conf, new Timer());
+  }
+
+  @VisibleForTesting
+  FSNamesystemLock(Configuration conf, Timer timer) {
+    boolean fair = conf.getBoolean("dfs.namenode.fslock.fair", true);
+    FSNamesystem.LOG.info("fsLock is fair: " + fair);
     this.coarseLock = new ReentrantReadWriteLock(fair);
+    this.timer = timer;
+
+    this.writeLockReportingThreshold = conf.getLong(
+        DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY,
+        DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT);
+    this.readLockReportingThreshold = conf.getLong(
+        DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY,
+        DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT);
+    this.lockSuppressWarningInterval = conf.getTimeDuration(
+        DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY,
+        DFS_LOCK_SUPPRESS_WARNING_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
   }
-  
-  @Override
-  public Lock readLock() {
-    return coarseLock.readLock();
+
+  public void readLock() {
+    coarseLock.readLock().lock();
+    if (coarseLock.getReadHoldCount() == 1) {
+      readLockHeldTimeStamp.set(timer.monotonicNow());
+    }
+  }
+
+  public void readUnlock() {
+    final boolean needReport = coarseLock.getReadHoldCount() == 1;
+    final long readLockInterval =
+        timer.monotonicNow() - readLockHeldTimeStamp.get();
+    coarseLock.readLock().unlock();
+
+    if (needReport) {
+      readLockHeldTimeStamp.remove();
+    }
+    if (needReport && readLockInterval >= this.readLockReportingThreshold) {
+      long localLongestReadLock;
+      do {
+        localLongestReadLock = longestReadLockHeldInterval.get();
+      } while (localLongestReadLock - readLockInterval < 0 &&
+          !longestReadLockHeldInterval.compareAndSet(localLongestReadLock,
+              readLockInterval));
+
+      long localTimeStampOfLastReadLockReport;
+      long now;
+      do {
+        now = timer.monotonicNow();
+        localTimeStampOfLastReadLockReport =
+            timeStampOfLastReadLockReport.get();
+        if (now - localTimeStampOfLastReadLockReport <
+            lockSuppressWarningInterval) {
+          numReadLockWarningsSuppressed.incrementAndGet();
+          return;
+        }
+      } while (!timeStampOfLastReadLockReport.compareAndSet(
+          localTimeStampOfLastReadLockReport, now));
+      int numSuppressedWarnings = numReadLockWarningsSuppressed.getAndSet(0);
+      long longestLockHeldInterval = longestReadLockHeldInterval.getAndSet(0);
+      FSNamesystem.LOG.info("FSNamesystem read lock held for " +
+          readLockInterval + " ms via\n" +
+          StringUtils.getStackTrace(Thread.currentThread()) +
+          "\tNumber of suppressed read-lock reports: " + numSuppressedWarnings +
+          "\n\tLongest read-lock held interval: " + longestLockHeldInterval);
+    }
   }
   
-  @Override
-  public Lock writeLock() {
-    return coarseLock.writeLock();
+  public void writeLock() {
+    coarseLock.writeLock().lock();
+    if (coarseLock.getWriteHoldCount() == 1) {
+      writeLockHeldTimeStamp = timer.monotonicNow();
+    }
+  }
+
+  public void writeLockInterruptibly() throws InterruptedException {
+    coarseLock.writeLock().lockInterruptibly();
+    if (coarseLock.getWriteHoldCount() == 1) {
+      writeLockHeldTimeStamp = timer.monotonicNow();
+    }
+  }
+
+  public void writeUnlock() {
+    final boolean needReport = coarseLock.getWriteHoldCount() == 1 &&
+        coarseLock.isWriteLockedByCurrentThread();
+    final long currentTime = timer.monotonicNow();
+    final long writeLockInterval = currentTime - writeLockHeldTimeStamp;
+
+    boolean logReport = false;
+    int numSuppressedWarnings = 0;
+    long longestLockHeldInterval = 0;
+    if (needReport && writeLockInterval >= this.writeLockReportingThreshold) {
+      if (writeLockInterval > longestWriteLockHeldInterval) {
+        longestWriteLockHeldInterval = writeLockInterval;
+      }
+      if (currentTime - timeStampOfLastWriteLockReport >
+          this.lockSuppressWarningInterval) {
+        logReport = true;
+        numSuppressedWarnings = numWriteLockWarningsSuppressed;
+        numWriteLockWarningsSuppressed = 0;
+        longestLockHeldInterval = longestWriteLockHeldInterval;
+        longestWriteLockHeldInterval = 0;
+        timeStampOfLastWriteLockReport = currentTime;
+      } else {
+        numWriteLockWarningsSuppressed++;
+      }
+    }
+
+    coarseLock.writeLock().unlock();
+
+    if (logReport) {
+      FSNamesystem.LOG.info("FSNamesystem write lock held for " +
+          writeLockInterval + " ms via\n" +
+          StringUtils.getStackTrace(Thread.currentThread()) +
+          "\tNumber of suppressed write-lock reports: " +
+          numSuppressedWarnings + "\n\tLongest write-lock held interval: " +
+          longestLockHeldInterval);
+    }
   }
 
   public int getReadHoldCount() {
@@ -60,6 +213,10 @@ class FSNamesystemLock implements ReadWriteLock {
     return coarseLock.isWriteLockedByCurrentThread();
   }
 
+  public Condition newWriteLockCondition() {
+    return coarseLock.writeLock().newCondition();
+  }
+
   /**
    * Returns the QueueLength of waiting threads.
    *
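
For reference, a minimal usage sketch (not part of the patch; it mirrors
the TestFSNamesystemLock cases added below, and assumes the usual Hadoop
test imports for Configuration, DFSConfigKeys, and FakeTimer):

// Drive the refactored lock with a FakeTimer so a long write-lock hold
// crosses the reporting threshold and triggers a log report on unlock.
Configuration conf = new Configuration();
conf.setLong(
    DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY, 100L);
FakeTimer timer = new FakeTimer();
FSNamesystemLock fsnLock = new FSNamesystemLock(conf, timer);

fsnLock.writeLock();
timer.advance(150);    // hold past the 100 ms reporting threshold
fsnLock.writeUnlock(); // logs "FSNamesystem write lock held for 150 ms ..."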

http://git-wip-us.apache.org/repos/asf/hadoop/blob/434c5ea7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
index 47d549b..f02c679 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
-import org.apache.hadoop.util.FakeTimer;
 import static org.hamcrest.CoreMatchers.either;
 import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.junit.Assert.*;
@@ -31,7 +30,6 @@ import java.net.InetAddress;
 import java.net.URI;
 import java.util.Collection;
 
-import com.google.common.base.Supplier;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
@@ -45,22 +43,12 @@ import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.apache.log4j.Level;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.internal.util.reflection.Whitebox;
 
 import java.util.List;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.regex.Pattern;
 
 public class TestFSNamesystem {
 
@@ -165,59 +153,6 @@ public class TestFSNamesystem {
     assertTrue("Replication queues weren't being populated after entering "
       + "safemode 2nd time", bm.isPopulatingReplQueues());
   }
-  
-  @Test
-  public void testFsLockFairness() throws IOException, InterruptedException{
-    Configuration conf = new Configuration();
-
-    FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
-    FSImage fsImage = Mockito.mock(FSImage.class);
-    Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
-
-    conf.setBoolean("dfs.namenode.fslock.fair", true);
-    FSNamesystem fsNamesystem = new FSNamesystem(conf, fsImage);
-    assertTrue(fsNamesystem.getFsLockForTests().isFair());
-    
-    conf.setBoolean("dfs.namenode.fslock.fair", false);
-    fsNamesystem = new FSNamesystem(conf, fsImage);
-    assertFalse(fsNamesystem.getFsLockForTests().isFair());
-  }  
-  
-  @Test
-  public void testFSNamesystemLockCompatibility() {
-    FSNamesystemLock rwLock = new FSNamesystemLock(true);
-
-    assertEquals(0, rwLock.getReadHoldCount());
-    rwLock.readLock().lock();
-    assertEquals(1, rwLock.getReadHoldCount());
-
-    rwLock.readLock().lock();
-    assertEquals(2, rwLock.getReadHoldCount());
-
-    rwLock.readLock().unlock();
-    assertEquals(1, rwLock.getReadHoldCount());
-
-    rwLock.readLock().unlock();
-    assertEquals(0, rwLock.getReadHoldCount());
-
-    assertFalse(rwLock.isWriteLockedByCurrentThread());
-    assertEquals(0, rwLock.getWriteHoldCount());
-    rwLock.writeLock().lock();
-    assertTrue(rwLock.isWriteLockedByCurrentThread());
-    assertEquals(1, rwLock.getWriteHoldCount());
-    
-    rwLock.writeLock().lock();
-    assertTrue(rwLock.isWriteLockedByCurrentThread());
-    assertEquals(2, rwLock.getWriteHoldCount());
-
-    rwLock.writeLock().unlock();
-    assertTrue(rwLock.isWriteLockedByCurrentThread());
-    assertEquals(1, rwLock.getWriteHoldCount());
-
-    rwLock.writeLock().unlock();
-    assertFalse(rwLock.isWriteLockedByCurrentThread());
-    assertEquals(0, rwLock.getWriteHoldCount());
-  }
 
   @Test
   public void testReset() throws Exception {
@@ -258,233 +193,6 @@ public class TestFSNamesystem {
   }
 
   @Test
-  public void testFSLockGetWaiterCount() throws InterruptedException {
-    final int threadCount = 3;
-    final CountDownLatch latch = new CountDownLatch(threadCount);
-    final FSNamesystemLock rwLock = new FSNamesystemLock(true);
-    rwLock.writeLock().lock();
-    ExecutorService helper = Executors.newFixedThreadPool(threadCount);
-
-    for (int x = 0; x < threadCount; x++) {
-      helper.execute(new Runnable() {
-        @Override
-        public void run() {
-          latch.countDown();
-          rwLock.readLock().lock();
-        }
-      });
-    }
-
-    latch.await();
-    try {
-      GenericTestUtils.waitFor(new Supplier<Boolean>() {
-        @Override
-        public Boolean get() {
-          return (threadCount == rwLock.getQueueLength());
-        }
-      }, 10, 1000);
-    } catch (TimeoutException e) {
-      fail("Expected number of blocked thread not found");
-    }
-  }
-
-  /**
-   * Test when FSNamesystem write lock is held for a long time,
-   * logger will report it.
-   */
-  @Test(timeout=45000)
-  public void testFSWriteLockLongHoldingReport() throws Exception {
-    final long writeLockReportingThreshold = 100L;
-    final long writeLockSuppressWarningInterval = 10000L;
-    Configuration conf = new Configuration();
-    conf.setLong(DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY,
-        writeLockReportingThreshold);
-    conf.setTimeDuration(DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY,
-        writeLockSuppressWarningInterval, TimeUnit.MILLISECONDS);
-    FSImage fsImage = Mockito.mock(FSImage.class);
-    FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
-    Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
-    FSNamesystem fsn = new FSNamesystem(conf, fsImage);
-
-    FakeTimer timer = new FakeTimer();
-    fsn.setTimer(timer);
-    timer.advance(writeLockSuppressWarningInterval);
-
-    LogCapturer logs = LogCapturer.captureLogs(FSNamesystem.LOG);
-    GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.INFO);
-
-    // Don't report if the write lock is held for a short time
-    fsn.writeLock();
-    fsn.writeUnlock();
-    assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName()));
-
-    // Report the first write lock warning if it is held for a long time
-    fsn.writeLock();
-    timer.advance(writeLockReportingThreshold + 10);
-    logs.clearOutput();
-    fsn.writeUnlock();
-    assertTrue(logs.getOutput().contains(GenericTestUtils.getMethodName()));
-
-    // Track but do not Report if the write lock is held (interruptibly) for
-    // a long time but time since last report does not exceed the suppress
-    // warning interval
-    fsn.writeLockInterruptibly();
-    timer.advance(writeLockReportingThreshold + 10);
-    logs.clearOutput();
-    fsn.writeUnlock();
-    assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName()));
-
-    // Track but do not Report if it's held for a long time when re-entering
-    // write lock but time since last report does not exceed the suppress
-    // warning interval
-    fsn.writeLock();
-    timer.advance(writeLockReportingThreshold/ 2 + 1);
-    fsn.writeLockInterruptibly();
-    timer.advance(writeLockReportingThreshold/ 2 + 1);
-    fsn.writeLock();
-    timer.advance(writeLockReportingThreshold/ 2);
-    logs.clearOutput();
-    fsn.writeUnlock();
-    assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName()));
-    logs.clearOutput();
-    fsn.writeUnlock();
-    assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName()));
-    logs.clearOutput();
-    fsn.writeUnlock();
-    assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName()));
-
-    // Report if it's held for a long time and time since last report exceeds
-    // the supress warning interval
-    timer.advance(writeLockSuppressWarningInterval);
-    fsn.writeLock();
-    timer.advance(writeLockReportingThreshold + 100);
-    logs.clearOutput();
-    fsn.writeUnlock();
-    assertTrue(logs.getOutput().contains(GenericTestUtils.getMethodName()));
-    assertTrue(logs.getOutput().contains("Number of suppressed write-lock " +
-        "reports: 2"));
-  }
-
-  /**
-   * Test when FSNamesystem read lock is held for a long time,
-   * logger will report it.
-   */
-  @Test(timeout=45000)
-  public void testFSReadLockLongHoldingReport() throws Exception {
-    final long readLockReportingThreshold = 100L;
-    final long readLockSuppressWarningInterval = 10000L;
-    final String readLockLogStmt = "FSNamesystem read lock held for ";
-    Configuration conf = new Configuration();
-    conf.setLong(
-        DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY,
-        readLockReportingThreshold);
-    conf.setTimeDuration(DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY,
-        readLockSuppressWarningInterval, TimeUnit.MILLISECONDS);
-    FSImage fsImage = Mockito.mock(FSImage.class);
-    FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
-    Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
-    FSNamesystem fsn = new FSNamesystem(conf, fsImage);
-
-    FakeTimer timer = new FakeTimer();
-    fsn.setTimer(timer);
-    timer.advance(readLockSuppressWarningInterval);
-
-    LogCapturer logs = LogCapturer.captureLogs(FSNamesystem.LOG);
-    GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.INFO);
-
-    // Don't report if the read lock is held for a short time
-    fsn.readLock();
-    fsn.readUnlock();
-    assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName()) &&
-        logs.getOutput().contains(readLockLogStmt));
-
-    // Report the first read lock warning if it is held for a long time
-    fsn.readLock();
-    timer.advance(readLockReportingThreshold + 10);
-    logs.clearOutput();
-    fsn.readUnlock();
-    assertTrue(logs.getOutput().contains(GenericTestUtils.getMethodName())
-        && logs.getOutput().contains(readLockLogStmt));
-
-    // Track but do not Report if the write lock is held for a long time but
-    // time since last report does not exceed the suppress warning interval
-    fsn.readLock();
-    timer.advance(readLockReportingThreshold + 10);
-    logs.clearOutput();
-    fsn.readUnlock();
-    assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName())
-        && logs.getOutput().contains(readLockLogStmt));
-
-    // Track but do not Report if it's held for a long time when re-entering
-    // read lock but time since last report does not exceed the suppress
-    // warning interval
-    fsn.readLock();
-    timer.advance(readLockReportingThreshold / 2 + 1);
-    fsn.readLock();
-    timer.advance(readLockReportingThreshold / 2 + 1);
-    logs.clearOutput();
-    fsn.readUnlock();
-    assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName()) ||
-        logs.getOutput().contains(readLockLogStmt));
-    logs.clearOutput();
-    fsn.readUnlock();
-    assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName()) &&
-        logs.getOutput().contains(readLockLogStmt));
-
-    // Report if it's held for a long time (and time since last report
-    // exceeds the suppress warning interval) while another thread also has the
-    // read lock. Let one thread hold the lock long enough to activate an
-    // alert, then have another thread grab the read lock to ensure that this
-    // doesn't reset the timing.
-    timer.advance(readLockSuppressWarningInterval);
-    logs.clearOutput();
-    CountDownLatch barrier = new CountDownLatch(1);
-    CountDownLatch barrier2 = new CountDownLatch(1);
-    Thread t1 = new Thread() {
-      @Override
-      public void run() {
-        try {
-          fsn.readLock();
-          timer.advance(readLockReportingThreshold + 1);
-          barrier.countDown(); // Allow for t2 to acquire the read lock
-          barrier2.await(); // Wait until t2 has the read lock
-          fsn.readUnlock();
-        } catch (InterruptedException e) {
-          fail("Interrupted during testing");
-        }
-      }
-    };
-    Thread t2 = new Thread() {
-      @Override
-      public void run() {
-        try {
-          barrier.await(); // Wait until t1 finishes sleeping
-          fsn.readLock();
-          barrier2.countDown(); // Allow for t1 to unlock
-          fsn.readUnlock();
-        } catch (InterruptedException e) {
-          fail("Interrupted during testing");
-        }
-      }
-    };
-    t1.start();
-    t2.start();
-    t1.join();
-    t2.join();
-    // Look for the differentiating class names in the stack trace
-    String stackTracePatternString =
-        String.format("INFO.+%s(.+\n){4}\\Q%%s\\E\\.run", readLockLogStmt);
-    Pattern t1Pattern = Pattern.compile(
-        String.format(stackTracePatternString, t1.getClass().getName()));
-    assertTrue(t1Pattern.matcher(logs.getOutput()).find());
-    Pattern t2Pattern = Pattern.compile(
-        String.format(stackTracePatternString, t2.getClass().getName()));
-    assertFalse(t2Pattern.matcher(logs.getOutput()).find());
-    assertTrue(logs.getOutput().contains("Number of suppressed read-lock " +
-        "reports: 2"));
-  }
-
-  @Test
   public void testSafemodeReplicationConf() throws IOException {
     Configuration conf = new Configuration();
     FSImage fsImage = Mockito.mock(FSImage.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/434c5ea7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
new file mode 100644
index 0000000..08900ec
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
@@ -0,0 +1,317 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import com.google.common.base.Supplier;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.util.FakeTimer;
+import org.apache.log4j.Level;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.regex.Pattern;
+
+import static org.junit.Assert.*;
+
+/**
+ * Tests the FSNamesystemLock, looking at lock compatibilities and
+ * proper logging of lock hold times.
+ */
+public class TestFSNamesystemLock {
+
+  @Test
+  public void testFsLockFairness() throws IOException, InterruptedException{
+    Configuration conf = new Configuration();
+
+    conf.setBoolean("dfs.namenode.fslock.fair", true);
+    FSNamesystemLock fsnLock = new FSNamesystemLock(conf);
+    assertTrue(fsnLock.coarseLock.isFair());
+
+    conf.setBoolean("dfs.namenode.fslock.fair", false);
+    fsnLock = new FSNamesystemLock(conf);
+    assertFalse(fsnLock.coarseLock.isFair());
+  }
+
+  @Test
+  public void testFSNamesystemLockCompatibility() {
+    FSNamesystemLock rwLock = new FSNamesystemLock(new Configuration());
+
+    assertEquals(0, rwLock.getReadHoldCount());
+    rwLock.readLock();
+    assertEquals(1, rwLock.getReadHoldCount());
+
+    rwLock.readLock();
+    assertEquals(2, rwLock.getReadHoldCount());
+
+    rwLock.readUnlock();
+    assertEquals(1, rwLock.getReadHoldCount());
+
+    rwLock.readUnlock();
+    assertEquals(0, rwLock.getReadHoldCount());
+
+    assertFalse(rwLock.isWriteLockedByCurrentThread());
+    assertEquals(0, rwLock.getWriteHoldCount());
+    rwLock.writeLock();
+    assertTrue(rwLock.isWriteLockedByCurrentThread());
+    assertEquals(1, rwLock.getWriteHoldCount());
+
+    rwLock.writeLock();
+    assertTrue(rwLock.isWriteLockedByCurrentThread());
+    assertEquals(2, rwLock.getWriteHoldCount());
+
+    rwLock.writeUnlock();
+    assertTrue(rwLock.isWriteLockedByCurrentThread());
+    assertEquals(1, rwLock.getWriteHoldCount());
+
+    rwLock.writeUnlock();
+    assertFalse(rwLock.isWriteLockedByCurrentThread());
+    assertEquals(0, rwLock.getWriteHoldCount());
+  }
+
+  @Test
+  public void testFSLockGetWaiterCount() throws InterruptedException {
+    final int threadCount = 3;
+    final CountDownLatch latch = new CountDownLatch(threadCount);
+    final Configuration conf = new Configuration();
+    conf.setBoolean("dfs.namenode.fslock.fair", true);
+    final FSNamesystemLock rwLock = new FSNamesystemLock(conf);
+    rwLock.writeLock();
+    ExecutorService helper = Executors.newFixedThreadPool(threadCount);
+
+    for (int x = 0; x < threadCount; x++) {
+      helper.execute(new Runnable() {
+        @Override
+        public void run() {
+          latch.countDown();
+          rwLock.readLock();
+        }
+      });
+    }
+
+    latch.await();
+    try {
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override
+        public Boolean get() {
+          return (threadCount == rwLock.getQueueLength());
+        }
+      }, 10, 1000);
+    } catch (TimeoutException e) {
+      fail("Expected number of blocked thread not found");
+    }
+  }
+
+  /**
+   * Test when FSNamesystem write lock is held for a long time,
+   * logger will report it.
+   */
+  @Test(timeout=45000)
+  public void testFSWriteLockLongHoldingReport() throws Exception {
+    final long writeLockReportingThreshold = 100L;
+    final long writeLockSuppressWarningInterval = 10000L;
+    Configuration conf = new Configuration();
+    conf.setLong(
+        DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY,
+        writeLockReportingThreshold);
+    conf.setTimeDuration(DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY,
+        writeLockSuppressWarningInterval, TimeUnit.MILLISECONDS);
+
+    final FakeTimer timer = new FakeTimer();
+    final FSNamesystemLock fsnLock = new FSNamesystemLock(conf, timer);
+    timer.advance(writeLockSuppressWarningInterval);
+
+    LogCapturer logs = LogCapturer.captureLogs(FSNamesystem.LOG);
+    GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.INFO);
+
+    // Don't report if the write lock is held for a short time
+    fsnLock.writeLock();
+    fsnLock.writeUnlock();
+    assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName()));
+
+    // Report if the write lock is held for a long time
+    fsnLock.writeLock();
+    timer.advance(writeLockReportingThreshold + 10);
+    logs.clearOutput();
+    fsnLock.writeUnlock();
+    assertTrue(logs.getOutput().contains(GenericTestUtils.getMethodName()));
+
+    // Track but do not report if the write lock is held (interruptibly) for
+    // a long time but time since last report does not exceed the suppress
+    // warning interval
+    fsnLock.writeLockInterruptibly();
+    timer.advance(writeLockReportingThreshold + 10);
+    logs.clearOutput();
+    fsnLock.writeUnlock();
+    assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName()));
+
+    // Track but do not report if it's held for a long time when re-entering
+    // write lock but time since last report does not exceed the suppress
+    // warning interval
+    fsnLock.writeLock();
+    timer.advance(writeLockReportingThreshold / 2 + 1);
+    fsnLock.writeLockInterruptibly();
+    timer.advance(writeLockReportingThreshold / 2 + 1);
+    fsnLock.writeLock();
+    timer.advance(writeLockReportingThreshold / 2);
+    logs.clearOutput();
+    fsnLock.writeUnlock();
+    assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName()));
+    logs.clearOutput();
+    fsnLock.writeUnlock();
+    assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName()));
+    logs.clearOutput();
+    fsnLock.writeUnlock();
+    assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName()));
+
+    // Report if it's held for a long time and time since last report exceeds
+    // the suppress warning interval
+    timer.advance(writeLockSuppressWarningInterval);
+    fsnLock.writeLock();
+    timer.advance(writeLockReportingThreshold + 100);
+    logs.clearOutput();
+    fsnLock.writeUnlock();
+    assertTrue(logs.getOutput().contains(GenericTestUtils.getMethodName()));
+    assertTrue(logs.getOutput().contains(
+        "Number of suppressed write-lock reports: 2"));
+  }
+
+  /**
+   * Test when FSNamesystem read lock is held for a long time,
+   * logger will report it.
+   */
+  @Test(timeout=45000)
+  public void testFSReadLockLongHoldingReport() throws Exception {
+    final long readLockReportingThreshold = 100L;
+    final long readLockSuppressWarningInterval = 10000L;
+    final String readLockLogStmt = "FSNamesystem read lock held for ";
+    Configuration conf = new Configuration();
+    conf.setLong(
+        DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY,
+        readLockReportingThreshold);
+    conf.setTimeDuration(DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY,
+        readLockSuppressWarningInterval, TimeUnit.MILLISECONDS);
+
+    final FakeTimer timer = new FakeTimer();
+    final FSNamesystemLock fsnLock = new FSNamesystemLock(conf, timer);
+    timer.advance(readLockSuppressWarningInterval);
+
+    LogCapturer logs = LogCapturer.captureLogs(FSNamesystem.LOG);
+    GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.INFO);
+
+    // Don't report if the read lock is held for a short time
+    fsnLock.readLock();
+    fsnLock.readUnlock();
+    assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName()) &&
+        logs.getOutput().contains(readLockLogStmt));
+
+    // Report the first read lock warning if it is held for a long time
+    fsnLock.readLock();
+    timer.advance(readLockReportingThreshold + 10);
+    logs.clearOutput();
+    fsnLock.readUnlock();
+    assertTrue(logs.getOutput().contains(GenericTestUtils.getMethodName()) &&
+        logs.getOutput().contains(readLockLogStmt));
+
+    // Track but do not Report if the write lock is held for a long time but
+    // time since last report does not exceed the suppress warning interval
+    fsnLock.readLock();
+    timer.advance(readLockReportingThreshold + 10);
+    logs.clearOutput();
+    fsnLock.readUnlock();
+    assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName()) &&
+        logs.getOutput().contains(readLockLogStmt));
+
+    // Track but do not Report if it's held for a long time when re-entering
+    // read lock but time since last report does not exceed the suppress
+    // warning interval
+    fsnLock.readLock();
+    timer.advance(readLockReportingThreshold / 2 + 1);
+    fsnLock.readLock();
+    timer.advance(readLockReportingThreshold / 2 + 1);
+    logs.clearOutput();
+    fsnLock.readUnlock();
+    assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName()) ||
+        logs.getOutput().contains(readLockLogStmt));
+    logs.clearOutput();
+    fsnLock.readUnlock();
+    assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName()) &&
+        logs.getOutput().contains(readLockLogStmt));
+
+    // Report if it's held for a long time (and time since last report
+    // exceeds the suppress warning interval) while another thread also has the
+    // read lock. Let one thread hold the lock long enough to activate an
+    // alert, then have another thread grab the read lock to ensure that this
+    // doesn't reset the timing.
+    timer.advance(readLockSuppressWarningInterval);
+    logs.clearOutput();
+    final CountDownLatch barrier = new CountDownLatch(1);
+    final CountDownLatch barrier2 = new CountDownLatch(1);
+    Thread t1 = new Thread() {
+      @Override
+      public void run() {
+        try {
+          fsnLock.readLock();
+          timer.advance(readLockReportingThreshold + 1);
+          barrier.countDown(); // Allow for t2 to acquire the read lock
+          barrier2.await(); // Wait until t2 has the read lock
+          fsnLock.readUnlock();
+        } catch (InterruptedException e) {
+          fail("Interrupted during testing");
+        }
+      }
+    };
+    Thread t2 = new Thread() {
+      @Override
+      public void run() {
+        try {
+          barrier.await(); // Wait until t1 finishes sleeping
+          fsnLock.readLock();
+          barrier2.countDown(); // Allow for t1 to unlock
+          fsnLock.readUnlock();
+        } catch (InterruptedException e) {
+          fail("Interrupted during testing");
+        }
+      }
+    };
+    t1.start();
+    t2.start();
+    t1.join();
+    t2.join();
+    // Look for the differentiating class names in the stack trace
+    String stackTracePatternString =
+        String.format("INFO.+%s(.+\n){4}\\Q%%s\\E\\.run", readLockLogStmt);
+    Pattern t1Pattern = Pattern.compile(
+        String.format(stackTracePatternString, t1.getClass().getName()));
+    assertTrue(t1Pattern.matcher(logs.getOutput()).find());
+    Pattern t2Pattern = Pattern.compile(
+        String.format(stackTracePatternString, t2.getClass().getName()));
+    assertFalse(t2Pattern.matcher(logs.getOutput()).find());
+    assertTrue(logs.getOutput().contains(
+        "Number of suppressed read-lock reports: 2"));
+  }
+
+}
\ No newline at end of file




[04/57] [abbrv] hadoop git commit: HADOOP-13544. JDiff reports unnecessarily show unannotated APIs and cause confusion while our javadocs only show annotated and public APIs. (vinodkv via wangda)

Posted by in...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/875062b5/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_API_2.7.2.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_API_2.7.2.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_API_2.7.2.xml
index ff01b26..2328c81 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_API_2.7.2.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_API_2.7.2.xml
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
 <!-- Generated by the JDiff Javadoc doclet -->
 <!-- (http://www.jdiff.org) -->
-<!-- on Thu May 12 17:47:18 PDT 2016 -->
+<!-- on Wed Aug 24 13:54:38 PDT 2016 -->
 
 <api
   xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
@@ -9,7 +9,7 @@
   name="hadoop-yarn-api 2.7.2"
   jdversion="1.0.9">
 
-<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/jdiff.jar -verbose -classpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/classes:/Users/vinodkv/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/vinodkv/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/vinodkv/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/vinodkv/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/hadoop-common-2.7.2
 .jar:/Users/vinodkv/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/vinodkv/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/vinodkv/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/vinodkv/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/vinodkv/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/vinodkv/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/vinodkv/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/Users/vinodkv/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/vinodkv/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-
 2.1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/vinodkv/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/vinodkv/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/vinodkv/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/vinodkv/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/vinodkv/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/vinodkv/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/vinodkv/.m2/repos
 itory/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/vinodkv/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/vinodkv/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/vinodkv/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core
 -asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/vinodkv/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/vinodkv/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/vinodkv/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/a
 pi-util-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/vinodkv/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/vinodkv/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/vinodkv/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/vinodkv/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/vinodkv/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.7
 .0_45.jdk/Contents/Home/lib/tools.jar:/Users/vinodkv/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar -sourcepath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java -apidir /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/site/jdiff/xml -apiname hadoop-yarn-api 2.7.2 -->
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/jdiff.jar -verbose -classpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/classes:/Users/vinodkv/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/vinodkv/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/vinodkv/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/vinodkv/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/hadoop-common-2.7.2.
 jar:/Users/vinodkv/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/vinodkv/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/vinodkv/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/vinodkv/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/vinodkv/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/vinodkv/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/vinodkv/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/Users/vinodkv/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/vinodkv/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2
 .1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/vinodkv/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/vinodkv/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/vinodkv/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/vinodkv/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/vinodkv/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/vinodkv/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/vinodkv/.m2/reposi
 tory/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/vinodkv/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/vinodkv/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/vinodkv/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-
 asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/vinodkv/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/vinodkv/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/vinodkv/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/ap
 i-util-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/vinodkv/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/vinodkv/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/vinodkv/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/vinodkv/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/vinodkv/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.8.
 0_40.jdk/Contents/Home/lib/tools.jar:/Users/vinodkv/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar -sourcepath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/jdiff.jar -apidir /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/site/jdiff/xml -apiname hadoop-yarn-api 2.7.2 -->
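
The IncludePublicAnnotationsJDiffDoclet named in the regenerated command line above emits only types explicitly annotated as public-facing, which is why so many unannotated entries disappear from the XML that follows. A minimal sketch of the kind of class the doclet keeps; ExampleRecord is a hypothetical name, for illustration only:

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

// Emitted into the JDiff XML: carries an explicit Public audience annotation.
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class ExampleRecord {
  public abstract String getValue();
}

// A type with no InterfaceAudience annotation at all is skipped by this
// doclet, rather than showing up in the report as a confusing API entry.
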
 <package name="org.apache.hadoop.yarn.api">
   <!-- start interface org.apache.hadoop.yarn.api.ApplicationClientProtocol -->
   <interface name="ApplicationClientProtocol"    abstract="true"
@@ -17,7 +17,7 @@
     deprecated="not deprecated">
     <implements name="org.apache.hadoop.yarn.api.ApplicationBaseProtocol"/>
     <method name="getNewApplication" return="org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest"/>
@@ -44,7 +44,7 @@
       </doc>
     </method>
     <method name="submitApplication" return="org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest"/>
@@ -90,7 +90,7 @@
       </doc>
     </method>
     <method name="forceKillApplication" return="org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest"/>
@@ -118,7 +118,7 @@
       </doc>
     </method>
     <method name="getClusterMetrics" return="org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest"/>
@@ -140,7 +140,7 @@
       </doc>
     </method>
     <method name="getClusterNodes" return="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest"/>
@@ -161,7 +161,7 @@
       </doc>
     </method>
     <method name="getQueueInfo" return="org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest"/>
@@ -184,7 +184,7 @@
       </doc>
     </method>
     <method name="getQueueUserAcls" return="org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest"/>
@@ -205,7 +205,7 @@
       </doc>
     </method>
     <method name="moveApplicationAcrossQueues" return="org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesResponse"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest"/>
@@ -221,7 +221,7 @@
       </doc>
     </method>
     <method name="submitReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest"/>
@@ -274,7 +274,7 @@
       </doc>
     </method>
     <method name="updateReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest"/>
@@ -309,7 +309,7 @@
       </doc>
     </method>
     <method name="deleteReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest"/>
@@ -333,7 +333,7 @@
       </doc>
     </method>
     <method name="getNodeToLabels" return="org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest"/>
@@ -351,7 +351,7 @@
       </doc>
     </method>
     <method name="getLabelsToNodes" return="org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesResponse"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesRequest"/>
@@ -370,7 +370,7 @@
       </doc>
     </method>
     <method name="getClusterNodeLabels" return="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest"/>
@@ -497,65 +497,6 @@
     </doc>
   </interface>
   <!-- end interface org.apache.hadoop.yarn.api.ApplicationConstants -->
-  <!-- start class org.apache.hadoop.yarn.api.ApplicationConstants.Environment -->
-  <class name="ApplicationConstants.Environment" extends="java.lang.Enum"
-    abstract="false"
-    static="true" final="true" visibility="public"
-    deprecated="not deprecated">
-    <method name="values" return="org.apache.hadoop.yarn.api.ApplicationConstants.Environment[]"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="valueOf" return="org.apache.hadoop.yarn.api.ApplicationConstants.Environment"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
-    </method>
-    <method name="key" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="toString" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="$" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Expand the environment variable based on client OS environment variable
- expansion syntax (e.g. $VAR for Linux and %VAR% for Windows).
- <p>
- Note: Use the $$() method for cross-platform portability, i.e. when
- submitting an application from a Windows client to a Linux/Unix server or vice versa.
- </p>]]>
-      </doc>
-    </method>
-    <method name="$$" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Expand the environment variable in platform-agnostic syntax. The
- parameter expansion marker "{{VAR}}" will be replaced with real parameter
- expansion marker ('%' for Windows and '$' for Linux) by NodeManager on
- container launch. For example: {{VAR}} will be replaced as $VAR on Linux,
- and %VAR% on Windows.]]>
-      </doc>
-    </method>
-    <doc>
-    <![CDATA[Environment for Applications.
-
- Some of the environment variables for applications are <em>final</em>
- i.e. they cannot be modified by the applications.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.api.ApplicationConstants.Environment -->
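
The $ versus $$ distinction documented in the removed javadoc above is easiest to see in a container launch command. A minimal sketch, not part of this diff; MyAppMaster is a placeholder main class:

import java.util.Collections;

import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.util.Records;

public class LaunchCommandSketch {
  public static ContainerLaunchContext amContext() {
    // Environment.JAVA_HOME.$$() emits the marker {{JAVA_HOME}}; the
    // NodeManager rewrites it to $JAVA_HOME on Linux or %JAVA_HOME% on
    // Windows at container launch, so the client never needs to know the
    // node's OS. Environment.JAVA_HOME.$() would bake in the client OS form.
    String cmd = Environment.JAVA_HOME.$$() + "/bin/java -Xmx256m MyAppMaster"
        + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
        + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr";

    ContainerLaunchContext ctx =
        Records.newRecord(ContainerLaunchContext.class);
    ctx.setCommands(Collections.singletonList(cmd));
    return ctx;
  }
}
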
   <!-- start interface org.apache.hadoop.yarn.api.ApplicationHistoryProtocol -->
   <interface name="ApplicationHistoryProtocol"    abstract="true"
     static="false" final="false" visibility="public"
@@ -574,7 +515,7 @@
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
     <method name="registerApplicationMaster" return="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest"/>
@@ -611,7 +552,7 @@
       </doc>
     </method>
     <method name="finishApplicationMaster" return="org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest"/>
@@ -637,7 +578,7 @@
       </doc>
     </method>
     <method name="allocate" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest"/>
@@ -712,7 +653,7 @@
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
     <method name="use" return="org.apache.hadoop.yarn.api.protocolrecords.UseSharedCacheResourceResponse"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.UseSharedCacheResourceRequest"/>
@@ -740,7 +681,7 @@
       </doc>
     </method>
     <method name="release" return="org.apache.hadoop.yarn.api.protocolrecords.ReleaseSharedCacheResourceResponse"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReleaseSharedCacheResourceRequest"/>
@@ -778,19 +719,12 @@
     </doc>
   </interface>
   <!-- end interface org.apache.hadoop.yarn.api.ClientSCMProtocol -->
-  <!-- start interface org.apache.hadoop.yarn.api.ClientSCMProtocolPB -->
-  <interface name="ClientSCMProtocolPB"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.yarn.proto.ClientSCMProtocol.ClientSCMProtocolService.BlockingInterface"/>
-  </interface>
-  <!-- end interface org.apache.hadoop.yarn.api.ClientSCMProtocolPB -->
   <!-- start interface org.apache.hadoop.yarn.api.ContainerManagementProtocol -->
   <interface name="ContainerManagementProtocol"    abstract="true"
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
     <method name="startContainers" return="org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest"/>
@@ -840,7 +774,7 @@
       </doc>
     </method>
     <method name="stopContainers" return="org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest"/>
@@ -879,7 +813,7 @@
       </doc>
     </method>
     <method name="getContainerStatuses" return="org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest"/>
@@ -2456,67 +2390,6 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse -->
-  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesRequest -->
-  <class name="GetLabelsToNodesRequest" extends="java.lang.Object"
-    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="GetLabelsToNodesRequest"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesRequest"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesRequest"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="nodeLabels" type="java.util.Set"/>
-    </method>
-    <method name="setNodeLabels"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="nodeLabels" type="java.util.Set"/>
-    </method>
-    <method name="getNodeLabels" return="java.util.Set"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesRequest -->
-  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesResponse -->
-  <class name="GetLabelsToNodesResponse" extends="java.lang.Object"
-    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="GetLabelsToNodesResponse"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesResponse"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="map" type="java.util.Map"/>
-    </method>
-    <method name="setLabelsToNodes"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="map" type="java.util.Map"/>
-    </method>
-    <method name="getLabelsToNodes" return="java.util.Map"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesResponse -->
   <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest -->
   <class name="GetNewApplicationRequest" extends="java.lang.Object"
     abstract="true"
@@ -2582,50 +2455,6 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse -->
-  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest -->
-  <class name="GetNodesToLabelsRequest" extends="java.lang.Object"
-    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="GetNodesToLabelsRequest"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest -->
-  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse -->
-  <class name="GetNodesToLabelsResponse" extends="java.lang.Object"
-    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="GetNodesToLabelsResponse"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="map" type="java.util.Map"/>
-    </method>
-    <method name="setNodeToLabels"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="map" type="java.util.Map"/>
-    </method>
-    <method name="getNodeToLabels" return="java.util.Map"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse -->
   <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest -->
   <class name="GetQueueInfoRequest" extends="java.lang.Object"
     abstract="true"
@@ -5838,127 +5667,6 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.yarn.api.records.ContainerReport -->
-  <!-- start class org.apache.hadoop.yarn.api.records.ContainerResourceDecrease -->
-  <class name="ContainerResourceDecrease" extends="java.lang.Object"
-    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="ContainerResourceDecrease"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ContainerResourceDecrease"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="existingContainerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
-      <param name="targetCapability" type="org.apache.hadoop.yarn.api.records.Resource"/>
-    </method>
-    <method name="getContainerId" return="org.apache.hadoop.yarn.api.records.ContainerId"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setContainerId"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
-    </method>
-    <method name="getCapability" return="org.apache.hadoop.yarn.api.records.Resource"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setCapability"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
-    </method>
-    <method name="hashCode" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="equals" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="other" type="java.lang.Object"/>
-    </method>
-    <doc>
-    <![CDATA[Used by Application Master to ask Node Manager reduce size of a specified
- container]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.api.records.ContainerResourceDecrease -->
-  <!-- start class org.apache.hadoop.yarn.api.records.ContainerResourceIncrease -->
-  <class name="ContainerResourceIncrease" extends="java.lang.Object"
-    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="ContainerResourceIncrease"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ContainerResourceIncrease"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="existingContainerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
-      <param name="targetCapability" type="org.apache.hadoop.yarn.api.records.Resource"/>
-      <param name="token" type="org.apache.hadoop.yarn.api.records.Token"/>
-    </method>
-    <method name="getContainerId" return="org.apache.hadoop.yarn.api.records.ContainerId"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setContainerId"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
-    </method>
-    <method name="getCapability" return="org.apache.hadoop.yarn.api.records.Resource"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setCapability"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
-    </method>
-    <method name="getContainerToken" return="org.apache.hadoop.yarn.api.records.Token"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setContainerToken"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="token" type="org.apache.hadoop.yarn.api.records.Token"/>
-    </method>
-    <method name="hashCode" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="equals" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="other" type="java.lang.Object"/>
-    </method>
-    <doc>
-    <![CDATA[Represent a new increased container accepted by Resource Manager]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.api.records.ContainerResourceIncrease -->
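
Both resize records removed above are abstract PB-backed classes built through static newInstance factories, per the signatures listed. A sketch of their pre-removal use, compiling against hadoop-yarn-api 2.7.2; the id, capability, and token arguments are placeholders supplied by the caller:

import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerResourceDecrease;
import org.apache.hadoop.yarn.api.records.ContainerResourceIncrease;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.Token;

public class ResizeRecordSketch {
  // AM-to-NM decrease: no token needed, the container only shrinks.
  static ContainerResourceDecrease decrease(ContainerId id, Resource target) {
    return ContainerResourceDecrease.newInstance(id, target);
  }

  // RM-accepted increase: carries a token authorizing the larger size.
  static ContainerResourceIncrease increase(ContainerId id, Resource target,
      Token token) {
    return ContainerResourceIncrease.newInstance(id, target, token);
  }
}
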
   <!-- start class org.apache.hadoop.yarn.api.records.ContainerResourceIncreaseRequest -->
   <class name="ContainerResourceIncreaseRequest" extends="java.lang.Object"
     abstract="true"
@@ -7664,7 +7372,7 @@
     deprecated="not deprecated">
     <implements name="java.util.Comparator"/>
     <implements name="java.io.Serializable"/>
-    <constructor name="ReservationRequest.ReservationRequestComparator"
+    <constructor name="ReservationRequestComparator"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </constructor>
@@ -8260,7 +7968,7 @@
     deprecated="not deprecated">
     <implements name="java.util.Comparator"/>
     <implements name="java.io.Serializable"/>
-    <constructor name="ResourceRequest.ResourceRequestComparator"
+    <constructor name="ResourceRequestComparator"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </constructor>
@@ -9389,7 +9097,7 @@
     abstract="false"
     static="true" final="false" visibility="public"
     deprecated="not deprecated">
-    <constructor name="TimelineEvents.EventsOfOneEntity"
+    <constructor name="EventsOfOneEntity"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </constructor>
@@ -9556,7 +9264,7 @@
     abstract="false"
     static="true" final="false" visibility="public"
     deprecated="not deprecated">
-    <constructor name="TimelinePutResponse.TimelinePutError"
+    <constructor name="TimelinePutError"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </constructor>
@@ -13137,34 +12845,6 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.yarn.exceptions.ApplicationIdNotProvidedException -->
-  <!-- start class org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException -->
-  <class name="ApplicationMasterNotRegisteredException" extends="org.apache.hadoop.yarn.exceptions.YarnException"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="ApplicationMasterNotRegisteredException" type="java.lang.Throwable"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <constructor name="ApplicationMasterNotRegisteredException" type="java.lang.String"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <constructor name="ApplicationMasterNotRegisteredException" type="java.lang.String, java.lang.Throwable"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <doc>
-    <![CDATA[This exception is thrown when an Application Master tries to unregister by calling
- {@link ApplicationMasterProtocol#finishApplicationMaster(FinishApplicationMasterRequest)}
- API without first registering by calling
- {@link ApplicationMasterProtocol#registerApplicationMaster(RegisterApplicationMasterRequest)}
- or after an RM restart. The ApplicationMaster is expected to call
- {@link ApplicationMasterProtocol#registerApplicationMaster(RegisterApplicationMasterRequest)}
- and retry.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException -->
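
The removed javadoc above prescribes a concrete recovery path: register (again), then retry the call. A sketch of that handling, assuming an ApplicationMasterProtocol proxy and prebuilt request objects are already in hand; retry/backoff policy is trimmed to a single attempt:

import java.io.IOException;

import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
import org.apache.hadoop.yarn.exceptions.YarnException;

public class UnregisterWithRetrySketch {
  static void finish(ApplicationMasterProtocol am,
      RegisterApplicationMasterRequest register,
      FinishApplicationMasterRequest finish)
      throws YarnException, IOException {
    try {
      am.finishApplicationMaster(finish);
    } catch (ApplicationMasterNotRegisteredException e) {
      // RM restarted (or this AM never registered): register, then retry.
      am.registerApplicationMaster(register);
      am.finishApplicationMaster(finish);
    }
  }
}
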
   <!-- start class org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException -->
   <class name="ApplicationNotFoundException" extends="org.apache.hadoop.yarn.exceptions.YarnException"
     abstract="false"
@@ -13214,158 +12894,6 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.yarn.exceptions.ContainerNotFoundException -->
-  <!-- start class org.apache.hadoop.yarn.exceptions.InvalidApplicationMasterRequestException -->
-  <class name="InvalidApplicationMasterRequestException" extends="org.apache.hadoop.yarn.exceptions.YarnException"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="InvalidApplicationMasterRequestException" type="java.lang.Throwable"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <constructor name="InvalidApplicationMasterRequestException" type="java.lang.String"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <constructor name="InvalidApplicationMasterRequestException" type="java.lang.String, java.lang.Throwable"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <doc>
-    <![CDATA[This exception is thrown when an ApplicationMaster asks for resources by
- calling {@link ApplicationMasterProtocol#allocate(AllocateRequest)}
- without first registering by calling
- {@link ApplicationMasterProtocol#registerApplicationMaster(RegisterApplicationMasterRequest)}
- or if it tries to register more than once.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.exceptions.InvalidApplicationMasterRequestException -->
-  <!-- start class org.apache.hadoop.yarn.exceptions.InvalidAuxServiceException -->
-  <class name="InvalidAuxServiceException" extends="org.apache.hadoop.yarn.exceptions.YarnException"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="InvalidAuxServiceException" type="java.lang.String"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <doc>
-    <![CDATA[This exception is thrown by a NodeManager that is rejecting start-container
- requests via
- {@link ContainerManagementProtocol#startContainers(StartContainersRequest)}
- when the requested auxiliary service does not exist.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.exceptions.InvalidAuxServiceException -->
-  <!-- start class org.apache.hadoop.yarn.exceptions.InvalidContainerException -->
-  <class name="InvalidContainerException" extends="org.apache.hadoop.yarn.exceptions.YarnException"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="InvalidContainerException" type="java.lang.String"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <doc>
-    <![CDATA[This exception is thrown by a NodeManager that is rejecting start-container
- requests via
- {@link ContainerManagementProtocol#startContainers(StartContainersRequest)}
- for containers allocated by a previous instance of the RM.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.exceptions.InvalidContainerException -->
-  <!-- start class org.apache.hadoop.yarn.exceptions.InvalidContainerReleaseException -->
-  <class name="InvalidContainerReleaseException" extends="org.apache.hadoop.yarn.exceptions.YarnException"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="InvalidContainerReleaseException" type="java.lang.Throwable"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <constructor name="InvalidContainerReleaseException" type="java.lang.String"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <constructor name="InvalidContainerReleaseException" type="java.lang.String, java.lang.Throwable"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <doc>
-    <![CDATA[This exception is thrown when an Application Master tries to release
- containers not belonging to it using
- {@link ApplicationMasterProtocol#allocate(AllocateRequest)} API.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.exceptions.InvalidContainerReleaseException -->
-  <!-- start class org.apache.hadoop.yarn.exceptions.InvalidResourceBlacklistRequestException -->
-  <class name="InvalidResourceBlacklistRequestException" extends="org.apache.hadoop.yarn.exceptions.YarnException"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="InvalidResourceBlacklistRequestException" type="java.lang.Throwable"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <constructor name="InvalidResourceBlacklistRequestException" type="java.lang.String"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <constructor name="InvalidResourceBlacklistRequestException" type="java.lang.String, java.lang.Throwable"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <doc>
-    <![CDATA[This exception is thrown when an application provides an invalid
- {@link ResourceBlacklistRequest} specification for blacklisting of resources
- in {@link ApplicationMasterProtocol#allocate(AllocateRequest)} API.
-
- Currently this exception is thrown when an application tries to
- blacklist {@link ResourceRequest#ANY}.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.exceptions.InvalidResourceBlacklistRequestException -->
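
Per the removed javadoc, the one blacklist target rejected in this release is the wildcard. A sketch of a valid versus an invalid request; the host names are placeholders:

import java.util.Arrays;
import java.util.Collections;

import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
import org.apache.hadoop.yarn.api.records.ResourceRequest;

public class BlacklistSketch {
  // Valid: blacklist two concrete nodes, remove nothing.
  static ResourceBlacklistRequest valid() {
    return ResourceBlacklistRequest.newInstance(
        Arrays.asList("host1.example.com", "host2.example.com"),
        Collections.<String>emptyList());
  }

  // Invalid: ResourceRequest.ANY ("*") cannot be blacklisted; an allocate()
  // carrying this request fails with InvalidResourceBlacklistRequestException.
  static ResourceBlacklistRequest invalid() {
    return ResourceBlacklistRequest.newInstance(
        Collections.singletonList(ResourceRequest.ANY),
        Collections.<String>emptyList());
  }
}
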
-  <!-- start class org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException -->
-  <class name="InvalidResourceRequestException" extends="org.apache.hadoop.yarn.exceptions.YarnException"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="InvalidResourceRequestException" type="java.lang.Throwable"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <constructor name="InvalidResourceRequestException" type="java.lang.String"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <constructor name="InvalidResourceRequestException" type="java.lang.String, java.lang.Throwable"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <doc>
-    <![CDATA[This exception is thrown when a resource requested via
- {@link ResourceRequest} in the
- {@link ApplicationMasterProtocol#allocate(AllocateRequest)} API is out of the
- range of the configured lower and upper limits on resources.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException -->
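
The configured limits the removed javadoc refers to are yarn.scheduler.minimum-allocation-mb/-vcores and yarn.scheduler.maximum-allocation-mb/-vcores. A sketch of a request that trips the upper bound under the default 8192 MB maximum; the sizes are illustrative:

import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;

public class OversizedRequestSketch {
  static ResourceRequest build() {
    // 64 GB against a default 8192 MB maximum-allocation-mb: the scheduler
    // fails the allocate() carrying this request with
    // InvalidResourceRequestException, since it could never be satisfied.
    Resource sixtyFourGb = Resource.newInstance(64 * 1024, 1);
    return ResourceRequest.newInstance(
        Priority.newInstance(0), ResourceRequest.ANY, sixtyFourGb, 1);
  }
}
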
-  <!-- start class org.apache.hadoop.yarn.exceptions.NMNotYetReadyException -->
-  <class name="NMNotYetReadyException" extends="org.apache.hadoop.yarn.exceptions.YarnException"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="NMNotYetReadyException" type="java.lang.String"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <doc>
-    <![CDATA[This exception is thrown on
- {@link ContainerManagementProtocol#startContainers(StartContainersRequest)} API
- when an NM starts from scratch but has not yet connected with RM.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.exceptions.NMNotYetReadyException -->
   <!-- start class org.apache.hadoop.yarn.exceptions.YarnException -->
   <class name="YarnException" extends="java.lang.Exception"
     abstract="false"
@@ -13660,7 +13188,7 @@
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
     <method name="runCleanerTask" return="org.apache.hadoop.yarn.server.api.protocolrecords.RunSharedCacheCleanerTaskResponse"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="request" type="org.apache.hadoop.yarn.server.api.protocolrecords.RunSharedCacheCleanerTaskRequest"/>




[11/57] [abbrv] hadoop git commit: YARN-3142. Improve locks in AppSchedulingInfo. (Varun Saxena via wangda)

Posted by in...@apache.org.
YARN-3142. Improve locks in AppSchedulingInfo. (Varun Saxena via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1831be8e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1831be8e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1831be8e

Branch: refs/heads/HDFS-10467
Commit: 1831be8e737fd423a9f3d590767b944147e85641
Parents: 875062b
Author: Wangda Tan <wa...@apache.org>
Authored: Tue Sep 27 11:54:55 2016 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Tue Sep 27 11:54:55 2016 -0700

----------------------------------------------------------------------
 .../scheduler/AppSchedulingInfo.java            | 619 +++++++++++--------
 1 file changed, 356 insertions(+), 263 deletions(-)
----------------------------------------------------------------------
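
The change replaces AppSchedulingInfo's method-level synchronized with a per-instance ReentrantReadWriteLock (plus a volatile for the simple pending flag), so concurrent readers of scheduling state no longer serialize behind a single monitor. A distilled sketch of the pattern, reduced to one guarded field:

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class AppInfoLockSketch {
  private final ReentrantReadWriteLock.ReadLock readLock;
  private final ReentrantReadWriteLock.WriteLock writeLock;
  private String queueName = "default";   // stands in for the guarded state

  public AppInfoLockSketch() {
    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    readLock = lock.readLock();
    writeLock = lock.writeLock();
  }

  public String getQueueName() {
    readLock.lock();           // shared: many readers may hold this at once
    try {
      return queueName;
    } finally {
      readLock.unlock();
    }
  }

  public void setQueueName(String name) {
    writeLock.lock();          // exclusive: blocks all readers and writers
    try {
      this.queueName = name;
    } finally {
      writeLock.unlock();
    }
  }
}

One stylistic note: the patch takes lock() as the first statement inside the try block, while the conventional ordering (used in the sketch) acquires the lock just before try; the difference only matters if lock() itself could fail before the lock is held.
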


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1831be8e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
index 39820f7..59a6650 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
@@ -30,6 +30,7 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -67,7 +68,8 @@ public class AppSchedulingInfo {
 
   private Queue queue;
   private ActiveUsersManager activeUsersManager;
-  private boolean pending = true; // whether accepted/allocated by scheduler
+  // whether accepted/allocated by scheduler
+  private volatile boolean pending = true;
   private ResourceUsage appResourceUsage;
 
   private AtomicBoolean userBlacklistChanged = new AtomicBoolean(false);
@@ -86,6 +88,9 @@ public class AppSchedulingInfo {
       SchedContainerChangeRequest>>> containerIncreaseRequestMap =
       new ConcurrentHashMap<>();
 
+  private final ReentrantReadWriteLock.ReadLock readLock;
+  private final ReentrantReadWriteLock.WriteLock writeLock;
+
   public AppSchedulingInfo(ApplicationAttemptId appAttemptId,
       String user, Queue queue, ActiveUsersManager activeUsersManager,
       long epoch, ResourceUsage appResourceUsage) {
@@ -97,6 +102,10 @@ public class AppSchedulingInfo {
     this.containerIdCounter = new AtomicLong(
         epoch << ResourceManager.EPOCH_BIT_SHIFT);
     this.appResourceUsage = appResourceUsage;
+
+    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+    readLock = lock.readLock();
+    writeLock = lock.writeLock();
   }
 
   public ApplicationId getApplicationId() {
@@ -115,14 +124,19 @@ public class AppSchedulingInfo {
     return this.containerIdCounter.incrementAndGet();
   }
 
-  public synchronized String getQueueName() {
-    return queue.getQueueName();
+  public String getQueueName() {
+    try {
+      this.readLock.lock();
+      return queue.getQueueName();
+    } finally {
+      this.readLock.unlock();
+    }
   }
 
-  public synchronized boolean isPending() {
+  public boolean isPending() {
     return pending;
   }
-  
+
   public Set<String> getRequestedPartitions() {
     return requestedPartitions;
   }
@@ -130,88 +144,103 @@ public class AppSchedulingInfo {
   /**
    * Clear any pending requests from this application.
    */
-  private synchronized void clearRequests() {
+  private void clearRequests() {
     schedulerKeys.clear();
     resourceRequestMap.clear();
     LOG.info("Application " + applicationId + " requests cleared");
   }
 
-  public synchronized boolean hasIncreaseRequest(NodeId nodeId) {
-    Map<SchedulerRequestKey, Map<ContainerId, SchedContainerChangeRequest>>
-        requestsOnNode = containerIncreaseRequestMap.get(nodeId);
-    return requestsOnNode == null ? false : requestsOnNode.size() > 0;
+  public boolean hasIncreaseRequest(NodeId nodeId) {
+    try {
+      this.readLock.lock();
+      Map<SchedulerRequestKey, Map<ContainerId, SchedContainerChangeRequest>>
+          requestsOnNode = containerIncreaseRequestMap.get(nodeId);
+      return requestsOnNode == null ? false : requestsOnNode.size() > 0;
+    } finally {
+      this.readLock.unlock();
+    }
   }
 
-  public synchronized Map<ContainerId, SchedContainerChangeRequest>
+  public Map<ContainerId, SchedContainerChangeRequest>
       getIncreaseRequests(NodeId nodeId, SchedulerRequestKey schedulerKey) {
-    Map<SchedulerRequestKey, Map<ContainerId, SchedContainerChangeRequest>>
-        requestsOnNode = containerIncreaseRequestMap.get(nodeId);
-    return requestsOnNode == null ? null : requestsOnNode.get(
-        schedulerKey);
+    try {
+      this.readLock.lock();
+      Map<SchedulerRequestKey, Map<ContainerId, SchedContainerChangeRequest>>
+          requestsOnNode = containerIncreaseRequestMap.get(nodeId);
+      return requestsOnNode == null ? null : requestsOnNode.get(
+          schedulerKey);
+    } finally {
+      this.readLock.unlock();
+    }
   }
 
   /**
    * return true if any of the existing increase requests are updated,
    *        false if none of them are updated
    */
-  public synchronized boolean updateIncreaseRequests(
+  public boolean updateIncreaseRequests(
       List<SchedContainerChangeRequest> increaseRequests) {
     boolean resourceUpdated = false;
 
-    for (SchedContainerChangeRequest r : increaseRequests) {
-      if (r.getRMContainer().getState() != RMContainerState.RUNNING) {
-        LOG.warn("rmContainer's state is not RUNNING, for increase request with"
-            + " container-id=" + r.getContainerId());
-        continue;
-      }
-      try {
-        RMServerUtils.checkSchedContainerChangeRequest(r, true);
-      } catch (YarnException e) {
-        LOG.warn("Error happens when checking increase request, Ignoring.."
-            + " exception=", e);
-        continue;
-      }
-      NodeId nodeId = r.getRMContainer().getAllocatedNode();
-
-      Map<SchedulerRequestKey, Map<ContainerId, SchedContainerChangeRequest>>
-          requestsOnNode = containerIncreaseRequestMap.get(nodeId);
-      if (null == requestsOnNode) {
-        requestsOnNode = new TreeMap<>();
-        containerIncreaseRequestMap.put(nodeId, requestsOnNode);
-      }
-
-      SchedContainerChangeRequest prevChangeRequest =
-          getIncreaseRequest(nodeId,
-              r.getRMContainer().getAllocatedSchedulerKey(),
-              r.getContainerId());
-      if (null != prevChangeRequest) {
-        if (Resources.equals(prevChangeRequest.getTargetCapacity(),
-            r.getTargetCapacity())) {
-          // increase request hasn't changed
+    try {
+      this.writeLock.lock();
+      for (SchedContainerChangeRequest r : increaseRequests) {
+        if (r.getRMContainer().getState() != RMContainerState.RUNNING) {
+          LOG.warn("rmContainer's state is not RUNNING, for increase request"
+              + " with container-id=" + r.getContainerId());
           continue;
         }
+        try {
+          RMServerUtils.checkSchedContainerChangeRequest(r, true);
+        } catch (YarnException e) {
+          LOG.warn("Error happens when checking increase request, Ignoring.."
+              + " exception=", e);
+          continue;
+        }
+        NodeId nodeId = r.getRMContainer().getAllocatedNode();
 
-        // remove the old one, as we will use the new one going forward
-        removeIncreaseRequest(nodeId,
-            prevChangeRequest.getRMContainer().getAllocatedSchedulerKey(),
-            prevChangeRequest.getContainerId());
-      }
+        Map<SchedulerRequestKey, Map<ContainerId, SchedContainerChangeRequest>>
+            requestsOnNode = containerIncreaseRequestMap.get(nodeId);
+        if (null == requestsOnNode) {
+          requestsOnNode = new TreeMap<>();
+          containerIncreaseRequestMap.put(nodeId, requestsOnNode);
+        }
 
-      if (Resources.equals(r.getTargetCapacity(),
-          r.getRMContainer().getAllocatedResource())) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Trying to increase container " + r.getContainerId()
-              + ", target capacity = previous capacity = " + prevChangeRequest
-              + ". Will ignore this increase request.");
+        SchedContainerChangeRequest prevChangeRequest =
+            getIncreaseRequest(nodeId,
+                r.getRMContainer().getAllocatedSchedulerKey(),
+                r.getContainerId());
+        if (null != prevChangeRequest) {
+          if (Resources.equals(prevChangeRequest.getTargetCapacity(),
+              r.getTargetCapacity())) {
+            // increase request hasn't changed
+            continue;
+          }
+
+          // remove the old one, as we will use the new one going forward
+          removeIncreaseRequest(nodeId,
+              prevChangeRequest.getRMContainer().getAllocatedSchedulerKey(),
+              prevChangeRequest.getContainerId());
         }
-        continue;
-      }
 
-      // add the new one
-      resourceUpdated = true;
-      insertIncreaseRequest(r);
+        if (Resources.equals(r.getTargetCapacity(),
+            r.getRMContainer().getAllocatedResource())) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Trying to increase container " + r.getContainerId()
+                + ", target capacity = previous capacity = " + prevChangeRequest
+                + ". Will ignore this increase request.");
+          }
+          continue;
+        }
+
+        // add the new one
+        resourceUpdated = true;
+        insertIncreaseRequest(r);
+      }
+      return resourceUpdated;
+    } finally {
+      this.writeLock.unlock();
     }
-    return resourceUpdated;
   }
 
   /**
@@ -275,61 +304,71 @@ public class AppSchedulingInfo {
     }
   }
 
-  public synchronized boolean removeIncreaseRequest(NodeId nodeId,
+  public boolean removeIncreaseRequest(NodeId nodeId,
       SchedulerRequestKey schedulerKey, ContainerId containerId) {
-    Map<SchedulerRequestKey, Map<ContainerId, SchedContainerChangeRequest>>
-        requestsOnNode = containerIncreaseRequestMap.get(nodeId);
-    if (null == requestsOnNode) {
-      return false;
-    }
+    try {
+      this.writeLock.lock();
+      Map<SchedulerRequestKey, Map<ContainerId, SchedContainerChangeRequest>>
+          requestsOnNode = containerIncreaseRequestMap.get(nodeId);
+      if (null == requestsOnNode) {
+        return false;
+      }
 
-    Map<ContainerId, SchedContainerChangeRequest> requestsOnNodeWithPriority =
-        requestsOnNode.get(schedulerKey);
-    if (null == requestsOnNodeWithPriority) {
-      return false;
-    }
+      Map<ContainerId, SchedContainerChangeRequest> requestsOnNodeWithPriority =
+          requestsOnNode.get(schedulerKey);
+      if (null == requestsOnNodeWithPriority) {
+        return false;
+      }
 
-    SchedContainerChangeRequest request =
-        requestsOnNodeWithPriority.remove(containerId);
-    
-    // remove hierarchies if it becomes empty
-    if (requestsOnNodeWithPriority.isEmpty()) {
-      requestsOnNode.remove(schedulerKey);
-      decrementSchedulerKeyReference(schedulerKey);
-    }
-    if (requestsOnNode.isEmpty()) {
-      containerIncreaseRequestMap.remove(nodeId);
-    }
+      SchedContainerChangeRequest request =
+          requestsOnNodeWithPriority.remove(containerId);
     
-    if (request == null) {
-      return false;
-    }
+      // remove hierarchies if it becomes empty
+      if (requestsOnNodeWithPriority.isEmpty()) {
+        requestsOnNode.remove(schedulerKey);
+        decrementSchedulerKeyReference(schedulerKey);
+      }
+      if (requestsOnNode.isEmpty()) {
+        containerIncreaseRequestMap.remove(nodeId);
+      }
 
-    // update queue's pending resource if request exists
-    String partition = request.getRMContainer().getNodeLabelExpression();
-    Resource delta = request.getDeltaCapacity();
-    appResourceUsage.decPending(partition, delta);
-    queue.decPendingResource(partition, delta);
-    
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("remove increase request:" + request);
+      if (request == null) {
+        return false;
+      }
+
+      // update queue's pending resource if request exists
+      String partition = request.getRMContainer().getNodeLabelExpression();
+      Resource delta = request.getDeltaCapacity();
+      appResourceUsage.decPending(partition, delta);
+      queue.decPendingResource(partition, delta);
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("remove increase request:" + request);
+      }
+
+      return true;
+    } finally {
+      this.writeLock.unlock();
     }
-    
-    return true;
   }
   
   public SchedContainerChangeRequest getIncreaseRequest(NodeId nodeId,
       SchedulerRequestKey schedulerKey, ContainerId containerId) {
-    Map<SchedulerRequestKey, Map<ContainerId, SchedContainerChangeRequest>>
-        requestsOnNode = containerIncreaseRequestMap.get(nodeId);
-    if (null == requestsOnNode) {
-      return null;
-    }
+    try {
+      this.readLock.lock();
+      Map<SchedulerRequestKey, Map<ContainerId, SchedContainerChangeRequest>>
+          requestsOnNode = containerIncreaseRequestMap.get(nodeId);
+      if (null == requestsOnNode) {
+        return null;
+      }
 
-    Map<ContainerId, SchedContainerChangeRequest> requestsOnNodeWithPriority =
-        requestsOnNode.get(schedulerKey);
-    return requestsOnNodeWithPriority == null ? null
-        : requestsOnNodeWithPriority.get(containerId);
+      Map<ContainerId, SchedContainerChangeRequest> requestsOnNodeWithPriority =
+          requestsOnNode.get(schedulerKey);
+      return requestsOnNodeWithPriority == null ? null
+          : requestsOnNodeWithPriority.get(containerId);
+    } finally {
+      this.readLock.unlock();
+    }
   }
 
   /**
@@ -343,49 +382,54 @@ public class AppSchedulingInfo {
    *          recover ResourceRequest on preemption
    * @return true if any resource was updated, false otherwise
    */
-  public synchronized boolean updateResourceRequests(
-      List<ResourceRequest> requests,
+  public boolean updateResourceRequests(List<ResourceRequest> requests,
       boolean recoverPreemptedRequestForAContainer) {
     // Flag to track if any incoming requests update "ANY" requests
     boolean anyResourcesUpdated = false;
 
-    // Update resource requests
-    for (ResourceRequest request : requests) {
-      SchedulerRequestKey schedulerKey = SchedulerRequestKey.create(request);
-      String resourceName = request.getResourceName();
-
-      // Update node labels if required
-      updateNodeLabels(request);
-
-      Map<String, ResourceRequest> asks =
-          this.resourceRequestMap.get(schedulerKey);
-      if (asks == null) {
-        asks = new ConcurrentHashMap<>();
-        this.resourceRequestMap.put(schedulerKey, asks);
-      }
+    try {
+      this.writeLock.lock();
+      // Update resource requests
+      for (ResourceRequest request : requests) {
+        SchedulerRequestKey schedulerKey = SchedulerRequestKey.create(request);
+        String resourceName = request.getResourceName();
+
+        // Update node labels if required
+        updateNodeLabels(request);
+
+        Map<String, ResourceRequest> asks =
+            this.resourceRequestMap.get(schedulerKey);
+        if (asks == null) {
+          asks = new ConcurrentHashMap<>();
+          this.resourceRequestMap.put(schedulerKey, asks);
+        }
 
-      // Increment number of containers if recovering preempted resources
-      ResourceRequest lastRequest = asks.get(resourceName);
-      if (recoverPreemptedRequestForAContainer && lastRequest != null) {
-        request.setNumContainers(lastRequest.getNumContainers() + 1);
-      }
+        // Increment number of containers if recovering preempted resources
+        ResourceRequest lastRequest = asks.get(resourceName);
+        if (recoverPreemptedRequestForAContainer && lastRequest != null) {
+          request.setNumContainers(lastRequest.getNumContainers() + 1);
+        }
 
-      // Update asks
-      asks.put(resourceName, request);
+        // Update asks
+        asks.put(resourceName, request);
 
-      if (resourceName.equals(ResourceRequest.ANY)) {
-        //update the applications requested labels set
-        requestedPartitions.add(request.getNodeLabelExpression() == null
-            ? RMNodeLabelsManager.NO_LABEL : request.getNodeLabelExpression());
+        if (resourceName.equals(ResourceRequest.ANY)) {
+          //update the applications requested labels set
+          requestedPartitions.add(request.getNodeLabelExpression() == null
+              ? RMNodeLabelsManager.NO_LABEL :
+                  request.getNodeLabelExpression());
 
-        anyResourcesUpdated = true;
+          anyResourcesUpdated = true;
 
-        // Update pendingResources
-        updatePendingResources(lastRequest, request, schedulerKey,
-            queue.getMetrics());
+          // Update pendingResources
+          updatePendingResources(lastRequest, request, schedulerKey,
+              queue.getMetrics());
+        }
       }
+      return anyResourcesUpdated;
+    } finally {
+      this.writeLock.unlock();
     }
-    return anyResourcesUpdated;
   }
 
   private void updatePendingResources(ResourceRequest lastRequest,
@@ -529,34 +573,49 @@ public class AppSchedulingInfo {
     return userBlacklistChanged.getAndSet(false);
   }
 
-  public synchronized Collection<SchedulerRequestKey> getSchedulerKeys() {
+  public Collection<SchedulerRequestKey> getSchedulerKeys() {
     return schedulerKeys.keySet();
   }
 
-  public synchronized Map<String, ResourceRequest> getResourceRequests(
+  public Map<String, ResourceRequest> getResourceRequests(
       SchedulerRequestKey schedulerKey) {
     return resourceRequestMap.get(schedulerKey);
   }
 
-  public synchronized List<ResourceRequest> getAllResourceRequests() {
+  public List<ResourceRequest> getAllResourceRequests() {
     List<ResourceRequest> ret = new ArrayList<>();
-    for (Map<String, ResourceRequest> r : resourceRequestMap.values()) {
-      ret.addAll(r.values());
+    try {
+      this.readLock.lock();
+      for (Map<String, ResourceRequest> r : resourceRequestMap.values()) {
+        ret.addAll(r.values());
+      }
+    } finally {
+      this.readLock.unlock();
     }
     return ret;
   }
 
-  public synchronized ResourceRequest getResourceRequest(
-      SchedulerRequestKey schedulerKey, String resourceName) {
-    Map<String, ResourceRequest> nodeRequests =
-        resourceRequestMap.get(schedulerKey);
-    return (nodeRequests == null) ? null : nodeRequests.get(resourceName);
+  public ResourceRequest getResourceRequest(SchedulerRequestKey schedulerKey,
+      String resourceName) {
+    try {
+      this.readLock.lock();
+      Map<String, ResourceRequest> nodeRequests =
+          resourceRequestMap.get(schedulerKey);
+      return (nodeRequests == null) ? null : nodeRequests.get(resourceName);
+    } finally {
+      this.readLock.unlock();
+    }
   }
 
-  public synchronized Resource getResource(SchedulerRequestKey schedulerKey) {
-    ResourceRequest request =
-        getResourceRequest(schedulerKey, ResourceRequest.ANY);
-    return (request == null) ? null : request.getCapability();
+  public Resource getResource(SchedulerRequestKey schedulerKey) {
+    try {
+      this.readLock.lock();
+      ResourceRequest request =
+          getResourceRequest(schedulerKey, ResourceRequest.ANY);
+      return (request == null) ? null : request.getCapability();
+    } finally {
+      this.readLock.unlock();
+    }
   }
 
   /**
@@ -582,8 +641,7 @@ public class AppSchedulingInfo {
     }
   }
 
-  public synchronized void increaseContainer(
-      SchedContainerChangeRequest increaseRequest) {
+  public void increaseContainer(SchedContainerChangeRequest increaseRequest) {
     NodeId nodeId = increaseRequest.getNodeId();
     SchedulerRequestKey schedulerKey =
         increaseRequest.getRMContainer().getAllocatedSchedulerKey();
@@ -596,16 +654,21 @@ public class AppSchedulingInfo {
           + increaseRequest.getNodeId() + " user=" + user + " resource="
           + deltaCapacity);
     }
-    // Set queue metrics
-    queue.getMetrics().allocateResources(user, deltaCapacity);
-    // remove the increase request from pending increase request map
-    removeIncreaseRequest(nodeId, schedulerKey, containerId);
-    // update usage
-    appResourceUsage.incUsed(increaseRequest.getNodePartition(), deltaCapacity);
+    try {
+      this.writeLock.lock();
+      // Set queue metrics
+      queue.getMetrics().allocateResources(user, deltaCapacity);
+      // remove the increase request from pending increase request map
+      removeIncreaseRequest(nodeId, schedulerKey, containerId);
+      // update usage
+      appResourceUsage.incUsed(increaseRequest.getNodePartition(),
+          deltaCapacity);
+    } finally {
+      this.writeLock.unlock();
+    }
   }
   
-  public synchronized void decreaseContainer(
-      SchedContainerChangeRequest decreaseRequest) {
+  public void decreaseContainer(SchedContainerChangeRequest decreaseRequest) {
     // Delta is negative when it's a decrease request
     Resource absDelta = Resources.negate(decreaseRequest.getDeltaCapacity());
 
@@ -615,12 +678,17 @@ public class AppSchedulingInfo {
           + decreaseRequest.getNodeId() + " user=" + user + " resource="
           + absDelta);
     }
-    
-    // Set queue metrics
-    queue.getMetrics().releaseResources(user, absDelta);
 
-    // update usage
-    appResourceUsage.decUsed(decreaseRequest.getNodePartition(), absDelta);
+    try {
+      this.writeLock.lock();
+      // Set queue metrics
+      queue.getMetrics().releaseResources(user, absDelta);
+
+      // update usage
+      appResourceUsage.decUsed(decreaseRequest.getNodePartition(), absDelta);
+    } finally {
+      this.writeLock.unlock();
+    }
   }
 
   /**
@@ -633,43 +701,48 @@ public class AppSchedulingInfo {
    * @param containerAllocated Container Allocated
    * @return List of ResourceRequests
    */
-  public synchronized List<ResourceRequest> allocate(NodeType type,
-      SchedulerNode node, SchedulerRequestKey schedulerKey,
-      ResourceRequest request, Container containerAllocated) {
+  public List<ResourceRequest> allocate(NodeType type, SchedulerNode node,
+      SchedulerRequestKey schedulerKey, ResourceRequest request,
+      Container containerAllocated) {
     List<ResourceRequest> resourceRequests = new ArrayList<>();
-    if (type == NodeType.NODE_LOCAL) {
-      allocateNodeLocal(node, schedulerKey, request, resourceRequests);
-    } else if (type == NodeType.RACK_LOCAL) {
-      allocateRackLocal(node, schedulerKey, request, resourceRequests);
-    } else {
-      allocateOffSwitch(request, resourceRequests, schedulerKey);
-    }
-    QueueMetrics metrics = queue.getMetrics();
-    if (pending) {
-      // once an allocation is done we assume the application is
-      // running from scheduler's POV.
-      pending = false;
-      metrics.runAppAttempt(applicationId, user);
-    }
+    try {
+      this.writeLock.lock();
+      if (type == NodeType.NODE_LOCAL) {
+        allocateNodeLocal(node, schedulerKey, request, resourceRequests);
+      } else if (type == NodeType.RACK_LOCAL) {
+        allocateRackLocal(node, schedulerKey, request, resourceRequests);
+      } else {
+        allocateOffSwitch(request, resourceRequests, schedulerKey);
+      }
+      QueueMetrics metrics = queue.getMetrics();
+      if (pending) {
+        // once an allocation is done we assume the application is
+        // running from scheduler's POV.
+        pending = false;
+        metrics.runAppAttempt(applicationId, user);
+      }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("allocate: applicationId=" + applicationId
-          + " container=" + containerAllocated.getId()
-          + " host=" + containerAllocated.getNodeId().toString()
-          + " user=" + user
-          + " resource=" + request.getCapability()
-          + " type=" + type);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("allocate: applicationId=" + applicationId
+            + " container=" + containerAllocated.getId()
+            + " host=" + containerAllocated.getNodeId().toString()
+            + " user=" + user
+            + " resource=" + request.getCapability()
+            + " type=" + type);
+      }
+      metrics.allocateResources(user, 1, request.getCapability(), true);
+      metrics.incrNodeTypeAggregations(user, type);
+      return resourceRequests;
+    } finally {
+      this.writeLock.unlock();
     }
-    metrics.allocateResources(user, 1, request.getCapability(), true);
-    metrics.incrNodeTypeAggregations(user, type);
-    return resourceRequests;
   }
 
   /**
    * The {@link ResourceScheduler} is allocating data-local resources to the
    * application.
    */
-  private synchronized void allocateNodeLocal(SchedulerNode node,
+  private void allocateNodeLocal(SchedulerNode node,
       SchedulerRequestKey schedulerKey, ResourceRequest nodeLocalRequest,
       List<ResourceRequest> resourceRequests) {
     // Update future requirements
@@ -701,7 +774,7 @@ public class AppSchedulingInfo {
    * The {@link ResourceScheduler} is allocating data-local resources to the
    * application.
    */
-  private synchronized void allocateRackLocal(SchedulerNode node,
+  private void allocateRackLocal(SchedulerNode node,
       SchedulerRequestKey schedulerKey, ResourceRequest rackLocalRequest,
       List<ResourceRequest> resourceRequests) {
     // Update future requirements
@@ -720,8 +793,8 @@ public class AppSchedulingInfo {
    * The {@link ResourceScheduler} is allocating data-local resources to the
    * application.
    */
-  private synchronized void allocateOffSwitch(
-      ResourceRequest offSwitchRequest, List<ResourceRequest> resourceRequests,
+  private void allocateOffSwitch(ResourceRequest offSwitchRequest,
+      List<ResourceRequest> resourceRequests,
       SchedulerRequestKey schedulerKey) {
     // Update future requirements
     decrementOutstanding(offSwitchRequest, schedulerKey);
@@ -729,8 +802,8 @@ public class AppSchedulingInfo {
     resourceRequests.add(cloneResourceRequest(offSwitchRequest));
   }
 
-  private synchronized void decrementOutstanding(
-      ResourceRequest offSwitchRequest, SchedulerRequestKey schedulerKey) {
+  private void decrementOutstanding(ResourceRequest offSwitchRequest,
+      SchedulerRequestKey schedulerKey) {
     int numOffSwitchContainers = offSwitchRequest.getNumContainers() - 1;
 
     // Do not remove ANY
@@ -748,66 +821,81 @@ public class AppSchedulingInfo {
     queue.decPendingResource(offSwitchRequest.getNodeLabelExpression(),
         offSwitchRequest.getCapability());
   }
-  
-  private synchronized void checkForDeactivation() {
+
+  private void checkForDeactivation() {
     if (schedulerKeys.isEmpty()) {
       activeUsersManager.deactivateApplication(user, applicationId);
     }
   }
   
-  public synchronized void move(Queue newQueue) {
-    QueueMetrics oldMetrics = queue.getMetrics();
-    QueueMetrics newMetrics = newQueue.getMetrics();
-    for (Map<String, ResourceRequest> asks : resourceRequestMap.values()) {
-      ResourceRequest request = asks.get(ResourceRequest.ANY);
-      if (request != null) {
-        oldMetrics.decrPendingResources(user, request.getNumContainers(),
-            request.getCapability());
-        newMetrics.incrPendingResources(user, request.getNumContainers(),
-            request.getCapability());
-        
-        Resource delta = Resources.multiply(request.getCapability(),
-            request.getNumContainers()); 
-        // Update Queue
-        queue.decPendingResource(request.getNodeLabelExpression(), delta);
-        newQueue.incPendingResource(request.getNodeLabelExpression(), delta);
+  public void move(Queue newQueue) {
+    try {
+      this.writeLock.lock();
+      QueueMetrics oldMetrics = queue.getMetrics();
+      QueueMetrics newMetrics = newQueue.getMetrics();
+      for (Map<String, ResourceRequest> asks : resourceRequestMap.values()) {
+        ResourceRequest request = asks.get(ResourceRequest.ANY);
+        if (request != null) {
+          oldMetrics.decrPendingResources(user, request.getNumContainers(),
+              request.getCapability());
+          newMetrics.incrPendingResources(user, request.getNumContainers(),
+              request.getCapability());
+
+          Resource delta = Resources.multiply(request.getCapability(),
+              request.getNumContainers());
+          // Update Queue
+          queue.decPendingResource(request.getNodeLabelExpression(), delta);
+          newQueue.incPendingResource(request.getNodeLabelExpression(), delta);
+        }
       }
+      oldMetrics.moveAppFrom(this);
+      newMetrics.moveAppTo(this);
+      activeUsersManager.deactivateApplication(user, applicationId);
+      activeUsersManager = newQueue.getActiveUsersManager();
+      activeUsersManager.activateApplication(user, applicationId);
+      this.queue = newQueue;
+    } finally {
+      this.writeLock.unlock();
     }
-    oldMetrics.moveAppFrom(this);
-    newMetrics.moveAppTo(this);
-    activeUsersManager.deactivateApplication(user, applicationId);
-    activeUsersManager = newQueue.getActiveUsersManager();
-    activeUsersManager.activateApplication(user, applicationId);
-    this.queue = newQueue;
   }
 
-  public synchronized void stop() {
+  public void stop() {
     // clear pending resources metrics for the application
-    QueueMetrics metrics = queue.getMetrics();
-    for (Map<String, ResourceRequest> asks : resourceRequestMap.values()) {
-      ResourceRequest request = asks.get(ResourceRequest.ANY);
-      if (request != null) {
-        metrics.decrPendingResources(user, request.getNumContainers(),
-            request.getCapability());
-        
-        // Update Queue
-        queue.decPendingResource(
-            request.getNodeLabelExpression(),
-            Resources.multiply(request.getCapability(),
-                request.getNumContainers()));
+    try {
+      this.writeLock.lock();
+      QueueMetrics metrics = queue.getMetrics();
+      for (Map<String, ResourceRequest> asks : resourceRequestMap.values()) {
+        ResourceRequest request = asks.get(ResourceRequest.ANY);
+        if (request != null) {
+          metrics.decrPendingResources(user, request.getNumContainers(),
+              request.getCapability());
+
+          // Update Queue
+          queue.decPendingResource(
+              request.getNodeLabelExpression(),
+              Resources.multiply(request.getCapability(),
+                  request.getNumContainers()));
+        }
       }
+      metrics.finishAppAttempt(applicationId, pending, user);
+
+      // Clear requests themselves
+      clearRequests();
+    } finally {
+      this.writeLock.unlock();
     }
-    metrics.finishAppAttempt(applicationId, pending, user);
-    
-    // Clear requests themselves
-    clearRequests();
   }
 
-  public synchronized void setQueue(Queue queue) {
-    this.queue = queue;
+  public void setQueue(Queue queue) {
+    try {
+      this.writeLock.lock();
+      this.queue = queue;
+    } finally {
+      this.writeLock.unlock();
+    }
   }
 
-  public Set<String> getBlackList() {
+  private Set<String> getBlackList() {
     return this.placesBlacklistedByApp;
   }
 
@@ -817,31 +905,36 @@ public class AppSchedulingInfo {
     }
   }
 
-  public synchronized void transferStateFromPreviousAppSchedulingInfo(
+  public void transferStateFromPreviousAppSchedulingInfo(
       AppSchedulingInfo appInfo) {
-    // This should not require locking the userBlacklist since it will not be
-    // used by this instance until after setCurrentAppAttempt.
+    // This should not require locking the placesBlacklistedByApp since it will
+    // not be used by this instance until after setCurrentAppAttempt.
     this.placesBlacklistedByApp = appInfo.getBlackList();
   }
 
-  public synchronized void recoverContainer(RMContainer rmContainer) {
-    QueueMetrics metrics = queue.getMetrics();
-    if (pending) {
-      // If there was any container to recover, the application was
-      // running from scheduler's POV.
-      pending = false;
-      metrics.runAppAttempt(applicationId, user);
-    }
+  public void recoverContainer(RMContainer rmContainer) {
+    try {
+      this.writeLock.lock();
+      QueueMetrics metrics = queue.getMetrics();
+      if (pending) {
+        // If there was any container to recover, the application was
+        // running from scheduler's POV.
+        pending = false;
+        metrics.runAppAttempt(applicationId, user);
+      }
 
-    // Container is completed. Skip recovering resources.
-    if (rmContainer.getState().equals(RMContainerState.COMPLETED)) {
-      return;
-    }
+      // Container is completed. Skip recovering resources.
+      if (rmContainer.getState().equals(RMContainerState.COMPLETED)) {
+        return;
+      }
 
-    metrics.allocateResources(user, 1, rmContainer.getAllocatedResource(),
-      false);
+      metrics.allocateResources(user, 1, rmContainer.getAllocatedResource(),
+          false);
+    } finally {
+      this.writeLock.unlock();
+    }
   }
-  
+
   public ResourceRequest cloneResourceRequest(ResourceRequest request) {
     ResourceRequest newRequest =
         ResourceRequest.newInstance(request.getPriority(),


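The change above replaces method-level synchronized with an explicit
ReentrantReadWriteLock, so read-only accessors can run concurrently while
mutations stay exclusive. A minimal sketch of the pattern, assuming readLock
and writeLock are the two sides of a single ReentrantReadWriteLock
(illustrative only, not part of the patch):

  import java.util.concurrent.locks.Lock;
  import java.util.concurrent.locks.ReentrantReadWriteLock;

  class LockPatternSketch {
    private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
    private final Lock readLock = rwLock.readLock();
    private final Lock writeLock = rwLock.writeLock();
    private int state;

    int read() {
      try {
        this.readLock.lock();    // shared: concurrent readers are allowed
        return state;
      } finally {
        this.readLock.unlock();
      }
    }

    void write(int newState) {
      try {
        this.writeLock.lock();   // exclusive: blocks readers and other writers
        state = newState;
      } finally {
        this.writeLock.unlock();
      }
    }
  }

Note that the patch acquires the lock inside the try block; the more common
idiom acquires it just before the try, but the two behave the same here
because ReentrantReadWriteLock's lock() does not throw.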


[30/57] [abbrv] hadoop git commit: YARN-4205. Add a service for monitoring application lifetime timeout. Contributed by Rohith Sharma K S

Posted by in...@apache.org.
YARN-4205. Add a service for monitoring application lifetime timeout. Contributed by Rohith Sharma K S


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ae5a3a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ae5a3a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ae5a3a5

Branch: refs/heads/HDFS-10467
Commit: 2ae5a3a5bf5ea355370469a53eeccff0b5220081
Parents: 1518cb9
Author: Jian He <ji...@apache.org>
Authored: Thu Sep 29 22:00:31 2016 +0800
Committer: Jian He <ji...@apache.org>
Committed: Thu Sep 29 22:00:31 2016 +0800

----------------------------------------------------------------------
 .../records/ApplicationSubmissionContext.java   |  21 +++
 .../api/records/ApplicationTimeoutType.java     |  41 +++++
 .../hadoop/yarn/conf/YarnConfiguration.java     |   6 +
 .../src/main/proto/yarn_protos.proto            |  10 ++
 .../pb/ApplicationSubmissionContextPBImpl.java  |  83 ++++++++++
 .../yarn/api/records/impl/pb/ProtoUtils.java    |  19 +++
 .../yarn/util/AbstractLivelinessMonitor.java    |  32 ++--
 .../src/main/resources/yarn-default.xml         |   9 +
 .../hadoop/yarn/api/TestPBImplRecords.java      |   2 +-
 .../resourcemanager/RMActiveServiceContext.java |  16 ++
 .../server/resourcemanager/RMAppManager.java    |   4 +
 .../yarn/server/resourcemanager/RMContext.java  |   5 +
 .../server/resourcemanager/RMContextImpl.java   |  12 ++
 .../server/resourcemanager/RMServerUtils.java   |  16 ++
 .../server/resourcemanager/ResourceManager.java |   9 +
 .../server/resourcemanager/rmapp/RMAppImpl.java |  47 +++++-
 .../rmapp/monitor/RMAppLifetimeMonitor.java     | 130 +++++++++++++++
 .../rmapp/monitor/RMAppToMonitor.java           |  77 +++++++++
 .../rmapp/monitor/package-info.java             |  28 ++++
 .../yarn/server/resourcemanager/MockRM.java     |  22 ++-
 .../rmapp/TestApplicationLifetimeMonitor.java   | 165 +++++++++++++++++++
 21 files changed, 738 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ae5a3a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java
index 21cd1bb..83f601a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.api.records;
 
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
@@ -535,4 +536,24 @@ public abstract class ApplicationSubmissionContext {
   @Public
   @Unstable
   public abstract void setReservationID(ReservationId reservationID);
+
+  /**
+   * Get the <code>ApplicationTimeouts</code> of the application. Timeout values
+   * are in seconds.
+   * @return all <code>ApplicationTimeouts</code> of the application.
+   */
+  @Public
+  @Unstable
+  public abstract Map<ApplicationTimeoutType, Long> getApplicationTimeouts();
+
+  /**
+   * Set the <code>ApplicationTimeouts</code> for the application in seconds.
+   * All pre-existing Map entries are cleared before adding the new Map.
+   * @param applicationTimeouts <code>ApplicationTimeouts</code>s for the
+   *          application
+   */
+  @Public
+  @Unstable
+  public abstract void setApplicationTimeouts(
+      Map<ApplicationTimeoutType, Long> applicationTimeouts);
 }
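
A hypothetical client-side use of the new accessors (appContext stands for an
ApplicationSubmissionContext built during application submission; the snippet
is a sketch, not part of this patch):

  Map<ApplicationTimeoutType, Long> timeouts = new HashMap<>();
  timeouts.put(ApplicationTimeoutType.LIFETIME, 3600L); // in seconds: expire after one hour
  appContext.setApplicationTimeouts(timeouts);          // replaces any pre-existing entries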

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ae5a3a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationTimeoutType.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationTimeoutType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationTimeoutType.java
new file mode 100644
index 0000000..edde1b0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationTimeoutType.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+
+/**
+ * Application timeout type.
+ */
+@Public
+@Unstable
+public enum ApplicationTimeoutType {
+
+  /**
+   * <p>
+   * Timeout imposed on the overall application lifetime. It covers the actual
+   * run time plus non-run time. Non-run-time delays include the time the
+   * scheduler takes to allocate containers, the time taken to persist the
+   * application in the RMStateStore, etc.
+   * </p>
+   * If this is set, timeout monitoring starts from the application submission
+   * time.
+   */
+  LIFETIME;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ae5a3a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 1421873..4d43357 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1533,6 +1533,12 @@ public class YarnConfiguration extends Configuration {
       false;
 
 
+  // Configurations for the application lifetime monitor feature
+  public static final String RM_APPLICATION_LIFETIME_MONITOR_INTERVAL_MS =
+      RM_PREFIX + "application-timeouts.lifetime-monitor.interval-ms";
+
+  public static final long DEFAULT_RM_APPLICATION_LIFETIME_MONITOR_INTERVAL_MS =
+      60000;
 
   /**
    * Interval of time the linux container executor should try cleaning up

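Reading the new setting would look like this (a sketch; conf is a
YarnConfiguration instance, and the actual wiring lives in
RMAppLifetimeMonitor, whose diff is not shown in this message):

  long monitorIntervalMs = conf.getLong(
      YarnConfiguration.RM_APPLICATION_LIFETIME_MONITOR_INTERVAL_MS,
      YarnConfiguration.DEFAULT_RM_APPLICATION_LIFETIME_MONITOR_INTERVAL_MS);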
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ae5a3a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 2d6007e..f788295 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -365,6 +365,16 @@ message ApplicationSubmissionContextProto {
   optional ReservationIdProto reservation_id = 15;
   optional string node_label_expression = 16;
   optional ResourceRequestProto am_container_resource_request = 17;
+  repeated ApplicationTimeoutMapProto application_timeouts = 18;
+}
+
+enum ApplicationTimeoutTypeProto {
+  APP_TIMEOUT_LIFETIME = 1;
+}
+
+message ApplicationTimeoutMapProto {
+  optional ApplicationTimeoutTypeProto application_timeout_type = 1;
+  optional int64 timeout = 2;
 }
 
 message LogAggregationContextProto {

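The Java-side Map<ApplicationTimeoutType, Long> is encoded here as a repeated
message of key/value pairs because protobuf 2.5, which Hadoop builds against,
predates native map fields.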
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ae5a3a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java
index 67e3a84..62b54e7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java
@@ -18,7 +18,11 @@
 
 package org.apache.hadoop.yarn.api.records.impl.pb;
 
+import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -26,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.LogAggregationContext;
 import org.apache.hadoop.yarn.api.records.Priority;
@@ -36,6 +41,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationTimeoutMapProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerLaunchContextProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.LogAggregationContextProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto;
@@ -63,6 +69,7 @@ extends ApplicationSubmissionContext {
   private ResourceRequest amResourceRequest = null;
   private LogAggregationContext logAggregationContext = null;
   private ReservationId reservationId = null;
+  private Map<ApplicationTimeoutType, Long> applicationTimeouts = null;
 
   public ApplicationSubmissionContextPBImpl() {
     builder = ApplicationSubmissionContextProto.newBuilder();
@@ -131,6 +138,9 @@ extends ApplicationSubmissionContext {
     if (this.reservationId != null) {
       builder.setReservationId(convertToProtoFormat(this.reservationId));
     }
+    if (this.applicationTimeouts != null) {
+      addApplicationTimeouts();
+    }
   }
 
   private void mergeLocalToProto() {
@@ -548,4 +558,77 @@ extends ApplicationSubmissionContext {
   private ReservationIdProto convertToProtoFormat(ReservationId t) {
     return ((ReservationIdPBImpl) t).getProto();
   }
+
+  @Override
+  public Map<ApplicationTimeoutType, Long> getApplicationTimeouts() {
+    initApplicationTimeout();
+    return this.applicationTimeouts;
+  }
+
+  private void initApplicationTimeout() {
+    if (this.applicationTimeouts != null) {
+      return;
+    }
+    ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
+    List<ApplicationTimeoutMapProto> lists = p.getApplicationTimeoutsList();
+    this.applicationTimeouts =
+        new HashMap<ApplicationTimeoutType, Long>(lists.size());
+    for (ApplicationTimeoutMapProto timeoutProto : lists) {
+      this.applicationTimeouts.put(
+          ProtoUtils
+              .convertFromProtoFormat(timeoutProto.getApplicationTimeoutType()),
+          timeoutProto.getTimeout());
+    }
+  }
+
+  @Override
+  public void setApplicationTimeouts(
+      Map<ApplicationTimeoutType, Long> appTimeouts) {
+    if (appTimeouts == null) {
+      return;
+    }
+    initApplicationTimeout();
+    this.applicationTimeouts.clear();
+    this.applicationTimeouts.putAll(appTimeouts);
+  }
+
+  private void addApplicationTimeouts() {
+    maybeInitBuilder();
+    builder.clearApplicationTimeouts();
+    if (applicationTimeouts == null) {
+      return;
+    }
+    Iterable<? extends ApplicationTimeoutMapProto> values =
+        new Iterable<ApplicationTimeoutMapProto>() {
+
+          @Override
+          public Iterator<ApplicationTimeoutMapProto> iterator() {
+            return new Iterator<ApplicationTimeoutMapProto>() {
+              private Iterator<ApplicationTimeoutType> iterator =
+                  applicationTimeouts.keySet().iterator();
+
+              @Override
+              public boolean hasNext() {
+                return iterator.hasNext();
+              }
+
+              @Override
+              public ApplicationTimeoutMapProto next() {
+                ApplicationTimeoutType key = iterator.next();
+                return ApplicationTimeoutMapProto.newBuilder()
+                    .setTimeout(applicationTimeouts.get(key))
+                    .setApplicationTimeoutType(
+                        ProtoUtils.convertToProtoFormat(key))
+                    .build();
+              }
+
+              @Override
+              public void remove() {
+                throw new UnsupportedOperationException();
+              }
+            };
+          }
+        };
+    this.builder.addAllApplicationTimeouts(values);
+  }
 }  

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ae5a3a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
index 128120e..ab283e7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope;
 import org.apache.hadoop.yarn.api.records.AMCommand;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
+import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerRetryPolicy;
@@ -51,6 +52,7 @@ import org.apache.hadoop.yarn.proto.YarnProtos;
 import org.apache.hadoop.yarn.proto.YarnProtos.AMCommandProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAccessTypeProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationResourceUsageReportProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationTimeoutTypeProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStateProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.FinalApplicationStatusProto;
@@ -259,6 +261,23 @@ public class ProtoUtils {
     return ApplicationAccessType.valueOf(e.name().replace(
         APP_ACCESS_TYPE_PREFIX, ""));
   }
+
+  /*
+   * ApplicationTimeoutType
+   */
+  private static String APP_TIMEOUT_TYPE_PREFIX = "APP_TIMEOUT_";
+
+  public static ApplicationTimeoutTypeProto convertToProtoFormat(
+      ApplicationTimeoutType e) {
+    return ApplicationTimeoutTypeProto
+        .valueOf(APP_TIMEOUT_TYPE_PREFIX + e.name());
+  }
+
+  public static ApplicationTimeoutType convertFromProtoFormat(
+      ApplicationTimeoutTypeProto e) {
+    return ApplicationTimeoutType
+        .valueOf(e.name().replace(APP_TIMEOUT_TYPE_PREFIX, ""));
+  }
   
   /*
    * Reservation Request interpreter type

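The converters follow the same prefix convention as the existing
ApplicationAccessType helpers; a round trip looks like this (illustrative):

  ApplicationTimeoutTypeProto proto =
      ProtoUtils.convertToProtoFormat(ApplicationTimeoutType.LIFETIME);
  // proto is APP_TIMEOUT_LIFETIME ("APP_TIMEOUT_" + the enum name)
  ApplicationTimeoutType back = ProtoUtils.convertFromProtoFormat(proto);
  // back is ApplicationTimeoutType.LIFETIME again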
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ae5a3a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java
index e80d032..b605026 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java
@@ -44,8 +44,8 @@ public abstract class AbstractLivelinessMonitor<O> extends AbstractService {
   private Thread checkerThread;
   private volatile boolean stopped;
   public static final int DEFAULT_EXPIRE = 5*60*1000;//5 mins
-  private int expireInterval = DEFAULT_EXPIRE;
-  private int monitorInterval = expireInterval/3;
+  private long expireInterval = DEFAULT_EXPIRE;
+  private long monitorInterval = expireInterval / 3;
 
   private final Clock clock;
 
@@ -85,7 +85,12 @@ public abstract class AbstractLivelinessMonitor<O> extends AbstractService {
     this.expireInterval = expireInterval;
   }
 
-  protected void setMonitorInterval(int monitorInterval) {
+  protected long getExpireInterval(O o) {
+    // By default, return the same expire interval for every registered object.
+    return this.expireInterval;
+  }
+
+  protected void setMonitorInterval(long monitorInterval) {
     this.monitorInterval = monitorInterval;
   }
 
@@ -97,7 +102,11 @@ public abstract class AbstractLivelinessMonitor<O> extends AbstractService {
   }
 
   public synchronized void register(O ob) {
-    running.put(ob, clock.getTime());
+    register(ob, clock.getTime());
+  }
+
+  public synchronized void register(O ob, long monitorStartTime) {
+    running.put(ob, monitorStartTime);
   }
 
   public synchronized void unregister(O ob) {
@@ -117,19 +126,20 @@ public abstract class AbstractLivelinessMonitor<O> extends AbstractService {
     public void run() {
       while (!stopped && !Thread.currentThread().isInterrupted()) {
         synchronized (AbstractLivelinessMonitor.this) {
-          Iterator<Map.Entry<O, Long>> iterator = 
-            running.entrySet().iterator();
+          Iterator<Map.Entry<O, Long>> iterator = running.entrySet().iterator();
 
-          //avoid calculating current time everytime in loop
+          // avoid recalculating the current time on every loop iteration
           long currentTime = clock.getTime();
 
           while (iterator.hasNext()) {
             Map.Entry<O, Long> entry = iterator.next();
-            if (currentTime > entry.getValue() + expireInterval) {
+            O key = entry.getKey();
+            long interval = getExpireInterval(key);
+            if (currentTime > entry.getValue() + interval) {
               iterator.remove();
-              expire(entry.getKey());
-              LOG.info("Expired:" + entry.getKey().toString() + 
-                      " Timed out after " + expireInterval/1000 + " secs");
+              expire(key);
+              LOG.info("Expired:" + entry.getKey().toString()
+                  + " Timed out after " + interval / 1000 + " secs");
             }
           }
         }

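The new getExpireInterval(O) hook is what lets each monitored object carry its
own deadline instead of the single class-wide expireInterval. A minimal
subclass sketch (hypothetical names; the RMAppLifetimeMonitor added by this
commit is the intended real user):

  import java.util.Map;
  import java.util.concurrent.ConcurrentHashMap;
  import org.apache.hadoop.yarn.api.records.ApplicationId;
  import org.apache.hadoop.yarn.util.AbstractLivelinessMonitor;
  import org.apache.hadoop.yarn.util.SystemClock;

  public class PerAppTimeoutMonitorSketch
      extends AbstractLivelinessMonitor<ApplicationId> {

    private final Map<ApplicationId, Long> timeoutsMs = new ConcurrentHashMap<>();

    public PerAppTimeoutMonitorSketch() {
      super("PerAppTimeoutMonitorSketch", new SystemClock());
    }

    public void monitorApp(ApplicationId appId, long submitTimeMs, long timeoutMs) {
      timeoutsMs.put(appId, timeoutMs);
      register(appId, submitTimeMs);  // new overload: the clock starts at submission
    }

    @Override
    protected long getExpireInterval(ApplicationId appId) {
      Long t = timeoutsMs.get(appId);               // per-app deadline, if any
      return (t != null) ? t : super.getExpireInterval(appId);
    }

    @Override
    protected void expire(ApplicationId appId) {
      timeoutsMs.remove(appId);
      // a real monitor would dispatch a timeout/kill event for appId here
    }
  }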
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ae5a3a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 965b575..524afec 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3075,4 +3075,13 @@
     <name>yarn.resourcemanager.node-removal-untracked.timeout-ms</name>
     <value>60000</value>
   </property>
+
+  <property>
+    <description>
+    The interval, in milliseconds, at which the RMAppLifetimeMonitor service checks for timed-out applications.
+    </description>
+    <name>yarn.resourcemanager.application-timeouts.lifetime-monitor.interval-ms</name>
+    <value>60000</value>
+  </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ae5a3a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
index e57a5a2..5270486 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
@@ -368,7 +368,7 @@ public class TestPBImplRecords {
       return bytes[rand.nextInt(4)];
     } else if (type.equals(int.class) || type.equals(Integer.class)) {
       return rand.nextInt(1000000);
-    } else if (type.equals(long.class)) {
+    } else if (type.equals(long.class) || type.equals(Long.class)) {
       return Long.valueOf(rand.nextInt(1000000));
     } else if (type.equals(float.class)) {
       return rand.nextFloat();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ae5a3a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
index caa0ff13..0e305a9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.monitor.RMAppLifetimeMonitor;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
@@ -105,6 +106,8 @@ public class RMActiveServiceContext {
   private boolean isSchedulerReady = false;
   private PlacementManager queuePlacementManager = null;
 
+  private RMAppLifetimeMonitor rmAppLifetimeMonitor;
+
   public RMActiveServiceContext() {
     queuePlacementManager = new PlacementManager();
   }
@@ -467,4 +470,17 @@ public class RMActiveServiceContext {
   public void setQueuePlacementManager(PlacementManager placementMgr) {
     this.queuePlacementManager = placementMgr;
   }
+
+  @Private
+  @Unstable
+  public void setRMAppLifetimeMonitor(
+      RMAppLifetimeMonitor lifetimeMonitor) {
+    this.rmAppLifetimeMonitor = lifetimeMonitor;
+  }
+
+  @Private
+  @Unstable
+  public RMAppLifetimeMonitor getRMAppLifetimeMonitor() {
+    return this.rmAppLifetimeMonitor;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ae5a3a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 136dee0..7352a28 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -385,6 +385,10 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
       }
     }
 
+    // fail the submission if any configured application timeout value is invalid
+    RMServerUtils.validateApplicationTimeouts(
+        submissionContext.getApplicationTimeouts());
+
     // Create RMApp
     RMAppImpl application =
         new RMAppImpl(applicationId, rmContext, this.conf,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ae5a3a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
index 2ba445c..c9d185f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.monitor.RMAppLifetimeMonitor;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
@@ -149,4 +150,8 @@ public interface RMContext {
   LeaderElectorService getLeaderElectorService();
 
   QueueLimitCalculator getNodeManagerQueueLimitCalculator();
+
+  void setRMAppLifetimeMonitor(RMAppLifetimeMonitor rmAppLifetimeMonitor);
+
+  RMAppLifetimeMonitor getRMAppLifetimeMonitor();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ae5a3a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
index 1e702de..dc8f7d1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.monitor.RMAppLifetimeMonitor;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
@@ -499,4 +500,15 @@ public class RMContextImpl implements RMContext {
       QueueLimitCalculator limitCalculator) {
     this.queueLimitCalculator = limitCalculator;
   }
+
+  @Override
+  public void setRMAppLifetimeMonitor(
+      RMAppLifetimeMonitor rmAppLifetimeMonitor) {
+    this.activeServiceContext.setRMAppLifetimeMonitor(rmAppLifetimeMonitor);
+  }
+
+  @Override
+  public RMAppLifetimeMonitor getRMAppLifetimeMonitor() {
+    return this.activeServiceContext.getRMAppLifetimeMonitor();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ae5a3a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
index 7fcabab..b90e499 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
+import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
@@ -51,6 +52,7 @@ import org.apache.hadoop.yarn.exceptions.InvalidContainerReleaseException;
 import org.apache.hadoop.yarn.exceptions
     .InvalidResourceBlacklistRequestException;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
+import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
@@ -470,4 +472,18 @@ public class RMServerUtils {
       conf.set(entry.getKey(), entry.getValue());
     }
   }
+
+  public static void validateApplicationTimeouts(
+      Map<ApplicationTimeoutType, Long> timeouts) throws YarnException {
+    if (timeouts != null) {
+      for (Map.Entry<ApplicationTimeoutType, Long> timeout : timeouts
+          .entrySet()) {
+        if (timeout.getValue() < 0) {
+          String message = "Invalid application timeout, value="
+              + timeout.getValue() + " for type=" + timeout.getKey();
+          throw new YarnException(message);
+        }
+      }
+    }
+  }
 }
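
For context, a minimal hedged sketch of how the new validateApplicationTimeouts
helper behaves; the timeout values and the demoValidate name are illustrative,
not part of the patch, and the imports match those added above:

    static void demoValidate() {
      Map<ApplicationTimeoutType, Long> timeouts =
          new HashMap<ApplicationTimeoutType, Long>();
      timeouts.put(ApplicationTimeoutType.LIFETIME, 60L);
      try {
        RMServerUtils.validateApplicationTimeouts(timeouts); // passes: 60 >= 0
        timeouts.put(ApplicationTimeoutType.LIFETIME, -1L);
        RMServerUtils.validateApplicationTimeouts(timeouts); // throws
      } catch (YarnException e) {
        // message: "Invalid application timeout, value=-1 for type=LIFETIME"
      }
    }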

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ae5a3a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 8a6997d..5e9bece 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -96,6 +96,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessM
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.monitor.RMAppLifetimeMonitor;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent;
@@ -556,6 +557,10 @@ public class ResourceManager extends CompositeService implements Recoverable {
       addService(amFinishingMonitor);
       rmContext.setAMFinishingMonitor(amFinishingMonitor);
       
+      RMAppLifetimeMonitor rmAppLifetimeMonitor = createRMAppLifetimeMonitor();
+      addService(rmAppLifetimeMonitor);
+      rmContext.setRMAppLifetimeMonitor(rmAppLifetimeMonitor);
+
       RMNodeLabelsManager nlm = createNodeLabelManager();
       nlm.setRMContext(rmContext);
       addService(nlm);
@@ -1398,4 +1403,8 @@ public class ResourceManager extends CompositeService implements Recoverable {
     out.println("                            "
         + "[-remove-application-from-state-store <appId>]" + "\n");
   }
+
+  protected RMAppLifetimeMonitor createRMAppLifetimeMonitor() {
+    return new RMAppLifetimeMonitor(this.rmContext);
+  }
 }
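
Because createRMAppLifetimeMonitor is a protected factory method, a subclass
can substitute its own monitor. A hedged sketch; TestableRM is a hypothetical
name, and rmContext is assumed to be visible to subclasses as in the existing
MockRM-style test subclasses:

    public class TestableRM extends ResourceManager {
      @Override
      protected RMAppLifetimeMonitor createRMAppLifetimeMonitor() {
        // e.g. return a monitor wired to a fake clock for deterministic tests
        return new RMAppLifetimeMonitor(this.rmContext);
      }
    }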

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ae5a3a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index e5bde32..727703b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -527,6 +528,8 @@ public class RMAppImpl implements RMApp, Recoverable {
             DEFAULT_AM_SCHEDULING_NODE_BLACKLISTING_DISABLE_THRESHOLD;
       }
     }
+
+
   }
 
   /**
@@ -1106,6 +1109,20 @@ public class RMAppImpl implements RMApp, Recoverable {
         }
       }
 
+      long applicationLifetime =
+          app.getApplicationLifetime(ApplicationTimeoutType.LIFETIME);
+      if (applicationLifetime > 0) {
+        app.rmContext.getRMAppLifetimeMonitor().registerApp(app.applicationId,
+            ApplicationTimeoutType.LIFETIME, app.submitTime,
+            applicationLifetime * 1000);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Application " + app.applicationId
+              + " is registered for timeout monitor, type="
+              + ApplicationTimeoutType.LIFETIME + " value="
+              + applicationLifetime + " seconds");
+        }
+      }
+
       // No existent attempts means the attempt associated with this app was not
       // started or started but not yet saved.
       if (app.attempts.isEmpty()) {
@@ -1152,6 +1169,13 @@ public class RMAppImpl implements RMApp, Recoverable {
 
     @Override
     public RMAppState transition(RMAppImpl app, RMAppEvent event) {
+      Map<ApplicationTimeoutType, Long> timeouts =
+          app.submissionContext.getApplicationTimeouts();
+      if (timeouts != null && timeouts.size() > 0) {
+        app.rmContext.getRMAppLifetimeMonitor()
+            .unregisterApp(app.getApplicationId(), timeouts.keySet());
+      }
+
       if (app.transitionTodo instanceof SingleArcTransition) {
         ((SingleArcTransition) app.transitionTodo).transition(app,
           app.eventCausingFinalSaving);
@@ -1160,7 +1184,6 @@ public class RMAppImpl implements RMApp, Recoverable {
           app.eventCausingFinalSaving);
       }
       return app.targetedFinalState;
-
     }
   }
 
@@ -1209,6 +1232,18 @@ public class RMAppImpl implements RMApp, Recoverable {
     @Override
     public void transition(RMAppImpl app, RMAppEvent event) {
 
+      long applicationLifetime =
+          app.getApplicationLifetime(ApplicationTimeoutType.LIFETIME);
+      if (applicationLifetime > 0) {
+        app.rmContext.getRMAppLifetimeMonitor().registerApp(app.applicationId,
+            ApplicationTimeoutType.LIFETIME, app.submitTime,
+            applicationLifetime * 1000);
+        LOG.debug("Application " + app.applicationId
+            + " is registered for timeout monitor, type="
+            + ApplicationTimeoutType.LIFETIME + " value=" + applicationLifetime
+            + " seconds");
+      }
+
       // If recovery is enabled then store the application information in a
       // non-blocking call so make sure that RM has stored the information
       // needed to restart the AM after RM restart without further client
@@ -1922,4 +1957,14 @@ public class RMAppImpl implements RMApp, Recoverable {
   public int getNextAttemptId() {
     return nextAttemptId;
   }
+
+  private long getApplicationLifetime(ApplicationTimeoutType type) {
+    Map<ApplicationTimeoutType, Long> timeouts =
+        this.submissionContext.getApplicationTimeouts();
+    long applicationLifetime = -1;
+    if (timeouts != null && timeouts.containsKey(type)) {
+      applicationLifetime = timeouts.get(type);
+    }
+    return applicationLifetime;
+  }
 }
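
The lifetime read by getApplicationLifetime originates from the
ApplicationSubmissionContext (the MockRM change below calls
setApplicationTimeouts on it). A hedged client-side sketch, with an
illustrative helper name and value:

    static void setLifetime(ApplicationSubmissionContext ctx, long seconds) {
      Map<ApplicationTimeoutType, Long> timeouts =
          new HashMap<ApplicationTimeoutType, Long>();
      timeouts.put(ApplicationTimeoutType.LIFETIME, seconds);
      ctx.setApplicationTimeouts(timeouts);
      // RMAppImpl later converts this to milliseconds (value * 1000) when
      // registering the app with RMAppLifetimeMonitor.
    }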

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ae5a3a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppLifetimeMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppLifetimeMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppLifetimeMonitor.java
new file mode 100644
index 0000000..e550c97
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppLifetimeMonitor.java
@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp.monitor;
+
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
+import org.apache.hadoop.yarn.util.AbstractLivelinessMonitor;
+import org.apache.hadoop.yarn.util.SystemClock;
+
+/**
+ * This service monitors applications against their configured lifetime
+ * value. An application is killed if it runs beyond the given time.
+ */
+public class RMAppLifetimeMonitor
+    extends AbstractLivelinessMonitor<RMAppToMonitor> {
+
+  private static final Log LOG = LogFactory.getLog(RMAppLifetimeMonitor.class);
+
+  private RMContext rmContext;
+  private Map<RMAppToMonitor, Long> monitoredApps =
+      new HashMap<RMAppToMonitor, Long>();
+
+  private static final EnumSet<RMAppState> COMPLETED_APP_STATES =
+      EnumSet.of(RMAppState.FINISHED, RMAppState.FINISHING, RMAppState.FAILED,
+          RMAppState.KILLED, RMAppState.FINAL_SAVING, RMAppState.KILLING);
+
+  public RMAppLifetimeMonitor(RMContext rmContext) {
+    super(RMAppLifetimeMonitor.class.getName(), SystemClock.getInstance());
+    this.rmContext = rmContext;
+  }
+
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    long monitorInterval = conf.getLong(
+        YarnConfiguration.RM_APPLICATION_LIFETIME_MONITOR_INTERVAL_MS,
+        YarnConfiguration.DEFAULT_RM_APPLICATION_LIFETIME_MONITOR_INTERVAL_MS);
+    if (monitorInterval <= 0) {
+      monitorInterval =
+          YarnConfiguration.DEFAULT_RM_APPLICATION_LIFETIME_MONITOR_INTERVAL_MS;
+    }
+    setMonitorInterval(monitorInterval);
+    LOG.info("Application lifelime monitor interval set to " + monitorInterval
+        + " ms.");
+    super.serviceInit(conf);
+  }
+
+  @SuppressWarnings("unchecked")
+  @Override
+  protected synchronized void expire(RMAppToMonitor monitoredAppKey) {
+    Long timeout = monitoredApps.remove(monitoredAppKey);
+    ApplicationId appId = monitoredAppKey.getApplicationId();
+    RMApp app = rmContext.getRMApps().get(appId);
+    if (app == null) {
+      return;
+    }
+    // Don't trigger a KILL event if application is in completed states
+    if (!COMPLETED_APP_STATES.contains(app.getState())) {
+      String diagnostics =
+          "Application killed due to exceeding its lifetime period of "
+              + timeout + " milliseconds";
+      rmContext.getDispatcher().getEventHandler()
+          .handle(new RMAppEvent(appId, RMAppEventType.KILL, diagnostics));
+    } else {
+      LOG.info("Application " + appId
+          + " is about to complete. So not killing the application.");
+    }
+  }
+
+  public synchronized void registerApp(ApplicationId appId,
+      ApplicationTimeoutType timeoutType, long monitorStartTime, long timeout) {
+    RMAppToMonitor appToMonitor = new RMAppToMonitor(appId, timeoutType);
+    register(appToMonitor, monitorStartTime);
+    monitoredApps.putIfAbsent(appToMonitor, timeout);
+  }
+
+  @Override
+  protected synchronized long getExpireInterval(
+      RMAppToMonitor monitoredAppKey) {
+    return monitoredApps.get(monitoredAppKey);
+  }
+
+  public synchronized void unregisterApp(ApplicationId appId,
+      ApplicationTimeoutType timeoutType) {
+    RMAppToMonitor appToRemove = new RMAppToMonitor(appId, timeoutType);
+    unregister(appToRemove);
+    monitoredApps.remove(appToRemove);
+  }
+
+  public synchronized void unregisterApp(ApplicationId appId,
+      Set<ApplicationTimeoutType> types) {
+    for (ApplicationTimeoutType type : types) {
+      unregisterApp(appId, type);
+    }
+  }
+
+  public synchronized void updateApplicationTimeouts(ApplicationId appId,
+      Map<ApplicationTimeoutType, Long> timeouts) {
+    // TODO in YARN-5611
+  }
+}
\ No newline at end of file
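
A hedged sketch of the register/unregister cycle the RM drives against this
monitor; the helper names and the 60-second value are illustrative. Note that
registerApp takes the timeout in milliseconds, which is why RMAppImpl
multiplies the configured lifetime (seconds) by 1000:

    static void trackForOneMinute(RMAppLifetimeMonitor monitor,
        ApplicationId appId, long submitTime) {
      // expire() fires roughly 60s after submitTime unless unregistered first
      monitor.registerApp(appId, ApplicationTimeoutType.LIFETIME,
          submitTime, 60 * 1000L);
    }

    static void stopTracking(RMAppLifetimeMonitor monitor,
        ApplicationId appId) {
      // FinalSavingTransition does the equivalent of this for finished apps
      monitor.unregisterApp(appId, ApplicationTimeoutType.LIFETIME);
    }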

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ae5a3a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppToMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppToMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppToMonitor.java
new file mode 100644
index 0000000..1cf2132
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppToMonitor.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp.monitor;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
+
+/**
+ * This class is used to monitor an application by applicationId + appTimeoutType.
+ */
+public class RMAppToMonitor {
+
+  private ApplicationId applicationId;
+  private ApplicationTimeoutType appTimeoutType;
+
+  RMAppToMonitor(ApplicationId appId, ApplicationTimeoutType timeoutType) {
+    this.applicationId = appId;
+    this.appTimeoutType = timeoutType;
+  }
+
+  public ApplicationId getApplicationId() {
+    return applicationId;
+  }
+
+  public ApplicationTimeoutType getAppTimeoutType() {
+    return appTimeoutType;
+  }
+
+  @Override
+  public int hashCode() {
+    return applicationId.hashCode() + appTimeoutType.hashCode();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (getClass() != obj.getClass()) {
+      return false;
+    }
+    RMAppToMonitor other = (RMAppToMonitor) obj;
+    if (!this.applicationId.equals(other.getApplicationId())) {
+      return false;
+    }
+    if (this.appTimeoutType != other.getAppTimeoutType()) {
+      return false;
+    }
+    return true;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append(applicationId.toString()).append("_").append(appTimeoutType);
+    return sb.toString();
+  }
+}
\ No newline at end of file
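
A hedged note on the key semantics: equals() and hashCode() make the
(applicationId, timeoutType) pair usable as a HashMap key, so one application
can be tracked per timeout type. The constructor is package-private, so a
sketch like this would only compile inside the rmapp.monitor package:

    static boolean sameKey(ApplicationId appId) {
      RMAppToMonitor a =
          new RMAppToMonitor(appId, ApplicationTimeoutType.LIFETIME);
      RMAppToMonitor b =
          new RMAppToMonitor(appId, ApplicationTimeoutType.LIFETIME);
      return a.equals(b) && a.hashCode() == b.hashCode(); // true
    }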

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ae5a3a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/package-info.java
new file mode 100644
index 0000000..a3cc7ef
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.resourcemanager.rmapp.monitor contains
+ * classes related to application lifetime monitoring.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp.monitor;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ae5a3a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index f843261..25a8288 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerState;
@@ -460,7 +461,7 @@ public class MockRM extends ResourceManager {
     return submitApp(resource, name, user, acls, false, queue,
       super.getConfig().getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
       YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), null, null, true, false,
-      false, null, 0, null, true, priority, amLabel);
+        false, null, 0, null, true, priority, amLabel, null);
   }
 
   public RMApp submitApp(Resource resource, String name, String user,
@@ -561,7 +562,7 @@ public class MockRM extends ResourceManager {
     return submitApp(capability, name, user, acls, unmanaged, queue,
       maxAppAttempts, ts, appType, waitForAccepted, keepContainers,
       isAppIdProvided, applicationId, attemptFailuresValidityInterval,
-      logAggregationContext, cancelTokensWhenComplete, priority, "");
+        logAggregationContext, cancelTokensWhenComplete, priority, "", null);
   }
 
   public RMApp submitApp(Resource capability, String name, String user,
@@ -570,7 +571,8 @@ public class MockRM extends ResourceManager {
       boolean waitForAccepted, boolean keepContainers, boolean isAppIdProvided,
       ApplicationId applicationId, long attemptFailuresValidityInterval,
       LogAggregationContext logAggregationContext,
-      boolean cancelTokensWhenComplete, Priority priority, String amLabel)
+      boolean cancelTokensWhenComplete, Priority priority, String amLabel,
+      Map<ApplicationTimeoutType, Long> applicationTimeouts)
       throws Exception {
     ApplicationId appId = isAppIdProvided ? applicationId : null;
     ApplicationClientProtocol client = getClientRMService();
@@ -587,6 +589,9 @@ public class MockRM extends ResourceManager {
     sub.setApplicationId(appId);
     sub.setApplicationName(name);
     sub.setMaxAppAttempts(maxAppAttempts);
+    if (applicationTimeouts != null && applicationTimeouts.size() > 0) {
+      sub.setApplicationTimeouts(applicationTimeouts);
+    }
     if (unmanaged) {
       sub.setUnmanagedAM(true);
     }
@@ -1073,4 +1078,15 @@ public class MockRM extends ResourceManager {
         !apps.containsKey(appId));
     LOG.info("app is removed from scheduler, " + appId);
   }
+
+  public RMApp submitApp(int masterMemory, Priority priority,
+      Map<ApplicationTimeoutType, Long> applicationTimeouts) throws Exception {
+    Resource resource = Resource.newInstance(masterMemory, 0);
+    return submitApp(
+        resource, "", UserGroupInformation.getCurrentUser().getShortUserName(),
+        null, false, null,
+        super.getConfig().getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
+            YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), null, null, true,
+        false, false, null, 0, null, true, priority, null, applicationTimeouts);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ae5a3a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestApplicationLifetimeMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestApplicationLifetimeMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestApplicationLifetimeMonitor.java
new file mode 100644
index 0000000..3f2db1d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestApplicationLifetimeMonitor.java
@@ -0,0 +1,165 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
+import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.TestRMRestart;
+import org.apache.hadoop.yarn.server.resourcemanager.TestWorkPreservingRMRestart;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test class for the application lifetime monitor feature.
+ */
+public class TestApplicationLifetimeMonitor {
+  private YarnConfiguration conf;
+
+  @Before
+  public void setup() throws IOException {
+    conf = new YarnConfiguration();
+    Logger rootLogger = LogManager.getRootLogger();
+    rootLogger.setLevel(Level.DEBUG);
+    UserGroupInformation.setConfiguration(conf);
+    conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
+    conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED,
+        true);
+    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
+    conf.setLong(YarnConfiguration.RM_APPLICATION_LIFETIME_MONITOR_INTERVAL_MS,
+        3000L);
+  }
+
+  @Test(timeout = 90000)
+  public void testApplicationLifetimeMonitor() throws Exception {
+    MockRM rm = null;
+    try {
+      rm = new MockRM(conf);
+      rm.start();
+      Priority appPriority = Priority.newInstance(0);
+      MockNM nm1 = rm.registerNode("127.0.0.1:1234", 16 * 1024);
+
+      Map<ApplicationTimeoutType, Long> timeouts =
+          new HashMap<ApplicationTimeoutType, Long>();
+      timeouts.put(ApplicationTimeoutType.LIFETIME, 10L);
+      RMApp app1 = rm.submitApp(1024, appPriority, timeouts);
+      nm1.nodeHeartbeat(true);
+      // Send launch Event
+      MockAM am1 =
+          rm.sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
+      am1.registerAppAttempt();
+      rm.waitForState(app1.getApplicationId(), RMAppState.KILLED);
+      Assert.assertTrue("Applicaiton killed before lifetime value",
+          (System.currentTimeMillis() - app1.getSubmitTime()) > 10000);
+    } finally {
+      stopRM(rm);
+    }
+  }
+
+  @SuppressWarnings("rawtypes")
+  @Test(timeout = 180000)
+  public void testApplicationLifetimeOnRMRestart() throws Exception {
+    MemoryRMStateStore memStore = new MemoryRMStateStore();
+    memStore.init(conf);
+    MockRM rm1 = new MockRM(conf, memStore);
+    rm1.start();
+    MockNM nm1 =
+        new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
+    nm1.registerNode();
+    nm1.nodeHeartbeat(true);
+
+    long appLifetime = 60L;
+    Map<ApplicationTimeoutType, Long> timeouts =
+        new HashMap<ApplicationTimeoutType, Long>();
+    timeouts.put(ApplicationTimeoutType.LIFETIME, appLifetime);
+    RMApp app1 = rm1.submitApp(200, Priority.newInstance(0), timeouts);
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
+
+    // Re-start RM
+    MockRM rm2 = new MockRM(conf, memStore);
+    rm2.start();
+    nm1.setResourceTrackerService(rm2.getResourceTrackerService());
+
+    // recover app
+    RMApp recoveredApp1 =
+        rm2.getRMContext().getRMApps().get(app1.getApplicationId());
+
+    NMContainerStatus amContainer = TestRMRestart.createNMContainerStatus(
+        am1.getApplicationAttemptId(), 1, ContainerState.RUNNING);
+    NMContainerStatus runningContainer = TestRMRestart.createNMContainerStatus(
+        am1.getApplicationAttemptId(), 2, ContainerState.RUNNING);
+
+    nm1.registerNode(Arrays.asList(amContainer, runningContainer), null);
+
+    // Wait for RM to settle down on recovering containers;
+    TestWorkPreservingRMRestart.waitForNumContainersToRecover(2, rm2,
+        am1.getApplicationAttemptId());
+    Set<ContainerId> launchedContainers =
+        ((RMNodeImpl) rm2.getRMContext().getRMNodes().get(nm1.getNodeId()))
+            .getLaunchedContainers();
+    assertTrue(launchedContainers.contains(amContainer.getContainerId()));
+    assertTrue(launchedContainers.contains(runningContainer.getContainerId()));
+
+    // check that RMContainers are re-created and the container state is correct.
+    rm2.waitForState(nm1, amContainer.getContainerId(),
+        RMContainerState.RUNNING);
+    rm2.waitForState(nm1, runningContainer.getContainerId(),
+        RMContainerState.RUNNING);
+
+    // re-register the attempt with rm2
+    rm2.waitForState(recoveredApp1.getApplicationId(), RMAppState.ACCEPTED);
+    am1.setAMRMProtocol(rm2.getApplicationMasterService(), rm2.getRMContext());
+    am1.registerAppAttempt();
+    rm2.waitForState(recoveredApp1.getApplicationId(), RMAppState.RUNNING);
+
+    // wait for app life time and application to be in killed state.
+    rm2.waitForState(recoveredApp1.getApplicationId(), RMAppState.KILLED);
+    Assert.assertTrue("Applicaiton killed before lifetime value",
+        (System.currentTimeMillis()
+            - recoveredApp1.getSubmitTime()) > appLifetime);
+  }
+
+  private void stopRM(MockRM rm) {
+    if (rm != null) {
+      rm.stop();
+    }
+  }
+}




[41/57] [abbrv] hadoop git commit: HDFS-10910. HDFS Erasure Coding doc should state its currently supported erasure coding policies. Contributed by Yiqun Lin.

Posted by in...@apache.org.
HDFS-10910. HDFS Erasure Coding doc should state its currently supported erasure coding policies. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee33a022
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee33a022
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee33a022

Branch: refs/heads/HDFS-10467
Commit: ee33a02234511ac69c1e491fd38490a141ec907e
Parents: 2ab1ef1
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Fri Sep 30 12:48:11 2016 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Fri Sep 30 12:48:11 2016 -0700

----------------------------------------------------------------------
 .../hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md      | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee33a022/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 9066a15..76c1b3a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -59,9 +59,9 @@ Architecture
       1. _Read the data from source nodes:_ Input data is read in parallel from source nodes using a dedicated thread pool.
         Based on the EC policy, it schedules the read requests to all source targets and reads only the minimum number of input blocks for reconstruction.
 
-      1. _Decode the data and generate the output data:_ New data and parity blocks are decoded from the input data. All missing data and parity blocks are decoded together.
+      2. _Decode the data and generate the output data:_ New data and parity blocks are decoded from the input data. All missing data and parity blocks are decoded together.
 
-      1. _Transfer the generated data blocks to target nodes:_ Once decoding is finished, the recovered blocks are transferred to target DataNodes.
+      3. _Transfer the generated data blocks to target nodes:_ Once decoding is finished, the recovered blocks are transferred to target DataNodes.
 
  *  **ErasureCoding policy**
     To accommodate heterogeneous workloads, we allow files and directories in an HDFS cluster to have different replication and EC policies.
@@ -69,10 +69,9 @@ Architecture
 
       1. _The ECSchema:_ This includes the numbers of data and parity blocks in an EC group (e.g., 6+3), as well as the codec algorithm (e.g., Reed-Solomon).
 
-      1. _The size of a striping cell._ This determines the granularity of striped reads and writes, including buffer sizes and encoding work.
+      2. _The size of a striping cell._ This determines the granularity of striped reads and writes, including buffer sizes and encoding work.
 
-    Currently, HDFS supports the Reed-Solomon and XOR erasure coding algorithms. Additional algorithms are planned as future work.
-    The system default scheme is Reed-Solomon (6, 3) with a cell size of 64KB.
+    There are three policies currently supported: RS-DEFAULT-3-2-64k, RS-DEFAULT-6-3-64k and RS-LEGACY-6-3-64k, all with a default cell size of 64KB. The system default policy is RS-DEFAULT-6-3-64k, which uses the default schema RS_6_3_SCHEMA with a cell size of 64KB.
 
 
 Deployment




[28/57] [abbrv] hadoop git commit: Revert "HADOOP-13584. hdoop-aliyun: merge HADOOP-12756 branch back" This reverts commit 5707f88d8550346f167e45c2f8c4161eb3957e3a

Posted by in...@apache.org.
Revert "HADOOP-13584. hdoop-aliyun: merge HADOOP-12756 branch back"
This reverts commit 5707f88d8550346f167e45c2f8c4161eb3957e3a


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d1443988
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d1443988
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d1443988

Branch: refs/heads/HDFS-10467
Commit: d1443988f809fe6656f60dfed4ee4e0f4844ee5c
Parents: 9a44a83
Author: Kai Zheng <ka...@intel.com>
Authored: Thu Sep 29 09:18:27 2016 +0800
Committer: Kai Zheng <ka...@intel.com>
Committed: Thu Sep 29 09:18:27 2016 +0800

----------------------------------------------------------------------
 .gitignore                                      |   2 -
 hadoop-project/pom.xml                          |  22 -
 .../dev-support/findbugs-exclude.xml            |  18 -
 hadoop-tools/hadoop-aliyun/pom.xml              | 154 ------
 .../aliyun/oss/AliyunCredentialsProvider.java   |  87 ---
 .../fs/aliyun/oss/AliyunOSSFileSystem.java      | 543 -------------------
 .../fs/aliyun/oss/AliyunOSSFileSystemStore.java | 516 ------------------
 .../fs/aliyun/oss/AliyunOSSInputStream.java     | 260 ---------
 .../fs/aliyun/oss/AliyunOSSOutputStream.java    | 111 ----
 .../hadoop/fs/aliyun/oss/AliyunOSSUtils.java    | 167 ------
 .../apache/hadoop/fs/aliyun/oss/Constants.java  | 113 ----
 .../hadoop/fs/aliyun/oss/package-info.java      |  22 -
 .../site/markdown/tools/hadoop-aliyun/index.md  | 294 ----------
 .../fs/aliyun/oss/AliyunOSSTestUtils.java       |  77 ---
 .../fs/aliyun/oss/TestAliyunCredentials.java    |  78 ---
 .../oss/TestAliyunOSSFileSystemContract.java    | 239 --------
 .../oss/TestAliyunOSSFileSystemStore.java       | 125 -----
 .../fs/aliyun/oss/TestAliyunOSSInputStream.java | 145 -----
 .../aliyun/oss/TestAliyunOSSOutputStream.java   |  91 ----
 .../aliyun/oss/contract/AliyunOSSContract.java  |  49 --
 .../contract/TestAliyunOSSContractCreate.java   |  35 --
 .../contract/TestAliyunOSSContractDelete.java   |  34 --
 .../contract/TestAliyunOSSContractDistCp.java   |  44 --
 .../TestAliyunOSSContractGetFileStatus.java     |  35 --
 .../contract/TestAliyunOSSContractMkdir.java    |  34 --
 .../oss/contract/TestAliyunOSSContractOpen.java |  34 --
 .../contract/TestAliyunOSSContractRename.java   |  35 --
 .../contract/TestAliyunOSSContractRootDir.java  |  69 ---
 .../oss/contract/TestAliyunOSSContractSeek.java |  34 --
 .../src/test/resources/contract/aliyun-oss.xml  | 115 ----
 .../src/test/resources/core-site.xml            |  46 --
 .../src/test/resources/log4j.properties         |  23 -
 hadoop-tools/hadoop-tools-dist/pom.xml          |   6 -
 hadoop-tools/pom.xml                            |   1 -
 34 files changed, 3658 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index 194862b..a5d69d0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -31,5 +31,3 @@ hadoop-tools/hadoop-aws/src/test/resources/auth-keys.xml
 hadoop-tools/hadoop-aws/src/test/resources/contract-test-options.xml
 hadoop-tools/hadoop-azure/src/test/resources/azure-auth-keys.xml
 patchprocess/
-hadoop-tools/hadoop-aliyun/src/test/resources/auth-keys.xml
-hadoop-tools/hadoop-aliyun/src/test/resources/contract-test-options.xml

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 49ea40f..d9a01a0 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -439,12 +439,6 @@
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-aliyun</artifactId>
-        <version>${project.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-kms</artifactId>
         <version>${project.version}</version>
         <classifier>classes</classifier>
@@ -1011,22 +1005,6 @@
         <version>4.2.0</version>
      </dependency>
 
-      <dependency>
-        <groupId>com.aliyun.oss</groupId>
-        <artifactId>aliyun-sdk-oss</artifactId>
-        <version>2.2.1</version>
-        <exclusions>
-          <exclusion>
-            <groupId>org.apache.httpcomponents</groupId>
-            <artifactId>httpclient</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>commons-beanutils</groupId>
-            <artifactId>commons-beanutils</artifactId>
-          </exclusion>
-        </exclusions>
-     </dependency>
-
      <dependency>
        <groupId>xerces</groupId>
        <artifactId>xercesImpl</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml b/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
deleted file mode 100644
index 40d78d0..0000000
--- a/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
+++ /dev/null
@@ -1,18 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<FindBugsFilter>
-</FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/pom.xml b/hadoop-tools/hadoop-aliyun/pom.xml
deleted file mode 100644
index 358b18b..0000000
--- a/hadoop-tools/hadoop-aliyun/pom.xml
+++ /dev/null
@@ -1,154 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha2-SNAPSHOT</version>
-    <relativePath>../../hadoop-project</relativePath>
-  </parent>
-  <artifactId>hadoop-aliyun</artifactId>
-  <name>Apache Hadoop Aliyun OSS support</name>
-  <packaging>jar</packaging>
-
-  <properties>
-    <file.encoding>UTF-8</file.encoding>
-    <downloadSources>true</downloadSources>
-  </properties>
-
-  <profiles>
-    <profile>
-      <id>tests-off</id>
-      <activation>
-        <file>
-          <missing>src/test/resources/auth-keys.xml</missing>
-        </file>
-      </activation>
-      <properties>
-        <maven.test.skip>true</maven.test.skip>
-      </properties>
-    </profile>
-    <profile>
-      <id>tests-on</id>
-      <activation>
-        <file>
-          <exists>src/test/resources/auth-keys.xml</exists>
-        </file>
-      </activation>
-      <properties>
-        <maven.test.skip>false</maven.test.skip>
-      </properties>
-    </profile>
-  </profiles>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>findbugs-maven-plugin</artifactId>
-        <configuration>
-          <findbugsXmlOutput>true</findbugsXmlOutput>
-          <xmlOutput>true</xmlOutput>
-          <excludeFilterFile>${basedir}/dev-support/findbugs-exclude.xml
-          </excludeFilterFile>
-          <effort>Max</effort>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-project-info-reports-plugin</artifactId>
-        <configuration>
-          <dependencyDetailsEnabled>false</dependencyDetailsEnabled>
-          <dependencyLocationsEnabled>false</dependencyLocationsEnabled>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-surefire-plugin</artifactId>
-        <configuration>
-          <forkedProcessTimeoutInSeconds>3600</forkedProcessTimeoutInSeconds>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>deplist</id>
-            <phase>compile</phase>
-            <goals>
-              <goal>list</goal>
-            </goals>
-            <configuration>
-              <!-- build a shellprofile -->
-              <outputFile>${project.basedir}/target/hadoop-tools-deps/${project.artifactId}.tools-optional.txt</outputFile>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-
-  <dependencies>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>com.aliyun.oss</groupId>
-      <artifactId>aliyun-sdk-oss</artifactId>
-      <scope>compile</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>compile</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-distcp</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-distcp</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-yarn-server-tests</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-</project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java
deleted file mode 100644
index b46c67a..0000000
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.aliyun.oss;
-
-import com.aliyun.oss.common.auth.Credentials;
-import com.aliyun.oss.common.auth.CredentialsProvider;
-import com.aliyun.oss.common.auth.DefaultCredentials;
-import com.aliyun.oss.common.auth.InvalidCredentialsException;
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-
-import java.io.IOException;
-
-import static org.apache.hadoop.fs.aliyun.oss.Constants.*;
-
-/**
- * Support session credentials for authenticating with Aliyun.
- */
-public class AliyunCredentialsProvider implements CredentialsProvider {
-  private Credentials credentials = null;
-
-  public AliyunCredentialsProvider(Configuration conf)
-      throws IOException {
-    String accessKeyId;
-    String accessKeySecret;
-    String securityToken;
-    try {
-      accessKeyId = AliyunOSSUtils.getValueWithKey(conf, ACCESS_KEY_ID);
-      accessKeySecret = AliyunOSSUtils.getValueWithKey(conf, ACCESS_KEY_SECRET);
-    } catch (IOException e) {
-      throw new InvalidCredentialsException(e);
-    }
-
-    try {
-      securityToken = AliyunOSSUtils.getValueWithKey(conf, SECURITY_TOKEN);
-    } catch (IOException e) {
-      securityToken = null;
-    }
-
-    if (StringUtils.isEmpty(accessKeyId)
-        || StringUtils.isEmpty(accessKeySecret)) {
-      throw new InvalidCredentialsException(
-          "AccessKeyId and AccessKeySecret should not be null or empty.");
-    }
-
-    if (StringUtils.isNotEmpty(securityToken)) {
-      credentials = new DefaultCredentials(accessKeyId, accessKeySecret,
-          securityToken);
-    } else {
-      credentials = new DefaultCredentials(accessKeyId, accessKeySecret);
-    }
-  }
-
-  @Override
-  public void setCredentials(Credentials creds) {
-    if (creds == null) {
-      throw new InvalidCredentialsException("Credentials should not be null.");
-    }
-
-    credentials = creds;
-  }
-
-  @Override
-  public Credentials getCredentials() {
-    if (credentials == null) {
-      throw new InvalidCredentialsException("Invalid credentials");
-    }
-
-    return credentials;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
deleted file mode 100644
index 81e038d..0000000
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
+++ /dev/null
@@ -1,543 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.aliyun.oss;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.util.Progressable;
-
-import com.aliyun.oss.model.OSSObjectSummary;
-import com.aliyun.oss.model.ObjectListing;
-import com.aliyun.oss.model.ObjectMetadata;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.fs.aliyun.oss.Constants.*;
-
-/**
- * Implementation of {@link FileSystem} for <a href="https://oss.aliyun.com">
- * Aliyun OSS</a>, used to access OSS blob system in a filesystem style.
- */
-public class AliyunOSSFileSystem extends FileSystem {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(AliyunOSSFileSystem.class);
-  private URI uri;
-  private Path workingDir;
-  private AliyunOSSFileSystemStore store;
-  private int maxKeys;
-
-  @Override
-  public FSDataOutputStream append(Path path, int bufferSize,
-      Progressable progress) throws IOException {
-    throw new IOException("Append is not supported!");
-  }
-
-  @Override
-  public void close() throws IOException {
-    try {
-      store.close();
-    } finally {
-      super.close();
-    }
-  }
-
-  @Override
-  public FSDataOutputStream create(Path path, FsPermission permission,
-      boolean overwrite, int bufferSize, short replication, long blockSize,
-      Progressable progress) throws IOException {
-    String key = pathToKey(path);
-    FileStatus status = null;
-
-    try {
-      // get the status or throw a FNFE
-      status = getFileStatus(path);
-
-      // if the thread reaches here, there is something at the path
-      if (status.isDirectory()) {
-        // path references a directory
-        throw new FileAlreadyExistsException(path + " is a directory");
-      }
-      if (!overwrite) {
-        // path references a file and overwrite is disabled
-        throw new FileAlreadyExistsException(path + " already exists");
-      }
-      LOG.debug("Overwriting file {}", path);
-    } catch (FileNotFoundException e) {
-      // this means the file is not found
-    }
-
-    return new FSDataOutputStream(new AliyunOSSOutputStream(getConf(),
-        store, key, progress, statistics), (Statistics)(null));
-  }
-
-  @Override
-  public boolean delete(Path path, boolean recursive) throws IOException {
-    try {
-      return innerDelete(getFileStatus(path), recursive);
-    } catch (FileNotFoundException e) {
-      LOG.debug("Couldn't delete {} - does not exist", path);
-      return false;
-    }
-  }
-
-  /**
-   * Delete an object. See {@link #delete(Path, boolean)}.
-   *
-   * @param status fileStatus object
-   * @param recursive if path is a directory and set to
-   * true, the directory is deleted else throws an exception. In
-   * case of a file the recursive can be set to either true or false.
-   * @return  true if delete is successful else false.
-   * @throws IOException due to inability to delete a directory or file.
-   */
-  private boolean innerDelete(FileStatus status, boolean recursive)
-      throws IOException {
-    Path f = status.getPath();
-    String key = pathToKey(f);
-    if (status.isDirectory()) {
-      if (!recursive) {
-        FileStatus[] statuses = listStatus(status.getPath());
-        // Check whether it is an empty directory or not
-        if (statuses.length > 0) {
-          throw new IOException("Cannot remove directory " + f +
-              ": It is not empty!");
-        } else {
-          // Delete empty directory without '-r'
-          key = AliyunOSSUtils.maybeAddTrailingSlash(key);
-          store.deleteObject(key);
-        }
-      } else {
-        store.deleteDirs(key);
-      }
-    } else {
-      store.deleteObject(key);
-    }
-
-    createFakeDirectoryIfNecessary(f);
-    return true;
-  }
-
-  private void createFakeDirectoryIfNecessary(Path f) throws IOException {
-    String key = pathToKey(f);
-    if (StringUtils.isNotEmpty(key) && !exists(f)) {
-      LOG.debug("Creating new fake directory at {}", f);
-      mkdir(pathToKey(f.getParent()));
-    }
-  }
-
-  @Override
-  public FileStatus getFileStatus(Path path) throws IOException {
-    Path qualifiedPath = path.makeQualified(uri, workingDir);
-    String key = pathToKey(qualifiedPath);
-
-    // Root always exists
-    if (key.length() == 0) {
-      return new FileStatus(0, true, 1, 0, 0, qualifiedPath);
-    }
-
-    ObjectMetadata meta = store.getObjectMetadata(key);
-    // If key not found and key does not end with "/"
-    if (meta == null && !key.endsWith("/")) {
-      // Retry with a trailing slash, in case the key is a directory marker
-      key += "/";
-      meta = store.getObjectMetadata(key);
-    }
-    if (meta == null) {
-      ObjectListing listing = store.listObjects(key, 1, null, false);
-      if (CollectionUtils.isNotEmpty(listing.getObjectSummaries()) ||
-          CollectionUtils.isNotEmpty(listing.getCommonPrefixes())) {
-        return new FileStatus(0, true, 1, 0, 0, qualifiedPath);
-      } else {
-        throw new FileNotFoundException(path + ": No such file or directory!");
-      }
-    } else if (objectRepresentsDirectory(key, meta.getContentLength())) {
-      return new FileStatus(0, true, 1, 0, 0, qualifiedPath);
-    } else {
-      return new FileStatus(meta.getContentLength(), false, 1,
-          getDefaultBlockSize(path), meta.getLastModified().getTime(),
-          qualifiedPath);
-    }
-  }
-
-  @Override
-  public String getScheme() {
-    return "oss";
-  }
-
-  @Override
-  public URI getUri() {
-    return uri;
-  }
-
-  @Override
-  public Path getWorkingDirectory() {
-    return workingDir;
-  }
-
-  @Deprecated
-  public long getDefaultBlockSize() {
-    return getConf().getLong(FS_OSS_BLOCK_SIZE_KEY, FS_OSS_BLOCK_SIZE_DEFAULT);
-  }
-
-  @Override
-  public String getCanonicalServiceName() {
-    // Delegation tokens are not supported
-    return null;
-  }
-
-  /**
-   * Initialize new FileSystem.
-   *
-   * @param name the uri of the file system, including host, port, etc.
-   * @param conf configuration of the file system
-   * @throws IOException IO problems
-   */
-  public void initialize(URI name, Configuration conf) throws IOException {
-    super.initialize(name, conf);
-
-    uri = URI.create(name.getScheme() + "://" + name.getAuthority());
-    workingDir = new Path("/user",
-        System.getProperty("user.name")).makeQualified(uri, null);
-
-    store = new AliyunOSSFileSystemStore();
-    store.initialize(name, conf, statistics);
-    maxKeys = conf.getInt(MAX_PAGING_KEYS_KEY, MAX_PAGING_KEYS_DEFAULT);
-    setConf(conf);
-  }
-
-  /**
-   * Check if OSS object represents a directory.
-   *
-   * @param name object key
-   * @param size object content length
-   * @return true if object represents a directory
-   */
-  private boolean objectRepresentsDirectory(final String name,
-      final long size) {
-    return StringUtils.isNotEmpty(name) && name.endsWith("/") && size == 0L;
-  }
-
-  /**
-   * Turn a path (relative or otherwise) into an OSS key.
-   *
-   * @param path the path of the file.
-   * @return the key of the object that represents the file.
-   */
-  private String pathToKey(Path path) {
-    if (!path.isAbsolute()) {
-      path = new Path(workingDir, path);
-    }
-
-    return path.toUri().getPath().substring(1);
-  }
-
-  private Path keyToPath(String key) {
-    return new Path("/" + key);
-  }
-
-  @Override
-  public FileStatus[] listStatus(Path path) throws IOException {
-    String key = pathToKey(path);
-    LOG.debug("List status for path: {}", path);
-
-    final List<FileStatus> result = new ArrayList<FileStatus>();
-    final FileStatus fileStatus = getFileStatus(path);
-
-    if (fileStatus.isDirectory()) {
-      LOG.debug("listStatus: doing listObjects for directory {}", key);
-
-      ObjectListing objects = store.listObjects(key, maxKeys, null, false);
-      while (true) {
-        statistics.incrementReadOps(1);
-        for (OSSObjectSummary objectSummary : objects.getObjectSummaries()) {
-          String objKey = objectSummary.getKey();
-          if (objKey.equals(key + "/")) {
-            LOG.debug("Ignoring: {}", objKey);
-            continue;
-          } else {
-            Path keyPath = keyToPath(objectSummary.getKey())
-                .makeQualified(uri, workingDir);
-            LOG.debug("Adding: fi: {}", keyPath);
-            result.add(new FileStatus(objectSummary.getSize(), false, 1,
-                getDefaultBlockSize(keyPath),
-                objectSummary.getLastModified().getTime(), keyPath));
-          }
-        }
-
-        for (String prefix : objects.getCommonPrefixes()) {
-          if (prefix.equals(key + "/")) {
-            LOG.debug("Ignoring: {}", prefix);
-            continue;
-          } else {
-            Path keyPath = keyToPath(prefix).makeQualified(uri, workingDir);
-            LOG.debug("Adding: rd: {}", keyPath);
-            result.add(new FileStatus(0, true, 1, 0, 0, keyPath));
-          }
-        }
-
-        if (objects.isTruncated()) {
-          LOG.debug("listStatus: list truncated - getting next batch");
-          String nextMarker = objects.getNextMarker();
-          objects = store.listObjects(key, maxKeys, nextMarker, false);
-          statistics.incrementReadOps(1);
-        } else {
-          break;
-        }
-      }
-    } else {
-      LOG.debug("Adding: rd (not a dir): {}", path);
-      result.add(fileStatus);
-    }
-
-    return result.toArray(new FileStatus[result.size()]);
-  }
-
-  /**
-   * Create an empty object that represents an empty directory.
-   *
-   * @param key directory key
-   * @return true if the directory marker was successfully created
-   * @throws IOException if storing the empty object fails
-   */
-  private boolean mkdir(final String key) throws IOException {
-    String dirName = key;
-    if (StringUtils.isNotEmpty(key)) {
-      if (!key.endsWith("/")) {
-        dirName += "/";
-      }
-      store.storeEmptyFile(dirName);
-    }
-    return true;
-  }
-
-  @Override
-  public boolean mkdirs(Path path, FsPermission permission)
-      throws IOException {
-    try {
-      FileStatus fileStatus = getFileStatus(path);
-
-      if (fileStatus.isDirectory()) {
-        return true;
-      } else {
-        throw new FileAlreadyExistsException("Path is a file: " + path);
-      }
-    } catch (FileNotFoundException e) {
-      validatePath(path);
-      String key = pathToKey(path);
-      return mkdir(key);
-    }
-  }
-
-  /**
-   * Check that no ancestor of the path is an existing file.
-   *
-   * @param path the path to be checked.
-   * @throws IOException if an ancestor of the path exists and is a file.
-   */
-  private void validatePath(Path path) throws IOException {
-    Path fPart = path.getParent();
-    do {
-      try {
-        FileStatus fileStatus = getFileStatus(fPart);
-        if (fileStatus.isDirectory()) {
-          // If path exists and a directory, exit
-          break;
-        } else {
-          throw new FileAlreadyExistsException(String.format(
-              "Can't make directory for path '%s', it is a file.", fPart));
-        }
-      } catch (FileNotFoundException fnfe) {
-        // this ancestor does not exist yet; keep walking up the tree
-      }
-      fPart = fPart.getParent();
-    } while (fPart != null);
-  }
-
-  @Override
-  public FSDataInputStream open(Path path, int bufferSize) throws IOException {
-    final FileStatus fileStatus = getFileStatus(path);
-    if (fileStatus.isDirectory()) {
-      throw new FileNotFoundException("Can't open " + path +
-          " because it is a directory");
-    }
-
-    return new FSDataInputStream(new AliyunOSSInputStream(getConf(), store,
-        pathToKey(path), fileStatus.getLen(), statistics));
-  }
-
-  @Override
-  public boolean rename(Path srcPath, Path dstPath) throws IOException {
-    if (srcPath.isRoot()) {
-      // Cannot rename root of file system
-      LOG.debug("Cannot rename the root of a filesystem");
-      return false;
-    }
-    // Fail if the destination is a descendant of the source
-    Path parent = dstPath.getParent();
-    while (parent != null && !srcPath.equals(parent)) {
-      parent = parent.getParent();
-    }
-    if (parent != null) {
-      return false;
-    }
-    FileStatus srcStatus = getFileStatus(srcPath);
-    FileStatus dstStatus;
-    try {
-      dstStatus = getFileStatus(dstPath);
-    } catch (FileNotFoundException fnde) {
-      dstStatus = null;
-    }
-    if (dstStatus == null) {
-      // If dst doesn't exist, check whether dst dir exists or not
-      dstStatus = getFileStatus(dstPath.getParent());
-      if (!dstStatus.isDirectory()) {
-        throw new IOException(String.format(
-            "Failed to rename %s to %s, %s is a file", srcPath, dstPath,
-            dstPath.getParent()));
-      }
-    } else {
-      if (srcStatus.getPath().equals(dstStatus.getPath())) {
-        return !srcStatus.isDirectory();
-      } else if (dstStatus.isDirectory()) {
-        // If dst is a directory
-        dstPath = new Path(dstPath, srcPath.getName());
-        FileStatus[] statuses;
-        try {
-          statuses = listStatus(dstPath);
-        } catch (FileNotFoundException fnde) {
-          statuses = null;
-        }
-        if (statuses != null && statuses.length > 0) {
-          // If dst exists and not a directory / not empty
-          throw new FileAlreadyExistsException(String.format(
-              "Failed to rename %s to %s, file already exists or not empty!",
-              srcPath, dstPath));
-        }
-      } else {
-        // If dst is not a directory
-        throw new FileAlreadyExistsException(String.format(
-            "Failed to rename %s to %s, file already exists!", srcPath,
-            dstPath));
-      }
-    }
-    if (srcStatus.isDirectory()) {
-      copyDirectory(srcPath, dstPath);
-    } else {
-      copyFile(srcPath, dstPath);
-    }
-
-    return srcPath.equals(dstPath) || delete(srcPath, true);
-  }
-
-  /**
-   * Copy file from source path to destination path.
-   * (the caller should make sure srcPath is a file and dstPath is valid)
-   *
-   * @param srcPath source path.
-   * @param dstPath destination path.
-   * @return true if file is successfully copied.
-   */
-  private boolean copyFile(Path srcPath, Path dstPath) {
-    String srcKey = pathToKey(srcPath);
-    String dstKey = pathToKey(dstPath);
-    return store.copyFile(srcKey, dstKey);
-  }
-
-  /**
-   * Copy a directory from source path to destination path.
-   * (the caller should make sure srcPath is a directory, and dstPath is valid)
-   *
-   * @param srcPath source path.
-   * @param dstPath destination path.
-   * @return true if the directory is successfully copied.
-   * @throws IOException if listing or copying the directory fails.
-   */
-  private boolean copyDirectory(Path srcPath, Path dstPath) throws IOException {
-    String srcKey = AliyunOSSUtils
-        .maybeAddTrailingSlash(pathToKey(srcPath));
-    String dstKey = AliyunOSSUtils
-        .maybeAddTrailingSlash(pathToKey(dstPath));
-
-    if (dstKey.startsWith(srcKey)) {
-      LOG.debug("Cannot rename a directory to a subdirectory of itself");
-      return false;
-    }
-
-    store.storeEmptyFile(dstKey);
-    ObjectListing objects = store.listObjects(srcKey, maxKeys, null, true);
-    statistics.incrementReadOps(1);
-    // Copy files from src folder to dst
-    while (true) {
-      for (OSSObjectSummary objectSummary : objects.getObjectSummaries()) {
-        String newKey =
-            dstKey.concat(objectSummary.getKey().substring(srcKey.length()));
-        store.copyFile(objectSummary.getKey(), newKey);
-      }
-      if (objects.isTruncated()) {
-        String nextMarker = objects.getNextMarker();
-        objects = store.listObjects(srcKey, maxKeys, nextMarker, true);
-        statistics.incrementReadOps(1);
-      } else {
-        break;
-      }
-    }
-    return true;
-  }
-
-  @Override
-  public void setWorkingDirectory(Path dir) {
-    this.workingDir = dir;
-  }
-
-  public AliyunOSSFileSystemStore getStore() {
-    return store;
-  }
-}
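
For context, a minimal usage sketch (not part of this patch) of driving the
filesystem above through Hadoop's generic FileSystem API. The endpoint and
bucket names are hypothetical placeholders, and credentials are assumed to be
configured via the fs.oss.accessKeyId/fs.oss.accessKeySecret keys:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class OssUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.oss.endpoint", "oss-cn-hangzhou.aliyuncs.com"); // placeholder
    FileSystem fs = FileSystem.get(URI.create("oss://example-bucket/"), conf);

    Path p = new Path("/user/demo/hello.txt");
    try (FSDataOutputStream out = fs.create(p, true)) {
      out.writeBytes("hello, oss\n");     // buffered locally, uploaded on close()
    }
    FileStatus st = fs.getFileStatus(p);  // resolved via object metadata
    System.out.println(st.getLen() + " bytes at " + st.getPath());
    fs.delete(p, false);                  // recursive=false is fine for a file
  }
}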

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
deleted file mode 100644
index 9792a78..0000000
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
+++ /dev/null
@@ -1,516 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.aliyun.oss;
-
-import com.aliyun.oss.ClientConfiguration;
-import com.aliyun.oss.ClientException;
-import com.aliyun.oss.OSSClient;
-import com.aliyun.oss.OSSException;
-import com.aliyun.oss.common.auth.CredentialsProvider;
-import com.aliyun.oss.common.comm.Protocol;
-import com.aliyun.oss.model.AbortMultipartUploadRequest;
-import com.aliyun.oss.model.CannedAccessControlList;
-import com.aliyun.oss.model.CompleteMultipartUploadRequest;
-import com.aliyun.oss.model.CompleteMultipartUploadResult;
-import com.aliyun.oss.model.CopyObjectResult;
-import com.aliyun.oss.model.DeleteObjectsRequest;
-import com.aliyun.oss.model.GetObjectRequest;
-import com.aliyun.oss.model.InitiateMultipartUploadRequest;
-import com.aliyun.oss.model.InitiateMultipartUploadResult;
-import com.aliyun.oss.model.ListObjectsRequest;
-import com.aliyun.oss.model.ObjectMetadata;
-import com.aliyun.oss.model.ObjectListing;
-import com.aliyun.oss.model.OSSObjectSummary;
-import com.aliyun.oss.model.PartETag;
-import com.aliyun.oss.model.PutObjectResult;
-import com.aliyun.oss.model.UploadPartCopyRequest;
-import com.aliyun.oss.model.UploadPartCopyResult;
-import com.aliyun.oss.model.UploadPartRequest;
-import com.aliyun.oss.model.UploadPartResult;
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.ByteArrayInputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.apache.hadoop.fs.aliyun.oss.Constants.*;
-
-/**
- * Core implementation of Aliyun OSS Filesystem for Hadoop.
- * Provides the bridging logic between Hadoop's abstract filesystem and
- * Aliyun OSS.
- */
-public class AliyunOSSFileSystemStore {
-  public static final Logger LOG =
-      LoggerFactory.getLogger(AliyunOSSFileSystemStore.class);
-  private FileSystem.Statistics statistics;
-  private OSSClient ossClient;
-  private String bucketName;
-  private long uploadPartSize;
-  private long multipartThreshold;
-  private long partSize;
-  private int maxKeys;
-  private String serverSideEncryptionAlgorithm;
-
-  public void initialize(URI uri, Configuration conf,
-                         FileSystem.Statistics stat) throws IOException {
-    statistics = stat;
-    ClientConfiguration clientConf = new ClientConfiguration();
-    clientConf.setMaxConnections(conf.getInt(MAXIMUM_CONNECTIONS_KEY,
-        MAXIMUM_CONNECTIONS_DEFAULT));
-    boolean secureConnections = conf.getBoolean(SECURE_CONNECTIONS_KEY,
-        SECURE_CONNECTIONS_DEFAULT);
-    clientConf.setProtocol(secureConnections ? Protocol.HTTPS : Protocol.HTTP);
-    clientConf.setMaxErrorRetry(conf.getInt(MAX_ERROR_RETRIES_KEY,
-        MAX_ERROR_RETRIES_DEFAULT));
-    clientConf.setConnectionTimeout(conf.getInt(ESTABLISH_TIMEOUT_KEY,
-        ESTABLISH_TIMEOUT_DEFAULT));
-    clientConf.setSocketTimeout(conf.getInt(SOCKET_TIMEOUT_KEY,
-        SOCKET_TIMEOUT_DEFAULT));
-
-    String proxyHost = conf.getTrimmed(PROXY_HOST_KEY, "");
-    int proxyPort = conf.getInt(PROXY_PORT_KEY, -1);
-    if (StringUtils.isNotEmpty(proxyHost)) {
-      clientConf.setProxyHost(proxyHost);
-      if (proxyPort >= 0) {
-        clientConf.setProxyPort(proxyPort);
-      } else {
-        if (secureConnections) {
-          LOG.warn("Proxy host set without port. Using HTTPS default 443");
-          clientConf.setProxyPort(443);
-        } else {
-          LOG.warn("Proxy host set without port. Using HTTP default 80");
-          clientConf.setProxyPort(80);
-        }
-      }
-      String proxyUsername = conf.getTrimmed(PROXY_USERNAME_KEY);
-      String proxyPassword = conf.getTrimmed(PROXY_PASSWORD_KEY);
-      if ((proxyUsername == null) != (proxyPassword == null)) {
-        String msg = "Proxy error: " + PROXY_USERNAME_KEY + " or " +
-            PROXY_PASSWORD_KEY + " set without the other.";
-        LOG.error(msg);
-        throw new IllegalArgumentException(msg);
-      }
-      clientConf.setProxyUsername(proxyUsername);
-      clientConf.setProxyPassword(proxyPassword);
-      clientConf.setProxyDomain(conf.getTrimmed(PROXY_DOMAIN_KEY));
-      clientConf.setProxyWorkstation(conf.getTrimmed(PROXY_WORKSTATION_KEY));
-    } else if (proxyPort >= 0) {
-      String msg = "Proxy error: " + PROXY_PORT_KEY + " set without " +
-          PROXY_HOST_KEY;
-      LOG.error(msg);
-      throw new IllegalArgumentException(msg);
-    }
-
-    String endPoint = conf.getTrimmed(ENDPOINT_KEY, "");
-    CredentialsProvider provider =
-        AliyunOSSUtils.getCredentialsProvider(conf);
-    ossClient = new OSSClient(endPoint, provider, clientConf);
-    uploadPartSize = conf.getLong(MULTIPART_UPLOAD_SIZE_KEY,
-        MULTIPART_UPLOAD_SIZE_DEFAULT);
-    multipartThreshold = conf.getLong(MIN_MULTIPART_UPLOAD_THRESHOLD_KEY,
-        MIN_MULTIPART_UPLOAD_THRESHOLD_DEFAULT);
-    partSize = conf.getLong(MULTIPART_UPLOAD_SIZE_KEY,
-        MULTIPART_UPLOAD_SIZE_DEFAULT);
-    if (partSize < MIN_MULTIPART_UPLOAD_PART_SIZE) {
-      partSize = MIN_MULTIPART_UPLOAD_PART_SIZE;
-    }
-    serverSideEncryptionAlgorithm =
-        conf.get(SERVER_SIDE_ENCRYPTION_ALGORITHM_KEY, "");
-
-    if (uploadPartSize < 5 * 1024 * 1024) {
-      LOG.warn(MULTIPART_UPLOAD_SIZE_KEY + " must be at least 5 MB");
-      uploadPartSize = 5 * 1024 * 1024;
-    }
-
-    if (multipartThreshold < 5 * 1024 * 1024) {
-      LOG.warn(MIN_MULTIPART_UPLOAD_THRESHOLD_KEY + " must be at least 5 MB");
-      multipartThreshold = 5 * 1024 * 1024;
-    }
-
-    if (multipartThreshold > 1024 * 1024 * 1024) {
-      LOG.warn(MIN_MULTIPART_UPLOAD_THRESHOLD_KEY + " must be less than 1 GB");
-      multipartThreshold = 1024 * 1024 * 1024;
-    }
-
-    // bucketName must be initialized before it is used by setBucketAcl below
-    bucketName = uri.getHost();
-    maxKeys = conf.getInt(MAX_PAGING_KEYS_KEY, MAX_PAGING_KEYS_DEFAULT);
-
-    String cannedACLName = conf.get(CANNED_ACL_KEY, CANNED_ACL_DEFAULT);
-    if (StringUtils.isNotEmpty(cannedACLName)) {
-      CannedAccessControlList cannedACL =
-          CannedAccessControlList.valueOf(cannedACLName);
-      ossClient.setBucketAcl(bucketName, cannedACL);
-    }
-  }
-
-  /**
-   * Delete an object, and update write operation statistics.
-   *
-   * @param key key of the blob to delete.
-   */
-  public void deleteObject(String key) {
-    ossClient.deleteObject(bucketName, key);
-    statistics.incrementWriteOps(1);
-  }
-
-  /**
-   * Delete a list of keys, and update write operation statistics.
-   *
-   * @param keysToDelete collection of keys to delete.
-   */
-  public void deleteObjects(List<String> keysToDelete) {
-    if (CollectionUtils.isNotEmpty(keysToDelete)) {
-      DeleteObjectsRequest deleteRequest =
-          new DeleteObjectsRequest(bucketName);
-      deleteRequest.setKeys(keysToDelete);
-      ossClient.deleteObjects(deleteRequest);
-      statistics.incrementWriteOps(keysToDelete.size());
-    }
-  }
-
-  /**
-   * Delete a directory from Aliyun OSS.
-   *
-   * @param key directory key to delete.
-   */
-  public void deleteDirs(String key) {
-    key = AliyunOSSUtils.maybeAddTrailingSlash(key);
-    ListObjectsRequest listRequest = new ListObjectsRequest(bucketName);
-    listRequest.setPrefix(key);
-    listRequest.setDelimiter(null);
-    listRequest.setMaxKeys(maxKeys);
-
-    while (true) {
-      ObjectListing objects = ossClient.listObjects(listRequest);
-      statistics.incrementReadOps(1);
-      List<String> keysToDelete = new ArrayList<String>();
-      for (OSSObjectSummary objectSummary : objects.getObjectSummaries()) {
-        keysToDelete.add(objectSummary.getKey());
-      }
-      deleteObjects(keysToDelete);
-      if (objects.isTruncated()) {
-        listRequest.setMarker(objects.getNextMarker());
-      } else {
-        break;
-      }
-    }
-  }
-
-  /**
-   * Return metadata of a given object key.
-   *
-   * @param key object key.
-   * @return object metadata, or null if the key does not exist.
-   */
-  public ObjectMetadata getObjectMetadata(String key) {
-    try {
-      return ossClient.getObjectMetadata(bucketName, key);
-    } catch (OSSException osse) {
-      return null;
-    } finally {
-      statistics.incrementReadOps(1);
-    }
-  }
-
-  /**
-   * Upload an empty file as an OSS object, using single upload.
-   *
-   * @param key object key.
-   * @throws IOException if failed to upload object.
-   */
-  public void storeEmptyFile(String key) throws IOException {
-    ObjectMetadata dirMeta = new ObjectMetadata();
-    byte[] buffer = new byte[0];
-    ByteArrayInputStream in = new ByteArrayInputStream(buffer);
-    dirMeta.setContentLength(0);
-    try {
-      ossClient.putObject(bucketName, key, in, dirMeta);
-    } finally {
-      in.close();
-    }
-  }
-
-  /**
-   * Copy an object from source key to destination key.
-   *
-   * @param srcKey source key.
-   * @param dstKey destination key.
-   * @return true if file is successfully copied.
-   */
-  public boolean copyFile(String srcKey, String dstKey) {
-    ObjectMetadata objectMeta =
-        ossClient.getObjectMetadata(bucketName, srcKey);
-    long contentLength = objectMeta.getContentLength();
-    if (contentLength <= multipartThreshold) {
-      return singleCopy(srcKey, dstKey);
-    } else {
-      return multipartCopy(srcKey, contentLength, dstKey);
-    }
-  }
-
-  /**
-   * Use single copy to copy an OSS object.
-   * (The caller should make sure srcPath is a file and dstPath is valid)
-   *
-   * @param srcKey source key.
-   * @param dstKey destination key.
-   * @return true if object is successfully copied.
-   */
-  private boolean singleCopy(String srcKey, String dstKey) {
-    CopyObjectResult copyResult =
-        ossClient.copyObject(bucketName, srcKey, bucketName, dstKey);
-    LOG.debug(copyResult.getETag());
-    return true;
-  }
-
-  /**
-   * Use multipart copy to copy an OSS object.
-   * (The caller should make sure srcPath is a file and dstPath is valid)
-   *
-   * @param srcKey source key.
-   * @param contentLength data size of the object to copy.
-   * @param dstKey destination key.
-   * @return true if success, or false if upload is aborted.
-   */
-  private boolean multipartCopy(String srcKey, long contentLength,
-      String dstKey) {
-    long realPartSize =
-        AliyunOSSUtils.calculatePartSize(contentLength, uploadPartSize);
-    int partNum = (int) (contentLength / realPartSize);
-    if (contentLength % realPartSize != 0) {
-      partNum++;
-    }
-    InitiateMultipartUploadRequest initiateMultipartUploadRequest =
-        new InitiateMultipartUploadRequest(bucketName, dstKey);
-    ObjectMetadata meta = new ObjectMetadata();
-    if (StringUtils.isNotEmpty(serverSideEncryptionAlgorithm)) {
-      meta.setServerSideEncryption(serverSideEncryptionAlgorithm);
-    }
-    initiateMultipartUploadRequest.setObjectMetadata(meta);
-    InitiateMultipartUploadResult initiateMultipartUploadResult =
-        ossClient.initiateMultipartUpload(initiateMultipartUploadRequest);
-    String uploadId = initiateMultipartUploadResult.getUploadId();
-    List<PartETag> partETags = new ArrayList<PartETag>();
-    try {
-      for (int i = 0; i < partNum; i++) {
-        long skipBytes = realPartSize * i;
-        long size = (realPartSize < contentLength - skipBytes) ?
-            realPartSize : contentLength - skipBytes;
-        UploadPartCopyRequest partCopyRequest = new UploadPartCopyRequest();
-        partCopyRequest.setSourceBucketName(bucketName);
-        partCopyRequest.setSourceKey(srcKey);
-        partCopyRequest.setBucketName(bucketName);
-        partCopyRequest.setKey(dstKey);
-        partCopyRequest.setUploadId(uploadId);
-        partCopyRequest.setPartSize(size);
-        partCopyRequest.setBeginIndex(skipBytes);
-        partCopyRequest.setPartNumber(i + 1);
-        UploadPartCopyResult partCopyResult =
-            ossClient.uploadPartCopy(partCopyRequest);
-        statistics.incrementWriteOps(1);
-        partETags.add(partCopyResult.getPartETag());
-      }
-      CompleteMultipartUploadRequest completeMultipartUploadRequest =
-          new CompleteMultipartUploadRequest(bucketName, dstKey,
-              uploadId, partETags);
-      CompleteMultipartUploadResult completeMultipartUploadResult =
-          ossClient.completeMultipartUpload(completeMultipartUploadRequest);
-      LOG.debug(completeMultipartUploadResult.getETag());
-      return true;
-    } catch (OSSException | ClientException e) {
-      LOG.error("Multipart copy from " + srcKey + " to " + dstKey +
-          " failed; aborting upload " + uploadId, e);
-      AbortMultipartUploadRequest abortMultipartUploadRequest =
-          new AbortMultipartUploadRequest(bucketName, dstKey, uploadId);
-      ossClient.abortMultipartUpload(abortMultipartUploadRequest);
-      return false;
-    }
-  }
-
-  /**
-   * Upload a file as an OSS object, using single upload.
-   *
-   * @param key object key.
-   * @param file local file to upload.
-   * @throws IOException if failed to upload object.
-   */
-  public void uploadObject(String key, File file) throws IOException {
-    File object = file.getAbsoluteFile();
-    FileInputStream fis = new FileInputStream(object);
-    ObjectMetadata meta = new ObjectMetadata();
-    meta.setContentLength(object.length());
-    if (StringUtils.isNotEmpty(serverSideEncryptionAlgorithm)) {
-      meta.setServerSideEncryption(serverSideEncryptionAlgorithm);
-    }
-    try {
-      PutObjectResult result = ossClient.putObject(bucketName, key, fis, meta);
-      LOG.debug(result.getETag());
-      statistics.incrementWriteOps(1);
-    } finally {
-      fis.close();
-    }
-  }
-
-  /**
-   * Upload a file as an OSS object, using multipart upload.
-   *
-   * @param key object key.
-   * @param file local file to upload.
-   * @throws IOException if failed to upload object.
-   */
-  public void multipartUploadObject(String key, File file) throws IOException {
-    File object = file.getAbsoluteFile();
-    long dataLen = object.length();
-    long realPartSize = AliyunOSSUtils.calculatePartSize(dataLen, partSize);
-    int partNum = (int) (dataLen / realPartSize);
-    if (dataLen % realPartSize != 0) {
-      partNum += 1;
-    }
-
-    InitiateMultipartUploadRequest initiateMultipartUploadRequest =
-        new InitiateMultipartUploadRequest(bucketName, key);
-    ObjectMetadata meta = new ObjectMetadata();
-    if (StringUtils.isNotEmpty(serverSideEncryptionAlgorithm)) {
-      meta.setServerSideEncryption(serverSideEncryptionAlgorithm);
-    }
-    initiateMultipartUploadRequest.setObjectMetadata(meta);
-    InitiateMultipartUploadResult initiateMultipartUploadResult =
-        ossClient.initiateMultipartUpload(initiateMultipartUploadRequest);
-    List<PartETag> partETags = new ArrayList<PartETag>();
-    String uploadId = initiateMultipartUploadResult.getUploadId();
-
-    try {
-      for (int i = 0; i < partNum; i++) {
-        // TODO: Optimize this, avoid opening the object multiple times
-        FileInputStream fis = new FileInputStream(object);
-        try {
-          long skipBytes = realPartSize * i;
-          AliyunOSSUtils.skipFully(fis, skipBytes);
-          long size = (realPartSize < dataLen - skipBytes) ?
-              realPartSize : dataLen - skipBytes;
-          UploadPartRequest uploadPartRequest = new UploadPartRequest();
-          uploadPartRequest.setBucketName(bucketName);
-          uploadPartRequest.setKey(key);
-          uploadPartRequest.setUploadId(uploadId);
-          uploadPartRequest.setInputStream(fis);
-          uploadPartRequest.setPartSize(size);
-          uploadPartRequest.setPartNumber(i + 1);
-          UploadPartResult uploadPartResult =
-              ossClient.uploadPart(uploadPartRequest);
-          statistics.incrementWriteOps(1);
-          partETags.add(uploadPartResult.getPartETag());
-        } finally {
-          fis.close();
-        }
-      }
-      CompleteMultipartUploadRequest completeMultipartUploadRequest =
-          new CompleteMultipartUploadRequest(bucketName, key,
-              uploadId, partETags);
-      CompleteMultipartUploadResult completeMultipartUploadResult =
-          ossClient.completeMultipartUpload(completeMultipartUploadRequest);
-      LOG.debug(completeMultipartUploadResult.getETag());
-    } catch (OSSException | ClientException e) {
-      AbortMultipartUploadRequest abortMultipartUploadRequest =
-          new AbortMultipartUploadRequest(bucketName, key, uploadId);
-      ossClient.abortMultipartUpload(abortMultipartUploadRequest);
-      // surface the failure instead of silently dropping the data
-      throw new IOException("Multipart upload of " + key + " failed", e);
-    }
-  }
-
-  /**
-   * List objects under a given prefix.
-   *
-   * @param prefix listing prefix.
-   * @param maxListingLength maximum number of entries to return.
-   * @param marker last key from any previous listing, for pagination.
-   * @param recursive whether to list the directory recursively.
-   * @return a listing of matches.
-   */
-  public ObjectListing listObjects(String prefix, int maxListingLength,
-                                   String marker, boolean recursive) {
-    String delimiter = recursive ? null : "/";
-    prefix = AliyunOSSUtils.maybeAddTrailingSlash(prefix);
-    ListObjectsRequest listRequest = new ListObjectsRequest(bucketName);
-    listRequest.setPrefix(prefix);
-    listRequest.setDelimiter(delimiter);
-    listRequest.setMaxKeys(maxListingLength);
-    listRequest.setMarker(marker);
-
-    ObjectListing listing = ossClient.listObjects(listRequest);
-    statistics.incrementReadOps(1);
-    return listing;
-  }
-
-  /**
-   * Retrieve a byte range of an object as a stream.
-   *
-   * @param key the object key being retrieved from Aliyun OSS.
-   * @param byteStart start position, inclusive.
-   * @param byteEnd end position, inclusive.
-   * @return an input stream over the range, or null if the request fails
-   * (for example, because the key does not exist).
-   */
-  public InputStream retrieve(String key, long byteStart, long byteEnd) {
-    try {
-      GetObjectRequest request = new GetObjectRequest(bucketName, key);
-      request.setRange(byteStart, byteEnd);
-      return ossClient.getObject(request).getObjectContent();
-    } catch (OSSException | ClientException e) {
-      return null;
-    }
-  }
-
-  /**
-   * Close OSS client properly.
-   */
-  public void close() {
-    if (ossClient != null) {
-      ossClient.shutdown();
-      ossClient = null;
-    }
-  }
-
-  /**
-   * Clean up all objects matching the prefix.
-   *
-   * @param prefix Aliyun OSS object prefix.
-   */
-  public void purge(String prefix) {
-    try {
-      // Note: only the first page of listing results is processed here
-      ObjectListing objects = listObjects(prefix, maxKeys, null, true);
-      for (OSSObjectSummary object : objects.getObjectSummaries()) {
-        ossClient.deleteObject(bucketName, object.getKey());
-      }
-
-      for (String dir : objects.getCommonPrefixes()) {
-        deleteDirs(dir);
-      }
-    } catch (OSSException | ClientException e) {
-      LOG.error("Failed to purge " + prefix, e);
-    }
-  }
-}
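
As an aside, a standalone sketch (mine, not from the patch) of the part
arithmetic that multipartCopy() and multipartUploadObject() above share: the
object is split into parts of realPartSize bytes, with a shorter final part.

final class PartCountSketch {
  /** Hypothetical helper mirroring the part-count computation used above. */
  static int partCount(long contentLength, long realPartSize) {
    int partNum = (int) (contentLength / realPartSize);
    if (contentLength % realPartSize != 0) {
      partNum++;  // a shorter trailing part covers the remainder
    }
    return partNum;
  }
}

// e.g. contentLength = 25 MB, realPartSize = 10 MB
//   -> parts of 10 MB, 10 MB and 5 MB, so partCount(...) == 3;
// part i (0-based) copies bytes
//   [i * realPartSize, min((i + 1) * realPartSize, contentLength)).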

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java
deleted file mode 100644
index b87a3a7..0000000
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSInputStream.java
+++ /dev/null
@@ -1,260 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.aliyun.oss;
-
-import java.io.EOFException;
-import java.io.IOException;
-import java.io.InputStream;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSExceptionMessages;
-import org.apache.hadoop.fs.FSInputStream;
-import org.apache.hadoop.fs.FileSystem.Statistics;
-
-import static org.apache.hadoop.fs.aliyun.oss.Constants.*;
-
-/**
- * The input stream for OSS blob system.
- * The class uses multi-part downloading to read data from the object content
- * stream.
- */
-public class AliyunOSSInputStream extends FSInputStream {
-  public static final Log LOG = LogFactory.getLog(AliyunOSSInputStream.class);
-  private final long downloadPartSize;
-  private AliyunOSSFileSystemStore store;
-  private final String key;
-  private Statistics statistics;
-  private boolean closed;
-  private InputStream wrappedStream = null;
-  private long contentLength;
-  private long position;
-  private long partRemaining;
-
-  public AliyunOSSInputStream(Configuration conf,
-      AliyunOSSFileSystemStore store, String key, Long contentLength,
-      Statistics statistics) throws IOException {
-    this.store = store;
-    this.key = key;
-    this.statistics = statistics;
-    this.contentLength = contentLength;
-    downloadPartSize = conf.getLong(MULTIPART_DOWNLOAD_SIZE_KEY,
-        MULTIPART_DOWNLOAD_SIZE_DEFAULT);
-    reopen(0);
-    closed = false;
-  }
-
-  /**
-   * Reopen the wrapped stream at the given position, requesting up to one
-   * download part of data from the object content stream.
-   *
-   * @param pos position from start of a file
-   * @throws IOException if failed to reopen
-   */
-  private synchronized void reopen(long pos) throws IOException {
-    long partSize;
-
-    if (pos < 0) {
-      throw new EOFException("Cannot seek to a negative position: " + pos);
-    } else if (pos > contentLength) {
-      throw new EOFException("Cannot seek past EOF, contentLength: " +
-          contentLength + " position: " + pos);
-    } else if (pos + downloadPartSize > contentLength) {
-      partSize = contentLength - pos;
-    } else {
-      partSize = downloadPartSize;
-    }
-
-    if (wrappedStream != null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Aborting old stream to open at pos " + pos);
-      }
-      wrappedStream.close();
-    }
-
-    wrappedStream = store.retrieve(key, pos, pos + partSize - 1);
-    if (wrappedStream == null) {
-      throw new IOException("Null IO stream");
-    }
-    position = pos;
-    partRemaining = partSize;
-  }
-
-  @Override
-  public synchronized int read() throws IOException {
-    checkNotClosed();
-
-    if (partRemaining <= 0 && position < contentLength) {
-      reopen(position);
-    }
-
-    int tries = MAX_RETRIES;
-    boolean retry;
-    int byteRead = -1;
-    do {
-      retry = false;
-      try {
-        byteRead = wrappedStream.read();
-      } catch (Exception e) {
-        handleReadException(e, --tries);
-        retry = true;
-      }
-    } while (retry);
-    if (byteRead >= 0) {
-      position++;
-      partRemaining--;
-    }
-
-    if (statistics != null && byteRead >= 0) {
-      statistics.incrementBytesRead(1);
-    }
-    return byteRead;
-  }
-
-
-  /**
-   * Verify that the input stream is open. Non-blocking; this checks the
-   * current state of the {@link #closed} flag.
-   *
-   * @throws IOException if the stream is closed.
-   */
-  private void checkNotClosed() throws IOException {
-    if (closed) {
-      throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
-    }
-  }
-
-  @Override
-  public synchronized int read(byte[] buf, int off, int len)
-      throws IOException {
-    checkNotClosed();
-
-    if (buf == null) {
-      throw new NullPointerException();
-    } else if (off < 0 || len < 0 || len > buf.length - off) {
-      throw new IndexOutOfBoundsException();
-    } else if (len == 0) {
-      return 0;
-    }
-
-    int bytesRead = 0;
-    // Not EOF, and read not done
-    while (position < contentLength && bytesRead < len) {
-      if (partRemaining == 0) {
-        reopen(position);
-      }
-
-      int tries = MAX_RETRIES;
-      boolean retry;
-      int bytes = -1;
-      do {
-        retry = false;
-        try {
-          bytes = wrappedStream.read(buf, off + bytesRead, len - bytesRead);
-        } catch (Exception e) {
-          handleReadException(e, --tries);
-          retry = true;
-        }
-      } while (retry);
-
-      if (bytes > 0) {
-        bytesRead += bytes;
-        position += bytes;
-        partRemaining -= bytes;
-      } else if (partRemaining != 0) {
-        throw new IOException("Failed to read from stream. Remaining:" +
-            partRemaining);
-      }
-    }
-
-    if (statistics != null && bytesRead > 0) {
-      statistics.incrementBytesRead(bytesRead);
-    }
-
-    // Read nothing, but attempt to read something
-    if (bytesRead == 0 && len > 0) {
-      return -1;
-    } else {
-      return bytesRead;
-    }
-  }
-
-  @Override
-  public synchronized void close() throws IOException {
-    if (closed) {
-      return;
-    }
-    closed = true;
-    if (wrappedStream != null) {
-      wrappedStream.close();
-    }
-  }
-
-  @Override
-  public synchronized int available() throws IOException {
-    checkNotClosed();
-
-    long remaining = contentLength - position;
-    if (remaining > Integer.MAX_VALUE) {
-      return Integer.MAX_VALUE;
-    }
-    return (int)remaining;
-  }
-
-  @Override
-  public synchronized void seek(long pos) throws IOException {
-    checkNotClosed();
-    if (position == pos) {
-      return;
-    } else if (pos > position && pos < position + partRemaining) {
-      AliyunOSSUtils.skipFully(wrappedStream, pos - position);
-      position = pos;
-    } else {
-      reopen(pos);
-    }
-  }
-
-  @Override
-  public synchronized long getPos() throws IOException {
-    checkNotClosed();
-    return position;
-  }
-
-  @Override
-  public boolean seekToNewSource(long targetPos) throws IOException {
-    checkNotClosed();
-    return false;
-  }
-
-  private void handleReadException(Exception e, int tries)
-      throws IOException {
-    if (tries == 0) {
-      throw new IOException(e);
-    }
-
-    LOG.warn("Exception caught on the OSS connection; reopening at" +
-        " position '" + position + "': " + e.getMessage());
-    try {
-      Thread.sleep(100);
-    } catch (InterruptedException e2) {
-      // restore the interrupt status for callers
-      Thread.currentThread().interrupt();
-      LOG.warn(e2.getMessage());
-    }
-    reopen(position);
-  }
-}
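
For illustration, a simplified sketch (assumptions mine) of the bounded retry
pattern that read() and read(byte[], int, int) above wrap around the OSS
stream: each attempt that throws decrements a retry budget, and the real code
additionally sleeps briefly and reopens the connection before retrying.

import java.io.IOException;
import java.io.InputStream;

final class RetryReadSketch {
  /** Hypothetical, simplified version of the stream's retry-on-read loop. */
  static int readWithRetry(InputStream in, int maxRetries) throws IOException {
    int tries = maxRetries;
    while (true) {
      try {
        return in.read();  // may fail if the OSS connection drops
      } catch (Exception e) {
        if (--tries == 0) {
          throw new IOException(e);
        }
        // the real handleReadException() sleeps ~100 ms and reopens here
      }
    }
  }
}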

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSOutputStream.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSOutputStream.java
deleted file mode 100644
index c75ee18..0000000
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSOutputStream.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.aliyun.oss;
-
-import java.io.BufferedOutputStream;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem.Statistics;
-import org.apache.hadoop.fs.LocalDirAllocator;
-import org.apache.hadoop.util.Progressable;
-
-import static org.apache.hadoop.fs.aliyun.oss.Constants.*;
-
-/**
- * The output stream for OSS blob system.
- * Data is buffered on local disk, then uploaded to OSS in the
- * {@link #close()} method.
- */
-public class AliyunOSSOutputStream extends OutputStream {
-  public static final Log LOG = LogFactory.getLog(AliyunOSSOutputStream.class);
-  private AliyunOSSFileSystemStore store;
-  private final String key;
-  private Statistics statistics;
-  private Progressable progress;
-  private long partSizeThreshold;
-  private LocalDirAllocator dirAlloc;
-  private boolean closed;
-  private File tmpFile;
-  private BufferedOutputStream backupStream;
-
-  public AliyunOSSOutputStream(Configuration conf,
-      AliyunOSSFileSystemStore store, String key, Progressable progress,
-      Statistics statistics) throws IOException {
-    this.store = store;
-    this.key = key;
-    // The caller can't get any progress information
-    this.progress = progress;
-    this.statistics = statistics;
-    partSizeThreshold = conf.getLong(MIN_MULTIPART_UPLOAD_THRESHOLD_KEY,
-        MIN_MULTIPART_UPLOAD_THRESHOLD_DEFAULT);
-
-    if (conf.get(BUFFER_DIR_KEY) == null) {
-      conf.set(BUFFER_DIR_KEY, conf.get("hadoop.tmp.dir") + "/oss");
-    }
-    dirAlloc = new LocalDirAllocator(BUFFER_DIR_KEY);
-
-    tmpFile = dirAlloc.createTmpFileForWrite("output-",
-        LocalDirAllocator.SIZE_UNKNOWN, conf);
-    backupStream = new BufferedOutputStream(new FileOutputStream(tmpFile));
-    closed = false;
-  }
-
-  @Override
-  public synchronized void close() throws IOException {
-    if (closed) {
-      return;
-    }
-    closed = true;
-    if (backupStream != null) {
-      backupStream.close();
-    }
-    long dataLen = tmpFile.length();
-    try {
-      if (dataLen <= partSizeThreshold) {
-        store.uploadObject(key, tmpFile);
-      } else {
-        store.multipartUploadObject(key, tmpFile);
-      }
-    } finally {
-      if (!tmpFile.delete()) {
-        LOG.warn("Can not delete file: " + tmpFile);
-      }
-    }
-  }
-
-
-  @Override
-  public synchronized void flush() throws IOException {
-    backupStream.flush();
-  }
-
-  @Override
-  public synchronized void write(int b) throws IOException {
-    backupStream.write(b);
-    statistics.incrementBytesWritten(1);
-  }
-
-}
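
A short usage sketch (not from the patch): because this stream stages all
bytes in a local temp file, nothing becomes visible in OSS until close()
runs, at which point the upload path is chosen by size. The helper name is
hypothetical.

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class WriteSketch {
  /** The object appears in OSS only after close() completes. */
  static void writeObject(FileSystem fs, Path path, byte[] payload)
      throws IOException {
    try (FSDataOutputStream out = fs.create(path, true)) {
      out.write(payload);  // spills to fs.oss.buffer.dir on local disk
    }
    // close() uploaded the temp file: a single PUT if its length was at most
    // fs.oss.multipart.upload.threshold, a multipart upload otherwise.
  }
}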

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSUtils.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSUtils.java
deleted file mode 100644
index cae9749..0000000
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSUtils.java
+++ /dev/null
@@ -1,167 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.aliyun.oss;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-import com.aliyun.oss.common.auth.CredentialsProvider;
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.ProviderUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.hadoop.fs.aliyun.oss.Constants.*;
-
-/**
- * Utility methods for Aliyun OSS code.
- */
-public final class AliyunOSSUtils {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(AliyunOSSUtils.class);
-
-  private AliyunOSSUtils() {
-  }
-
-  /**
-   * Get a password or secret from the configuration, resolving it through
-   * any configured credential providers.
-   *
-   * @param conf configuration that contains password information
-   * @param key the key of the password
-   * @return the value for the key
-   * @throws IOException if failed to get password from configuration
-   */
-  public static String getValueWithKey(Configuration conf, String key)
-      throws IOException {
-    try {
-      final char[] pass = conf.getPassword(key);
-      if (pass != null) {
-        return (new String(pass)).trim();
-      } else {
-        return "";
-      }
-    } catch (IOException ioe) {
-      throw new IOException("Cannot find password option " + key, ioe);
-    }
-  }
-
-  /**
-   * Skip the requested number of bytes, or fail if there are not enough bytes
-   * left. This allows for the possibility that {@link InputStream#skip(long)}
-   * may not skip as many bytes as requested (most likely because of reaching
-   * EOF).
-   *
-   * @param is the input stream to skip.
-   * @param n the number of bytes to skip.
-   * @throws IOException thrown when fewer bytes were skipped than requested.
-   */
-  public static void skipFully(InputStream is, long n) throws IOException {
-    long total = 0;
-    long cur = 0;
-
-    do {
-      cur = is.skip(n - total);
-      total += cur;
-    } while((total < n) && (cur > 0));
-
-    if (total < n) {
-      throw new IOException("Failed to skip " + n + " bytes, possibly due " +
-              "to EOF.");
-    }
-  }
-
-  /**
-   * Calculate a proper size of multipart piece. If <code>minPartSize</code>
-   * is too small, the number of multipart pieces may exceed the limit of
-   * {@link Constants#MULTIPART_UPLOAD_PART_NUM_LIMIT}.
-   *
-   * @param contentLength the size of file.
-   * @param minPartSize the minimum size of multipart piece.
-   * @return a revised size for the multipart pieces.
-   */
-  public static long calculatePartSize(long contentLength, long minPartSize) {
-    long tmpPartSize = contentLength / MULTIPART_UPLOAD_PART_NUM_LIMIT + 1;
-    return Math.max(minPartSize, tmpPartSize);
-  }
-
-  /**
-   * Create credential provider specified by configuration, or create default
-   * credential provider if not specified.
-   *
-   * @param conf configuration
-   * @return a credential provider
-   * @throws IOException on any problem. Class construction issues may be
-   * nested inside the IOE.
-   */
-  public static CredentialsProvider getCredentialsProvider(Configuration conf)
-      throws IOException {
-    CredentialsProvider credentials;
-
-    String className = conf.getTrimmed(ALIYUN_OSS_CREDENTIALS_PROVIDER_KEY);
-    if (StringUtils.isEmpty(className)) {
-      Configuration newConf =
-          ProviderUtils.excludeIncompatibleCredentialProviders(conf,
-              AliyunOSSFileSystem.class);
-      credentials = new AliyunCredentialsProvider(newConf);
-    } else {
-      try {
-        LOG.debug("Credential provider class is:" + className);
-        Class<?> credClass = Class.forName(className);
-        try {
-          credentials =
-              (CredentialsProvider)credClass.getDeclaredConstructor(
-                  Configuration.class).newInstance(conf);
-        } catch (NoSuchMethodException | SecurityException e) {
-          credentials =
-              (CredentialsProvider)credClass.getDeclaredConstructor()
-              .newInstance();
-        }
-      } catch (ClassNotFoundException e) {
-        throw new IOException(className + " not found.", e);
-      } catch (NoSuchMethodException | SecurityException e) {
-        throw new IOException(String.format("%s constructor exception. A " +
-            "class specified in %s must provide an accessible constructor " +
-            "accepting a Configuration, or an accessible default " +
-            "constructor.", className, ALIYUN_OSS_CREDENTIALS_PROVIDER_KEY),
-            e);
-      } catch (ReflectiveOperationException | IllegalArgumentException e) {
-        throw new IOException(className + " instantiation exception.", e);
-      }
-    }
-
-    return credentials;
-  }
-
-  /**
-   * Add a trailing "/" to an OSS key if the key is not the root <i>and</i>
-   * does not already have a "/" at the end.
-   *
-   * @param key OSS key or ""
-   * @return the key with a trailing "/", or, if it is the root key, "".
-   */
-  public static String maybeAddTrailingSlash(String key) {
-    if (StringUtils.isNotEmpty(key) && !key.endsWith("/")) {
-      return key + '/';
-    } else {
-      return key;
-    }
-  }
-}
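
A worked example (numbers mine) of calculatePartSize() above: when the
configured part size would push the part count over the 10000-part limit, the
part size is grown instead.

final class PartSizeExample {
  static long example() {
    long contentLength = 200L * 1024 * 1024 * 1024; // a 200 GB object
    long minPartSize = 10L * 1024 * 1024;           // 10 MB configured size
    // 200 GB / 10 MB would need 20480 parts -- over the 10000-part limit,
    // so calculatePartSize() grows the part size instead:
    long tmpPartSize = contentLength / 10000 + 1;   // 21474837 bytes, ~20.5 MB
    // ceil(contentLength / partSize) <= 10000, keeping the upload legal.
    return Math.max(minPartSize, tmpPartSize);
  }
}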

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/Constants.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/Constants.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/Constants.java
deleted file mode 100644
index 04a2ccd..0000000
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/Constants.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.aliyun.oss;
-
-/**
- * All configuration constants for the OSS filesystem.
- */
-public final class Constants {
-
-  private Constants() {
-  }
-
-  // Class of credential provider
-  public static final String ALIYUN_OSS_CREDENTIALS_PROVIDER_KEY =
-      "fs.oss.credentials.provider";
-
-  // OSS access verification
-  public static final String ACCESS_KEY_ID = "fs.oss.accessKeyId";
-  public static final String ACCESS_KEY_SECRET = "fs.oss.accessKeySecret";
-  public static final String SECURITY_TOKEN = "fs.oss.securityToken";
-
-  // Number of simultaneous connections to OSS
-  public static final String MAXIMUM_CONNECTIONS_KEY =
-      "fs.oss.connection.maximum";
-  public static final int MAXIMUM_CONNECTIONS_DEFAULT = 32;
-
-  // Connect to OSS over SSL
-  public static final String SECURE_CONNECTIONS_KEY =
-      "fs.oss.connection.secure.enabled";
-  public static final boolean SECURE_CONNECTIONS_DEFAULT = true;
-
-  // Use a custom endpoint
-  public static final String ENDPOINT_KEY = "fs.oss.endpoint";
-
-  // Connect to oss through a proxy server
-  public static final String PROXY_HOST_KEY = "fs.oss.proxy.host";
-  public static final String PROXY_PORT_KEY = "fs.oss.proxy.port";
-  public static final String PROXY_USERNAME_KEY = "fs.oss.proxy.username";
-  public static final String PROXY_PASSWORD_KEY = "fs.oss.proxy.password";
-  public static final String PROXY_DOMAIN_KEY = "fs.oss.proxy.domain";
-  public static final String PROXY_WORKSTATION_KEY =
-      "fs.oss.proxy.workstation";
-
-  // Number of times we should retry errors
-  public static final String MAX_ERROR_RETRIES_KEY = "fs.oss.attempts.maximum";
-  public static final int MAX_ERROR_RETRIES_DEFAULT = 20;
-
-  // Time until we give up trying to establish a connection to oss
-  public static final String ESTABLISH_TIMEOUT_KEY =
-      "fs.oss.connection.establish.timeout";
-  public static final int ESTABLISH_TIMEOUT_DEFAULT = 50000;
-
-  // Time until we give up on a connection to oss
-  public static final String SOCKET_TIMEOUT_KEY = "fs.oss.connection.timeout";
-  public static final int SOCKET_TIMEOUT_DEFAULT = 200000;
-
-  // Number of records to get while paging through a directory listing
-  public static final String MAX_PAGING_KEYS_KEY = "fs.oss.paging.maximum";
-  public static final int MAX_PAGING_KEYS_DEFAULT = 1000;
-
-  // Size of each of the multipart pieces in bytes
-  public static final String MULTIPART_UPLOAD_SIZE_KEY =
-      "fs.oss.multipart.upload.size";
-
-  public static final long MULTIPART_UPLOAD_SIZE_DEFAULT = 10 * 1024 * 1024;
-  public static final int MULTIPART_UPLOAD_PART_NUM_LIMIT = 10000;
-
-  // Minimum size in bytes before we start multipart uploads or copies
-  public static final String MIN_MULTIPART_UPLOAD_THRESHOLD_KEY =
-      "fs.oss.multipart.upload.threshold";
-  public static final long MIN_MULTIPART_UPLOAD_THRESHOLD_DEFAULT =
-      20 * 1024 * 1024;
-
-  public static final String MULTIPART_DOWNLOAD_SIZE_KEY =
-      "fs.oss.multipart.download.size";
-
-  public static final long MULTIPART_DOWNLOAD_SIZE_DEFAULT = 100 * 1024;
-
-  // Comma separated list of directories
-  public static final String BUFFER_DIR_KEY = "fs.oss.buffer.dir";
-
-  // private | public-read | public-read-write
-  public static final String CANNED_ACL_KEY = "fs.oss.acl.default";
-  public static final String CANNED_ACL_DEFAULT = "";
-
-  // OSS server-side encryption
-  public static final String SERVER_SIDE_ENCRYPTION_ALGORITHM_KEY =
-      "fs.oss.server-side-encryption-algorithm";
-
-  public static final String FS_OSS_BLOCK_SIZE_KEY = "fs.oss.block.size";
-  public static final int FS_OSS_BLOCK_SIZE_DEFAULT = 64 * 1024 * 1024;
-  public static final String FS_OSS = "oss";
-
-  public static final long MIN_MULTIPART_UPLOAD_PART_SIZE = 100 * 1024L;
-  public static final int MAX_RETRIES = 10;
-
-}
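
The deleted Constants class above is, in effect, the configuration surface of the OSS connector: every key is a plain Hadoop configuration property paired with a *_DEFAULT fallback. As a minimal sketch of how these keys are consumed, assuming only the stock org.apache.hadoop.conf.Configuration API (the endpoint and credential values below are placeholders, not real ones):

    import org.apache.hadoop.conf.Configuration;

    public class OssConfSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Endpoint and credentials (placeholder values).
        conf.set("fs.oss.endpoint", "oss-cn-hangzhou.aliyuncs.com");
        conf.set("fs.oss.accessKeyId", "<access-key-id>");
        conf.set("fs.oss.accessKeySecret", "<access-key-secret>");
        // Tuning knobs, shown here with the defaults declared above.
        conf.setInt("fs.oss.connection.maximum", 32);
        conf.setBoolean("fs.oss.connection.secure.enabled", true);
        conf.setLong("fs.oss.multipart.upload.size", 10 * 1024 * 1024);
      }
    }

Keys left unset simply fall back to the corresponding *_DEFAULT constants, which is why the class declares each KEY and DEFAULT as a pair.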

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/package-info.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/package-info.java
deleted file mode 100644
index 234567b..0000000
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Aliyun OSS Filesystem.
- */
-package org.apache.hadoop.fs.aliyun.oss;
\ No newline at end of file




[07/57] [abbrv] hadoop git commit: HADOOP-13544. JDiff reports unnecessarily show unannotated APIs and cause confusion while our javadocs only show annotated and public APIs. (vinodkv via wangda)

Posted by in...@apache.org.
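
The substance of this patch is visible in the regenerated command-line comment below: ExcludePrivateAnnotationsJDiffDoclet is swapped for IncludePublicAnnotationsJDiffDoclet, so the JDiff XML now lists only APIs that opt in through Hadoop's audience annotations, matching what the javadocs already show. A minimal sketch of such an opt-in, using a hypothetical class and assuming the standard hadoop-annotations types:

    import org.apache.hadoop.classification.InterfaceAudience;
    import org.apache.hadoop.classification.InterfaceStability;

    // Kept in the JDiff report: public visibility plus an explicit
    // @InterfaceAudience.Public annotation.
    @InterfaceAudience.Public
    @InterfaceStability.Stable
    public class ExampleRecord {
      public String getName() {
        return "example";
      }
    }

A class that is public in the Java sense but annotated @InterfaceAudience.Private, or carrying no audience annotation at all, is now dropped from the report; that is why unannotated or private-annotated classes such as TypeConverter disappear from the XML in this diff.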
http://git-wip-us.apache.org/repos/asf/hadoop/blob/875062b5/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_2.7.2.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_2.7.2.xml b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_2.7.2.xml
index c20349b..00cb6f4 100644
--- a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_2.7.2.xml
+++ b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_2.7.2.xml
@@ -17,7 +17,7 @@
 -->
 <!-- Generated by the JDiff Javadoc doclet -->
 <!-- (http://www.jdiff.org) -->
-<!-- on Mon Jun 13 20:33:05 PDT 2016 -->
+<!-- on Wed Aug 24 13:56:43 PDT 2016 -->
 
 <api
   xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
@@ -25,219 +25,10 @@
   name="hadoop-mapreduce-client-common 2.7.2"
   jdversion="1.0.9">
 
-<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/jdiff.jar -verbose -classpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/classes:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-2.7.2.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.7.2.jar:/Users/vinodkv/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2
 .jar:/Users/vinodkv/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/vinodkv/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/vinodkv/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/vinodkv/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/vinodkv/.m2/repository/
 org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/vinodkv/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/vinodkv/.m2/repository/com/google/inject/guice/3.0/guice-3.0.jar:/Users/vinodkv/.m2/repository/javax/inject/javax.inject/1/javax.inject-1.jar:/Users/vinodkv/.m2/repository/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/vinodkv/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/vinodkv/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/Users/vinodkv/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop
 -yarn/hadoop-yarn-client/target/hadoop-yarn-client-2.7.2.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/hadoop-mapreduce-client-core-2.7.2.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/hadoop-yarn-server-common-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/vinodkv/.m2/repository/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar:/Users/vinodkv/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/vinodkv/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/vinodkv/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/vinodkv/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/vinodkv/Workspace/eclipse-works
 pace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/hadoop-common-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/vinodkv/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/vinodkv/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/vinodkv/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/vinodkv/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/vinodkv/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/vinodkv/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/vinodkv/.m2/repository/commons-configuration/commons
 -configuration/1.6/commons-configuration-1.6.jar:/Users/vinodkv/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/vinodkv/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/U
 sers/vinodkv/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/vinodkv/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/vinodkv/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/vinodkv/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.7.0_45.jdk/Contents/Home/lib/tools.jar:/Users/vinodkv/.m2/repository/com/google/inject/extensions/gui
 ce-servlet/3.0/guice-servlet-3.0.jar:/Users/vinodkv/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/vinodkv/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/vinodkv/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/vinodkv/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/vinodkv/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/vinodkv/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/vinodkv/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar -sourcepath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java -apidir /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/site/jdiff/xml -apiname hadoop-mapredu
 ce-client-common 2.7.2 -->
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/jdiff.jar -verbose -classpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/classes:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-2.7.2.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.7.2.jar:/Users/vinodkv/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.
 jar:/Users/vinodkv/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/vinodkv/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/vinodkv/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/vinodkv/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/vinodkv/.m2/repository/o
 rg/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/vinodkv/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/vinodkv/.m2/repository/com/google/inject/guice/3.0/guice-3.0.jar:/Users/vinodkv/.m2/repository/javax/inject/javax.inject/1/javax.inject-1.jar:/Users/vinodkv/.m2/repository/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/vinodkv/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/vinodkv/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/Users/vinodkv/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-
 yarn/hadoop-yarn-client/target/hadoop-yarn-client-2.7.2.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/hadoop-mapreduce-client-core-2.7.2.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/hadoop-yarn-server-common-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/vinodkv/.m2/repository/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar:/Users/vinodkv/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/vinodkv/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/vinodkv/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/vinodkv/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/vinodkv/Workspace/eclipse-worksp
 ace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/hadoop-common-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/vinodkv/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/vinodkv/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/vinodkv/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/vinodkv/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/vinodkv/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/vinodkv/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/vinodkv/.m2/repository/commons-configuration/commons-
 configuration/1.6/commons-configuration-1.6.jar:/Users/vinodkv/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/vinodkv/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Us
 ers/vinodkv/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/vinodkv/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/vinodkv/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/vinodkv/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_40.jdk/Contents/Home/lib/tools.jar:/Users/vinodkv/.m2/repository/com/google/inject/extensions/guic
 e-servlet/3.0/guice-servlet-3.0.jar:/Users/vinodkv/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/vinodkv/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/vinodkv/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/vinodkv/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/vinodkv/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/vinodkv/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/vinodkv/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar -sourcepath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapredu
 ce-client/hadoop-mapreduce-client-common/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/jdiff.jar -apidir /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/site/jdiff/xml -apiname hadoop-mapreduce-client-common 2.7.2 -->
 <package name="org.apache.hadoop.mapred">
 </package>
 <package name="org.apache.hadoop.mapreduce">
-  <!-- start class org.apache.hadoop.mapreduce.TypeConverter -->
-  <class name="TypeConverter" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="TypeConverter"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="fromYarn" return="org.apache.hadoop.mapred.JobID"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="id" type="org.apache.hadoop.mapreduce.v2.api.records.JobId"/>
-    </method>
-    <method name="fromYarn" return="org.apache.hadoop.mapreduce.JobID"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="appID" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
-    </method>
-    <method name="toYarn" return="org.apache.hadoop.mapreduce.v2.api.records.JobId"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="id" type="org.apache.hadoop.mapreduce.JobID"/>
-    </method>
-    <method name="fromYarn" return="org.apache.hadoop.mapreduce.TaskType"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="taskType" type="org.apache.hadoop.mapreduce.v2.api.records.TaskType"/>
-    </method>
-    <method name="toYarn" return="org.apache.hadoop.mapreduce.v2.api.records.TaskType"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="taskType" type="org.apache.hadoop.mapreduce.TaskType"/>
-    </method>
-    <method name="fromYarn" return="org.apache.hadoop.mapred.TaskID"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="id" type="org.apache.hadoop.mapreduce.v2.api.records.TaskId"/>
-    </method>
-    <method name="toYarn" return="org.apache.hadoop.mapreduce.v2.api.records.TaskId"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="id" type="org.apache.hadoop.mapreduce.TaskID"/>
-    </method>
-    <method name="toYarn" return="org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="state" type="org.apache.hadoop.mapred.TaskStatus.State"/>
-    </method>
-    <method name="toYarn" return="org.apache.hadoop.mapreduce.v2.api.records.Phase"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="phase" type="org.apache.hadoop.mapred.TaskStatus.Phase"/>
-    </method>
-    <method name="fromYarn" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="newEvents" type="org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent[]"/>
-    </method>
-    <method name="fromYarn" return="org.apache.hadoop.mapred.TaskCompletionEvent"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="newEvent" type="org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent"/>
-    </method>
-    <method name="fromYarn" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="newStatus" type="org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus"/>
-    </method>
-    <method name="fromYarn" return="org.apache.hadoop.mapred.TaskAttemptID"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="id" type="org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId"/>
-    </method>
-    <method name="toYarn" return="org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="id" type="org.apache.hadoop.mapred.TaskAttemptID"/>
-    </method>
-    <method name="toYarn" return="org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="id" type="org.apache.hadoop.mapreduce.TaskAttemptID"/>
-    </method>
-    <method name="fromYarn" return="org.apache.hadoop.mapreduce.Counters"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="yCntrs" type="org.apache.hadoop.mapreduce.v2.api.records.Counters"/>
-    </method>
-    <method name="toYarn" return="org.apache.hadoop.mapreduce.v2.api.records.Counters"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="counters" type="org.apache.hadoop.mapred.Counters"/>
-    </method>
-    <method name="toYarn" return="org.apache.hadoop.mapreduce.v2.api.records.Counters"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="counters" type="org.apache.hadoop.mapreduce.Counters"/>
-    </method>
-    <method name="fromYarn" return="org.apache.hadoop.mapreduce.JobStatus"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="jobreport" type="org.apache.hadoop.mapreduce.v2.api.records.JobReport"/>
-      <param name="trackingUrl" type="java.lang.String"/>
-    </method>
-    <method name="fromYarn" return="org.apache.hadoop.mapreduce.QueueState"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="state" type="org.apache.hadoop.yarn.api.records.QueueState"/>
-    </method>
-    <method name="fromYarn" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="state" type="org.apache.hadoop.mapreduce.v2.api.records.JobState"/>
-    </method>
-    <method name="fromYarn" return="org.apache.hadoop.mapred.TIPStatus"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="state" type="org.apache.hadoop.mapreduce.v2.api.records.TaskState"/>
-    </method>
-    <method name="fromYarn" return="org.apache.hadoop.mapreduce.TaskReport"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="report" type="org.apache.hadoop.mapreduce.v2.api.records.TaskReport"/>
-    </method>
-    <method name="fromYarn" return="java.util.List"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="taskReports" type="java.util.List"/>
-    </method>
-    <method name="fromYarn" return="org.apache.hadoop.mapreduce.JobStatus.State"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="yarnApplicationState" type="org.apache.hadoop.yarn.api.records.YarnApplicationState"/>
-      <param name="finalApplicationStatus" type="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"/>
-    </method>
-    <method name="fromYarn" return="org.apache.hadoop.mapreduce.TaskTrackerInfo"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="node" type="org.apache.hadoop.yarn.api.records.NodeReport"/>
-    </method>
-    <method name="fromYarnNodes" return="org.apache.hadoop.mapreduce.TaskTrackerInfo[]"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="nodes" type="java.util.List"/>
-    </method>
-    <method name="fromYarn" return="org.apache.hadoop.mapreduce.JobStatus"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="application" type="org.apache.hadoop.yarn.api.records.ApplicationReport"/>
-      <param name="jobFile" type="java.lang.String"/>
-    </method>
-    <method name="fromYarnApps" return="org.apache.hadoop.mapreduce.JobStatus[]"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="applications" type="java.util.List"/>
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-    </method>
-    <method name="fromYarn" return="org.apache.hadoop.mapreduce.QueueInfo"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="queueInfo" type="org.apache.hadoop.yarn.api.records.QueueInfo"/>
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-    </method>
-    <method name="fromYarnQueueInfo" return="org.apache.hadoop.mapreduce.QueueInfo[]"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="queues" type="java.util.List"/>
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-    </method>
-    <method name="fromYarnQueueUserAclsInfo" return="org.apache.hadoop.mapreduce.QueueAclsInfo[]"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="userAcls" type="java.util.List"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.mapreduce.TypeConverter -->
 </package>
 <package name="org.apache.hadoop.mapreduce.v2.api.protocolrecords">
   <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenRequest -->
@@ -245,12 +36,12 @@
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
     <method name="getDelegationToken" return="org.apache.hadoop.yarn.api.records.Token"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </method>
     <method name="setDelegationToken"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="dToken" type="org.apache.hadoop.yarn.api.records.Token"/>
@@ -271,498 +62,34 @@
     </doc>
   </interface>
   <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenResponse -->
-  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest -->
-  <interface name="FailTaskAttemptRequest"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="getTaskAttemptId" return="org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setTaskAttemptId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="taskAttemptId" type="org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId"/>
-    </method>
-  </interface>
-  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest -->
-  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse -->
-  <interface name="FailTaskAttemptResponse"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-  </interface>
-  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse -->
-  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest -->
-  <interface name="GetCountersRequest"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="getJobId" return="org.apache.hadoop.mapreduce.v2.api.records.JobId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setJobId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="jobId" type="org.apache.hadoop.mapreduce.v2.api.records.JobId"/>
-    </method>
-  </interface>
-  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest -->
-  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse -->
-  <interface name="GetCountersResponse"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="getCounters" return="org.apache.hadoop.mapreduce.v2.api.records.Counters"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setCounters"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="counters" type="org.apache.hadoop.mapreduce.v2.api.records.Counters"/>
-    </method>
-  </interface>
-  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse -->
   <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest -->
   <interface name="GetDelegationTokenRequest"    abstract="true"
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
     <method name="getRenewer" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </method>
     <method name="setRenewer"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="renewer" type="java.lang.String"/>
     </method>
   </interface>
   <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest -->
-  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse -->
-  <interface name="GetDelegationTokenResponse"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="setDelegationToken"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="clientDToken" type="org.apache.hadoop.yarn.api.records.Token"/>
-    </method>
-    <method name="getDelegationToken" return="org.apache.hadoop.yarn.api.records.Token"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </interface>
-  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse -->
-  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest -->
-  <interface name="GetDiagnosticsRequest"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="getTaskAttemptId" return="org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setTaskAttemptId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="taskAttemptId" type="org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId"/>
-    </method>
-  </interface>
-  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest -->
-  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse -->
-  <interface name="GetDiagnosticsResponse"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="getDiagnosticsList" return="java.util.List"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getDiagnostics" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="index" type="int"/>
-    </method>
-    <method name="getDiagnosticsCount" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="addAllDiagnostics"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="diagnostics" type="java.util.List"/>
-    </method>
-    <method name="addDiagnostics"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="diagnostic" type="java.lang.String"/>
-    </method>
-    <method name="removeDiagnostics"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="index" type="int"/>
-    </method>
-    <method name="clearDiagnostics"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </interface>
-  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse -->
-  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest -->
-  <interface name="GetJobReportRequest"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="getJobId" return="org.apache.hadoop.mapreduce.v2.api.records.JobId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setJobId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="jobId" type="org.apache.hadoop.mapreduce.v2.api.records.JobId"/>
-    </method>
-  </interface>
-  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest -->
-  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse -->
-  <interface name="GetJobReportResponse"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="getJobReport" return="org.apache.hadoop.mapreduce.v2.api.records.JobReport"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setJobReport"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="jobReport" type="org.apache.hadoop.mapreduce.v2.api.records.JobReport"/>
-    </method>
-  </interface>
-  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse -->
-  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest -->
-  <interface name="GetTaskAttemptCompletionEventsRequest"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="getJobId" return="org.apache.hadoop.mapreduce.v2.api.records.JobId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getFromEventId" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getMaxEvents" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setJobId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="jobId" type="org.apache.hadoop.mapreduce.v2.api.records.JobId"/>
-    </method>
-    <method name="setFromEventId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="id" type="int"/>
-    </method>
-    <method name="setMaxEvents"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="maxEvents" type="int"/>
-    </method>
-  </interface>
-  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest -->
-  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsResponse -->
-  <interface name="GetTaskAttemptCompletionEventsResponse"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="getCompletionEventList" return="java.util.List"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getCompletionEvent" return="org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="index" type="int"/>
-    </method>
-    <method name="getCompletionEventCount" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="addAllCompletionEvents"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="eventList" type="java.util.List"/>
-    </method>
-    <method name="addCompletionEvent"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="event" type="org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent"/>
-    </method>
-    <method name="removeCompletionEvent"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="index" type="int"/>
-    </method>
-    <method name="clearCompletionEvents"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </interface>
-  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsResponse -->
-  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest -->
-  <interface name="GetTaskAttemptReportRequest"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="getTaskAttemptId" return="org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setTaskAttemptId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="taskAttemptId" type="org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId"/>
-    </method>
-  </interface>
-  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest -->
-  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportResponse -->
-  <interface name="GetTaskAttemptReportResponse"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="getTaskAttemptReport" return="org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setTaskAttemptReport"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="taskAttemptReport" type="org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport"/>
-    </method>
-  </interface>
-  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportResponse -->
-  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest -->
-  <interface name="GetTaskReportRequest"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="getTaskId" return="org.apache.hadoop.mapreduce.v2.api.records.TaskId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setTaskId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="taskId" type="org.apache.hadoop.mapreduce.v2.api.records.TaskId"/>
-    </method>
-  </interface>
-  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest -->
-  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportResponse -->
-  <interface name="GetTaskReportResponse"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="getTaskReport" return="org.apache.hadoop.mapreduce.v2.api.records.TaskReport"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setTaskReport"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="taskReport" type="org.apache.hadoop.mapreduce.v2.api.records.TaskReport"/>
-    </method>
-  </interface>
-  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportResponse -->
-  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest -->
-  <interface name="GetTaskReportsRequest"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="getJobId" return="org.apache.hadoop.mapreduce.v2.api.records.JobId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getTaskType" return="org.apache.hadoop.mapreduce.v2.api.records.TaskType"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setJobId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="jobId" type="org.apache.hadoop.mapreduce.v2.api.records.JobId"/>
-    </method>
-    <method name="setTaskType"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="taskType" type="org.apache.hadoop.mapreduce.v2.api.records.TaskType"/>
-    </method>
-  </interface>
-  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest -->
-  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsResponse -->
-  <interface name="GetTaskReportsResponse"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="getTaskReportList" return="java.util.List"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getTaskReport" return="org.apache.hadoop.mapreduce.v2.api.records.TaskReport"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="index" type="int"/>
-    </method>
-    <method name="getTaskReportCount" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="addAllTaskReports"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="taskReports" type="java.util.List"/>
-    </method>
-    <method name="addTaskReport"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="taskReport" type="org.apache.hadoop.mapreduce.v2.api.records.TaskReport"/>
-    </method>
-    <method name="removeTaskReport"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="index" type="int"/>
-    </method>
-    <method name="clearTaskReports"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </interface>
-  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsResponse -->
-  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest -->
-  <interface name="KillJobRequest"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="getJobId" return="org.apache.hadoop.mapreduce.v2.api.records.JobId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setJobId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="jobId" type="org.apache.hadoop.mapreduce.v2.api.records.JobId"/>
-    </method>
-  </interface>
-  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest -->
-  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobResponse -->
-  <interface name="KillJobResponse"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-  </interface>
-  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobResponse -->
-  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest -->
-  <interface name="KillTaskAttemptRequest"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="getTaskAttemptId" return="org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setTaskAttemptId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="taskAttemptId" type="org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId"/>
-    </method>
-  </interface>
-  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest -->
-  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptResponse -->
-  <interface name="KillTaskAttemptResponse"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-  </interface>
-  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptResponse -->
-  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest -->
-  <interface name="KillTaskRequest"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="getTaskId" return="org.apache.hadoop.mapreduce.v2.api.records.TaskId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="setTaskId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="taskId" type="org.apache.hadoop.mapreduce.v2.api.records.TaskId"/>
-    </method>
-  </interface>
-  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest -->
-  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskResponse -->
-  <interface name="KillTaskResponse"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-  </interface>
-  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskResponse -->
   <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenRequest -->
   <interface name="RenewDelegationTokenRequest"    abstract="true"
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
     <method name="getDelegationToken" return="org.apache.hadoop.yarn.api.records.Token"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </method>
     <method name="setDelegationToken"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="dToken" type="org.apache.hadoop.yarn.api.records.Token"/>
@@ -778,12 +105,12 @@
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
     <method name="getNextExpirationTime" return="long"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </method>
     <method name="setNextExpirationTime"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="expTime" type="long"/>
@@ -797,40 +124,6 @@
 <package name="org.apache.hadoop.mapreduce.v2.security">
 </package>
 <package name="org.apache.hadoop.yarn.proto">
-  <!-- start interface org.apache.hadoop.yarn.proto.HSClientProtocol -->
-  <interface name="HSClientProtocol"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <doc>
-    <![CDATA[Fake protocol to differentiate the blocking interfaces in the
- security info class loaders.]]>
-    </doc>
-  </interface>
-  <!-- end interface org.apache.hadoop.yarn.proto.HSClientProtocol -->
-  <!-- start class org.apache.hadoop.yarn.proto.HSClientProtocol.HSClientProtocolService -->
-  <class name="HSClientProtocol.HSClientProtocolService" extends="java.lang.Object"
-    abstract="true"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="HSClientProtocol.HSClientProtocolService"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="newReflectiveBlockingService" return="com.google.protobuf.BlockingService"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="impl" type="org.apache.hadoop.yarn.proto.HSClientProtocol.HSClientProtocolService.BlockingInterface"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.yarn.proto.HSClientProtocol.HSClientProtocolService -->
-  <!-- start interface org.apache.hadoop.yarn.proto.HSClientProtocol.HSClientProtocolService.BlockingInterface -->
-  <interface name="HSClientProtocol.HSClientProtocolService.BlockingInterface"    abstract="true"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB"/>
-  </interface>
-  <!-- end interface org.apache.hadoop.yarn.proto.HSClientProtocol.HSClientProtocolService.BlockingInterface -->
 </package>
 
 </api>




[49/57] [abbrv] hadoop git commit: Revert "HDFS-10923. Make InstrumentedLock require ReentrantLock."

Posted by in...@apache.org.
Revert "HDFS-10923. Make InstrumentedLock require ReentrantLock."

This reverts commit c7ce6fdc20fe053f0bb3bcf900ffc0e1db6feee5.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fe9ebe20
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fe9ebe20
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fe9ebe20

Branch: refs/heads/HDFS-10467
Commit: fe9ebe20ab113567f0777c11cb48ce0d3ce587a8
Parents: c7ce6fd
Author: Arpit Agarwal <ar...@apache.org>
Authored: Fri Sep 30 23:11:51 2016 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Fri Sep 30 23:11:51 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdfs/InstrumentedLock.java    | 185 ++++++++++++++++++
 .../hadoop/hdfs/InstrumentedReentrantLock.java  | 195 -------------------
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |   4 +-
 .../hadoop/hdfs/TestInstrumentedLock.java       | 166 ++++++++++++++++
 .../hdfs/TestInstrumentedReentrantLock.java     | 177 -----------------
 5 files changed, 353 insertions(+), 374 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe9ebe20/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedLock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedLock.java
new file mode 100644
index 0000000..6279e95
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedLock.java
@@ -0,0 +1,185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Timer;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * This is a debugging class that can be used by callers to track
+ * whether a specific lock is being held for too long and periodically
+ * log a warning and stack trace, if so.
+ *
+ * The logged warnings are throttled so that logs are not spammed.
+ *
+ * A new instance of InstrumentedLock can be created for each object
+ * that needs to be instrumented.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class InstrumentedLock implements Lock {
+
+  private final Lock lock;
+  private final Log logger;
+  private final String name;
+  private final Timer clock;
+
+  /** Minimum gap between two lock warnings. */
+  private final long minLoggingGap;
+  /** Threshold for detecting long lock held time. */
+  private final long lockWarningThreshold;
+
+  // Tracking counters for lock statistics.
+  private volatile long lockAcquireTimestamp;
+  private final AtomicLong lastLogTimestamp;
+  private final AtomicLong warningsSuppressed = new AtomicLong(0);
+
+  /**
+   * Create an instrumented lock instance which logs a warning message
+   * when the lock held time is above the given threshold.
+   *
+   * @param name the identifier of the lock object
+   * @param logger this class does not have its own logger, will log to the
+   *               given logger instead
+   * @param minLoggingGapMs  the minimum time gap between two log messages,
+   *                         this is to avoid spamming the log
+   * @param lockWarningThresholdMs the time threshold to view lock held
+   *                               time as being "too long"
+   */
+  public InstrumentedLock(String name, Log logger, long minLoggingGapMs,
+      long lockWarningThresholdMs) {
+    this(name, logger, new ReentrantLock(),
+        minLoggingGapMs, lockWarningThresholdMs);
+  }
+
+  public InstrumentedLock(String name, Log logger, Lock lock,
+      long minLoggingGapMs, long lockWarningThresholdMs) {
+    this(name, logger, lock,
+        minLoggingGapMs, lockWarningThresholdMs, new Timer());
+  }
+
+  @VisibleForTesting
+  InstrumentedLock(String name, Log logger, Lock lock,
+      long minLoggingGapMs, long lockWarningThresholdMs, Timer clock) {
+    this.name = name;
+    this.lock = lock;
+    this.clock = clock;
+    this.logger = logger;
+    minLoggingGap = minLoggingGapMs;
+    lockWarningThreshold = lockWarningThresholdMs;
+    lastLogTimestamp = new AtomicLong(
+      clock.monotonicNow() - Math.max(minLoggingGap, lockWarningThreshold));
+  }
+
+  @Override
+  public void lock() {
+    lock.lock();
+    lockAcquireTimestamp = clock.monotonicNow();
+  }
+
+  @Override
+  public void lockInterruptibly() throws InterruptedException {
+    lock.lockInterruptibly();
+    lockAcquireTimestamp = clock.monotonicNow();
+  }
+
+  @Override
+  public boolean tryLock() {
+    if (lock.tryLock()) {
+      lockAcquireTimestamp = clock.monotonicNow();
+      return true;
+    }
+    return false;
+  }
+
+  @Override
+  public boolean tryLock(long time, TimeUnit unit) throws InterruptedException {
+    if (lock.tryLock(time, unit)) {
+      lockAcquireTimestamp = clock.monotonicNow();
+      return true;
+    }
+    return false;
+  }
+
+  @Override
+  public void unlock() {
+    long localLockReleaseTime = clock.monotonicNow();
+    long localLockAcquireTime = lockAcquireTimestamp;
+    lock.unlock();
+    check(localLockAcquireTime, localLockReleaseTime);
+  }
+
+  @Override
+  public Condition newCondition() {
+    return lock.newCondition();
+  }
+
+  @VisibleForTesting
+  void logWarning(long lockHeldTime, long suppressed) {
+    logger.warn(String.format("Lock held time above threshold: " +
+        "lock identifier: %s " +
+        "lockHeldTimeMs=%d ms. Suppressed %d lock warnings. " +
+        "The stack trace is: %s" ,
+        name, lockHeldTime, suppressed,
+        StringUtils.getStackTrace(Thread.currentThread())));
+  }
+
+  /**
+   * Log a warning if the lock was held for too long.
+   *
+   * Should be invoked by the caller immediately AFTER releasing the lock.
+   *
+   * @param acquireTime  - timestamp just after acquiring the lock.
+   * @param releaseTime - timestamp just before releasing the lock.
+   */
+  private void check(long acquireTime, long releaseTime) {
+    if (!logger.isWarnEnabled()) {
+      return;
+    }
+
+    final long lockHeldTime = releaseTime - acquireTime;
+    if (lockWarningThreshold - lockHeldTime < 0) {
+      long now;
+      long localLastLogTs;
+      do {
+        now = clock.monotonicNow();
+        localLastLogTs = lastLogTimestamp.get();
+        long deltaSinceLastLog = now - localLastLogTs;
+        // check whether the warning should be logged or suppressed
+        if (deltaSinceLastLog - minLoggingGap < 0) {
+          warningsSuppressed.incrementAndGet();
+          return;
+        }
+      } while (!lastLogTimestamp.compareAndSet(localLastLogTs, now));
+      long suppressed = warningsSuppressed.getAndSet(0);
+      logWarning(lockHeldTime, suppressed);
+    }
+  }
+
+}
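
For reference, a minimal usage sketch of the restored InstrumentedLock. The
constructor signature and warning semantics are taken from the file above;
the class name, thresholds, and sleep are illustrative only:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.hdfs.InstrumentedLock;

    public class InstrumentedLockExample {
      private static final Log LOG =
          LogFactory.getLog(InstrumentedLockExample.class);

      public static void main(String[] args) throws InterruptedException {
        // Warn when the lock is held longer than 100 ms; log at most one
        // warning every 300 ms and count the rest as suppressed.
        InstrumentedLock lock = new InstrumentedLock("example", LOG, 300, 100);
        lock.lock();
        try {
          Thread.sleep(200); // hold the lock past the 100 ms threshold
        } finally {
          lock.unlock();     // check() runs here and logs the throttled warning
        }
      }
    }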

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe9ebe20/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedReentrantLock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedReentrantLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedReentrantLock.java
deleted file mode 100644
index 010571e..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedReentrantLock.java
+++ /dev/null
@@ -1,195 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.Condition;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Timer;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * This is a debugging class that can be used by callers to track
- * whether a specific lock is being held for too long and periodically
- * log a warning and stack trace, if so.
- *
- * The logged warnings are throttled so that logs are not spammed.
- *
- * A new instance of InstrumentedReentrantLock can be created for each object
- * that needs to be instrumented.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class InstrumentedReentrantLock implements Lock {
-
-  @VisibleForTesting
-  final ReentrantLock lock;
-  private final Log logger;
-  private final String name;
-  private final Timer clock;
-
-  /** Minimum gap between two lock warnings. */
-  private final long minLoggingGap;
-  /** Threshold for detecting long lock held time. */
-  private final long lockWarningThreshold;
-
-  // Tracking counters for lock statistics.
-  private volatile long lockAcquireTimestamp;
-  private final AtomicLong lastLogTimestamp;
-  private final AtomicLong warningsSuppressed = new AtomicLong(0);
-
-  /**
-   * Create an instrumented lock instance which logs a warning message
-   * when the lock held time is above the given threshold.
-   *
-   * @param name the identifier of the lock object
-   * @param logger this class does not have its own logger, will log to the
-   *               given logger instead
-   * @param minLoggingGapMs  the minimum time gap between two log messages,
-   *                         this is to avoid spamming the log
-   * @param lockWarningThresholdMs the time threshold to view lock held
-   *                               time as being "too long"
-   */
-  public InstrumentedReentrantLock(
-      String name, Log logger, long minLoggingGapMs,
-      long lockWarningThresholdMs) {
-    this(name, logger, new ReentrantLock(),
-        minLoggingGapMs, lockWarningThresholdMs);
-  }
-
-  public InstrumentedReentrantLock(
-      String name, Log logger, ReentrantLock lock,
-      long minLoggingGapMs, long lockWarningThresholdMs) {
-    this(name, logger, lock,
-        minLoggingGapMs, lockWarningThresholdMs, new Timer());
-  }
-
-  @VisibleForTesting
-  InstrumentedReentrantLock(
-      String name, Log logger, ReentrantLock lock,
-      long minLoggingGapMs, long lockWarningThresholdMs, Timer clock) {
-    this.name = name;
-    this.lock = lock;
-    this.clock = clock;
-    this.logger = logger;
-    minLoggingGap = minLoggingGapMs;
-    lockWarningThreshold = lockWarningThresholdMs;
-    lastLogTimestamp = new AtomicLong(
-      clock.monotonicNow() - Math.max(minLoggingGap, lockWarningThreshold));
-  }
-
-  @Override
-  public void lock() {
-    lock.lock();
-    if (lock.getHoldCount() == 1) {
-      lockAcquireTimestamp = clock.monotonicNow();
-    }
-  }
-
-  @Override
-  public void lockInterruptibly() throws InterruptedException {
-    lock.lockInterruptibly();
-    if (lock.getHoldCount() == 1) {
-      lockAcquireTimestamp = clock.monotonicNow();
-    }
-  }
-
-  @Override
-  public boolean tryLock() {
-    if (lock.tryLock() && lock.getHoldCount() == 1) {
-      lockAcquireTimestamp = clock.monotonicNow();
-      return true;
-    }
-    return false;
-  }
-
-  @Override
-  public boolean tryLock(long time, TimeUnit unit) throws InterruptedException {
-    if (lock.tryLock(time, unit) && lock.getHoldCount() == 1) {
-      lockAcquireTimestamp = clock.monotonicNow();
-      return true;
-    }
-    return false;
-  }
-
-  @Override
-  public void unlock() {
-    final boolean needReport = (lock.getHoldCount() == 1);
-    long localLockReleaseTime = clock.monotonicNow();
-    long localLockAcquireTime = lockAcquireTimestamp;
-    lock.unlock();
-    if (needReport) {
-      check(localLockAcquireTime, localLockReleaseTime);
-    }
-  }
-
-  @Override
-  public Condition newCondition() {
-    return lock.newCondition();
-  }
-
-  @VisibleForTesting
-  void logWarning(long lockHeldTime, long suppressed) {
-    logger.warn(String.format("Lock held time above threshold: " +
-        "lock identifier: %s " +
-        "lockHeldTimeMs=%d ms. Suppressed %d lock warnings. " +
-        "The stack trace is: %s" ,
-        name, lockHeldTime, suppressed,
-        StringUtils.getStackTrace(Thread.currentThread())));
-  }
-
-  /**
-   * Log a warning if the lock was held for too long.
-   *
-   * Should be invoked by the caller immediately AFTER releasing the lock.
-   *
-   * @param acquireTime  - timestamp just after acquiring the lock.
-   * @param releaseTime - timestamp just before releasing the lock.
-   */
-  private void check(long acquireTime, long releaseTime) {
-    if (!logger.isWarnEnabled()) {
-      return;
-    }
-
-    final long lockHeldTime = releaseTime - acquireTime;
-    if (lockWarningThreshold - lockHeldTime < 0) {
-      long now;
-      long localLastLogTs;
-      do {
-        now = clock.monotonicNow();
-        localLastLogTs = lastLogTimestamp.get();
-        long deltaSinceLastLog = now - localLastLogTs;
-        // check whether the warning should be logged or suppressed
-        if (deltaSinceLastLog - minLoggingGap < 0) {
-          warningsSuppressed.incrementAndGet();
-          return;
-        }
-      } while (!lastLogTimestamp.compareAndSet(localLastLogTs, now));
-      long suppressed = warningsSuppressed.getAndSet(0);
-      logWarning(lockHeldTime, suppressed);
-    }
-  }
-}
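
The deleted InstrumentedReentrantLock differed from InstrumentedLock in one
important way: it consulted ReentrantLock.getHoldCount() so that only the
outermost acquire records the timestamp and only the outermost release runs
the hold-time check. A hypothetical sketch of the pitfall that guarded
against, using the non-reentrant instrumentation restored above (class name
and timings are illustrative):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.hdfs.InstrumentedLock;

    public class NestedLockingPitfall {
      private static final Log LOG =
          LogFactory.getLog(NestedLockingPitfall.class);

      public static void main(String[] args) throws InterruptedException {
        // No throttling gap; warn for holds above 300 ms.
        InstrumentedLock lock = new InstrumentedLock("nested", LOG, 0, 300);
        lock.lock();           // outer acquire records lockAcquireTimestamp
        try {
          Thread.sleep(500);   // outer hold already exceeds the threshold
          lock.lock();         // nested acquire overwrites the timestamp
          lock.unlock();       // inner release measures ~0 ms
        } finally {
          lock.unlock();       // outer release also measures ~0 ms: no warning
        }
      }
    }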

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe9ebe20/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index ab31f25..26a2e9f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -58,7 +58,7 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.ExtendedBlockId;
-import org.apache.hadoop.hdfs.InstrumentedReentrantLock;
+import org.apache.hadoop.hdfs.InstrumentedLock;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -266,7 +266,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     this.conf = conf;
     this.smallBufferSize = DFSUtilClient.getSmallBufferSize(conf);
     this.datasetLock = new AutoCloseableLock(
-        new InstrumentedReentrantLock(getClass().getName(), LOG,
+        new InstrumentedLock(getClass().getName(), LOG,
           conf.getTimeDuration(
             DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY,
             DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_DEFAULT,
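
FsDatasetImpl keeps taking the dataset lock through AutoCloseableLock, so the
swap of lock implementations is transparent to callers. A minimal sketch of
that acquisition pattern, with fixed thresholds standing in for the
DFS_LOCK_SUPPRESS_WARNING_INTERVAL configuration read above (class and method
names are illustrative):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.hdfs.InstrumentedLock;
    import org.apache.hadoop.util.AutoCloseableLock;

    public class DatasetLockSketch {
      private static final Log LOG = LogFactory.getLog(DatasetLockSketch.class);

      private final AutoCloseableLock datasetLock = new AutoCloseableLock(
          new InstrumentedLock(DatasetLockSketch.class.getName(), LOG,
              10000, 300));

      public void updateState() {
        // acquire() locks; close() at the end of the block unlocks, at which
        // point InstrumentedLock reports any over-threshold hold time.
        try (AutoCloseableLock lock = datasetLock.acquire()) {
          // mutate shared dataset state while holding the instrumented lock
        }
      }
    }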

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe9ebe20/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInstrumentedLock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInstrumentedLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInstrumentedLock.java
new file mode 100644
index 0000000..f470688
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInstrumentedLock.java
@@ -0,0 +1,166 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.Lock;
+
+import org.apache.hadoop.util.AutoCloseableLock;
+import org.apache.hadoop.util.Timer;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+import static org.mockito.Mockito.*;
+import static org.junit.Assert.*;
+
+/**
+ * A test class for InstrumentedLock.
+ */
+public class TestInstrumentedLock {
+
+  static final Log LOG = LogFactory.getLog(TestInstrumentedLock.class);
+
+  @Rule public TestName name = new TestName();
+
+  /**
+   * Test exclusive access of the lock.
+   * @throws Exception
+   */
+  @Test(timeout=10000)
+  public void testMultipleThread() throws Exception {
+    String testname = name.getMethodName();
+    InstrumentedLock lock = new InstrumentedLock(testname, LOG, 0, 300);
+    lock.lock();
+    try {
+      Thread competingThread = new Thread() {
+        @Override
+        public void run() {
+          assertFalse(lock.tryLock());
+        }
+      };
+      competingThread.start();
+      competingThread.join();
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  /**
+   * Test the correctness with try-with-resources syntax.
+   * @throws Exception
+   */
+  @Test(timeout=10000)
+  public void testTryWithResourceSyntax() throws Exception {
+    String testname = name.getMethodName();
+    final AtomicReference<Thread> lockThread = new AtomicReference<>(null);
+    Lock lock = new InstrumentedLock(testname, LOG, 0, 300) {
+      @Override
+      public void lock() {
+        super.lock();
+        lockThread.set(Thread.currentThread());
+      }
+      @Override
+      public void unlock() {
+        super.unlock();
+        lockThread.set(null);
+      }
+    };
+    AutoCloseableLock acl = new AutoCloseableLock(lock);
+    try (AutoCloseable localLock = acl.acquire()) {
+      assertEquals(acl, localLock);
+      Thread competingThread = new Thread() {
+        @Override
+        public void run() {
+          assertNotEquals(Thread.currentThread(), lockThread.get());
+          assertFalse(lock.tryLock());
+        }
+      };
+      competingThread.start();
+      competingThread.join();
+      assertEquals(Thread.currentThread(), lockThread.get());
+    }
+    assertNull(lockThread.get());
+  }
+
+  /**
+   * Test that the lock logs a warning when the lock held time is greater
+   * than the threshold and does not log a warning otherwise.
+   * @throws Exception
+   */
+  @Test(timeout=10000)
+  public void testLockLongHoldingReport() throws Exception {
+    String testname = name.getMethodName();
+    final AtomicLong time = new AtomicLong(0);
+    Timer mclock = new Timer() {
+      @Override
+      public long monotonicNow() {
+        return time.get();
+      }
+    };
+    Lock mlock = mock(Lock.class);
+
+    final AtomicLong wlogged = new AtomicLong(0);
+    final AtomicLong wsuppresed = new AtomicLong(0);
+    InstrumentedLock lock = new InstrumentedLock(
+        testname, LOG, mlock, 2000, 300, mclock) {
+      @Override
+      void logWarning(long lockHeldTime, long suppressed) {
+        wlogged.incrementAndGet();
+        wsuppresed.set(suppressed);
+      }
+    };
+
+    // do not log warning when the lock held time is short
+    lock.lock();   // t = 0
+    time.set(200);
+    lock.unlock(); // t = 200
+    assertEquals(0, wlogged.get());
+    assertEquals(0, wsuppresed.get());
+
+    lock.lock();   // t = 200
+    time.set(700);
+    lock.unlock(); // t = 700
+    assertEquals(1, wlogged.get());
+    assertEquals(0, wsuppresed.get());
+
+    // although the lock held time is greater than the threshold,
+    // suppress the log warning because of the minimum logging gap
+    // (not recorded in wsuppressed until the next log message)
+    lock.lock();   // t = 700
+    time.set(1100);
+    lock.unlock(); // t = 1100
+    assertEquals(1, wlogged.get());
+    assertEquals(0, wsuppresed.get());
+
+    // log a warning message when the lock held time is greater than the
+    // threshold and the logging time gap is satisfied. Also display the
+    // count of previously suppressed warnings.
+    time.set(2400);
+    lock.lock();   // t = 2400
+    time.set(2800);
+    lock.unlock(); // t = 2800
+    assertEquals(2, wlogged.get());
+    assertEquals(1, wsuppresed.get());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe9ebe20/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInstrumentedReentrantLock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInstrumentedReentrantLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInstrumentedReentrantLock.java
deleted file mode 100644
index 3374b8a..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInstrumentedReentrantLock.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-import org.apache.hadoop.util.AutoCloseableLock;
-import org.apache.hadoop.util.FakeTimer;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
-import static org.mockito.Mockito.*;
-import static org.junit.Assert.*;
-
-/**
- * A test class for {@link InstrumentedReentrantLock}.
- */
-public class TestInstrumentedReentrantLock {
-
-  static final Log LOG = LogFactory.getLog(TestInstrumentedReentrantLock.class);
-
-  @Rule public TestName name = new TestName();
-
-  /**
-   * Test exclusive access of the lock.
-   * @throws Exception
-   */
-  @Test(timeout=10000)
-  public void testMultipleThread() throws Exception {
-    String testname = name.getMethodName();
-    InstrumentedReentrantLock lock =
-        new InstrumentedReentrantLock(testname, LOG, 0, 300);
-    lock.lock();
-    try {
-      Thread competingThread = new Thread() {
-        @Override
-        public void run() {
-          assertFalse(lock.tryLock());
-        }
-      };
-      competingThread.start();
-      competingThread.join();
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  /**
-   * Test the correctness with try-with-resources syntax.
-   * @throws Exception
-   */
-  @Test(timeout=10000)
-  public void testTryWithResourceSyntax() throws Exception {
-    String testname = name.getMethodName();
-    final AtomicReference<Thread> lockThread = new AtomicReference<>(null);
-    Lock lock = new InstrumentedReentrantLock(testname, LOG, 0, 300) {
-      @Override
-      public void lock() {
-        super.lock();
-        lockThread.set(Thread.currentThread());
-      }
-      @Override
-      public void unlock() {
-        super.unlock();
-        lockThread.set(null);
-      }
-    };
-    AutoCloseableLock acl = new AutoCloseableLock(lock);
-    try (AutoCloseable localLock = acl.acquire()) {
-      assertEquals(acl, localLock);
-      Thread competingThread = new Thread() {
-        @Override
-        public void run() {
-          assertNotEquals(Thread.currentThread(), lockThread.get());
-          assertFalse(lock.tryLock());
-        }
-      };
-      competingThread.start();
-      competingThread.join();
-      assertEquals(Thread.currentThread(), lockThread.get());
-    }
-    assertNull(lockThread.get());
-  }
-
-  /**
-   * Test that the lock logs a warning when the lock held time is greater
-   * than the threshold and does not log a warning otherwise.
-   * @throws Exception
-   */
-  @Test(timeout=10000)
-  public void testLockLongHoldingReport() throws Exception {
-    String testname = name.getMethodName();
-    FakeTimer mclock = new FakeTimer();
-    final int warningThreshold = 500;
-    final int minLoggingGap = warningThreshold * 10;
-
-    final AtomicLong wlogged = new AtomicLong(0);
-    final AtomicLong wsuppresed = new AtomicLong(0);
-    InstrumentedReentrantLock lock = new InstrumentedReentrantLock(
-        testname, LOG, new ReentrantLock(), minLoggingGap,
-        warningThreshold, mclock) {
-      @Override
-      void logWarning(long lockHeldTime, long suppressed) {
-        wlogged.incrementAndGet();
-        wsuppresed.set(suppressed);
-      }
-    };
-
-    // do not log warning when the lock held time is <= warningThreshold.
-    lock.lock();
-    mclock.advance(warningThreshold);
-    lock.unlock();
-    assertEquals(0, wlogged.get());
-    assertEquals(0, wsuppresed.get());
-
-    // log a warning when the lock held time exceeds the threshold.
-    lock.lock();
-    mclock.advance(warningThreshold + 1);
-    assertEquals(1, lock.lock.getHoldCount());
-    lock.unlock();
-    assertEquals(1, wlogged.get());
-    assertEquals(0, wsuppresed.get());
-
-    // although the lock held time is greater than the threshold,
-    // suppress the log warning because of the minimum logging gap
-    // (not recorded in wsuppressed until the next log message)
-    lock.lock();
-    mclock.advance(warningThreshold + 1);
-    lock.unlock();
-    assertEquals(1, wlogged.get());
-    assertEquals(0, wsuppresed.get());
-
-    // log a warning message when the lock held time is greater than the
-    // threshold and the logging time gap is satisfied. Also display the
-    // count of previously suppressed warnings.
-    lock.lock();
-    mclock.advance(minLoggingGap + 1);
-    lock.unlock();
-    assertEquals(2, wlogged.get());
-    assertEquals(1, wsuppresed.get());
-
-    // Ensure that nested acquisitions do not log.
-    wlogged.set(0);
-    wsuppresed.set(0);
-    lock.lock();
-    lock.lock();
-    mclock.advance(minLoggingGap + 1);
-    lock.unlock();
-    assertEquals(0, wlogged.get());    // No warnings on nested release.
-    assertEquals(0, wsuppresed.get());
-    lock.unlock();
-    assertEquals(1, wlogged.get());    // Last release immediately logs.
-    assertEquals(0, wsuppresed.get());
-  }
-}




[57/57] [abbrv] hadoop git commit: YARN-4767. Network issues can cause persistent RM UI outage. (Daniel Templeton via kasha)

Posted by in...@apache.org.
YARN-4767. Network issues can cause persistent RM UI outage. (Daniel Templeton via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/736d33cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/736d33cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/736d33cd

Branch: refs/heads/HDFS-10467
Commit: 736d33cddd88a0cec925a451940b2523999a9c51
Parents: 607705c
Author: Karthik Kambatla <ka...@cloudera.com>
Authored: Mon Oct 3 14:35:57 2016 -0700
Committer: Karthik Kambatla <ka...@cloudera.com>
Committed: Mon Oct 3 14:35:57 2016 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/webapp/YarnWebParams.java       |   1 +
 .../resourcemanager/webapp/ErrorBlock.java      |  39 +++
 .../server/resourcemanager/webapp/RMWebApp.java |   1 +
 .../webapp/RedirectionErrorPage.java            |  47 ++++
 .../resourcemanager/webapp/RmController.java    |   4 +
 .../webapp/TestRedirectionErrorPage.java        |  68 +++++
 .../server/webproxy/WebAppProxyServlet.java     | 274 ++++++++++++++-----
 .../server/webproxy/amfilter/AmIpFilter.java    |  64 +++--
 .../server/webproxy/TestWebAppProxyServlet.java |  24 +-
 .../server/webproxy/amfilter/TestAmFilter.java  |  29 +-
 10 files changed, 454 insertions(+), 97 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/736d33cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
index a34273c..ee9100f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnWebParams.java
@@ -41,4 +41,5 @@ public interface YarnWebParams {
   String NODE_LABEL = "node.label";
   String WEB_UI_TYPE = "web.ui.type";
   String NEXT_REFRESH_INTERVAL = "next.refresh.interval";
+  String ERROR_MESSAGE = "error.message";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/736d33cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ErrorBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ErrorBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ErrorBlock.java
new file mode 100644
index 0000000..963e53f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ErrorBlock.java
@@ -0,0 +1,39 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+
+import com.google.inject.Inject;
+import static org.apache.hadoop.yarn.webapp.YarnWebParams.ERROR_MESSAGE;
+
+/**
+ * This class is used to display an error message to the user in the UI.
+ */
+public class ErrorBlock extends HtmlBlock {
+  @Inject
+  ErrorBlock(ViewContext ctx) {
+    super(ctx);
+  }
+
+  @Override
+  protected void render(Block html) {
+    html.p()._($(ERROR_MESSAGE))._();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/736d33cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
index 106065b..2d7139f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
@@ -71,6 +71,7 @@ public class RMWebApp extends WebApp implements YarnWebParams {
     route("/errors-and-warnings", RmController.class, "errorsAndWarnings");
     route(pajoin("/logaggregationstatus", APPLICATION_ID),
       RmController.class, "logaggregationstatus");
+    route(pajoin("/failure", APPLICATION_ID), RmController.class, "failure");
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/736d33cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RedirectionErrorPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RedirectionErrorPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RedirectionErrorPage.java
new file mode 100644
index 0000000..beb0cca
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RedirectionErrorPage.java
@@ -0,0 +1,47 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
+
+/**
+ * This class is used to display a message that the proxy request failed
+ * because of a redirection issue.
+ */
+public class RedirectionErrorPage extends RmView {
+  @Override protected void preHead(Page.HTML<_> html) {
+    String aid = $(YarnWebParams.APPLICATION_ID);
+
+    commonPreHead(html);
+    set(YarnWebParams.ERROR_MESSAGE,
+        "The application master for " + aid + " redirected the "
+        + "resource manager's web proxy's request back to the web proxy, "
+        + "which means your request to view the application master's web UI "
+        + "cannot be fulfilled. The typical cause for this error is a "
+        + "network misconfiguration that causes the resource manager's web "
+        + "proxy host to resolve to an unexpected IP address on the "
+        + "application master host. Please contact your cluster "
+        + "administrator to resolve the issue.");
+  }
+
+  @Override protected Class<? extends SubView> content() {
+    return ErrorBlock.class;
+  }
+}
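
ErrorBlock renders whatever is bound to ERROR_MESSAGE as a single paragraph,
so any view can reuse it by setting the parameter and naming the block as its
content, exactly as RedirectionErrorPage does above. A hypothetical second
view following the same pattern (StaticErrorPage and its message are
illustrative only):

    import org.apache.hadoop.yarn.webapp.SubView;
    import org.apache.hadoop.yarn.webapp.YarnWebParams;

    public class StaticErrorPage extends RmView {
      @Override protected void preHead(Page.HTML<_> html) {
        commonPreHead(html);
        set(YarnWebParams.ERROR_MESSAGE, "Something went wrong.");
      }

      @Override protected Class<? extends SubView> content() {
        return ErrorBlock.class;
      }
    }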

http://git-wip-us.apache.org/repos/asf/hadoop/blob/736d33cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java
index b124d75..a291e05 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java
@@ -62,6 +62,10 @@ public class RmController extends Controller {
     render(ContainerPage.class);
   }
 
+  public void failure() {
+    render(RedirectionErrorPage.class);
+  }
+
   public void nodes() {
     render(NodesPage.class);
   }
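
With the "/failure" route added in RMWebApp and this controller action, a
request to /cluster/failure/{appid} renders the redirection error page. A
hedged sketch of how the proxy side can send a browser there; ProxyUtils.
sendRedirect, StringHelper.pjoin, and failurePageUrlBase all appear in the
WebAppProxyServlet diff below, while the wrapper method itself is
illustrative:

    // Illustrative only: redirect a looping AM request to the RM failure page.
    // failurePageUrlBase is built as pjoin(rmWebAppUrl, "cluster", "failure")
    // in the WebAppProxyServlet constructor below.
    private void redirectToFailurePage(ApplicationId id,
        HttpServletRequest req, HttpServletResponse resp) throws IOException {
      ProxyUtils.sendRedirect(req, resp,
          StringHelper.pjoin(failurePageUrlBase, id.toString()));
    }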

http://git-wip-us.apache.org/repos/asf/hadoop/blob/736d33cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRedirectionErrorPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRedirectionErrorPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRedirectionErrorPage.java
new file mode 100644
index 0000000..408dc9b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRedirectionErrorPage.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+
+import java.io.IOException;
+
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
+import org.apache.hadoop.yarn.webapp.test.WebAppTests;
+import org.junit.Test;
+
+import com.google.inject.Binder;
+import com.google.inject.Injector;
+import com.google.inject.Module;
+
+/**
+ * This class tests the RedirectionErrorPage.
+ */
+public class TestRedirectionErrorPage {
+  @Test
+  public void testAppBlockRenderWithNullCurrentAppAttempt() throws Exception {
+    ApplicationId appId = ApplicationId.newInstance(1234L, 0);
+    Injector injector;
+
+    // initialize RM Context, and create RMApp, without creating RMAppAttempt
+    final RMContext rmContext = TestRMWebApp.mockRMContext(15, 1, 2, 8);
+
+    injector = WebAppTests.createMockInjector(RMContext.class, rmContext,
+        new Module() {
+          @Override
+          public void configure(Binder binder) {
+            try {
+              ResourceManager rm = TestRMWebApp.mockRm(rmContext);
+              binder.bind(ResourceManager.class).toInstance(rm);
+              binder.bind(ApplicationBaseProtocol.class).toInstance(
+                  rm.getClientRMService());
+            } catch (IOException e) {
+              throw new IllegalStateException(e);
+            }
+          }
+        });
+
+    ErrorBlock instance = injector.getInstance(ErrorBlock.class);
+    instance.set(YarnWebParams.APPLICATION_ID, appId.toString());
+    instance.set(YarnWebParams.ERROR_MESSAGE, "This is an error");
+    instance.render();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/736d33cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
index 0b621aa..b32ee30 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
@@ -26,6 +26,7 @@ import java.io.ObjectInputStream;
 import java.io.OutputStream;
 import java.io.PrintWriter;
 import java.net.InetAddress;
+import java.net.SocketException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URLEncoder;
@@ -42,8 +43,10 @@ import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import javax.ws.rs.core.UriBuilder;
+import javax.ws.rs.core.UriBuilderException;
 
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -76,7 +79,8 @@ public class WebAppProxyServlet extends HttpServlet {
   private static final long serialVersionUID = 1L;
   private static final Logger LOG = LoggerFactory.getLogger(
       WebAppProxyServlet.class);
-  private static final Set<String> passThroughHeaders = 
+  private static final String REDIRECT = "/redirect";
+  private static final Set<String> PASS_THROUGH_HEADERS =
     new HashSet<>(Arrays.asList(
         "User-Agent",
         "Accept",
@@ -93,6 +97,7 @@ public class WebAppProxyServlet extends HttpServlet {
   private transient List<TrackingUriPlugin> trackingUriPlugins;
   private final String rmAppPageUrlBase;
   private final String ahsAppPageUrlBase;
+  private final String failurePageUrlBase;
   private transient YarnConfiguration conf;
 
   /**
@@ -126,11 +131,16 @@ public class WebAppProxyServlet extends HttpServlet {
     this.trackingUriPlugins =
         conf.getInstances(YarnConfiguration.YARN_TRACKING_URL_GENERATOR,
             TrackingUriPlugin.class);
-    this.rmAppPageUrlBase = StringHelper.pjoin(
-        WebAppUtils.getResolvedRMWebAppURLWithScheme(conf), "cluster", "app");
-    this.ahsAppPageUrlBase = StringHelper.pjoin(
-        WebAppUtils.getHttpSchemePrefix(conf) + WebAppUtils
-        .getAHSWebAppURLWithoutScheme(conf), "applicationhistory", "app");
+    this.rmAppPageUrlBase =
+        StringHelper.pjoin(WebAppUtils.getResolvedRMWebAppURLWithScheme(conf),
+          "cluster", "app");
+    this.failurePageUrlBase =
+        StringHelper.pjoin(WebAppUtils.getResolvedRMWebAppURLWithScheme(conf),
+          "cluster", "failure");
+    this.ahsAppPageUrlBase =
+        StringHelper.pjoin(WebAppUtils.getHttpSchemePrefix(conf)
+          + WebAppUtils.getAHSWebAppURLWithoutScheme(conf),
+          "applicationhistory", "app");
   }
 
   /**
@@ -220,9 +230,9 @@ public class WebAppProxyServlet extends HttpServlet {
 
     @SuppressWarnings("unchecked")
     Enumeration<String> names = req.getHeaderNames();
-    while(names.hasMoreElements()) {
+    while (names.hasMoreElements()) {
       String name = names.nextElement();
-      if(passThroughHeaders.contains(name)) {
+      if (PASS_THROUGH_HEADERS.contains(name)) {
         String value = req.getHeader(name);
         if (LOG.isDebugEnabled()) {
           LOG.debug("REQ HEADER: {} : {}", name, value);
@@ -312,30 +322,49 @@ public class WebAppProxyServlet extends HttpServlet {
       boolean userWasWarned = false;
       boolean userApproved = Boolean.parseBoolean(userApprovedParamS);
       boolean securityEnabled = isSecurityEnabled();
+      boolean isRedirect = false;
+      String pathInfo = req.getPathInfo();
       final String remoteUser = req.getRemoteUser();
-      final String pathInfo = req.getPathInfo();
 
       String[] parts = null;
+
       if (pathInfo != null) {
+        // If there's a redirect, strip the redirect so that the path can be
+        // parsed
+        if (pathInfo.startsWith(REDIRECT)) {
+          pathInfo = pathInfo.substring(REDIRECT.length());
+          isRedirect = true;
+        }
+
         parts = pathInfo.split("/", 3);
       }
-      if(parts == null || parts.length < 2) {
+
+      if ((parts == null) || (parts.length < 2)) {
         LOG.warn("{} gave an invalid proxy path {}", remoteUser,  pathInfo);
         notFound(resp, "Your path appears to be formatted incorrectly.");
         return;
       }
+
       //parts[0] is empty because path info always starts with a /
       String appId = parts[1];
       String rest = parts.length > 2 ? parts[2] : "";
       ApplicationId id = Apps.toAppID(appId);
-      if(id == null) {
+
+      if (id == null) {
         LOG.warn("{} attempting to access {} that is invalid",
             remoteUser, appId);
         notFound(resp, appId + " appears to be formatted incorrectly.");
         return;
       }
-      
-      if(securityEnabled) {
+
+      // If this call is from an AM redirect, we need to be careful about how
+      // we handle it.  If this method returns true, it means the method
+      // already redirected the response, so we can just return.
+      if (isRedirect && handleRedirect(appId, req, resp)) {
+        return;
+      }
+
+      if (securityEnabled) {
         String cookieName = getCheckCookieName(id); 
         Cookie[] cookies = req.getCookies();
         if (cookies != null) {
@@ -351,22 +380,21 @@ public class WebAppProxyServlet extends HttpServlet {
       
       boolean checkUser = securityEnabled && (!userWasWarned || !userApproved);
 
-      FetchedAppReport fetchedAppReport = null;
-      ApplicationReport applicationReport = null;
+      FetchedAppReport fetchedAppReport;
+
       try {
-        fetchedAppReport = getApplicationReport(id);
-        if (fetchedAppReport != null) {
-          if (fetchedAppReport.getAppReportSource() != AppReportSource.RM &&
-              fetchedAppReport.getAppReportSource() != AppReportSource.AHS) {
-            throw new UnsupportedOperationException("Application report not "
-                + "fetched from RM or history server.");
-          }
-          applicationReport = fetchedAppReport.getApplicationReport();
-        }
+        fetchedAppReport = getFetchedAppReport(id);
       } catch (ApplicationNotFoundException e) {
-        applicationReport = null;
+        fetchedAppReport = null;
       }
-      if(applicationReport == null) {
+
+      ApplicationReport applicationReport = null;
+
+      if (fetchedAppReport != null) {
+        applicationReport = fetchedAppReport.getApplicationReport();
+      }
+
+      if (applicationReport == null) {
         LOG.warn("{} attempting to access {} that was not found",
             remoteUser, id);
 
@@ -382,57 +410,31 @@ public class WebAppProxyServlet extends HttpServlet {
             "in RM or history server");
         return;
       }
-      String original = applicationReport.getOriginalTrackingUrl();
-      URI trackingUri;
-      if (original == null || original.equals("N/A") || original.equals("")) {
-        if (fetchedAppReport.getAppReportSource() == AppReportSource.RM) {
-          // fallback to ResourceManager's app page if no tracking URI provided
-          // and Application Report was fetched from RM
-          LOG.debug("Original tracking url is '{}'. Redirecting to RM app page",
-              original == null? "NULL" : original);
-          ProxyUtils.sendRedirect(req, resp,
-              StringHelper.pjoin(rmAppPageUrlBase, id.toString()));
-        } else if (fetchedAppReport.getAppReportSource()
-              == AppReportSource.AHS) {
-          // fallback to Application History Server app page if the application
-          // report was fetched from AHS
-          LOG.debug("Original tracking url is '{}'. Redirecting to AHS app page"
-              , original == null? "NULL" : original);
-          ProxyUtils.sendRedirect(req, resp,
-              StringHelper.pjoin(ahsAppPageUrlBase, id.toString()));
-        }
+
+      URI trackingUri = getTrackingUri(req, resp, id,
+          applicationReport.getOriginalTrackingUrl(),
+          fetchedAppReport.getAppReportSource());
+
+      // If the tracking URI is null, there was a redirect, so just return.
+      if (trackingUri == null) {
         return;
-      } else {
-        if (ProxyUriUtils.getSchemeFromUrl(original).isEmpty()) {
-          trackingUri = ProxyUriUtils.getUriFromAMUrl(
-              WebAppUtils.getHttpSchemePrefix(conf), original);
-        } else {
-          trackingUri = new URI(original);
-        }
       }
 
       String runningUser = applicationReport.getUser();
-      if(checkUser && !runningUser.equals(remoteUser)) {
+
+      if (checkUser && !runningUser.equals(remoteUser)) {
         LOG.info("Asking {} if they want to connect to the "
             + "app master GUI of {} owned by {}",
             remoteUser, appId, runningUser);
         warnUserPage(resp, ProxyUriUtils.getPathAndQuery(id, rest, 
             req.getQueryString(), true), runningUser, id);
+
         return;
       }
 
       // Append the user-provided path and query parameter to the original
       // tracking url.
-      UriBuilder builder = UriBuilder.fromUri(trackingUri);
-      String queryString = req.getQueryString();
-      if (queryString != null) {
-        List<NameValuePair> queryPairs =
-            URLEncodedUtils.parse(queryString, null);
-        for (NameValuePair pair : queryPairs) {
-          builder.queryParam(pair.getName(), pair.getValue());
-        }
-      }
-      URI toFetch = builder.path(rest).build();
+      URI toFetch = buildTrackingUrl(trackingUri, req, rest);
 
       LOG.info("{} is accessing unchecked {}"
           + " which is the app master GUI of {} owned by {}",
@@ -459,6 +461,152 @@ public class WebAppProxyServlet extends HttpServlet {
   }
 
   /**
+   * Return a URL based on the {@code trackingUri} that includes the
+   * user-provided path and query parameters.
+   *
+   * @param trackingUri the base tracking URI
+   * @param req the service request
+   * @param rest the user-provided path
+   * @return the new tracking URI
+   * @throws UriBuilderException if there's an error building the URL
+   */
+  private URI buildTrackingUrl(URI trackingUri, final HttpServletRequest req,
+      String rest) throws UriBuilderException {
+    UriBuilder builder = UriBuilder.fromUri(trackingUri);
+    String queryString = req.getQueryString();
+
+    if (queryString != null) {
+      List<NameValuePair> queryPairs = URLEncodedUtils.parse(queryString, null);
+
+      for (NameValuePair pair : queryPairs) {
+        builder.queryParam(pair.getName(), pair.getValue());
+      }
+    }
+
+    return builder.path(rest).build();
+  }
+
+  /**
+   * Locate the tracking URI for the application based on the reported tracking
+   * URI. If the reported URI is invalid, redirect to the history server or RM
+   * app page.  If the URI is valid, convert it into a usable URI object with
+   * a scheme.  If the returned URI is null, that means there was a redirect.
+   *
+   * @param req the servlet request for redirects
+   * @param resp the servlet response for redirects
+   * @param id the application ID
+   * @param originalUri the reported tracking URI
+   * @param appReportSource the source of the application report
+   * @return a valid tracking URI or null if redirected instead
+   * @throws IOException thrown if the redirect fails
+   * @throws URISyntaxException if the tracking URI is invalid
+   */
+  private URI getTrackingUri(HttpServletRequest req, HttpServletResponse resp,
+      ApplicationId id, String originalUri, AppReportSource appReportSource)
+      throws IOException, URISyntaxException {
+    URI trackingUri = null;
+
+    if ((originalUri == null) ||
+        originalUri.equals("N/A") ||
+        originalUri.equals("")) {
+      if (appReportSource == AppReportSource.RM) {
+        // fallback to ResourceManager's app page if no tracking URI provided
+        // and Application Report was fetched from RM
+        LOG.debug("Original tracking url is '{}'. Redirecting to RM app page",
+            originalUri == null ? "NULL" : originalUri);
+        ProxyUtils.sendRedirect(req, resp,
+            StringHelper.pjoin(rmAppPageUrlBase, id.toString()));
+      } else if (appReportSource == AppReportSource.AHS) {
+        // fallback to Application History Server app page if the application
+        // report was fetched from AHS
+        LOG.debug("Original tracking url is '{}'. Redirecting to AHS app page",
+            originalUri == null ? "NULL" : originalUri);
+        ProxyUtils.sendRedirect(req, resp,
+            StringHelper.pjoin(ahsAppPageUrlBase, id.toString()));
+      }
+    } else if (ProxyUriUtils.getSchemeFromUrl(originalUri).isEmpty()) {
+      trackingUri =
+          ProxyUriUtils.getUriFromAMUrl(WebAppUtils.getHttpSchemePrefix(conf),
+            originalUri);
+    } else {
+      trackingUri = new URI(originalUri);
+    }
+
+    return trackingUri;
+  }
+
+  /**
+   * Fetch the application report from the RM.
+   *
+   * @param id the app ID
+   * @return the application report
+   * @throws IOException if the request to the RM fails
+   * @throws YarnException if the request to the RM fails
+   */
+  private FetchedAppReport getFetchedAppReport(ApplicationId id)
+      throws IOException, YarnException {
+    FetchedAppReport fetchedAppReport = getApplicationReport(id);
+
+    if (fetchedAppReport != null) {
+      if ((fetchedAppReport.getAppReportSource() != AppReportSource.RM) &&
+          (fetchedAppReport.getAppReportSource() != AppReportSource.AHS)) {
+        throw new UnsupportedOperationException("Application report not "
+            + "fetched from RM or history server.");
+      }
+    }
+
+    return fetchedAppReport;
+  }
+
+  /**
+   * Check whether the request is a redirect from the AM and handle it
+   * appropriately. This check exists to prevent the AM from forwarding back to
+   * the web proxy, which would contact the AM again, which would forward
+   * again... If this method returns true, there was a redirect, and
+   * it was handled by redirecting the current request to an error page.
+   *
+   * @param id the app id
+   * @param req the request object
+   * @param resp the response object
+   * @return whether there was a redirect
+   * @throws IOException if a redirect fails
+   */
+  private boolean handleRedirect(String id, HttpServletRequest req,
+      HttpServletResponse resp) throws IOException {
+    // If this isn't a redirect, we don't care.
+    boolean badRedirect = false;
+
+    // If this is a redirect, check if we're calling ourselves.
+    try {
+      badRedirect = NetUtils.getLocalInetAddress(req.getRemoteHost()) != null;
+    } catch (SocketException ex) {
+      // This exception means we can't determine the calling host. Odds are
+      // that means it's not us.  Let it go and hope it works out better next
+      // time.
+    }
+
+    // If the proxy tries to call itself, it gets into an endless
+    // loop and consumes all available handler threads until the
+    // application completes.  Redirect to the app page with a flag
+    // that tells it to print an appropriate error message.
+    if (badRedirect) {
+      LOG.error("The AM's web app redirected the RM web proxy's request back "
+          + "to the web proxy. The typical cause is that the AM is resolving "
+          + "the RM's address as something other than what it expects. Check "
+          + "your network configuration and the value of the "
+          + "yarn.web-proxy.address property. Once the host resolution issue "
+          + "has been resolved, you will likely need to delete the "
+          + "misbehaving application, " + id);
+      String redirect = StringHelper.pjoin(failurePageUrlBase, id);
+      LOG.error("REDIRECT: sending redirect to " + redirect);
+      ProxyUtils.sendRedirect(req, resp, redirect);
+    }
+
+    return badRedirect;
+  }
+
+  /**
    * This method is used by Java object deserialization, to fill in the
    * transient {@link #trackingUriPlugins} field.
    * See {@link ObjectInputStream#defaultReadObject()}

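For readers following the new handleRedirect() logic: the loop check reduces
to asking whether the remote host resolves to a local address. A minimal
standalone sketch of that check, using only the NetUtils call the servlet
itself relies on (the class and method names here are illustrative):

    import java.net.SocketException;
    import org.apache.hadoop.net.NetUtils;

    public final class RedirectLoopCheck {
      private RedirectLoopCheck() {
      }

      /**
       * Returns true when remoteHost resolves to an address bound on this
       * machine, i.e. the proxy would effectively be calling itself.
       */
      public static boolean isSelfCall(String remoteHost) {
        try {
          // getLocalInetAddress() returns non-null only for local addresses.
          return NetUtils.getLocalInetAddress(remoteHost) != null;
        } catch (SocketException e) {
          // Cannot determine the calling host; assume it is not us, exactly
          // as the servlet does.
          return false;
        }
      }
    }
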
http://git-wip-us.apache.org/repos/asf/hadoop/blob/736d33cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
index e7617f0..fe6fc32 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
@@ -59,8 +59,9 @@ public class AmIpFilter implements Filter {
   public static final String PROXY_HOSTS_DELIMITER = ",";
   public static final String PROXY_URI_BASES = "PROXY_URI_BASES";
   public static final String PROXY_URI_BASES_DELIMITER = ",";
+  private static final String PROXY_PATH = "/proxy";
   //update the proxy IP list about every 5 min
-  private static final long updateInterval = 5 * 60 * 1000;
+  private static final long UPDATE_INTERVAL = 5 * 60 * 1000;
 
   private String[] proxyHosts;
   private Set<String> proxyAddresses = null;
@@ -96,7 +97,7 @@ public class AmIpFilter implements Filter {
   protected Set<String> getProxyAddresses() throws ServletException {
     long now = System.currentTimeMillis();
     synchronized(this) {
-      if(proxyAddresses == null || (lastUpdate + updateInterval) >= now) {
+      if (proxyAddresses == null || (lastUpdate + UPDATE_INTERVAL) >= now) {
         proxyAddresses = new HashSet<>();
         for (String proxyHost : proxyHosts) {
           try {
@@ -131,37 +132,52 @@ public class AmIpFilter implements Filter {
 
     HttpServletRequest httpReq = (HttpServletRequest)req;
     HttpServletResponse httpResp = (HttpServletResponse)resp;
+
     if (LOG.isDebugEnabled()) {
       LOG.debug("Remote address for request is: {}", httpReq.getRemoteAddr());
     }
+
     if (!getProxyAddresses().contains(httpReq.getRemoteAddr())) {
-      String redirectUrl = findRedirectUrl();
-      String target = redirectUrl + httpReq.getRequestURI();
-      ProxyUtils.sendRedirect(httpReq,  httpResp,  target);
-      return;
-    }
+      StringBuilder redirect = new StringBuilder(findRedirectUrl());
+
+      redirect.append(httpReq.getRequestURI());
+
+      int insertPoint = redirect.indexOf(PROXY_PATH);
+
+      if (insertPoint >= 0) {
+        // Add /redirect as the second component of the path so that the RM web
+        // proxy knows that this request was a redirect.
+        insertPoint += PROXY_PATH.length();
+        redirect.insert(insertPoint, "/redirect");
+      }
 
-    String user = null;
+      ProxyUtils.sendRedirect(httpReq, httpResp, redirect.toString());
+    } else {
+      String user = null;
 
-    if (httpReq.getCookies() != null) {
-      for(Cookie c: httpReq.getCookies()) {
-        if(WebAppProxyServlet.PROXY_USER_COOKIE_NAME.equals(c.getName())){
-          user = c.getValue();
-          break;
+      if (httpReq.getCookies() != null) {
+        for(Cookie c: httpReq.getCookies()) {
+          if(WebAppProxyServlet.PROXY_USER_COOKIE_NAME.equals(c.getName())){
+            user = c.getValue();
+            break;
+          }
         }
       }
-    }
-    if (user == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Could not find " + WebAppProxyServlet.PROXY_USER_COOKIE_NAME
-                 + " cookie, so user will not be set");
+      if (user == null) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Could not find "
+              + WebAppProxyServlet.PROXY_USER_COOKIE_NAME
+              + " cookie, so user will not be set");
+        }
+
+        chain.doFilter(req, resp);
+      } else {
+        AmIpPrincipal principal = new AmIpPrincipal(user);
+        ServletRequest requestWrapper = new AmIpServletRequestWrapper(httpReq,
+            principal);
+
+        chain.doFilter(requestWrapper, resp);
       }
-      chain.doFilter(req, resp);
-    } else {
-      final AmIpPrincipal principal = new AmIpPrincipal(user);
-      ServletRequest requestWrapper = new AmIpServletRequestWrapper(httpReq,
-          principal);
-      chain.doFilter(requestWrapper, resp);
     }
   }
 

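The interesting part of the new doFilter() branch is the path rewrite that
marks a redirected request. A small sketch of that string manipulation in
isolation (the host and application id are illustrative):

    String proxyPath = "/proxy";                      // PROXY_PATH in the filter
    StringBuilder redirect =
        new StringBuilder("http://rm-host:8088");     // illustrative redirect base
    redirect.append("/proxy/application_00_0/ws/v1"); // illustrative request URI
    int insertPoint = redirect.indexOf(proxyPath);
    if (insertPoint >= 0) {
      // Splice "/redirect" in right after the "/proxy" component so the RM
      // web proxy recognizes the request as a redirect.
      insertPoint += proxyPath.length();
      redirect.insert(insertPoint, "/redirect");
    }
    // redirect: http://rm-host:8088/proxy/redirect/application_00_0/ws/v1
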
http://git-wip-us.apache.org/repos/asf/hadoop/blob/736d33cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java
index 330e4de..7236982 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java
@@ -155,7 +155,7 @@ public class TestWebAppProxyServlet {
       URL emptyUrl = new URL("http://localhost:" + proxyPort + "/proxy");
       HttpURLConnection emptyProxyConn = (HttpURLConnection) emptyUrl
           .openConnection();
-      emptyProxyConn.connect();;
+      emptyProxyConn.connect();
       assertEquals(HttpURLConnection.HTTP_NOT_FOUND, emptyProxyConn.getResponseCode());
 
       // wrong url. Set wrong app ID
@@ -176,6 +176,25 @@ public class TestWebAppProxyServlet {
       assertEquals(HttpURLConnection.HTTP_OK, proxyConn.getResponseCode());
       assertTrue(isResponseCookiePresent(
           proxyConn, "checked_application_0_0000", "true"));
+
+      // test that redirection is squashed correctly
+      URL redirectUrl = new URL("http://localhost:" + proxyPort
+          + "/proxy/redirect/application_00_0");
+      proxyConn = (HttpURLConnection) redirectUrl.openConnection();
+      proxyConn.setInstanceFollowRedirects(false);
+      proxyConn.connect();
+      assertEquals("The proxy returned an unexpected status code rather than"
+          + "redirecting the connection (302)",
+          HttpURLConnection.HTTP_MOVED_TEMP, proxyConn.getResponseCode());
+
+      String expected =
+          WebAppUtils.getResolvedRMWebAppURLWithScheme(configuration)
+            + "/cluster/failure/application_00_0";
+      String redirect = proxyConn.getHeaderField(ProxyUtils.LOCATION);
+
+      assertEquals("The proxy did not redirect the connection to the failure "
+          + "page of the RM", expected, redirect);
+
       // cannot find application 1: null
       appReportFetcher.answer = 1;
       proxyConn = (HttpURLConnection) url.openConnection();
@@ -185,6 +204,7 @@ public class TestWebAppProxyServlet {
           proxyConn.getResponseCode());
       assertFalse(isResponseCookiePresent(
           proxyConn, "checked_application_0_0000", "true"));
+
       // cannot find application 2: ApplicationNotFoundException
       appReportFetcher.answer = 4;
       proxyConn = (HttpURLConnection) url.openConnection();
@@ -194,6 +214,7 @@ public class TestWebAppProxyServlet {
           proxyConn.getResponseCode());
       assertFalse(isResponseCookiePresent(
           proxyConn, "checked_application_0_0000", "true"));
+
       // wrong user
       appReportFetcher.answer = 2;
       proxyConn = (HttpURLConnection) url.openConnection();
@@ -203,6 +224,7 @@ public class TestWebAppProxyServlet {
       assertTrue(s
           .contains("to continue to an Application Master web interface owned by"));
       assertTrue(s.contains("WARNING: The following page may not be safe!"));
+
       // case where the task has a non-running status
       appReportFetcher.answer = 3;
       proxyConn = (HttpURLConnection) url.openConnection();

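Asserting on the 302 itself only works if HttpURLConnection's transparent
redirect handling is switched off first, as the new test does. A minimal
sketch of the pattern (the URL is illustrative):

    import java.net.HttpURLConnection;
    import java.net.URL;

    URL url = new URL("http://localhost:9999/proxy/redirect/application_00_0");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setInstanceFollowRedirects(false); // keep the 302 visible to the test
    conn.connect();
    int status = conn.getResponseCode();               // expected: 302
    String location = conn.getHeaderField("Location"); // the redirect target
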
http://git-wip-us.apache.org/repos/asf/hadoop/blob/736d33cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java
index 6f64777..9dc0ce0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.webproxy.amfilter;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.io.StringWriter;
+import java.net.HttpURLConnection;
 import java.util.*;
 import java.util.concurrent.atomic.AtomicBoolean;
 
@@ -147,8 +148,8 @@ public class TestAmFilter {
     testFilter.init(config);
 
     HttpServletResponseForTest response = new HttpServletResponseForTest();
-    // Test request should implements HttpServletRequest
 
+    // Test request should implement HttpServletRequest
     ServletRequest failRequest = Mockito.mock(ServletRequest.class);
     try {
       testFilter.doFilter(failRequest, response, chain);
@@ -159,22 +160,32 @@ public class TestAmFilter {
 
     // request with HttpServletRequest
     HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
-    Mockito.when(request.getRemoteAddr()).thenReturn("redirect");
-    Mockito.when(request.getRequestURI()).thenReturn("/redirect");
+    Mockito.when(request.getRemoteAddr()).thenReturn("nowhere");
+    Mockito.when(request.getRequestURI()).thenReturn("/app/application_00_0");
+
+    // address "redirect" is not in host list for non-proxy connection
     testFilter.doFilter(request, response, chain);
-    // address "redirect" is not in host list
-    assertEquals(302, response.status);
+    assertEquals(HttpURLConnection.HTTP_MOVED_TEMP, response.status);
     String redirect = response.getHeader(ProxyUtils.LOCATION);
-    assertEquals("http://bogus/redirect", redirect);
+    assertEquals("http://bogus/app/application_00_0", redirect);
+
+    // address "redirect" is not in host list for proxy connection
+    Mockito.when(request.getRequestURI()).thenReturn("/proxy/application_00_0");
+    testFilter.doFilter(request, response, chain);
+    assertEquals(HttpURLConnection.HTTP_MOVED_TEMP, response.status);
+    redirect = response.getHeader(ProxyUtils.LOCATION);
+    assertEquals("http://bogus/proxy/redirect/application_00_0", redirect);
+
     // "127.0.0.1" contains in host list. Without cookie
     Mockito.when(request.getRemoteAddr()).thenReturn("127.0.0.1");
     testFilter.doFilter(request, response, chain);
-
     assertTrue(doFilterRequest
         .contains("javax.servlet.http.HttpServletRequest"));
+
     // cookie added
-    Cookie[] cookies = new Cookie[1];
-    cookies[0] = new Cookie(WebAppProxyServlet.PROXY_USER_COOKIE_NAME, "user");
+    Cookie[] cookies = new Cookie[] {
+        new Cookie(WebAppProxyServlet.PROXY_USER_COOKIE_NAME, "user")
+    };
 
     Mockito.when(request.getCookies()).thenReturn(cookies);
     testFilter.doFilter(request, response, chain);


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[22/57] [abbrv] hadoop git commit: MAPREDUCE-6718. add progress log to JHS during startup (haibochen via rkanter)

Posted by in...@apache.org.
MAPREDUCE-6718. add progress log to JHS during startup (haibochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d6778d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d6778d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d6778d8

Branch: refs/heads/HDFS-10467
Commit: 0d6778d800ff16366911e3b064f3af6162dee2e4
Parents: bcb2528
Author: Robert Kanter <rk...@apache.org>
Authored: Wed Sep 28 15:41:40 2016 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Wed Sep 28 15:41:40 2016 -0700

----------------------------------------------------------------------
 .../mapreduce/v2/hs/HistoryFileManager.java     | 21 ++++++++++++++++++++
 1 file changed, 21 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6778d8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
index 12e7a27..0f09df2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
@@ -302,6 +302,10 @@ public class HistoryFileManager extends AbstractService {
     public boolean isFull() {
       return cache.size() >= maxSize;
     }
+
+    public int size() {
+      return cache.size();
+    }
   }
 
   /**
@@ -612,6 +616,9 @@ public class HistoryFileManager extends AbstractService {
     while (!done &&
         ((timeOutMillis == -1) || (clock.getTime() - start < timeOutMillis))) {
       done = tryCreatingHistoryDirs(counter++ % 3 == 0); // log every 3 attempts, 30sec
+      if (done) {
+        break;
+      }
       try {
         Thread.sleep(intervalCheckMillis);
       } catch (InterruptedException ex) {
@@ -760,15 +767,29 @@ public class HistoryFileManager extends AbstractService {
     List<FileStatus> timestampedDirList = findTimestampedDirectories();
     // Sort first just so insertion is in a consistent order
     Collections.sort(timestampedDirList);
+    LOG.info("Found " + timestampedDirList.size() + " directories to load");
     for (FileStatus fs : timestampedDirList) {
       // TODO Could verify the correct format for these directories.
       addDirectoryToSerialNumberIndex(fs.getPath());
     }
+    final double maxCacheSize = (double) jobListCache.maxSize;
+    int prevCacheSize = jobListCache.size();
     for (int i= timestampedDirList.size() - 1;
         i >= 0 && !jobListCache.isFull(); i--) {
       FileStatus fs = timestampedDirList.get(i); 
       addDirectoryToJobListCache(fs.getPath());
+
+      int currCacheSize = jobListCache.size();
+      if ((currCacheSize - prevCacheSize) / maxCacheSize >= 0.05) {
+        LOG.info(currCacheSize * 100.0 / maxCacheSize +
+            "% of cache is loaded.");
+      }
+      prevCacheSize = currCacheSize;
     }
+    final double loadedPercent = maxCacheSize == 0.0 ?
+        100 : prevCacheSize * 100.0 / maxCacheSize;
+    LOG.info("Existing job initialization finished. " +
+        loadedPercent + "% of cache is occupied.");
   }
 
   private void removeDirectoryFromSerialNumberIndex(Path serialDirPath) {

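The new progress log is throttled: a line is emitted only when the cache has
grown by at least five percentage points since the last message. A standalone
sketch of that throttling (the capacity and batch sizes are illustrative):

    double maxCacheSize = 1000.0;                   // illustrative cache capacity
    int prevCacheSize = 0;
    for (int batch : new int[] {30, 80, 90, 200}) { // illustrative load batches
      int currCacheSize = prevCacheSize + batch;
      // Log only when this batch moved the cache by >= 5% of its capacity.
      if ((currCacheSize - prevCacheSize) / maxCacheSize >= 0.05) {
        System.out.println(currCacheSize * 100.0 / maxCacheSize
            + "% of cache is loaded.");
      }
      prevCacheSize = currCacheSize;
    }
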

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[35/57] [abbrv] hadoop git commit: YARN-5486. Update OpportunisticContainerAllocatorAMService::allocate method to handle OPPORTUNISTIC container requests. (Konstantinos Karanasos via asuresh)

Posted by in...@apache.org.
YARN-5486. Update OpportunisticContainerAllocatorAMService::allocate method to handle OPPORTUNISTIC container requests. (Konstantinos Karanasos via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/10be4598
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/10be4598
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/10be4598

Branch: refs/heads/HDFS-10467
Commit: 10be45986cdf86a89055065b752959bd6369d54f
Parents: 1e0ea27
Author: Arun Suresh <as...@apache.org>
Authored: Thu Sep 29 15:11:41 2016 -0700
Committer: Arun Suresh <as...@apache.org>
Committed: Thu Sep 29 15:11:41 2016 -0700

----------------------------------------------------------------------
 .../TestOpportunisticContainerAllocation.java   | 398 +++++++++++++++++++
 .../OpportunisticContainerAllocator.java        |  22 +-
 .../OpportunisticContainerContext.java          |  49 ++-
 .../yarn/server/nodemanager/NodeManager.java    |   3 +-
 .../amrmproxy/DefaultRequestInterceptor.java    |   4 +-
 .../scheduler/DistributedScheduler.java         |  59 +--
 ...pportunisticContainerAllocatorAMService.java | 215 ++++++----
 .../server/resourcemanager/ResourceManager.java |  12 +-
 .../scheduler/SchedulerApplicationAttempt.java  |  58 ++-
 .../distributed/NodeQueueLoadMonitor.java       |  45 ++-
 ...pportunisticContainerAllocatorAMService.java |  10 +-
 11 files changed, 707 insertions(+), 168 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/10be4598/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestOpportunisticContainerAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestOpportunisticContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestOpportunisticContainerAllocation.java
new file mode 100644
index 0000000..b9b4b02
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestOpportunisticContainerAllocation.java
@@ -0,0 +1,398 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.client.api.impl;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.service.Service;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.NMToken;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.Token;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.client.ClientRMProxy;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.hadoop.yarn.client.api.NMTokenCache;
+import org.apache.hadoop.yarn.client.api.YarnClient;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.MiniYARNCluster;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.apache.hadoop.yarn.util.Records;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Class that tests the allocation of OPPORTUNISTIC containers through the
+ * centralized ResourceManager.
+ */
+public class TestOpportunisticContainerAllocation {
+  private static Configuration conf = null;
+  private static MiniYARNCluster yarnCluster = null;
+  private static YarnClient yarnClient = null;
+  private static List<NodeReport> nodeReports = null;
+  private static ApplicationAttemptId attemptId = null;
+  private static int nodeCount = 3;
+
+  private static final int ROLLING_INTERVAL_SEC = 13;
+  private static final long AM_EXPIRE_MS = 4000;
+
+  private static Resource capability;
+  private static Priority priority;
+  private static Priority priority2;
+  private static String node;
+  private static String rack;
+  private static String[] nodes;
+  private static String[] racks;
+  private final static int DEFAULT_ITERATION = 3;
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    // start minicluster
+    conf = new YarnConfiguration();
+    conf.setLong(
+        YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,
+        ROLLING_INTERVAL_SEC);
+    conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, AM_EXPIRE_MS);
+    conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 100);
+    // set the minimum allocation so that resource decrease can go under 1024
+    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512);
+    conf.setBoolean(
+        YarnConfiguration.OPPORTUNISTIC_CONTAINER_ALLOCATION_ENABLED, true);
+    conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
+    yarnCluster =
+        new MiniYARNCluster(TestAMRMClient.class.getName(), nodeCount, 1, 1);
+    yarnCluster.init(conf);
+    yarnCluster.start();
+
+    // start rm client
+    yarnClient = YarnClient.createYarnClient();
+    yarnClient.init(conf);
+    yarnClient.start();
+
+    // get node info
+    nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
+
+    priority = Priority.newInstance(1);
+    priority2 = Priority.newInstance(2);
+    capability = Resource.newInstance(1024, 1);
+
+    node = nodeReports.get(0).getNodeId().getHost();
+    rack = nodeReports.get(0).getRackName();
+    nodes = new String[]{node};
+    racks = new String[]{rack};
+  }
+
+  @Before
+  public void startApp() throws Exception {
+    // submit new app
+    ApplicationSubmissionContext appContext =
+        yarnClient.createApplication().getApplicationSubmissionContext();
+    ApplicationId appId = appContext.getApplicationId();
+    // set the application name
+    appContext.setApplicationName("Test");
+    // Set the priority for the application master
+    Priority pri = Records.newRecord(Priority.class);
+    pri.setPriority(0);
+    appContext.setPriority(pri);
+    // Set the queue to which this application is to be submitted in the RM
+    appContext.setQueue("default");
+    // Set up the container launch context for the application master
+    ContainerLaunchContext amContainer = BuilderUtils.newContainerLaunchContext(
+        Collections.<String, LocalResource>emptyMap(),
+        new HashMap<String, String>(), Arrays.asList("sleep", "100"),
+        new HashMap<String, ByteBuffer>(), null,
+        new HashMap<ApplicationAccessType, String>());
+    appContext.setAMContainerSpec(amContainer);
+    appContext.setResource(Resource.newInstance(1024, 1));
+    // Create the request to send to the applications manager
+    SubmitApplicationRequest appRequest =
+        Records.newRecord(SubmitApplicationRequest.class);
+    appRequest.setApplicationSubmissionContext(appContext);
+    // Submit the application to the applications manager
+    yarnClient.submitApplication(appContext);
+
+    // wait for app to start
+    RMAppAttempt appAttempt = null;
+    while (true) {
+      ApplicationReport appReport = yarnClient.getApplicationReport(appId);
+      if (appReport.getYarnApplicationState() ==
+          YarnApplicationState.ACCEPTED) {
+        attemptId = appReport.getCurrentApplicationAttemptId();
+        appAttempt = yarnCluster.getResourceManager().getRMContext().getRMApps()
+            .get(attemptId.getApplicationId()).getCurrentAppAttempt();
+        while (true) {
+          if (appAttempt.getAppAttemptState() == RMAppAttemptState.LAUNCHED) {
+            break;
+          }
+        }
+        break;
+      }
+    }
+    // Dig into the ResourceManager and get the AMRMToken, just for the sake
+    // of testing.
+    UserGroupInformation.setLoginUser(UserGroupInformation
+        .createRemoteUser(UserGroupInformation.getCurrentUser().getUserName()));
+
+    // emulate RM setup of AMRM token in credentials by adding the token
+    // *before* setting the token service
+    UserGroupInformation.getCurrentUser().addToken(appAttempt.getAMRMToken());
+    appAttempt.getAMRMToken()
+        .setService(ClientRMProxy.getAMRMTokenService(conf));
+  }
+
+  @After
+  public void cancelApp() throws YarnException, IOException {
+    yarnClient.killApplication(attemptId.getApplicationId());
+    attemptId = null;
+  }
+
+  @AfterClass
+  public static void tearDown() {
+    if (yarnClient != null &&
+        yarnClient.getServiceState() == Service.STATE.STARTED) {
+      yarnClient.stop();
+    }
+    if (yarnCluster != null &&
+        yarnCluster.getServiceState() == Service.STATE.STARTED) {
+      yarnCluster.stop();
+    }
+  }
+
+  @Test(timeout = 60000)
+  public void testAMRMClient() throws YarnException, IOException {
+    AMRMClient<AMRMClient.ContainerRequest> amClient = null;
+    try {
+      // start am rm client
+      amClient = AMRMClient.<AMRMClient.ContainerRequest>createAMRMClient();
+
+      //setting an instance NMTokenCache
+      amClient.setNMTokenCache(new NMTokenCache());
+      //asserting we are not using the singleton instance cache
+      Assert.assertNotSame(NMTokenCache.getSingleton(),
+          amClient.getNMTokenCache());
+
+      amClient.init(conf);
+      amClient.start();
+
+      amClient.registerApplicationMaster("Host", 10000, "");
+
+      testAllocation((AMRMClientImpl<AMRMClient.ContainerRequest>)amClient);
+
+      amClient
+          .unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null,
+              null);
+
+    } finally {
+      if (amClient != null &&
+          amClient.getServiceState() == Service.STATE.STARTED) {
+        amClient.stop();
+      }
+    }
+  }
+
+  private void testAllocation(
+      final AMRMClientImpl<AMRMClient.ContainerRequest> amClient)
+      throws YarnException, IOException {
+    // setup container request
+
+    assertEquals(0, amClient.ask.size());
+    assertEquals(0, amClient.release.size());
+
+    amClient.addContainerRequest(
+        new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
+    amClient.addContainerRequest(
+        new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
+    amClient.addContainerRequest(
+        new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
+    amClient.addContainerRequest(
+        new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
+    amClient.addContainerRequest(
+        new AMRMClient.ContainerRequest(capability, null, null, priority2, 0,
+            true, null,
+            ExecutionTypeRequest.newInstance(
+                ExecutionType.OPPORTUNISTIC, true)));
+    amClient.addContainerRequest(
+        new AMRMClient.ContainerRequest(capability, null, null, priority2, 0,
+            true, null,
+            ExecutionTypeRequest.newInstance(
+                ExecutionType.OPPORTUNISTIC, true)));
+
+    amClient.removeContainerRequest(
+        new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
+    amClient.removeContainerRequest(
+        new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
+    amClient.removeContainerRequest(
+        new AMRMClient.ContainerRequest(capability, null, null, priority2, 0,
+            true, null,
+            ExecutionTypeRequest.newInstance(
+                ExecutionType.OPPORTUNISTIC, true)));
+
+    int containersRequestedNode = amClient.getTable(0).get(priority,
+        node, ExecutionType.GUARANTEED, capability).remoteRequest
+        .getNumContainers();
+    int containersRequestedRack = amClient.getTable(0).get(priority,
+        rack, ExecutionType.GUARANTEED, capability).remoteRequest
+        .getNumContainers();
+    int containersRequestedAny = amClient.getTable(0).get(priority,
+        ResourceRequest.ANY, ExecutionType.GUARANTEED, capability)
+        .remoteRequest.getNumContainers();
+    int oppContainersRequestedAny =
+        amClient.getTable(0).get(priority2, ResourceRequest.ANY,
+            ExecutionType.OPPORTUNISTIC, capability).remoteRequest
+            .getNumContainers();
+
+    assertEquals(2, containersRequestedNode);
+    assertEquals(2, containersRequestedRack);
+    assertEquals(2, containersRequestedAny);
+    assertEquals(1, oppContainersRequestedAny);
+
+    assertEquals(4, amClient.ask.size());
+    assertEquals(0, amClient.release.size());
+
+    // RM should allocate the containers within a few calls to allocate()
+    int allocatedContainerCount = 0;
+    int allocatedOpportContainerCount = 0;
+    int iterationsLeft = 10;
+    Set<ContainerId> releases = new TreeSet<>();
+
+    amClient.getNMTokenCache().clearCache();
+    Assert.assertEquals(0,
+        amClient.getNMTokenCache().numberOfTokensInCache());
+    HashMap<String, Token> receivedNMTokens = new HashMap<>();
+
+    while (allocatedContainerCount <
+        containersRequestedAny + oppContainersRequestedAny
+        && iterationsLeft-- > 0) {
+      AllocateResponse allocResponse = amClient.allocate(0.1f);
+      assertEquals(0, amClient.ask.size());
+      assertEquals(0, amClient.release.size());
+
+      allocatedContainerCount += allocResponse.getAllocatedContainers()
+          .size();
+      for (Container container : allocResponse.getAllocatedContainers()) {
+        if (container.getExecutionType() == ExecutionType.OPPORTUNISTIC) {
+          allocatedOpportContainerCount++;
+        }
+        ContainerId rejectContainerId = container.getId();
+        releases.add(rejectContainerId);
+      }
+
+      for (NMToken token : allocResponse.getNMTokens()) {
+        String nodeID = token.getNodeId().toString();
+        receivedNMTokens.put(nodeID, token.getToken());
+      }
+
+      if (allocatedContainerCount < containersRequestedAny) {
+        // sleep to let the NMs heartbeat to the RM and trigger allocations
+        sleep(100);
+      }
+    }
+
+    assertEquals(allocatedContainerCount,
+        containersRequestedAny + oppContainersRequestedAny);
+    assertEquals(allocatedOpportContainerCount, oppContainersRequestedAny);
+    for (ContainerId rejectContainerId : releases) {
+      amClient.releaseAssignedContainer(rejectContainerId);
+    }
+    assertEquals(3, amClient.release.size());
+    assertEquals(0, amClient.ask.size());
+
+    // need to tell the AMRMClient that we don't need these resources anymore
+    amClient.removeContainerRequest(
+        new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
+    amClient.removeContainerRequest(
+        new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
+    amClient.removeContainerRequest(
+        new AMRMClient.ContainerRequest(capability, nodes, racks, priority2, 0,
+            true, null,
+            ExecutionTypeRequest.newInstance(
+                ExecutionType.OPPORTUNISTIC, true)));
+    assertEquals(4, amClient.ask.size());
+
+    iterationsLeft = 3;
+    // do a few iterations to ensure RM is not going to send new containers
+    while (iterationsLeft-- > 0) {
+      // inform RM of rejection
+      AllocateResponse allocResponse = amClient.allocate(0.1f);
+      // RM did not send new containers because AM does not need any
+      assertEquals(0, allocResponse.getAllocatedContainers().size());
+      if (allocResponse.getCompletedContainersStatuses().size() > 0) {
+        for (ContainerStatus cStatus : allocResponse
+            .getCompletedContainersStatuses()) {
+          if (releases.contains(cStatus.getContainerId())) {
+            assertEquals(cStatus.getState(), ContainerState.COMPLETE);
+            assertEquals(-100, cStatus.getExitStatus());
+            releases.remove(cStatus.getContainerId());
+          }
+        }
+      }
+      if (iterationsLeft > 0) {
+        // sleep to give the NMs time to heartbeat to the RM
+        sleep(100);
+      }
+    }
+    assertEquals(0, amClient.ask.size());
+    assertEquals(0, amClient.release.size());
+  }
+
+  private void sleep(int sleepTime) {
+    try {
+      Thread.sleep(sleepTime);
+    } catch (InterruptedException e) {
+      e.printStackTrace();
+    }
+  }
+}

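The client-visible piece this test exercises is the ExecutionTypeRequest
argument of AMRMClient.ContainerRequest. A minimal sketch of asking for an
OPPORTUNISTIC container (the resource size and priority are illustrative):

    import org.apache.hadoop.yarn.api.records.ExecutionType;
    import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.client.api.AMRMClient;

    Resource capability = Resource.newInstance(1024, 1); // 1 GB, 1 vcore
    Priority priority = Priority.newInstance(2);
    // No nodes/racks, matching how the test issues OPPORTUNISTIC requests.
    AMRMClient.ContainerRequest oppRequest =
        new AMRMClient.ContainerRequest(capability, null, null, priority, 0,
            true, null,
            ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true));
    // amClient.addContainerRequest(oppRequest);
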
http://git-wip-us.apache.org/repos/asf/hadoop/blob/10be4598/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerAllocator.java
index 9b2fd38..9c158e9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerAllocator.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.yarn.util.resource.Resources;
 
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -145,15 +146,6 @@ public class OpportunisticContainerAllocator {
     }
 
     /**
-     * Sets the underlying Atomic Long. To be used when implementation needs to
-     * share the underlying AtomicLong of an existing counter.
-     * @param counter AtomicLong
-     */
-    public void setContainerIdCounter(AtomicLong counter) {
-      this.containerIdCounter = counter;
-    }
-
-    /**
      * Generates a new long value. Default implementation increments the
      * underlying AtomicLong. Sub classes are encouraged to over-ride this
      * behaviour.
@@ -213,6 +205,10 @@ public class OpportunisticContainerAllocator {
     PartitionedResourceRequests partitionedAsks =
         partitionAskList(request.getAskList());
 
+    if (partitionedAsks.getOpportunistic().isEmpty()) {
+      return Collections.emptyList();
+    }
+
     List<ContainerId> releasedContainers = request.getReleaseList();
     int numReleasedContainers = releasedContainers.size();
     if (numReleasedContainers > 0) {
@@ -236,8 +232,8 @@ public class OpportunisticContainerAllocator {
         appContext.getOutstandingOpReqs().descendingKeySet()) {
       // Allocated containers :
       //  Key = Requested Capability,
-      //  Value = List of Containers of given Cap (The actual container size
-      //          might be different than what is requested.. which is why
+      //  Value = List of Containers of given cap (the actual container size
+      //          might be different than what is requested, which is why
       //          we need the requested capability (key) to match against
       //          the outstanding reqs)
       Map<Resource, List<Container>> allocated = allocate(rmIdentifier,
@@ -290,6 +286,10 @@ public class OpportunisticContainerAllocator {
       }
       nodesForScheduling.add(nodeEntry.getValue());
     }
+    if (nodesForScheduling.isEmpty()) {
+      LOG.warn("No nodes available for allocating opportunistic containers.");
+      return;
+    }
     int numAllocated = 0;
     int nextNodeToSchedule = 0;
     for (int numCont = 0; numCont < toAllocate; numCont++) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/10be4598/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerContext.java
index 1b701ea..6fcddf8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerContext.java
@@ -18,9 +18,11 @@
 
 package org.apache.hadoop.yarn.server.scheduler;
 
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.NMToken;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -28,9 +30,11 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedHashMap;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -56,15 +60,13 @@ public class OpportunisticContainerContext {
   private ContainerIdGenerator containerIdGenerator =
       new ContainerIdGenerator();
 
-  private Map<String, NodeId> nodeMap = new LinkedHashMap<>();
+  private volatile List<NodeId> nodeList = new LinkedList<>();
+  private final Map<String, NodeId> nodeMap = new LinkedHashMap<>();
 
-  // Mapping of NodeId to NodeTokens. Populated either from RM response or
-  // generated locally if required.
-  private Map<NodeId, NMToken> nodeTokens = new HashMap<>();
   private final Set<String> blacklist = new HashSet<>();
 
   // This maintains a map of outstanding OPPORTUNISTIC Reqs. Key-ed by Priority,
-  // Resource Name (Host/rack/any) and capability. This mapping is required
+  // Resource Name (host/rack/any) and capability. This mapping is required
   // to match a received Container to an outstanding OPPORTUNISTIC
   // ResourceRequest (ask).
   private final TreeMap<Priority, Map<Resource, ResourceRequest>>
@@ -74,7 +76,7 @@ public class OpportunisticContainerContext {
     return containersAllocated;
   }
 
-  public OpportunisticContainerAllocator.AllocationParams getAppParams() {
+  public AllocationParams getAppParams() {
     return appParams;
   }
 
@@ -88,11 +90,29 @@ public class OpportunisticContainerContext {
   }
 
   public Map<String, NodeId> getNodeMap() {
-    return nodeMap;
+    return Collections.unmodifiableMap(nodeMap);
   }
 
-  public Map<NodeId, NMToken> getNodeTokens() {
-    return nodeTokens;
+  public synchronized void updateNodeList(List<NodeId> newNodeList) {
+    // This is an optimization for centralized placement. The
+    // OppContainerAllocatorAMService has a cached list of nodes which it sets
+    // here. The nodeMap needs to be updated only if the backing node list is
+    // modified.
+    if (newNodeList != nodeList) {
+      nodeList = newNodeList;
+      nodeMap.clear();
+      for (NodeId n : nodeList) {
+        nodeMap.put(n.getHost(), n);
+      }
+    }
+  }
+
+  public void updateAllocationParams(Resource minResource, Resource maxResource,
+      Resource incrResource, int containerTokenExpiryInterval) {
+    appParams.setMinResource(minResource);
+    appParams.setMaxResource(maxResource);
+    appParams.setIncrementResource(incrResource);
+    appParams.setContainerTokenExpiryInterval(containerTokenExpiryInterval);
   }
 
   public Set<String> getBlacklist() {
@@ -104,6 +124,15 @@ public class OpportunisticContainerContext {
     return outstandingOpReqs;
   }
 
+  public void updateCompletedContainers(AllocateResponse allocateResponse) {
+    for (ContainerStatus cs :
+        allocateResponse.getCompletedContainersStatuses()) {
+      if (cs.getExecutionType() == ExecutionType.OPPORTUNISTIC) {
+        containersAllocated.remove(cs.getContainerId());
+      }
+    }
+  }
+
   /**
    * Takes a list of ResourceRequests (asks), extracts the key information viz.
    * (Priority, ResourceName, Capability) and adds to the outstanding

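One subtlety worth noting in updateNodeList() above: the guard is reference
identity (newNodeList != nodeList), not equals(), so mutating the previously
supplied list in place will not refresh nodeMap. A short sketch of the
resulting behavior (the hosts are illustrative; ctx stands in for an
OpportunisticContainerContext):

    List<NodeId> cached = new LinkedList<>();
    cached.add(NodeId.newInstance("host1", 0));
    ctx.updateNodeList(cached);          // new object: nodeMap rebuilt {host1}
    cached.add(NodeId.newInstance("host2", 0));
    ctx.updateNodeList(cached);          // same object: nodeMap NOT rebuilt
    ctx.updateNodeList(new LinkedList<>(cached));
                                         // new object: rebuilt {host1, host2}
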
http://git-wip-us.apache.org/repos/asf/hadoop/blob/10be4598/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index 7f13334..37f67c4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -336,8 +336,7 @@ public class NodeManager extends CompositeService
     addService(nodeHealthChecker);
 
     boolean isDistSchedulingEnabled =
-        conf.getBoolean(YarnConfiguration.
-            OPPORTUNISTIC_CONTAINER_ALLOCATION_ENABLED,
+        conf.getBoolean(YarnConfiguration.DIST_SCHEDULING_ENABLED,
             YarnConfiguration.DIST_SCHEDULING_ENABLED_DEFAULT);
 
     this.context = createNMContext(containerTokenSecretManager,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/10be4598/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/DefaultRequestInterceptor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/DefaultRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/DefaultRequestInterceptor.java
index efbdfb4..22fc8f6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/DefaultRequestInterceptor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/DefaultRequestInterceptor.java
@@ -152,7 +152,7 @@ public final class DefaultRequestInterceptor extends
       return ((DistributedSchedulingAMProtocol)rmClient)
           .registerApplicationMasterForDistributedScheduling(request);
     } else {
-      throw new YarnException("Distributed Scheduling is not enabled !!");
+      throw new YarnException("Distributed Scheduling is not enabled.");
     }
   }
 
@@ -174,7 +174,7 @@ public final class DefaultRequestInterceptor extends
       }
       return allocateResponse;
     } else {
-      throw new YarnException("Distributed Scheduling is not enabled !!");
+      throw new YarnException("Distributed Scheduling is not enabled.");
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/10be4598/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/scheduler/DistributedScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/scheduler/DistributedScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/scheduler/DistributedScheduler.java
index 368858c..8a40337 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/scheduler/DistributedScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/scheduler/DistributedScheduler.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.nodemanager.scheduler;
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.api.protocolrecords.DistributedSchedulingAllocateRequest;
@@ -32,8 +33,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterReque
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.ContainerStatus;
-import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.NMToken;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -48,7 +47,9 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 /**
  * <p>The DistributedScheduler runs on the NodeManager and is modeled as an
@@ -74,6 +75,9 @@ public final class DistributedScheduler extends AbstractRequestInterceptor {
   private OpportunisticContainerContext oppContainerContext =
       new OpportunisticContainerContext();
 
+  // Mapping of NodeId to NodeTokens. Populated either from RM response or
+  // generated locally if required.
+  private Map<NodeId, NMToken> nodeTokens = new HashMap<>();
   private ApplicationAttemptId applicationAttemptId;
   private OpportunisticContainerAllocator containerAllocator;
   private NMTokenSecretManagerInNM nmSecretManager;
@@ -157,17 +161,17 @@ public final class DistributedScheduler extends AbstractRequestInterceptor {
   }
 
   /**
-   * Check if we already have a NMToken. if Not, generate the Token and
-   * add it to the response
+   * Adds all the newly allocated Containers to the allocate Response.
+   * Additionally, in case the NMToken for one of the nodes does not exist, it
+   * generates one and adds it to the response.
    */
-  private void updateResponseWithNMTokens(AllocateResponse response,
+  private void updateAllocateResponse(AllocateResponse response,
       List<NMToken> nmTokens, List<Container> allocatedContainers) {
     List<NMToken> newTokens = new ArrayList<>();
     if (allocatedContainers.size() > 0) {
       response.getAllocatedContainers().addAll(allocatedContainers);
       for (Container alloc : allocatedContainers) {
-        if (!oppContainerContext.getNodeTokens().containsKey(
-            alloc.getNodeId())) {
+        if (!nodeTokens.containsKey(alloc.getNodeId())) {
           newTokens.add(nmSecretManager.generateNMToken(appSubmitter, alloc));
         }
       }
@@ -179,17 +183,14 @@ public final class DistributedScheduler extends AbstractRequestInterceptor {
 
   private void updateParameters(
       RegisterDistributedSchedulingAMResponse registerResponse) {
-    oppContainerContext.getAppParams().setMinResource(
-        registerResponse.getMinContainerResource());
-    oppContainerContext.getAppParams().setMaxResource(
-        registerResponse.getMaxContainerResource());
-    oppContainerContext.getAppParams().setIncrementResource(
-        registerResponse.getIncrContainerResource());
-    if (oppContainerContext.getAppParams().getIncrementResource() == null) {
-      oppContainerContext.getAppParams().setIncrementResource(
-          oppContainerContext.getAppParams().getMinResource());
+    Resource incrementResource = registerResponse.getIncrContainerResource();
+    if (incrementResource == null) {
+      incrementResource = registerResponse.getMinContainerResource();
     }
-    oppContainerContext.getAppParams().setContainerTokenExpiryInterval(
+    oppContainerContext.updateAllocationParams(
+        registerResponse.getMinContainerResource(),
+        registerResponse.getMaxContainerResource(),
+        incrementResource,
         registerResponse.getContainerTokenExpiryInterval());
 
     oppContainerContext.getContainerIdGenerator()
@@ -198,14 +199,7 @@ public final class DistributedScheduler extends AbstractRequestInterceptor {
   }
 
   private void setNodeList(List<NodeId> nodeList) {
-    oppContainerContext.getNodeMap().clear();
-    addToNodeList(nodeList);
-  }
-
-  private void addToNodeList(List<NodeId> nodes) {
-    for (NodeId n : nodes) {
-      oppContainerContext.getNodeMap().put(n.getHost(), n);
-    }
+    oppContainerContext.updateNodeList(nodeList);
   }
 
   @Override
@@ -243,23 +237,14 @@ public final class DistributedScheduler extends AbstractRequestInterceptor {
     setNodeList(dsResp.getNodesForScheduling());
     List<NMToken> nmTokens = dsResp.getAllocateResponse().getNMTokens();
     for (NMToken nmToken : nmTokens) {
-      oppContainerContext.getNodeTokens().put(nmToken.getNodeId(), nmToken);
+      nodeTokens.put(nmToken.getNodeId(), nmToken);
     }
 
-    List<ContainerStatus> completedContainers =
-        dsResp.getAllocateResponse().getCompletedContainersStatuses();
-
-    // Only account for opportunistic containers
-    for (ContainerStatus cs : completedContainers) {
-      if (cs.getExecutionType() == ExecutionType.OPPORTUNISTIC) {
-        oppContainerContext.getContainersAllocated()
-            .remove(cs.getContainerId());
-      }
-    }
+    oppContainerContext.updateCompletedContainers(dsResp.getAllocateResponse());
 
     // Check if we have NM tokens for all the allocated containers. If not
     // generate one and update the response.
-    updateResponseWithNMTokens(
+    updateAllocateResponse(
         dsResp.getAllocateResponse(), nmTokens, allocatedContainers);
 
     if (LOG.isDebugEnabled()) {

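The DistributedScheduler hunk above moves NMToken bookkeeping into a local nodeTokens map and mints a token only for nodes that do not yet have one. Below is a hedged sketch of that pattern, with toy Token and String node types standing in for NMToken and NodeId; caching the locally minted token is an assumption, consistent with the field comment in the diff.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class TokenBookkeepingDemo {
  // Toy stand-in for NMToken; carries only the node it belongs to.
  static final class Token {
    final String node;
    Token(String node) { this.node = node; }
  }

  // Stand-in for the nodeTokens field: node -> token.
  private final Map<String, Token> nodeTokens = new HashMap<>();

  // Mint tokens only for nodes with no entry yet; all other nodes are
  // assumed to already hold a valid token from an earlier RM response.
  List<Token> tokensForNewNodes(List<String> allocatedNodes) {
    List<Token> newTokens = new ArrayList<>();
    for (String node : allocatedNodes) {
      if (!nodeTokens.containsKey(node)) {
        Token t = new Token(node);   // stand-in for generateNMToken(...)
        nodeTokens.put(node, t);     // assumption: remember what we minted
        newTokens.add(t);
      }
    }
    return newTokens;
  }
}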
http://git-wip-us.apache.org/repos/asf/hadoop/blob/10be4598/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
index a473b14..a7c0a50 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
@@ -24,9 +24,11 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocolPB;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.server.api.DistributedSchedulingAMProtocol;
 import org.apache.hadoop.yarn.api.impl.pb.service.ApplicationMasterProtocolPBServiceImpl;
 
@@ -65,12 +67,14 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateS
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
 
+
+import org.apache.hadoop.yarn.server.scheduler.OpportunisticContainerAllocator;
+import org.apache.hadoop.yarn.server.scheduler.OpportunisticContainerContext;
+import org.apache.hadoop.yarn.server.utils.YarnServerSecurityUtils;
+
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.util.HashSet;
 import java.util.List;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
 
 /**
  * The OpportunisticContainerAllocatorAMService is started instead of the
@@ -88,17 +92,20 @@ public class OpportunisticContainerAllocatorAMService
       LogFactory.getLog(OpportunisticContainerAllocatorAMService.class);
 
   private final NodeQueueLoadMonitor nodeMonitor;
+  private final OpportunisticContainerAllocator oppContainerAllocator;
 
-  private final ConcurrentHashMap<String, Set<NodeId>> rackToNode =
-      new ConcurrentHashMap<>();
-  private final ConcurrentHashMap<String, Set<NodeId>> hostToNode =
-      new ConcurrentHashMap<>();
   private final int k;
 
+  private final long cacheRefreshInterval;
+  private List<NodeId> cachedNodeIds;
+  private long lastCacheUpdateTime;
+
   public OpportunisticContainerAllocatorAMService(RMContext rmContext,
       YarnScheduler scheduler) {
     super(OpportunisticContainerAllocatorAMService.class.getName(),
         rmContext, scheduler);
+    this.oppContainerAllocator = new OpportunisticContainerAllocator(
+        rmContext.getContainerTokenSecretManager(), 0);
     this.k = rmContext.getYarnConfiguration().getInt(
         YarnConfiguration.OPP_CONTAINER_ALLOCATION_NODES_NUMBER_USED,
         YarnConfiguration.OPP_CONTAINER_ALLOCATION_NODES_NUMBER_USED_DEFAULT);
@@ -106,6 +113,8 @@ public class OpportunisticContainerAllocatorAMService
         YarnConfiguration.NM_CONTAINER_QUEUING_SORTING_NODES_INTERVAL_MS,
         YarnConfiguration.
             NM_CONTAINER_QUEUING_SORTING_NODES_INTERVAL_MS_DEFAULT);
+    this.cacheRefreshInterval = nodeSortInterval;
+    this.lastCacheUpdateTime = System.currentTimeMillis();
     NodeQueueLoadMonitor.LoadComparator comparator =
         NodeQueueLoadMonitor.LoadComparator.valueOf(
             rmContext.getYarnConfiguration().get(
@@ -172,6 +181,27 @@ public class OpportunisticContainerAllocatorAMService
   public RegisterApplicationMasterResponse registerApplicationMaster
       (RegisterApplicationMasterRequest request) throws YarnException,
       IOException {
+    final ApplicationAttemptId appAttemptId = getAppAttemptId();
+    SchedulerApplicationAttempt appAttempt = ((AbstractYarnScheduler)
+        rmContext.getScheduler()).getApplicationAttempt(appAttemptId);
+    if (appAttempt.getOpportunisticContainerContext() == null) {
+      OpportunisticContainerContext opCtx = new OpportunisticContainerContext();
+      opCtx.setContainerIdGenerator(new OpportunisticContainerAllocator
+          .ContainerIdGenerator() {
+        @Override
+        public long generateContainerId() {
+          return appAttempt.getAppSchedulingInfo().getNewContainerId();
+        }
+      });
+      int tokenExpiryInterval = getConfig()
+          .getInt(YarnConfiguration.OPPORTUNISTIC_CONTAINERS_TOKEN_EXPIRY_MS,
+              YarnConfiguration.
+                  OPPORTUNISTIC_CONTAINERS_TOKEN_EXPIRY_MS_DEFAULT);
+      opCtx.updateAllocationParams(createMinContainerResource(),
+          createMaxContainerResource(), createIncrContainerResource(),
+          tokenExpiryInterval);
+      appAttempt.setOpportunisticContainerContext(opCtx);
+    }
     return super.registerApplicationMaster(request);
   }
 
@@ -185,7 +215,30 @@ public class OpportunisticContainerAllocatorAMService
   @Override
   public AllocateResponse allocate(AllocateRequest request) throws
       YarnException, IOException {
-    return super.allocate(request);
+
+    final ApplicationAttemptId appAttemptId = getAppAttemptId();
+    SchedulerApplicationAttempt appAttempt = ((AbstractYarnScheduler)
+        rmContext.getScheduler()).getApplicationAttempt(appAttemptId);
+    OpportunisticContainerContext oppCtx =
+        appAttempt.getOpportunisticContainerContext();
+    oppCtx.updateNodeList(getLeastLoadedNodes());
+    List<Container> oppContainers =
+        oppContainerAllocator.allocateContainers(request, appAttemptId, oppCtx,
+        ResourceManager.getClusterTimeStamp(), appAttempt.getUser());
+
+    if (!oppContainers.isEmpty()) {
+      handleNewContainers(oppContainers, false);
+      appAttempt.updateNMTokens(oppContainers);
+    }
+
+    // Allocate all guaranteed containers
+    AllocateResponse allocateResp = super.allocate(request);
+
+    oppCtx.updateCompletedContainers(allocateResp);
+
+    // Add all opportunistic containers
+    allocateResp.getAllocatedContainers().addAll(oppContainers);
+    return allocateResp;
   }
 
   @Override
@@ -198,39 +251,9 @@ public class OpportunisticContainerAllocatorAMService
     RegisterDistributedSchedulingAMResponse dsResp = recordFactory
         .newRecordInstance(RegisterDistributedSchedulingAMResponse.class);
     dsResp.setRegisterResponse(response);
-    dsResp.setMinContainerResource(
-        Resource.newInstance(
-            getConfig().getInt(
-                YarnConfiguration.OPPORTUNISTIC_CONTAINERS_MIN_MEMORY_MB,
-                YarnConfiguration.
-                    OPPORTUNISTIC_CONTAINERS_MIN_MEMORY_MB_DEFAULT),
-            getConfig().getInt(
-                YarnConfiguration.OPPORTUNISTIC_CONTAINERS_MIN_VCORES,
-                YarnConfiguration.OPPORTUNISTIC_CONTAINERS_MIN_VCORES_DEFAULT)
-        )
-    );
-    dsResp.setMaxContainerResource(
-        Resource.newInstance(
-            getConfig().getInt(
-                YarnConfiguration.OPPORTUNISTIC_CONTAINERS_MAX_MEMORY_MB,
-                YarnConfiguration
-                    .OPPORTUNISTIC_CONTAINERS_MAX_MEMORY_MB_DEFAULT),
-            getConfig().getInt(
-                YarnConfiguration.OPPORTUNISTIC_CONTAINERS_MAX_VCORES,
-                YarnConfiguration.OPPORTUNISTIC_CONTAINERS_MAX_VCORES_DEFAULT)
-        )
-    );
-    dsResp.setIncrContainerResource(
-        Resource.newInstance(
-            getConfig().getInt(
-                YarnConfiguration.OPPORTUNISTIC_CONTAINERS_INCR_MEMORY_MB,
-                YarnConfiguration.
-                    OPPORTUNISTIC_CONTAINERS_INCR_MEMORY_MB_DEFAULT),
-            getConfig().getInt(
-                YarnConfiguration.OPPORTUNISTIC_CONTAINERS_INCR_VCORES,
-                YarnConfiguration.OPPORTUNISTIC_CONTAINERS_INCR_VCORES_DEFAULT)
-        )
-    );
+    dsResp.setMinContainerResource(createMinContainerResource());
+    dsResp.setMaxContainerResource(createMaxContainerResource());
+    dsResp.setIncrContainerResource(createIncrContainerResource());
     dsResp.setContainerTokenExpiryInterval(
         getConfig().getInt(
             YarnConfiguration.OPPORTUNISTIC_CONTAINERS_TOKEN_EXPIRY_MS,
@@ -240,8 +263,7 @@ public class OpportunisticContainerAllocatorAMService
         this.rmContext.getEpoch() << ResourceManager.EPOCH_BIT_SHIFT);
 
     // Set nodes to be used for scheduling
-    dsResp.setNodesForScheduling(
-        this.nodeMonitor.selectLeastLoadedNodes(this.k));
+    dsResp.setNodesForScheduling(getLeastLoadedNodes());
     return dsResp;
   }
 
@@ -250,47 +272,30 @@ public class OpportunisticContainerAllocatorAMService
       DistributedSchedulingAllocateRequest request)
       throws YarnException, IOException {
     List<Container> distAllocContainers = request.getAllocatedContainers();
-    for (Container container : distAllocContainers) {
+    handleNewContainers(distAllocContainers, true);
+    AllocateResponse response = allocate(request.getAllocateRequest());
+    DistributedSchedulingAllocateResponse dsResp = recordFactory
+        .newRecordInstance(DistributedSchedulingAllocateResponse.class);
+    dsResp.setAllocateResponse(response);
+    dsResp.setNodesForScheduling(getLeastLoadedNodes());
+    return dsResp;
+  }
+
+  private void handleNewContainers(List<Container> allocContainers,
+                                   boolean isRemotelyAllocated) {
+    for (Container container : allocContainers) {
       // Create RMContainer
       SchedulerApplicationAttempt appAttempt =
           ((AbstractYarnScheduler) rmContext.getScheduler())
               .getCurrentAttemptForContainer(container.getId());
       RMContainer rmContainer = new RMContainerImpl(container,
           appAttempt.getApplicationAttemptId(), container.getNodeId(),
-          appAttempt.getUser(), rmContext, true);
+          appAttempt.getUser(), rmContext, isRemotelyAllocated);
       appAttempt.addRMContainer(container.getId(), rmContainer);
       rmContainer.handle(
           new RMContainerEvent(container.getId(),
               RMContainerEventType.LAUNCHED));
     }
-    AllocateResponse response = allocate(request.getAllocateRequest());
-    DistributedSchedulingAllocateResponse dsResp = recordFactory
-        .newRecordInstance(DistributedSchedulingAllocateResponse.class);
-    dsResp.setAllocateResponse(response);
-    dsResp.setNodesForScheduling(
-        this.nodeMonitor.selectLeastLoadedNodes(this.k));
-    return dsResp;
-  }
-
-  private void addToMapping(ConcurrentHashMap<String, Set<NodeId>> mapping,
-                            String rackName, NodeId nodeId) {
-    if (rackName != null) {
-      mapping.putIfAbsent(rackName, new HashSet<NodeId>());
-      Set<NodeId> nodeIds = mapping.get(rackName);
-      synchronized (nodeIds) {
-        nodeIds.add(nodeId);
-      }
-    }
-  }
-
-  private void removeFromMapping(ConcurrentHashMap<String, Set<NodeId>> mapping,
-                                 String rackName, NodeId nodeId) {
-    if (rackName != null) {
-      Set<NodeId> nodeIds = mapping.get(rackName);
-      synchronized (nodeIds) {
-        nodeIds.remove(nodeId);
-      }
-    }
   }
 
   @Override
@@ -303,10 +308,6 @@ public class OpportunisticContainerAllocatorAMService
       NodeAddedSchedulerEvent nodeAddedEvent = (NodeAddedSchedulerEvent) event;
       nodeMonitor.addNode(nodeAddedEvent.getContainerReports(),
           nodeAddedEvent.getAddedRMNode());
-      addToMapping(rackToNode, nodeAddedEvent.getAddedRMNode().getRackName(),
-          nodeAddedEvent.getAddedRMNode().getNodeID());
-      addToMapping(hostToNode, nodeAddedEvent.getAddedRMNode().getHostName(),
-          nodeAddedEvent.getAddedRMNode().getNodeID());
       break;
     case NODE_REMOVED:
       if (!(event instanceof NodeRemovedSchedulerEvent)) {
@@ -315,12 +316,6 @@ public class OpportunisticContainerAllocatorAMService
       NodeRemovedSchedulerEvent nodeRemovedEvent =
           (NodeRemovedSchedulerEvent) event;
       nodeMonitor.removeNode(nodeRemovedEvent.getRemovedRMNode());
-      removeFromMapping(rackToNode,
-          nodeRemovedEvent.getRemovedRMNode().getRackName(),
-          nodeRemovedEvent.getRemovedRMNode().getNodeID());
-      removeFromMapping(hostToNode,
-          nodeRemovedEvent.getRemovedRMNode().getHostName(),
-          nodeRemovedEvent.getRemovedRMNode().getNodeID());
       break;
     case NODE_UPDATE:
       if (!(event instanceof NodeUpdateSchedulerEvent)) {
@@ -364,4 +359,58 @@ public class OpportunisticContainerAllocatorAMService
   public QueueLimitCalculator getNodeManagerQueueLimitCalculator() {
     return nodeMonitor.getThresholdCalculator();
   }
+
+  private Resource createIncrContainerResource() {
+    return Resource.newInstance(
+        getConfig().getInt(
+            YarnConfiguration.OPPORTUNISTIC_CONTAINERS_INCR_MEMORY_MB,
+            YarnConfiguration.
+                OPPORTUNISTIC_CONTAINERS_INCR_MEMORY_MB_DEFAULT),
+        getConfig().getInt(
+            YarnConfiguration.OPPORTUNISTIC_CONTAINERS_INCR_VCORES,
+            YarnConfiguration.OPPORTUNISTIC_CONTAINERS_INCR_VCORES_DEFAULT)
+    );
+  }
+
+  private synchronized List<NodeId> getLeastLoadedNodes() {
+    long currTime = System.currentTimeMillis();
+    if ((currTime - lastCacheUpdateTime > cacheRefreshInterval)
+        || cachedNodeIds == null) {
+      cachedNodeIds = this.nodeMonitor.selectLeastLoadedNodes(this.k);
+      lastCacheUpdateTime = currTime;
+    }
+    return cachedNodeIds;
+  }
+
+  private Resource createMaxContainerResource() {
+    return Resource.newInstance(
+        getConfig().getInt(
+            YarnConfiguration.OPPORTUNISTIC_CONTAINERS_MAX_MEMORY_MB,
+            YarnConfiguration
+                .OPPORTUNISTIC_CONTAINERS_MAX_MEMORY_MB_DEFAULT),
+        getConfig().getInt(
+            YarnConfiguration.OPPORTUNISTIC_CONTAINERS_MAX_VCORES,
+            YarnConfiguration.OPPORTUNISTIC_CONTAINERS_MAX_VCORES_DEFAULT)
+    );
+  }
+
+  private Resource createMinContainerResource() {
+    return Resource.newInstance(
+        getConfig().getInt(
+            YarnConfiguration.OPPORTUNISTIC_CONTAINERS_MIN_MEMORY_MB,
+            YarnConfiguration.
+                OPPORTUNISTIC_CONTAINERS_MIN_MEMORY_MB_DEFAULT),
+        getConfig().getInt(
+            YarnConfiguration.OPPORTUNISTIC_CONTAINERS_MIN_VCORES,
+            YarnConfiguration.OPPORTUNISTIC_CONTAINERS_MIN_VCORES_DEFAULT)
+    );
+  }
+
+  private static ApplicationAttemptId getAppAttemptId() throws YarnException {
+    AMRMTokenIdentifier amrmTokenIdentifier =
+        YarnServerSecurityUtils.authorizeRequest();
+    ApplicationAttemptId applicationAttemptId =
+        amrmTokenIdentifier.getApplicationAttemptId();
+    return applicationAttemptId;
+  }
 }

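The new getLeastLoadedNodes() above turns a per-call selectLeastLoadedNodes() into a time-based cache: the expensive node ranking is recomputed at most once per refresh interval. Here is a generic sketch of the same pattern; the names are illustrative, not the YARN API.

import java.util.function.Supplier;

public class TimedCache<T> {
  private final Supplier<T> loader;        // the expensive computation
  private final long refreshIntervalMs;
  private T cached;
  private long lastUpdateMs;

  public TimedCache(Supplier<T> loader, long refreshIntervalMs) {
    this.loader = loader;
    this.refreshIntervalMs = refreshIntervalMs;
  }

  // synchronized plays the role of the method-level lock in the diff: at
  // most one caller recomputes; everyone else reuses the cached value.
  public synchronized T get() {
    long now = System.currentTimeMillis();
    if (cached == null || now - lastUpdateMs > refreshIntervalMs) {
      cached = loader.get();
      lastUpdateMs = now;
    }
    return cached;
  }
}

Under the diff's configuration this would correspond roughly to new TimedCache<>(() -> nodeMonitor.selectLeastLoadedNodes(k), nodeSortInterval), reusing the sorting interval as the cache lifetime.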
http://git-wip-us.apache.org/repos/asf/hadoop/blob/10be4598/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 5e9bece..d2d706d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -1184,6 +1184,13 @@ public class ResourceManager extends CompositeService implements Recoverable {
     Configuration config = this.rmContext.getYarnConfiguration();
     if (YarnConfiguration.isOpportunisticContainerAllocationEnabled(config)
         || YarnConfiguration.isDistSchedulingEnabled(config)) {
+      if (YarnConfiguration.isDistSchedulingEnabled(config) &&
+          !YarnConfiguration
+              .isOpportunisticContainerAllocationEnabled(config)) {
+        throw new YarnRuntimeException(
+            "Invalid parameters: opportunistic container allocation has to " +
+                "be enabled when distributed scheduling is enabled.");
+      }
       OpportunisticContainerAllocatorAMService
           oppContainerAllocatingAMService =
           new OpportunisticContainerAllocatorAMService(this.rmContext,
@@ -1193,9 +1200,8 @@ public class ResourceManager extends CompositeService implements Recoverable {
               OpportunisticContainerAllocatorAMService.class.getName());
       // Add an event dispatcher for the
       // OpportunisticContainerAllocatorAMService to handle node
-      // updates/additions and removals.
-      // Since the SchedulerEvent is currently a super set of theses,
-      // we register interest for it..
+      // additions, updates and removals. Since the SchedulerEvent is currently
+      // a superset of these, we register interest for it.

       addService(oppContainerAllocEventDispatcher);
       rmDispatcher.register(SchedulerEventType.class,
           oppContainerAllocEventDispatcher);

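The ResourceManager hunk above adds a fail-fast check for dependent flags: distributed scheduling requires opportunistic container allocation, and an invalid combination now aborts startup instead of misbehaving later. A minimal sketch of that validation style, with plain booleans standing in for the YarnConfiguration lookups:

public final class ConfigValidation {
  private ConfigValidation() {}

  // If feature B depends on feature A, reject the configuration up front.
  static void checkDependentFlags(boolean distSchedulingEnabled,
      boolean opportunisticAllocationEnabled) {
    if (distSchedulingEnabled && !opportunisticAllocationEnabled) {
      throw new IllegalArgumentException(
          "Invalid parameters: opportunistic container allocation has to "
              + "be enabled when distributed scheduling is enabled.");
    }
  }
}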
http://git-wip-us.apache.org/repos/asf/hadoop/blob/10be4598/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index adc3a97..9675fac 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.LogAggregationContext;
 import org.apache.hadoop.yarn.api.records.NMToken;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -68,6 +69,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerUpda
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanContainerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.SchedulableEntity;
+
+import org.apache.hadoop.yarn.server.scheduler.OpportunisticContainerContext;
 import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -114,6 +117,9 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
   private boolean isAttemptRecovering;
 
   protected ResourceUsage attemptResourceUsage = new ResourceUsage();
+  /** Resource usage of opportunistic containers. */
+  protected ResourceUsage attemptOpportunisticResourceUsage =
+      new ResourceUsage();
   /** Scheduled by a remote scheduler. */
   protected ResourceUsage attemptResourceUsageAllocatedRemotely =
       new ResourceUsage();
@@ -132,6 +138,8 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
   // by NM should not be recovered.
   private Set<ContainerId> pendingRelease = null;
 
+  private OpportunisticContainerContext oppContainerContext;
+
   /**
    * Count how many times the application has been given an opportunity to
    * schedule a task at each priority. Each time the scheduler asks the
@@ -199,7 +207,17 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
     readLock = lock.readLock();
     writeLock = lock.writeLock();
   }
-  
+
+  public void setOpportunisticContainerContext(
+      OpportunisticContainerContext oppContext) {
+    this.oppContainerContext = oppContext;
+  }
+
+  public OpportunisticContainerContext
+      getOpportunisticContainerContext() {
+    return this.oppContainerContext;
+  }
+
   /**
    * Get the live containers of the application.
    * @return live containers of the application
@@ -331,6 +349,10 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
     try {
       writeLock.lock();
       liveContainers.put(id, rmContainer);
+      if (rmContainer.getExecutionType() == ExecutionType.OPPORTUNISTIC) {
+        this.attemptOpportunisticResourceUsage.incUsed(
+            rmContainer.getAllocatedResource());
+      }
       if (rmContainer.isRemotelyAllocated()) {
         this.attemptResourceUsageAllocatedRemotely.incUsed(
             rmContainer.getAllocatedResource());
@@ -344,9 +366,15 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
     try {
       writeLock.lock();
       RMContainer rmContainer = liveContainers.remove(containerId);
-      if (rmContainer != null && rmContainer.isRemotelyAllocated()) {
-        this.attemptResourceUsageAllocatedRemotely.decUsed(
-            rmContainer.getAllocatedResource());
+      if (rmContainer != null) {
+        if (rmContainer.getExecutionType() == ExecutionType.OPPORTUNISTIC) {
+          this.attemptOpportunisticResourceUsage
+              .decUsed(rmContainer.getAllocatedResource());
+        }
+        if (rmContainer.isRemotelyAllocated()) {
+          this.attemptResourceUsageAllocatedRemotely
+              .decUsed(rmContainer.getAllocatedResource());
+        }
       }
     } finally {
       writeLock.unlock();
@@ -612,12 +640,7 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
               container.getPriority(), rmContainer.getCreationTime(),
               this.logAggregationContext, rmContainer.getNodeLabelExpression(),
               containerType));
-      NMToken nmToken =
-          rmContext.getNMTokenSecretManager().createAndGetNMToken(getUser(),
-              getApplicationAttemptId(), container);
-      if (nmToken != null) {
-        updatedNMTokens.add(nmToken);
-      }
+      updateNMToken(container);
     } catch (IllegalArgumentException e) {
       // DNS might be down, skip returning this container.
       LOG.error("Error trying to assign container token and NM token to"
@@ -635,6 +658,21 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
     return container;
   }
 
+  public void updateNMTokens(Collection<Container> containers) {
+    for (Container container : containers) {
+      updateNMToken(container);
+    }
+  }
+
+  private void updateNMToken(Container container) {
+    NMToken nmToken =
+        rmContext.getNMTokenSecretManager().createAndGetNMToken(getUser(),
+            getApplicationAttemptId(), container);
+    if (nmToken != null) {
+      updatedNMTokens.add(nmToken);
+    }
+  }
+
   // Create container token and update NMToken altogether, if either of them fails for
   // some reason like DNS unavailable, do not return this container and keep it
   // in the newlyAllocatedContainers waiting to be refetched.

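The SchedulerApplicationAttempt hunk above keeps a separate usage counter for opportunistic containers, incremented in addRMContainer and decremented in removeRMContainer under the same write lock so the counter cannot drift. A simplified sketch of that paired-accounting pattern follows; the types are toy stand-ins, not the RM's ResourceUsage.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class UsageAccounting {
  // Simplified stand-in for a live container: its memory and execution type.
  private static final class Alloc {
    final long mem;
    final boolean opportunistic;
    Alloc(long mem, boolean opportunistic) {
      this.mem = mem;
      this.opportunistic = opportunistic;
    }
  }

  private final Map<String, Alloc> live = new HashMap<>();
  private long opportunisticMemUsed;
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  void addContainer(String id, long mem, boolean opportunistic) {
    lock.writeLock().lock();
    try {
      live.put(id, new Alloc(mem, opportunistic));
      if (opportunistic) {
        opportunisticMemUsed += mem; // increment on add...
      }
    } finally {
      lock.writeLock().unlock();
    }
  }

  void removeContainer(String id) {
    lock.writeLock().lock();
    try {
      Alloc a = live.remove(id);
      if (a != null && a.opportunistic) {
        opportunisticMemUsed -= a.mem; // ...matched by a decrement on remove
      }
    } finally {
      lock.writeLock().unlock();
    }
  }

  long getOpportunisticMemUsed() {
    lock.readLock().lock();
    try {
      return opportunisticMemUsed;
    } finally {
      lock.readLock().unlock();
    }
  }
}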
http://git-wip-us.apache.org/repos/asf/hadoop/blob/10be4598/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/NodeQueueLoadMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/NodeQueueLoadMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/NodeQueueLoadMonitor.java
index 017a256..b80a17c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/NodeQueueLoadMonitor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/NodeQueueLoadMonitor.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
@@ -37,6 +38,7 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 /**
  * The NodeQueueLoadMonitor keeps track of load metrics (such as queue length
@@ -103,16 +105,23 @@ public class NodeQueueLoadMonitor implements ClusterMonitor {
       new ConcurrentHashMap<>();
   private final LoadComparator comparator;
   private QueueLimitCalculator thresholdCalculator;
+  private ReentrantReadWriteLock sortedNodesLock = new ReentrantReadWriteLock();
+  private ReentrantReadWriteLock clusterNodesLock =
+      new ReentrantReadWriteLock();
 
   Runnable computeTask = new Runnable() {
     @Override
     public void run() {
-      synchronized (sortedNodes) {
+      ReentrantReadWriteLock.WriteLock writeLock = sortedNodesLock.writeLock();
+      writeLock.lock();
+      try {
         sortedNodes.clear();
         sortedNodes.addAll(sortNodes());
         if (thresholdCalculator != null) {
           thresholdCalculator.update();
         }
+      } finally {
+        writeLock.unlock();
       }
     }
   };
@@ -166,9 +175,16 @@ public class NodeQueueLoadMonitor implements ClusterMonitor {
   @Override
   public void removeNode(RMNode removedRMNode) {
     LOG.debug("Node delete event for: " + removedRMNode.getNode().getName());
-    synchronized (this.clusterNodes) {
-      if (this.clusterNodes.containsKey(removedRMNode.getNodeID())) {
-        this.clusterNodes.remove(removedRMNode.getNodeID());
+    ReentrantReadWriteLock.WriteLock writeLock = clusterNodesLock.writeLock();
+    writeLock.lock();
+    ClusterNode node;
+    try {
+      node = this.clusterNodes.remove(removedRMNode.getNodeID());
+    } finally {
+      writeLock.unlock();
+    }
+    if (LOG.isDebugEnabled()) {
+      if (node != null) {
         LOG.debug("Delete ClusterNode: " + removedRMNode.getNodeID());
       } else {
         LOG.debug("Node not in list!");
@@ -186,7 +202,9 @@ public class NodeQueueLoadMonitor implements ClusterMonitor {
     int waitQueueLength = queuedContainersStatus.getWaitQueueLength();
     // Add nodes to clusterNodes. If estimatedQueueTime is -1, ignore node
     // UNLESS comparator is based on queue length.
-    synchronized (this.clusterNodes) {
+    ReentrantReadWriteLock.WriteLock writeLock = clusterNodesLock.writeLock();
+    writeLock.lock();
+    try {
       ClusterNode currentNode = this.clusterNodes.get(rmNode.getNodeID());
       if (currentNode == null) {
         if (estimatedQueueWaitTime != -1
@@ -222,6 +240,8 @@ public class NodeQueueLoadMonitor implements ClusterMonitor {
               "wait queue length [" + currentNode.queueLength + "]");
         }
       }
+    } finally {
+      writeLock.unlock();
     }
   }
 
@@ -245,15 +265,22 @@ public class NodeQueueLoadMonitor implements ClusterMonitor {
    * @return ordered list of nodes
    */
   public List<NodeId> selectLeastLoadedNodes(int k) {
-    synchronized (this.sortedNodes) {
-      return ((k < this.sortedNodes.size()) && (k >= 0)) ?
+    ReentrantReadWriteLock.ReadLock readLock = sortedNodesLock.readLock();
+    readLock.lock();
+    try {
+      List<NodeId> retVal = ((k < this.sortedNodes.size()) && (k >= 0)) ?
           new ArrayList<>(this.sortedNodes).subList(0, k) :
           new ArrayList<>(this.sortedNodes);
+      return Collections.unmodifiableList(retVal);
+    } finally {
+      readLock.unlock();
     }
   }
 
   private List<NodeId> sortNodes() {
-    synchronized (this.clusterNodes) {
+    ReentrantReadWriteLock.ReadLock readLock = clusterNodesLock.readLock();
+    readLock.lock();
+    try {
       ArrayList aList = new ArrayList<>(this.clusterNodes.values());
       List<NodeId> retList = new ArrayList<>();
       Object[] nodes = aList.toArray();
@@ -267,6 +294,8 @@ public class NodeQueueLoadMonitor implements ClusterMonitor {
         retList.add(((ClusterNode)nodes[j]).nodeId);
       }
       return retList;
+    } finally {
+      readLock.unlock();
     }
   }
 

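The NodeQueueLoadMonitor hunk above swaps coarse synchronized blocks for ReentrantReadWriteLock: the periodic sort task takes the write lock while callers of selectLeastLoadedNodes share the read lock, and callers now receive an unmodifiable snapshot. A self-contained sketch of that pattern, under those assumptions:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class SortedSnapshot<T extends Comparable<T>> {
  private final List<T> sorted = new ArrayList<>();
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  // Writer path: exclusive while rebuilding (the periodic computeTask role).
  public void rebuild(List<T> items) {
    lock.writeLock().lock();
    try {
      sorted.clear();
      sorted.addAll(items);
      Collections.sort(sorted);
    } finally {
      lock.writeLock().unlock();
    }
  }

  // Reader path: shared lock, plus an unmodifiable copy so callers cannot
  // mutate shared state after the lock is released.
  public List<T> topK(int k) {
    lock.readLock().lock();
    try {
      List<T> copy = new ArrayList<>(sorted);
      List<T> head = (k >= 0 && k < copy.size()) ? copy.subList(0, k) : copy;
      return Collections.unmodifiableList(head);
    } finally {
      lock.readLock().unlock();
    }
  }
}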
http://git-wip-us.apache.org/repos/asf/hadoop/blob/10be4598/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java
index 07c6b54..207f5ba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.DistributedSche
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterDistributedSchedulingAMResponsePBImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
 
+import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -97,6 +98,11 @@ public class TestOpportunisticContainerAllocatorAMService {
       public Configuration getYarnConfiguration() {
         return new YarnConfiguration();
       }
+
+      @Override
+      public RMContainerTokenSecretManager getContainerTokenSecretManager() {
+        return new RMContainerTokenSecretManager(conf);
+      }
     };
     Container c = factory.newRecordInstance(Container.class);
     c.setExecutionType(ExecutionType.OPPORTUNISTIC);
@@ -117,8 +123,8 @@ public class TestOpportunisticContainerAllocatorAMService {
     Server server = service.getServer(rpc, conf, addr, null);
     server.start();
 
-    // Verify that the DistrubutedSchedulingService can handle vanilla
-    // ApplicationMasterProtocol clients
+    // Verify that the OpportunisticContainerAllocatorAMService can handle
+    // vanilla ApplicationMasterProtocol clients
     RPC.setProtocolEngine(conf, ApplicationMasterProtocolPB.class,
         ProtobufRpcEngine.class);
     ApplicationMasterProtocolPB ampProxy =




[53/57] [abbrv] hadoop git commit: HDFS-10619. Cache path in InodesInPath. Contributed by Daryn Sharp.

Posted by in...@apache.org.
HDFS-10619. Cache path in InodesInPath. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90020624
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90020624
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90020624

Branch: refs/heads/HDFS-10467
Commit: 90020624b05230ad4a7fbd666d0177ecb107a4d6
Parents: 0da54e8
Author: Kihwal Lee <ki...@apache.org>
Authored: Mon Oct 3 09:13:04 2016 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Mon Oct 3 09:13:04 2016 -0500

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/server/namenode/INodesInPath.java    | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/90020624/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index 04d3bda..f05fa37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -278,6 +278,8 @@ public class INodesInPath {
   }
 
   private final byte[][] path;
+  private final String pathname;
+
   /**
    * Array with the specified number of INodes resolved for a given path.
    */
@@ -306,6 +308,7 @@ public class INodesInPath {
     Preconditions.checkArgument(inodes != null && path != null);
     this.inodes = inodes;
     this.path = path;
+    this.pathname = DFSUtil.byteArray2PathString(path);
     this.isRaw = isRaw;
     this.isSnapshot = isSnapshot;
     this.snapshotId = snapshotId;
@@ -366,7 +369,7 @@ public class INodesInPath {
 
   /** @return the full path in string form */
   public String getPath() {
-    return DFSUtil.byteArray2PathString(path);
+    return pathname;
   }
 
   public String getParentPath() {

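HDFS-10619 above is a memoization fix: the byte[][]-to-String conversion moves from every getPath() call into the constructor, which is safe because INodesInPath is immutable. A minimal sketch of the same idea, with a String[] stand-in for the byte[][] components:

public final class CachedPath {
  private final String pathname; // computed once, reused by every getPath()

  public CachedPath(String[] components) {
    // Safe to precompute: the class is final and the field is final, so the
    // derived value can never go stale.
    this.pathname = "/" + String.join("/", components);
  }

  public String getPath() {
    return pathname; // O(1) instead of rebuilding the string per call
  }
}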



[23/57] [abbrv] hadoop git commit: HDFS-10779. Rename does not need to re-solve destination. Contributed by Daryn Sharp.

Posted by in...@apache.org.
HDFS-10779. Rename does not need to re-solve destination. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f34402a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f34402a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f34402a

Branch: refs/heads/HDFS-10467
Commit: 5f34402adae191232fe78e62990396ca07f314bb
Parents: 0d6778d
Author: Kihwal Lee <ki...@apache.org>
Authored: Wed Sep 28 17:57:23 2016 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Wed Sep 28 17:57:23 2016 -0500

----------------------------------------------------------------------
 .../hdfs/server/namenode/FSDirRenameOp.java     | 184 ++++++++++---------
 .../hdfs/server/namenode/FSDirectory.java       |  14 --
 .../hdfs/server/namenode/FSNamesystem.java      |  11 +-
 3 files changed, 103 insertions(+), 106 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f34402a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index f98f8b1..0fdc545 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -38,23 +38,18 @@ import org.apache.hadoop.util.Time;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.util.AbstractMap;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
-import java.util.Map;
-
 import static org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import static org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
 
 class FSDirRenameOp {
   @Deprecated
-  static RenameOldResult renameToInt(
-      FSDirectory fsd, final String srcArg, final String dstArg,
+  static RenameResult renameToInt(
+      FSDirectory fsd, final String src, final String dst,
       boolean logRetryCache)
       throws IOException {
-    String src = srcArg;
-    String dst = dstArg;
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: " + src +
           " to " + dst);
@@ -64,18 +59,12 @@ class FSDirRenameOp {
     }
     FSPermissionChecker pc = fsd.getPermissionChecker();
 
-    HdfsFileStatus resultingStat = null;
     // Rename does not operate on link targets
     // Do not resolveLink when checking permissions of src and dst
     INodesInPath srcIIP = fsd.resolvePathForWrite(pc, src, false);
     INodesInPath dstIIP = fsd.resolvePathForWrite(pc, dst, false);
-    @SuppressWarnings("deprecation")
-    final boolean status = renameTo(fsd, pc, srcIIP, dstIIP, logRetryCache);
-    if (status) {
-      dstIIP = fsd.getINodesInPath(dstIIP.getPath(), false);
-      resultingStat = fsd.getAuditFileInfo(dstIIP);
-    }
-    return new RenameOldResult(status, resultingStat);
+    dstIIP = dstForRenameTo(srcIIP, dstIIP);
+    return renameTo(fsd, pc, srcIIP, dstIIP, logRetryCache);
   }
 
   /**
@@ -124,15 +113,30 @@ class FSDirRenameOp {
    * <br>
    */
   @Deprecated
-  @SuppressWarnings("deprecation")
-  static boolean renameForEditLog(FSDirectory fsd, String src, String dst,
+  static INodesInPath renameForEditLog(FSDirectory fsd, String src, String dst,
       long timestamp) throws IOException {
-    if (fsd.isDir(dst)) {
-      dst += Path.SEPARATOR + new Path(src).getName();
-    }
     final INodesInPath srcIIP = fsd.getINodesInPath4Write(src, false);
-    final INodesInPath dstIIP = fsd.getINodesInPath4Write(dst, false);
-    return unprotectedRenameTo(fsd, src, dst, srcIIP, dstIIP, timestamp);
+    INodesInPath dstIIP = fsd.getINodesInPath4Write(dst, false);
+    // This is wrong but accidentally works: the edit contains the full path,
+    // so the following does nothing. Keep it for backward compatibility with
+    // older edits that may not have logged the full path.
+    dstIIP = dstForRenameTo(srcIIP, dstIIP);
+    return unprotectedRenameTo(fsd, srcIIP, dstIIP, timestamp);
+  }
+
+  // If the destination is a directory, append the source child's name;
+  // otherwise return the iip as-is.
+  private static INodesInPath dstForRenameTo(
+      INodesInPath srcIIP, INodesInPath dstIIP) throws IOException {
+    INode dstINode = dstIIP.getLastINode();
+    if (dstINode != null && dstINode.isDirectory()) {
+      byte[] childName = srcIIP.getLastLocalName();
+      // new dest might exist so look it up.
+      INode childINode = dstINode.asDirectory().getChild(
+          childName, dstIIP.getPathSnapshotId());
+      dstIIP = INodesInPath.append(dstIIP, childINode, childName);
+    }
+    return dstIIP;
   }
 
   /**
@@ -141,12 +145,12 @@ class FSDirRenameOp {
    * @param fsd FSDirectory
    * @param src source path
    * @param dst destination path
-   * @return true if rename succeeds; false otherwise
+   * @return the INodesInPath if rename succeeds; null otherwise
    * @deprecated See {@link #renameToInt(FSDirectory, String, String,
    * boolean, Options.Rename...)}
    */
   @Deprecated
-  static boolean unprotectedRenameTo(FSDirectory fsd, String src, String dst,
+  static INodesInPath unprotectedRenameTo(FSDirectory fsd,
       final INodesInPath srcIIP, final INodesInPath dstIIP, long timestamp)
       throws IOException {
     assert fsd.hasWriteLock();
@@ -156,32 +160,34 @@ class FSDirRenameOp {
     } catch (SnapshotException e) {
       throw e;
     } catch (IOException ignored) {
-      return false;
+      return null;
     }
 
+    String src = srcIIP.getPath();
+    String dst = dstIIP.getPath();
     // validate the destination
     if (dst.equals(src)) {
-      return true;
+      return dstIIP;
     }
 
     try {
       validateDestination(src, dst, srcInode);
     } catch (IOException ignored) {
-      return false;
+      return null;
     }
 
     if (dstIIP.getLastINode() != null) {
       NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " +
           "failed to rename " + src + " to " + dst + " because destination " +
           "exists");
-      return false;
+      return null;
     }
     INode dstParent = dstIIP.getINode(-2);
     if (dstParent == null) {
       NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " +
           "failed to rename " + src + " to " + dst + " because destination's " +
           "parent does not exist");
-      return false;
+      return null;
     }
 
     fsd.ezManager.checkMoveValidity(srcIIP, dstIIP, src);
@@ -189,17 +195,19 @@ class FSDirRenameOp {
     verifyFsLimitsForRename(fsd, srcIIP, dstIIP);
     verifyQuotaForRename(fsd, srcIIP, dstIIP);
 
-    RenameOperation tx = new RenameOperation(fsd, src, dst, srcIIP, dstIIP);
+    RenameOperation tx = new RenameOperation(fsd, srcIIP, dstIIP);
 
     boolean added = false;
 
+    INodesInPath renamedIIP = null;
     try {
       // remove src
       if (!tx.removeSrc4OldRename()) {
-        return false;
+        return null;
       }
 
-      added = tx.addSourceToDestination();
+      renamedIIP = tx.addSourceToDestination();
+      added = (renamedIIP != null);
       if (added) {
         if (NameNode.stateChangeLog.isDebugEnabled()) {
           NameNode.stateChangeLog.debug("DIR* FSDirectory" +
@@ -209,7 +217,7 @@ class FSDirRenameOp {
         tx.updateMtimeAndLease(timestamp);
         tx.updateQuotasInSourceTree(fsd.getBlockStoragePolicySuite());
 
-        return true;
+        return renamedIIP;
       }
     } finally {
       if (!added) {
@@ -218,13 +226,13 @@ class FSDirRenameOp {
     }
     NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " +
         "failed to rename " + src + " to " + dst);
-    return false;
+    return null;
   }
 
   /**
    * The new rename which has the POSIX semantic.
    */
-  static Map.Entry<BlocksMapUpdateInfo, HdfsFileStatus> renameToInt(
+  static RenameResult renameToInt(
       FSDirectory fsd, final String srcArg, final String dstArg,
       boolean logRetryCache, Options.Rename... options)
       throws IOException {
@@ -241,25 +249,19 @@ class FSDirRenameOp {
 
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
     // returns resolved path
-    dst = renameTo(fsd, pc, src, dst, collectedBlocks, logRetryCache, options);
-    INodesInPath dstIIP = fsd.getINodesInPath(dst, false);
-    HdfsFileStatus resultingStat = fsd.getAuditFileInfo(dstIIP);
-
-    return new AbstractMap.SimpleImmutableEntry<>(
-        collectedBlocks, resultingStat);
+    return renameTo(fsd, pc, src, dst, collectedBlocks, logRetryCache, options);
   }
 
   /**
    * @see {@link #unprotectedRenameTo(FSDirectory, String, String, INodesInPath,
    * INodesInPath, long, BlocksMapUpdateInfo, Options.Rename...)}
    */
-  static String renameTo(FSDirectory fsd, FSPermissionChecker pc, String src,
-      String dst, BlocksMapUpdateInfo collectedBlocks, boolean logRetryCache,
-      Options.Rename... options) throws IOException {
+  static RenameResult renameTo(FSDirectory fsd, FSPermissionChecker pc,
+      String src, String dst, BlocksMapUpdateInfo collectedBlocks,
+      boolean logRetryCache, Options.Rename... options)
+          throws IOException {
     final INodesInPath srcIIP = fsd.resolvePathForWrite(pc, src, false);
     final INodesInPath dstIIP = fsd.resolvePathForWrite(pc, dst, false);
-    src = srcIIP.getPath();
-    dst = dstIIP.getPath();
     if (fsd.isPermissionEnabled()) {
       boolean renameToTrash = false;
       if (null != options &&
@@ -295,16 +297,19 @@ class FSDirRenameOp {
     }
     final long mtime = Time.now();
     fsd.writeLock();
+    final RenameResult result;
     try {
-      if (unprotectedRenameTo(fsd, src, dst, srcIIP, dstIIP, mtime,
-          collectedBlocks, options)) {
+      result = unprotectedRenameTo(fsd, srcIIP, dstIIP, mtime,
+          collectedBlocks, options);
+      if (result.filesDeleted) {
         FSDirDeleteOp.incrDeletedFileCount(1);
       }
     } finally {
       fsd.writeUnlock();
     }
-    fsd.getEditLog().logRename(src, dst, mtime, logRetryCache, options);
-    return dst;
+    fsd.getEditLog().logRename(
+        srcIIP.getPath(), dstIIP.getPath(), mtime, logRetryCache, options);
+    return result;
   }
 
   /**
@@ -327,7 +332,7 @@ class FSDirRenameOp {
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
     final INodesInPath srcIIP = fsd.getINodesInPath4Write(src, false);
     final INodesInPath dstIIP = fsd.getINodesInPath4Write(dst, false);
-    unprotectedRenameTo(fsd, src, dst, srcIIP, dstIIP, timestamp,
+    unprotectedRenameTo(fsd, srcIIP, dstIIP, timestamp,
         collectedBlocks, options);
     if (!collectedBlocks.getToDeleteList().isEmpty()) {
       fsd.getFSNamesystem().getBlockManager()
@@ -348,7 +353,7 @@ class FSDirRenameOp {
    * @param options         Rename options
    * @return whether a file/directory gets overwritten in the dst path
    */
-  static boolean unprotectedRenameTo(FSDirectory fsd, String src, String dst,
+  static RenameResult unprotectedRenameTo(FSDirectory fsd,
       final INodesInPath srcIIP, final INodesInPath dstIIP, long timestamp,
       BlocksMapUpdateInfo collectedBlocks, Options.Rename... options)
       throws IOException {
@@ -356,6 +361,8 @@ class FSDirRenameOp {
     boolean overwrite = options != null
         && Arrays.asList(options).contains(Options.Rename.OVERWRITE);
 
+    final String src = srcIIP.getPath();
+    final String dst = dstIIP.getPath();
     final String error;
     final INode srcInode = srcIIP.getLastINode();
     validateRenameSource(srcIIP);
@@ -401,7 +408,7 @@ class FSDirRenameOp {
     verifyFsLimitsForRename(fsd, srcIIP, dstIIP);
     verifyQuotaForRename(fsd, srcIIP, dstIIP);
 
-    RenameOperation tx = new RenameOperation(fsd, src, dst, srcIIP, dstIIP);
+    RenameOperation tx = new RenameOperation(fsd, srcIIP, dstIIP);
 
     boolean undoRemoveSrc = true;
     tx.removeSrc();
@@ -417,7 +424,8 @@ class FSDirRenameOp {
       }
 
       // add src as dst to complete rename
-      if (tx.addSourceToDestination()) {
+      INodesInPath renamedIIP = tx.addSourceToDestination();
+      if (renamedIIP != null) {
         undoRemoveSrc = false;
         if (NameNode.stateChangeLog.isDebugEnabled()) {
           NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedRenameTo: "
@@ -442,7 +450,8 @@ class FSDirRenameOp {
         }
 
         tx.updateQuotasInSourceTree(bsps);
-        return filesDeleted;
+        return createRenameResult(
+            fsd, renamedIIP, filesDeleted, collectedBlocks);
       }
     } finally {
       if (undoRemoveSrc) {
@@ -462,17 +471,9 @@ class FSDirRenameOp {
    * boolean, Options.Rename...)}
    */
   @Deprecated
-  @SuppressWarnings("deprecation")
-  private static boolean renameTo(FSDirectory fsd, FSPermissionChecker pc,
+  private static RenameResult renameTo(FSDirectory fsd, FSPermissionChecker pc,
       INodesInPath srcIIP, INodesInPath dstIIP, boolean logRetryCache)
           throws IOException {
-    String src = srcIIP.getPath();
-    String dst = dstIIP.getPath();
-    // Note: We should not be doing this.  This is move() not renameTo().
-    if (fsd.isDir(dst)) {
-      dstIIP = INodesInPath.append(dstIIP, null, srcIIP.getLastLocalName());
-    }
-    final String actualDst = dstIIP.getPath();
     if (fsd.isPermissionEnabled()) {
       // Check write access to parent of src
       fsd.checkPermission(pc, srcIIP, false, null, FsAction.WRITE, null, null,
@@ -483,22 +484,24 @@ class FSDirRenameOp {
     }
 
     if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* FSDirectory.renameTo: " + src + " to "
-          + dst);
+      NameNode.stateChangeLog.debug("DIR* FSDirectory.renameTo: " +
+          srcIIP.getPath() + " to " + dstIIP.getPath());
     }
     final long mtime = Time.now();
-    boolean stat = false;
+    INodesInPath renameIIP;
     fsd.writeLock();
     try {
-      stat = unprotectedRenameTo(fsd, src, actualDst, srcIIP, dstIIP, mtime);
+      renameIIP = unprotectedRenameTo(fsd, srcIIP, dstIIP, mtime);
     } finally {
       fsd.writeUnlock();
     }
-    if (stat) {
-      fsd.getEditLog().logRename(src, actualDst, mtime, logRetryCache);
-      return true;
+    if (renameIIP != null) {
+      fsd.getEditLog().logRename(
+          srcIIP.getPath(), dstIIP.getPath(), mtime, logRetryCache);
     }
-    return false;
+    // this rename never overwrites the dest so files deleted and collected
+    // are irrelevant.
+    return createRenameResult(fsd, renameIIP, false, null);
   }
 
   private static void validateDestination(
@@ -584,8 +587,6 @@ class FSDirRenameOp {
     private final INodesInPath srcParentIIP;
     private INodesInPath dstIIP;
     private final INodesInPath dstParentIIP;
-    private final String src;
-    private final String dst;
     private final INodeReference.WithCount withCount;
     private final int srcRefDstSnapshot;
     private final INodeDirectory srcParent;
@@ -596,12 +597,9 @@ class FSDirRenameOp {
     private INode srcChild;
     private INode oldDstChild;
 
-    RenameOperation(FSDirectory fsd, String src, String dst,
-                    INodesInPath srcIIP, INodesInPath dstIIP)
+    RenameOperation(FSDirectory fsd, INodesInPath srcIIP, INodesInPath dstIIP)
         throws QuotaExceededException {
       this.fsd = fsd;
-      this.src = src;
-      this.dst = dst;
       this.srcIIP = srcIIP;
       this.dstIIP = dstIIP;
       this.srcParentIIP = srcIIP.getParentINodesInPath();
@@ -647,8 +645,8 @@ class FSDirRenameOp {
     long removeSrc() throws IOException {
       long removedNum = fsd.removeLastINode(srcIIP);
       if (removedNum == -1) {
-        String error = "Failed to rename " + src + " to " + dst +
-            " because the source can not be removed";
+        String error = "Failed to rename " + srcIIP.getPath() + " to " +
+            dstIIP.getPath() + " because the source can not be removed";
         NameNode.stateChangeLog.warn("DIR* FSDirRenameOp.unprotectedRenameTo:" +
             error);
         throw new IOException(error);
@@ -664,8 +662,8 @@ class FSDirRenameOp {
       final long removedSrc = fsd.removeLastINode(srcIIP);
       if (removedSrc == -1) {
         NameNode.stateChangeLog.warn("DIR* FSDirRenameOp.unprotectedRenameTo: "
-            + "failed to rename " + src + " to " + dst + " because the source" +
-            " can not be removed");
+            + "failed to rename " + srcIIP.getPath() + " to "
+            + dstIIP.getPath() + " because the source can not be removed");
         return false;
       } else {
         // update the quota count if necessary
@@ -686,7 +684,7 @@ class FSDirRenameOp {
       return removedNum;
     }
 
-    boolean addSourceToDestination() {
+    INodesInPath addSourceToDestination() {
       final INode dstParent = dstParentIIP.getLastINode();
       final byte[] dstChildName = dstIIP.getLastLocalName();
       final INode toDst;
@@ -698,7 +696,7 @@ class FSDirRenameOp {
         toDst = new INodeReference.DstReference(dstParent.asDirectory(),
             withCount, dstIIP.getLatestSnapshotId());
       }
-      return fsd.addLastINodeNoQuotaCheck(dstParentIIP, toDst) != null;
+      return fsd.addLastINodeNoQuotaCheck(dstParentIIP, toDst);
     }
 
     void updateMtimeAndLease(long timestamp) throws QuotaExceededException {
@@ -785,13 +783,27 @@ class FSDirRenameOp {
     }
   }
 
-  static class RenameOldResult {
+  private static RenameResult createRenameResult(FSDirectory fsd,
+      INodesInPath dst, boolean filesDeleted,
+      BlocksMapUpdateInfo collectedBlocks) throws IOException {
+    boolean success = (dst != null);
+    HdfsFileStatus auditStat = success ? fsd.getAuditFileInfo(dst) : null;
+    return new RenameResult(
+        success, auditStat, filesDeleted, collectedBlocks);
+  }
+
+  static class RenameResult {
     final boolean success;
     final HdfsFileStatus auditStat;
+    final boolean filesDeleted;
+    final BlocksMapUpdateInfo collectedBlocks;
 
-    RenameOldResult(boolean success, HdfsFileStatus auditStat) {
+    RenameResult(boolean success, HdfsFileStatus auditStat,
+        boolean filesDeleted, BlocksMapUpdateInfo collectedBlocks) {
       this.success = success;
       this.auditStat = auditStat;
+      this.filesDeleted = filesDeleted;
+      this.collectedBlocks = collectedBlocks;
     }
   }
 }
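
For readers skimming the patch: the net effect above is to replace a boolean
return value (plus out-parameters) with a single immutable result object. A
minimal sketch of that pattern, reusing the field names from the diff but
with an otherwise hypothetical caller:

    // Sketch only -- not part of the patch. RenameResult mirrors the class
    // added above (auditStat/collectedBlocks omitted for brevity); Caller
    // is invented for illustration.
    final class RenameResult {
      final boolean success;
      final boolean filesDeleted;

      RenameResult(boolean success, boolean filesDeleted) {
        this.success = success;
        this.filesDeleted = filesDeleted;
      }
    }

    class Caller {
      void afterRename(RenameResult res) {
        if (res.success && res.filesDeleted) {
          // e.g. trigger block cleanup, as FSNamesystem does with
          // res.collectedBlocks in the hunk further below
        }
      }
    }

This keeps the lock-holding rename code free of logging and cleanup
concerns: everything the caller needs is handed back in one object.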

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f34402a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 2a3cabb..2c7a268 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -648,20 +648,6 @@ public class FSDirectory implements Closeable {
   }
 
   /**
-   * Check whether the path specifies a directory
-   */
-  boolean isDir(String src) throws UnresolvedLinkException {
-    src = normalizePath(src);
-    readLock();
-    try {
-      INode node = getINode(src, false);
-      return node != null && node.isDirectory();
-    } finally {
-      readUnlock();
-    }
-  }
-
-  /**
    * Tell the block manager to update the replication factors when delete
    * happens. Deleting a file or a snapshot might decrease the replication
    * factor of the blocks as the blocks are always replicated to the highest

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f34402a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 7f8981f..4700263 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2845,7 +2845,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   @Deprecated
   boolean renameTo(String src, String dst, boolean logRetryCache)
       throws IOException {
-    FSDirRenameOp.RenameOldResult ret = null;
+    FSDirRenameOp.RenameResult ret = null;
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -2857,7 +2857,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     } finally {
       writeUnlock();
     }
-    boolean success = ret != null && ret.success;
+    boolean success = ret.success;
     if (success) {
       getEditLog().logSync();
       logAuditEvent(success, "rename", src, dst, ret.auditStat);
@@ -2868,7 +2868,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   void renameTo(final String src, final String dst,
                 boolean logRetryCache, Options.Rename... options)
       throws IOException {
-    Map.Entry<BlocksMapUpdateInfo, HdfsFileStatus> res = null;
+    FSDirRenameOp.RenameResult res = null;
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -2884,15 +2884,14 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
     getEditLog().logSync();
 
-    BlocksMapUpdateInfo collectedBlocks = res.getKey();
-    HdfsFileStatus auditStat = res.getValue();
+    BlocksMapUpdateInfo collectedBlocks = res.collectedBlocks;
     if (!collectedBlocks.getToDeleteList().isEmpty()) {
       removeBlocks(collectedBlocks);
       collectedBlocks.clear();
     }
 
     logAuditEvent(true, "rename (options=" + Arrays.toString(options) +
-        ")", src, dst, auditStat);
+        ")", src, dst, res.auditStat);
   }
 
   /**
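
Two details of the reworked renameTo(..., Options.Rename...) above are worth
calling out: the result object now carries collectedBlocks, and the actual
block removal happens only after writeUnlock() and logSync(). A rough sketch
of that collect-under-lock / clean-up-after-unlock ordering (all names here
are illustrative, not HDFS API):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class DeferredCleanup {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private final List<String> collected = new ArrayList<>();

      void renameThenCleanUp() {
        lock.writeLock().lock();
        try {
          collected.add("blk_1"); // metadata-only work under the lock
        } finally {
          lock.writeLock().unlock();
        }
        // Expensive deletion runs outside the lock, mirroring
        // removeBlocks(collectedBlocks) in the hunk above.
        if (!collected.isEmpty()) {
          System.out.println("deleting " + collected.size() + " block(s)");
          collected.clear();
        }
      }
    }

Keeping deletion outside the namesystem write lock shortens the critical
section, which is the point of returning collectedBlocks to the caller
instead of deleting inline.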





[21/57] [abbrv] hadoop git commit: YARN-5400. Light cleanup in ZKRMStateStore (templedf via rkanter)

Posted by in...@apache.org.
YARN-5400. Light cleanup in ZKRMStateStore (templedf via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bcb2528a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bcb2528a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bcb2528a

Branch: refs/heads/HDFS-10467
Commit: bcb2528a51c33e4caff8d744c5e14c1accfc47d0
Parents: c3b235e
Author: Robert Kanter <rk...@apache.org>
Authored: Wed Sep 28 14:56:41 2016 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Wed Sep 28 14:56:41 2016 -0700

----------------------------------------------------------------------
 .../yarn/server/resourcemanager/RMZKUtils.java  |  19 +-
 .../server/resourcemanager/ResourceManager.java |   2 +-
 .../recovery/ZKRMStateStore.java                | 260 +++++++++++--------
 3 files changed, 161 insertions(+), 120 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcb2528a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMZKUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMZKUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMZKUtils.java
index d78068f..4b8561d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMZKUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMZKUtils.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager;
 
+import java.io.IOException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -37,9 +38,12 @@ public class RMZKUtils {
   private static final Log LOG = LogFactory.getLog(RMZKUtils.class);
 
   /**
-   * Utility method to fetch the ZK ACLs from the configuration
+   * Utility method to fetch the ZK ACLs from the configuration.
+   *
+   * @throws java.io.IOException if the Zookeeper ACLs configuration file
+   * cannot be read
    */
-  public static List<ACL> getZKAcls(Configuration conf) throws Exception {
+  public static List<ACL> getZKAcls(Configuration conf) throws IOException {
     // Parse authentication from configuration.
     String zkAclConf =
         conf.get(YarnConfiguration.RM_ZK_ACL,
@@ -47,17 +51,20 @@ public class RMZKUtils {
     try {
       zkAclConf = ZKUtil.resolveConfIndirection(zkAclConf);
       return ZKUtil.parseACLs(zkAclConf);
-    } catch (Exception e) {
+    } catch (IOException | ZKUtil.BadAclFormatException e) {
       LOG.error("Couldn't read ACLs based on " + YarnConfiguration.RM_ZK_ACL);
       throw e;
     }
   }
 
   /**
-   * Utility method to fetch ZK auth info from the configuration
+   * Utility method to fetch ZK auth info from the configuration.
+   *
+   * @throws java.io.IOException if the Zookeeper ACLs configuration file
+   * cannot be read
    */
   public static List<ZKUtil.ZKAuthInfo> getZKAuths(Configuration conf)
-      throws Exception {
+      throws IOException {
     String zkAuthConf = conf.get(YarnConfiguration.RM_ZK_AUTH);
     try {
       zkAuthConf = ZKUtil.resolveConfIndirection(zkAuthConf);
@@ -66,7 +73,7 @@ public class RMZKUtils {
       } else {
         return Collections.emptyList();
       }
-    } catch (Exception e) {
+    } catch (IOException | ZKUtil.BadAuthFormatException e) {
       LOG.error("Couldn't read Auth based on " + YarnConfiguration.RM_ZK_AUTH);
       throw e;
     }
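
The shape of the change above -- replacing catch (Exception e) with a
multi-catch of the exceptions that can actually occur, so the method can
declare throws IOException instead of throws Exception -- generalizes well.
A self-contained sketch (names are invented, not Hadoop API; the unchecked
NumberFormatException stands in for ZKUtil.BadAclFormatException):

    import java.io.IOException;

    public class AclLoader {
      static int loadAcls(String conf) throws IOException {
        try {
          return Integer.parseInt(resolve(conf));
        } catch (IOException | NumberFormatException e) {
          System.err.println("Couldn't read ACLs based on " + conf);
          throw e; // precise rethrow; the throws clause stays IOException
        }
      }

      private static String resolve(String conf) throws IOException {
        return conf; // stand-in for ZKUtil.resolveConfIndirection
      }
    }

Callers can then tighten their own signatures, which is exactly what the
ResourceManager.java hunk below does for createAndStartCurator(Configuration).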

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcb2528a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index bf72fc1..8a6997d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -320,7 +320,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
   }
 
   public CuratorFramework createAndStartCurator(Configuration conf)
-      throws Exception {
+      throws IOException {
     String zkHostPort = conf.get(YarnConfiguration.RM_ZK_ADDRESS);
     if (zkHostPort == null) {
       throw new YarnRuntimeException(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcb2528a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
index c24b3e9..51bb74d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
@@ -56,7 +56,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.AM
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationAttemptStateDataPBImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationStateDataPBImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.EpochPBImpl;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.ZooDefs;
 import org.apache.zookeeper.data.ACL;
@@ -68,8 +67,8 @@ import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
+import java.io.IOException;
 import java.security.NoSuchAlgorithmException;
-import java.security.SecureRandom;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -121,21 +120,18 @@ import java.util.List;
 @Private
 @Unstable
 public class ZKRMStateStore extends RMStateStore {
-
-  public static final Log LOG = LogFactory.getLog(ZKRMStateStore.class);
-  private final SecureRandom random = new SecureRandom();
-
-  protected static final String ROOT_ZNODE_NAME = "ZKRMStateRoot";
-  protected static final Version CURRENT_VERSION_INFO = Version
-      .newInstance(1, 3);
+  private static final Log LOG = LogFactory.getLog(ZKRMStateStore.class);
   private static final String RM_DELEGATION_TOKENS_ROOT_ZNODE_NAME =
       "RMDelegationTokensRoot";
   private static final String RM_DT_SEQUENTIAL_NUMBER_ZNODE_NAME =
       "RMDTSequentialNumber";
   private static final String RM_DT_MASTER_KEYS_ROOT_ZNODE_NAME =
       "RMDTMasterKeysRoot";
+  protected static final String ROOT_ZNODE_NAME = "ZKRMStateRoot";
+  protected static final Version CURRENT_VERSION_INFO =
+      Version.newInstance(1, 3);
 
-  /** Znode paths */
+  /* Znode paths */
   private String zkRootNodePath;
   private String rmAppRoot;
   private String rmDTSecretManagerRoot;
@@ -144,44 +140,54 @@ public class ZKRMStateStore extends RMStateStore {
   private String dtSequenceNumberPath;
   private String amrmTokenSecretManagerRoot;
   private String reservationRoot;
+
   @VisibleForTesting
   protected String znodeWorkingPath;
 
-  /** Fencing related variables */
+  /* Fencing related variables */
   private static final String FENCING_LOCK = "RM_ZK_FENCING_LOCK";
   private String fencingNodePath;
   private Thread verifyActiveStatusThread;
   private int zkSessionTimeout;
 
-  /** ACL and auth info */
+  /* ACL and auth info */
   private List<ACL> zkAcl;
   @VisibleForTesting
   List<ACL> zkRootNodeAcl;
   private String zkRootNodeUsername;
-  public static final int CREATE_DELETE_PERMS =
+
+  private static final int CREATE_DELETE_PERMS =
       ZooDefs.Perms.CREATE | ZooDefs.Perms.DELETE;
   private final String zkRootNodeAuthScheme =
       new DigestAuthenticationProvider().getScheme();
 
   @VisibleForTesting
   protected CuratorFramework curatorFramework;
+
   /**
-   * Given the {@link Configuration} and {@link ACL}s used (zkAcl) for
+   * Given the {@link Configuration} and {@link ACL}s used (sourceACLs) for
    * ZooKeeper access, construct the {@link ACL}s for the store's root node.
-   * In the constructed {@link ACL}, all the users allowed by zkAcl are given
-   * rwa access, while the current RM has exclude create-delete access.
+   * In the constructed {@link ACL}, all the users allowed by sourceACLs are
+   * given read-write-admin access, while the current RM has exclusive
+   * create-delete access.
    *
-   * To be called only when HA is enabled and the configuration doesn't set ACL
-   * for the root node.
+   * To be called only when HA is enabled and the configuration doesn't set an
+   * ACL for the root node.
+   * @param conf the configuration
+   * @param sourceACLs the source ACLs
+   * @return ACLs for the store's root node
+   * @throws java.security.NoSuchAlgorithmException thrown if the digest
+   * algorithm used by Zookeeper cannot be found
    */
   @VisibleForTesting
   @Private
   @Unstable
-  protected List<ACL> constructZkRootNodeACL(
-      Configuration conf, List<ACL> sourceACLs) throws NoSuchAlgorithmException {
-    List<ACL> zkRootNodeAcl = new ArrayList<>();
+  protected List<ACL> constructZkRootNodeACL(Configuration conf,
+      List<ACL> sourceACLs) throws NoSuchAlgorithmException {
+    List<ACL> zkRootNodeAclList = new ArrayList<>();
+
     for (ACL acl : sourceACLs) {
-      zkRootNodeAcl.add(new ACL(
+      zkRootNodeAclList.add(new ACL(
           ZKUtil.removeSpecificPerms(acl.getPerms(), CREATE_DELETE_PERMS),
           acl.getId()));
     }
@@ -190,15 +196,16 @@ public class ZKRMStateStore extends RMStateStore {
         YarnConfiguration.RM_ADDRESS,
         YarnConfiguration.DEFAULT_RM_ADDRESS, conf);
     Id rmId = new Id(zkRootNodeAuthScheme,
-        DigestAuthenticationProvider.generateDigest(
-            zkRootNodeUsername + ":" + resourceManager.getZkRootNodePassword()));
-    zkRootNodeAcl.add(new ACL(CREATE_DELETE_PERMS, rmId));
-    return zkRootNodeAcl;
+        DigestAuthenticationProvider.generateDigest(zkRootNodeUsername + ":"
+            + resourceManager.getZkRootNodePassword()));
+    zkRootNodeAclList.add(new ACL(CREATE_DELETE_PERMS, rmId));
+
+    return zkRootNodeAclList;
   }
 
   @Override
-  public synchronized void initInternal(Configuration conf) throws Exception {
-
+  public synchronized void initInternal(Configuration conf)
+      throws IOException, NoSuchAlgorithmException {
     /* Initialize fencing related paths, acls, and ops */
     znodeWorkingPath =
         conf.get(YarnConfiguration.ZK_RM_STATE_STORE_PARENT_PATH,
@@ -210,16 +217,19 @@ public class ZKRMStateStore extends RMStateStore {
         YarnConfiguration.DEFAULT_RM_ZK_TIMEOUT_MS);
 
     zkAcl = RMZKUtils.getZKAcls(conf);
+
     if (HAUtil.isHAEnabled(conf)) {
       String zkRootNodeAclConf = HAUtil.getConfValueForRMInstance
           (YarnConfiguration.ZK_RM_STATE_STORE_ROOT_NODE_ACL, conf);
+
       if (zkRootNodeAclConf != null) {
         zkRootNodeAclConf = ZKUtil.resolveConfIndirection(zkRootNodeAclConf);
+
         try {
           zkRootNodeAcl = ZKUtil.parseACLs(zkRootNodeAclConf);
         } catch (ZKUtil.BadAclFormatException bafe) {
-          LOG.error("Invalid format for " +
-              YarnConfiguration.ZK_RM_STATE_STORE_ROOT_NODE_ACL);
+          LOG.error("Invalid format for "
+              + YarnConfiguration.ZK_RM_STATE_STORE_ROOT_NODE_ACL);
           throw bafe;
         }
       } else {
@@ -239,6 +249,7 @@ public class ZKRMStateStore extends RMStateStore {
         getNodePath(zkRootNodePath, AMRMTOKEN_SECRET_MANAGER_ROOT);
     reservationRoot = getNodePath(zkRootNodePath, RESERVATION_SYSTEM_ROOT);
     curatorFramework = resourceManager.getCurator();
+
     if (curatorFramework == null) {
       curatorFramework = resourceManager.createAndStartCurator(conf);
     }
@@ -246,7 +257,6 @@ public class ZKRMStateStore extends RMStateStore {
 
   @Override
   public synchronized void startInternal() throws Exception {
-
     // ensure root dirs exist
     createRootDirRecursively(znodeWorkingPath);
     create(zkRootNodePath);
@@ -272,9 +282,11 @@ public class ZKRMStateStore extends RMStateStore {
 
     StringBuilder builder = new StringBuilder();
     builder.append(prefix);
+
     for (ACL acl : getAcls) {
       builder.append(acl.toString());
     }
+
     builder.append(getStat.toString());
     LOG.debug(builder.toString());
   }
@@ -301,6 +313,7 @@ public class ZKRMStateStore extends RMStateStore {
       verifyActiveStatusThread.interrupt();
       verifyActiveStatusThread.join(1000);
     }
+
     if (!HAUtil.isHAEnabled(getConfig())) {
       IOUtils.closeStream(curatorFramework);
     }
@@ -316,6 +329,7 @@ public class ZKRMStateStore extends RMStateStore {
     String versionNodePath = getNodePath(zkRootNodePath, VERSION_NODE);
     byte[] data =
         ((VersionPBImpl) CURRENT_VERSION_INFO).getProto().toByteArray();
+
     if (exists(versionNodePath)) {
       safeSetData(versionNodePath, data, -1);
     } else {
@@ -331,6 +345,7 @@ public class ZKRMStateStore extends RMStateStore {
       byte[] data = getData(versionNodePath);
       return new VersionPBImpl(VersionProto.parseFrom(data));
     }
+
     return null;
   }
 
@@ -338,6 +353,7 @@ public class ZKRMStateStore extends RMStateStore {
   public synchronized long getAndIncrementEpoch() throws Exception {
     String epochNodePath = getNodePath(zkRootNodePath, EPOCH_NODE);
     long currentEpoch = 0;
+
     if (exists(epochNodePath)) {
       // load current epoch
       byte[] data = getData(epochNodePath);
@@ -353,6 +369,7 @@ public class ZKRMStateStore extends RMStateStore {
           .toByteArray();
       safeCreate(epochNodePath, storeData, zkAcl, CreateMode.PERSISTENT);
     }
+
     return currentEpoch;
   }
 
@@ -367,31 +384,37 @@ public class ZKRMStateStore extends RMStateStore {
     loadAMRMTokenSecretManagerState(rmState);
     // recover reservation state
     loadReservationSystemState(rmState);
+
     return rmState;
   }
 
   private void loadReservationSystemState(RMState rmState) throws Exception {
     List<String> planNodes = getChildren(reservationRoot);
+
     for (String planName : planNodes) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Loading plan from znode: " + planName);
       }
-      String planNodePath = getNodePath(reservationRoot, planName);
 
+      String planNodePath = getNodePath(reservationRoot, planName);
       List<String> reservationNodes = getChildren(planNodePath);
+
       for (String reservationNodeName : reservationNodes) {
-        String reservationNodePath = getNodePath(planNodePath,
-            reservationNodeName);
+        String reservationNodePath =
+            getNodePath(planNodePath, reservationNodeName);
+
         if (LOG.isDebugEnabled()) {
           LOG.debug("Loading reservation from znode: " + reservationNodePath);
         }
+
         byte[] reservationData = getData(reservationNodePath);
         ReservationAllocationStateProto allocationState =
             ReservationAllocationStateProto.parseFrom(reservationData);
+
         if (!rmState.getReservationState().containsKey(planName)) {
-          rmState.getReservationState().put(planName,
-              new HashMap<ReservationId, ReservationAllocationStateProto>());
+          rmState.getReservationState().put(planName, new HashMap<>());
         }
+
         ReservationId reservationId =
             ReservationId.parseReservationId(reservationNodeName);
         rmState.getReservationState().get(planName).put(reservationId,
@@ -403,16 +426,17 @@ public class ZKRMStateStore extends RMStateStore {
   private void loadAMRMTokenSecretManagerState(RMState rmState)
       throws Exception {
     byte[] data = getData(amrmTokenSecretManagerRoot);
+
     if (data == null) {
       LOG.warn("There is no data saved");
-      return;
+    } else {
+      AMRMTokenSecretManagerStatePBImpl stateData =
+          new AMRMTokenSecretManagerStatePBImpl(
+            AMRMTokenSecretManagerStateProto.parseFrom(data));
+      rmState.amrmTokenSecretManagerState =
+          AMRMTokenSecretManagerState.newInstance(
+            stateData.getCurrentMasterKey(), stateData.getNextMasterKey());
     }
-    AMRMTokenSecretManagerStatePBImpl stateData =
-        new AMRMTokenSecretManagerStatePBImpl(
-          AMRMTokenSecretManagerStateProto.parseFrom(data));
-    rmState.amrmTokenSecretManagerState =
-        AMRMTokenSecretManagerState.newInstance(
-          stateData.getCurrentMasterKey(), stateData.getNextMasterKey());
   }
 
   private synchronized void loadRMDTSecretManagerState(RMState rmState)
@@ -423,8 +447,8 @@ public class ZKRMStateStore extends RMStateStore {
   }
 
   private void loadRMDelegationKeyState(RMState rmState) throws Exception {
-    List<String> childNodes =
-        getChildren(dtMasterKeysRootPath);
+    List<String> childNodes = getChildren(dtMasterKeysRootPath);
+
     for (String childNodeName : childNodes) {
       String childNodePath = getNodePath(dtMasterKeysRootPath, childNodeName);
       byte[] childData = getData(childNodePath);
@@ -435,34 +459,30 @@ public class ZKRMStateStore extends RMStateStore {
       }
 
       ByteArrayInputStream is = new ByteArrayInputStream(childData);
-      DataInputStream fsIn = new DataInputStream(is);
 
-      try {
+      try (DataInputStream fsIn = new DataInputStream(is)) {
         if (childNodeName.startsWith(DELEGATION_KEY_PREFIX)) {
           DelegationKey key = new DelegationKey();
           key.readFields(fsIn);
           rmState.rmSecretManagerState.masterKeyState.add(key);
+
           if (LOG.isDebugEnabled()) {
             LOG.debug("Loaded delegation key: keyId=" + key.getKeyId()
                 + ", expirationDate=" + key.getExpiryDate());
           }
         }
-      } finally {
-        is.close();
       }
     }
   }
 
   private void loadRMSequentialNumberState(RMState rmState) throws Exception {
     byte[] seqData = getData(dtSequenceNumberPath);
+
     if (seqData != null) {
       ByteArrayInputStream seqIs = new ByteArrayInputStream(seqData);
-      DataInputStream seqIn = new DataInputStream(seqIs);
 
-      try {
+      try (DataInputStream seqIn = new DataInputStream(seqIs)) {
         rmState.rmSecretManagerState.dtSequenceNumber = seqIn.readInt();
-      } finally {
-        seqIn.close();
       }
     }
   }
@@ -470,6 +490,7 @@ public class ZKRMStateStore extends RMStateStore {
   private void loadRMDelegationTokenState(RMState rmState) throws Exception {
     List<String> childNodes =
         getChildren(delegationTokensRootPath);
+
     for (String childNodeName : childNodes) {
       String childNodePath =
           getNodePath(delegationTokensRootPath, childNodeName);
@@ -481,9 +502,8 @@ public class ZKRMStateStore extends RMStateStore {
       }
 
       ByteArrayInputStream is = new ByteArrayInputStream(childData);
-      DataInputStream fsIn = new DataInputStream(is);
 
-      try {
+      try (DataInputStream fsIn = new DataInputStream(is)) {
         if (childNodeName.startsWith(DELEGATION_TOKEN_PREFIX)) {
           RMDelegationTokenIdentifierData identifierData =
               new RMDelegationTokenIdentifierData();
@@ -493,36 +513,40 @@ public class ZKRMStateStore extends RMStateStore {
           long renewDate = identifierData.getRenewDate();
           rmState.rmSecretManagerState.delegationTokenState.put(identifier,
               renewDate);
+
           if (LOG.isDebugEnabled()) {
             LOG.debug("Loaded RMDelegationTokenIdentifier: " + identifier
                 + " renewDate=" + renewDate);
           }
         }
-      } finally {
-        is.close();
       }
     }
   }
 
   private synchronized void loadRMAppState(RMState rmState) throws Exception {
     List<String> childNodes = getChildren(rmAppRoot);
+
     for (String childNodeName : childNodes) {
       String childNodePath = getNodePath(rmAppRoot, childNodeName);
       byte[] childData = getData(childNodePath);
+
       if (childNodeName.startsWith(ApplicationId.appIdStrPrefix)) {
         // application
         if (LOG.isDebugEnabled()) {
           LOG.debug("Loading application from znode: " + childNodeName);
         }
+
         ApplicationId appId = ApplicationId.fromString(childNodeName);
         ApplicationStateDataPBImpl appState =
             new ApplicationStateDataPBImpl(
                 ApplicationStateDataProto.parseFrom(childData));
+
         if (!appId.equals(
             appState.getApplicationSubmissionContext().getApplicationId())) {
-          throw new YarnRuntimeException("The child node name is different " +
-              "from the application id");
+          throw new YarnRuntimeException("The child node name is different "
+              + "from the application id");
         }
+
         rmState.appState.put(appId, appState);
         loadApplicationAttemptState(appState, appId);
       } else {
@@ -536,6 +560,7 @@ public class ZKRMStateStore extends RMStateStore {
       throws Exception {
     String appPath = getNodePath(rmAppRoot, appId.toString());
     List<String> attempts = getChildren(appPath);
+
     for (String attemptIDStr : attempts) {
       if (attemptIDStr.startsWith(ApplicationAttemptId.appAttemptIdStrPrefix)) {
         String attemptPath = getNodePath(appPath, attemptIDStr);
@@ -548,6 +573,7 @@ public class ZKRMStateStore extends RMStateStore {
         appState.attempts.put(attemptState.getAttemptId(), attemptState);
       }
     }
+
     LOG.debug("Done loading applications from ZK state store");
   }
 
@@ -559,21 +585,23 @@ public class ZKRMStateStore extends RMStateStore {
     if (LOG.isDebugEnabled()) {
       LOG.debug("Storing info for app: " + appId + " at: " + nodeCreatePath);
     }
+
     byte[] appStateData = appStateDataPB.getProto().toByteArray();
     safeCreate(nodeCreatePath, appStateData, zkAcl,
         CreateMode.PERSISTENT);
-
   }
 
   @Override
-  public synchronized void updateApplicationStateInternal(ApplicationId appId,
-      ApplicationStateData appStateDataPB) throws Exception {
+  protected synchronized void updateApplicationStateInternal(
+      ApplicationId appId, ApplicationStateData appStateDataPB)
+      throws Exception {
     String nodeUpdatePath = getNodePath(rmAppRoot, appId.toString());
 
     if (LOG.isDebugEnabled()) {
       LOG.debug("Storing final state info for app: " + appId + " at: "
           + nodeUpdatePath);
     }
+
     byte[] appStateData = appStateDataPB.getProto().toByteArray();
 
     if (exists(nodeUpdatePath)) {
@@ -587,7 +615,7 @@ public class ZKRMStateStore extends RMStateStore {
   }
 
   @Override
-  public synchronized void storeApplicationAttemptStateInternal(
+  protected synchronized void storeApplicationAttemptStateInternal(
       ApplicationAttemptId appAttemptId,
       ApplicationAttemptStateData attemptStateDataPB)
       throws Exception {
@@ -599,13 +627,13 @@ public class ZKRMStateStore extends RMStateStore {
       LOG.debug("Storing info for attempt: " + appAttemptId + " at: "
           + nodeCreatePath);
     }
+
     byte[] attemptStateData = attemptStateDataPB.getProto().toByteArray();
-    safeCreate(nodeCreatePath, attemptStateData, zkAcl,
-        CreateMode.PERSISTENT);
+    safeCreate(nodeCreatePath, attemptStateData, zkAcl, CreateMode.PERSISTENT);
   }
 
   @Override
-  public synchronized void updateApplicationAttemptStateInternal(
+  protected synchronized void updateApplicationAttemptStateInternal(
       ApplicationAttemptId appAttemptId,
       ApplicationAttemptStateData attemptStateDataPB)
       throws Exception {
@@ -613,10 +641,12 @@ public class ZKRMStateStore extends RMStateStore {
     String appAttemptIdStr = appAttemptId.toString();
     String appDirPath = getNodePath(rmAppRoot, appIdStr);
     String nodeUpdatePath = getNodePath(appDirPath, appAttemptIdStr);
+
     if (LOG.isDebugEnabled()) {
       LOG.debug("Storing final state info for attempt: " + appAttemptIdStr
           + " at: " + nodeUpdatePath);
     }
+
     byte[] attemptStateData = attemptStateDataPB.getProto().toByteArray();
 
     if (exists(nodeUpdatePath)) {
@@ -630,25 +660,24 @@ public class ZKRMStateStore extends RMStateStore {
   }
 
   @Override
-  public synchronized void removeApplicationAttemptInternal(
-      ApplicationAttemptId appAttemptId)
-      throws Exception {
+  protected synchronized void removeApplicationAttemptInternal(
+      ApplicationAttemptId appAttemptId) throws Exception {
     String appId = appAttemptId.getApplicationId().toString();
     String appIdRemovePath = getNodePath(rmAppRoot, appId);
-    String attemptIdRemovePath = getNodePath(appIdRemovePath,
-        appAttemptId.toString());
+    String attemptIdRemovePath =
+        getNodePath(appIdRemovePath, appAttemptId.toString());
 
     if (LOG.isDebugEnabled()) {
       LOG.debug("Removing info for attempt: " + appAttemptId + " at: "
           + attemptIdRemovePath);
     }
+
     safeDelete(attemptIdRemovePath);
   }
 
   @Override
-  public synchronized void removeApplicationStateInternal(
-      ApplicationStateData  appState)
-      throws Exception {
+  protected synchronized void removeApplicationStateInternal(
+      ApplicationStateData appState) throws Exception {
     String appId = appState.getApplicationSubmissionContext().getApplicationId()
         .toString();
     String appIdRemovePath = getNodePath(rmAppRoot, appId);
@@ -659,9 +688,11 @@ public class ZKRMStateStore extends RMStateStore {
     }
 
     for (ApplicationAttemptId attemptId : appState.attempts.keySet()) {
-      String attemptRemovePath = getNodePath(appIdRemovePath, attemptId.toString());
+      String attemptRemovePath =
+          getNodePath(appIdRemovePath, attemptId.toString());
       safeDelete(attemptRemovePath);
     }
+
     safeDelete(appIdRemovePath);
   }
 
@@ -680,10 +711,12 @@ public class ZKRMStateStore extends RMStateStore {
     String nodeRemovePath =
         getNodePath(delegationTokensRootPath, DELEGATION_TOKEN_PREFIX
             + rmDTIdentifier.getSequenceNumber());
+
     if (LOG.isDebugEnabled()) {
       LOG.debug("Removing RMDelegationToken_"
           + rmDTIdentifier.getSequenceNumber());
     }
+
     safeDelete(nodeRemovePath);
   }
 
@@ -695,6 +728,7 @@ public class ZKRMStateStore extends RMStateStore {
     String nodeRemovePath =
         getNodePath(delegationTokensRootPath, DELEGATION_TOKEN_PREFIX
             + rmDTIdentifier.getSequenceNumber());
+
     if (exists(nodeRemovePath)) {
       // in case znode exists
       addStoreOrUpdateOps(trx, rmDTIdentifier, renewDate, true);
@@ -703,6 +737,7 @@ public class ZKRMStateStore extends RMStateStore {
       addStoreOrUpdateOps(trx, rmDTIdentifier, renewDate, false);
       LOG.debug("Attempted to update a non-existing znode " + nodeRemovePath);
     }
+
     trx.commit();
   }
 
@@ -710,17 +745,16 @@ public class ZKRMStateStore extends RMStateStore {
       RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate,
       boolean isUpdate) throws Exception {
     // store RM delegation token
-    String nodeCreatePath =
-        getNodePath(delegationTokensRootPath, DELEGATION_TOKEN_PREFIX
-            + rmDTIdentifier.getSequenceNumber());
-    ByteArrayOutputStream seqOs = new ByteArrayOutputStream();
-    DataOutputStream seqOut = new DataOutputStream(seqOs);
+    String nodeCreatePath = getNodePath(delegationTokensRootPath,
+        DELEGATION_TOKEN_PREFIX + rmDTIdentifier.getSequenceNumber());
     RMDelegationTokenIdentifierData identifierData =
         new RMDelegationTokenIdentifierData(rmDTIdentifier, renewDate);
-    try {
+    ByteArrayOutputStream seqOs = new ByteArrayOutputStream();
+
+    try (DataOutputStream seqOut = new DataOutputStream(seqOs)) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug((isUpdate ? "Storing " : "Updating ") + "RMDelegationToken_" +
-            rmDTIdentifier.getSequenceNumber());
+        LOG.debug((isUpdate ? "Storing " : "Updating ") + "RMDelegationToken_"
+            + rmDTIdentifier.getSequenceNumber());
       }
 
       if (isUpdate) {
@@ -730,24 +764,23 @@ public class ZKRMStateStore extends RMStateStore {
             CreateMode.PERSISTENT);
         // Update Sequence number only while storing DT
         seqOut.writeInt(rmDTIdentifier.getSequenceNumber());
+
         if (LOG.isDebugEnabled()) {
-          LOG.debug((isUpdate ? "Storing " : "Updating ") +
-              dtSequenceNumberPath + ". SequenceNumber: "
+          LOG.debug((isUpdate ? "Storing " : "Updating ")
+              + dtSequenceNumberPath + ". SequenceNumber: "
               + rmDTIdentifier.getSequenceNumber());
         }
+
         trx.setData(dtSequenceNumberPath, seqOs.toByteArray(), -1);
       }
-    } finally {
-      seqOs.close();
     }
   }
 
   @Override
   protected synchronized void storeRMDTMasterKeyState(
       DelegationKey delegationKey) throws Exception {
-    String nodeCreatePath =
-        getNodePath(dtMasterKeysRootPath, DELEGATION_KEY_PREFIX
-            + delegationKey.getKeyId());
+    String nodeCreatePath = getNodePath(dtMasterKeysRootPath,
+        DELEGATION_KEY_PREFIX + delegationKey.getKeyId());
     if (LOG.isDebugEnabled()) {
       LOG.debug("Storing RMDelegationKey_" + delegationKey.getKeyId());
     }
@@ -765,9 +798,11 @@ public class ZKRMStateStore extends RMStateStore {
     String nodeRemovePath =
         getNodePath(dtMasterKeysRootPath, DELEGATION_KEY_PREFIX
             + delegationKey.getKeyId());
+
     if (LOG.isDebugEnabled()) {
       LOG.debug("Removing RMDelegationKey_" + delegationKey.getKeyId());
     }
+
     safeDelete(nodeRemovePath);
   }
 
@@ -789,30 +824,31 @@ public class ZKRMStateStore extends RMStateStore {
   }
 
   @Override
-  public synchronized void storeOrUpdateAMRMTokenSecretManagerState(
+  protected synchronized void storeOrUpdateAMRMTokenSecretManagerState(
       AMRMTokenSecretManagerState amrmTokenSecretManagerState, boolean isUpdate)
       throws Exception {
     AMRMTokenSecretManagerState data =
         AMRMTokenSecretManagerState.newInstance(amrmTokenSecretManagerState);
     byte[] stateData = data.getProto().toByteArray();
+
     safeSetData(amrmTokenSecretManagerRoot, stateData, -1);
   }
 
   @Override
   protected synchronized void removeReservationState(String planName,
-      String reservationIdName)
-      throws Exception {
-    String planNodePath =
-        getNodePath(reservationRoot, planName);
-    String reservationPath = getNodePath(planNodePath,
-        reservationIdName);
+      String reservationIdName) throws Exception {
+    String planNodePath = getNodePath(reservationRoot, planName);
+    String reservationPath = getNodePath(planNodePath, reservationIdName);
+
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Removing reservationallocation " + reservationIdName + " for" +
-          " plan " + planName);
+      LOG.debug("Removing reservationallocation " + reservationIdName
+          + " for" + " plan " + planName);
     }
+
     safeDelete(reservationPath);
 
     List<String> reservationNodes = getChildren(planNodePath);
+
     if (reservationNodes.isEmpty()) {
       safeDelete(planNodePath);
     }
@@ -821,11 +857,10 @@ public class ZKRMStateStore extends RMStateStore {
   @Override
   protected synchronized void storeReservationState(
       ReservationAllocationStateProto reservationAllocation, String planName,
-      String reservationIdName)
-      throws Exception {
+      String reservationIdName) throws Exception {
     SafeTransaction trx = new SafeTransaction();
-    addOrUpdateReservationState(
-        reservationAllocation, planName, reservationIdName, trx, false);
+    addOrUpdateReservationState(reservationAllocation, planName,
+        reservationIdName, trx, false);
     trx.commit();
   }
 
@@ -843,6 +878,7 @@ public class ZKRMStateStore extends RMStateStore {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Creating plan node: " + planName + " at: " + planCreatePath);
       }
+
       trx.create(planCreatePath, null, zkAcl, CreateMode.PERSISTENT);
     }
 
@@ -871,6 +907,7 @@ public class ZKRMStateStore extends RMStateStore {
     Preconditions.checkArgument(pathParts.length >= 1 && pathParts[0].isEmpty(),
         "Invalid path: %s", path);
     StringBuilder sb = new StringBuilder();
+
     for (int i = 1; i < pathParts.length; i++) {
       sb.append("/").append(pathParts[i]);
       create(sb.toString());
@@ -947,10 +984,9 @@ public class ZKRMStateStore extends RMStateStore {
 
     SafeTransaction() throws Exception {
       CuratorTransaction transaction = curatorFramework.inTransaction();
-      transactionFinal =
-          transaction.create()
-              .withMode(CreateMode.PERSISTENT).withACL(zkAcl)
-              .forPath(fencingNodePath, new byte[0]).and();
+      transactionFinal = transaction.create()
+          .withMode(CreateMode.PERSISTENT).withACL(zkAcl)
+          .forPath(fencingNodePath, new byte[0]).and();
     }
 
     public void commit() throws Exception {
@@ -985,19 +1021,17 @@ public class ZKRMStateStore extends RMStateStore {
       super(VerifyActiveStatusThread.class.getName());
     }
 
+    @Override
     public void run() {
       try {
-        while (true) {
-          if(isFencedState()) {
-            break;
-          }
+        while (!isFencedState()) {
           // Create and delete fencing node
           new SafeTransaction().commit();
           Thread.sleep(zkSessionTimeout);
         }
       } catch (InterruptedException ie) {
-        LOG.info(VerifyActiveStatusThread.class.getName() + " thread " +
-            "interrupted! Exiting!");
+        LOG.info(getName() + " thread interrupted! Exiting!");
+        interrupt();
       } catch (Exception e) {
         notifyStoreOperationFailed(new StoreFencedException());
       }
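
Beyond the javadoc and naming polish, two mechanical cleanups recur in this
patch: try/finally close() pairs become try-with-resources, and
VerifyActiveStatusThread.run() now restores the interrupt flag after
catching InterruptedException. A compact sketch of both idioms (illustrative
code, not the state store itself):

    import java.io.ByteArrayInputStream;
    import java.io.DataInputStream;
    import java.io.IOException;

    public class CleanupIdioms {
      static int readFirstInt(byte[] data) throws IOException {
        // Closed automatically, even on exception -- replaces the explicit
        // finally { in.close(); } blocks removed above.
        try (DataInputStream in =
                 new DataInputStream(new ByteArrayInputStream(data))) {
          return in.readInt();
        }
      }

      static void loopUntilInterrupted() {
        try {
          while (true) {
            Thread.sleep(1000L);
          }
        } catch (InterruptedException ie) {
          // Re-assert the interrupt status so code further up the stack
          // can still observe it, as the patched run() now does.
          Thread.currentThread().interrupt();
        }
      }
    }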




[14/57] [abbrv] hadoop git commit: HDFS-10900. DiskBalancer: Complete the documents for the report command. Contributed by Yiqun Lin.

Posted by in...@apache.org.
HDFS-10900. DiskBalancer: Complete the documents for the report command. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9c973646
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9c973646
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9c973646

Branch: refs/heads/HDFS-10467
Commit: 9c9736463b2b30350c78fce4fa0d56c73280d0ff
Parents: d144398
Author: Anu Engineer <ae...@apache.org>
Authored: Tue Sep 27 19:28:41 2016 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Tue Sep 27 19:28:41 2016 -0700

----------------------------------------------------------------------
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md               | 4 +++-
 .../hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md           | 5 ++++-
 2 files changed, 7 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c973646/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index ec94afd..9f9fba5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -445,6 +445,8 @@ Usage:
          [-query <datanode>]
          [-cancel <planfile>]
          [-cancel <planID> -node <datanode>]
+         [-report -node [<DataNodeID|IP|Hostname>,...]]
+         [-report -node -top <topnum>]
 
 | COMMAND\_OPTION | Description |
 |:---- |:---- |
@@ -452,7 +454,7 @@ Usage:
 |-execute| Executes a given plan on a datanode|
 |-query| Gets the current diskbalancer status from a datanode|
 |-cancel| Cancels a running plan|
-
+|-report| Reports the volume information from datanode(s)|
 
 Runs the diskbalancer CLI. See [HDFS Diskbalancer](./HDFSDiskbalancer.html) for more information on this command.
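
To make the two new report forms concrete, hypothetical invocations (the
host names and the top count below are placeholders, following the usage
added above):

    hdfs diskbalancer -fs http://namenode.uri -report -node dn1.example.com,dn2.example.com
    hdfs diskbalancer -fs http://namenode.uri -report -top 5

The first prints volume information for the named datanodes; the second
reports the 5 nodes that would benefit most from running the disk balancer,
per the description added to HDFSDiskbalancer.md below.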
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c973646/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
index d4775c2..dfb43b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
@@ -106,10 +106,13 @@ or
 Plan ID can be read from datanode using query command.
 
 ### Report
-Report command provides detailed report about node(s).
+Report command provides detailed report of specified node(s) or top nodes that will benefit from running disk balancer.
 
 `hdfs diskbalancer -fs http://namenode.uri -report -node [<DataNodeID|IP|Hostname>,...]`
 
+or
+
+`hdfs diskbalancer -fs http://namenode.uri -report -top topnum`
 
 Settings
 --------




[06/57] [abbrv] hadoop git commit: HADOOP-13544. JDiff reports unnecessarily show unannotated APIs and cause confusion while our javadocs only show annotated and public APIs. (vinodkv via wangda)

Posted by in...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/875062b5/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_2.7.2.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_2.7.2.xml b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_2.7.2.xml
index 77074d3..46355b1 100644
--- a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_2.7.2.xml
+++ b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_2.7.2.xml
@@ -17,7 +17,7 @@
 -->
 <!-- Generated by the JDiff Javadoc doclet -->
 <!-- (http://www.jdiff.org) -->
-<!-- on Mon Jun 13 20:32:44 PDT 2016 -->
+<!-- on Wed Aug 24 13:56:25 PDT 2016 -->
 
 <api
   xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
@@ -25,7 +25,7 @@
   name="hadoop-mapreduce-client-core 2.7.2"
   jdversion="1.0.9">
 
-<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/jdiff.jar -verbose -classpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/classes:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-2.7.2.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.7.2.jar:/Users/vinodkv/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/
 Users/vinodkv/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/vinodkv/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/vinodkv/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/vinodkv/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/vinodkv/.m2/repository/org/co
 dehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/vinodkv/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/vinodkv/.m2/repository/com/google/inject/guice/3.0/guice-3.0.jar:/Users/vinodkv/.m2/repository/javax/inject/javax.inject/1/javax.inject-1.jar:/Users/vinodkv/.m2/repository/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/vinodkv/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/vinodkv/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/Users/vinodkv/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/vinodkv/
 .m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/vinodkv/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/vinodkv/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/vinodkv/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/vinodkv/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/vinodkv/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/hadoop-common-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/vinodkv/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/vinodkv/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/vinodkv/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/vinodkv/.
 m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/vinodkv/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/vinodkv/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/vinodkv/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/vinodkv/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/d
 irectory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/vinodkv/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/vinodkv/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/vinodkv/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-
 api-1.7.10.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.7.0_45.jdk/Contents/Home/lib/tools.jar:/Users/vinodkv/.m2/repository/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/Users/vinodkv/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/vinodkv/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/vinodkv/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/vinodkv/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/vinodkv/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/vinodkv/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/vinodkv/.m2/repository/commons-collections/commons-collections/3.2.2/commons-coll
 ections-3.2.2.jar -sourcepath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java -apidir /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/site/jdiff/xml -apiname hadoop-mapreduce-client-core 2.7.2 -->
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/jdiff.jar -verbose -classpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/classes:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-2.7.2.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.7.2.jar:/Users/vinodkv/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/vinodkv/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/vinodkv/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/vinodkv/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/vinodkv/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/vinodkv/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/vinodkv/.m2/repository/com/google/inject/guice/3.0/guice-3.0.jar:/Users/vinodkv/.m2/repository/javax/inject/javax.inject/1/javax.inject-1.jar:/Users/vinodkv/.m2/repository/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/vinodkv/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/vinodkv/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/Users/vinodkv/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/vinodkv/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/vinodkv/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/vinodkv/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/vinodkv/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/vinodkv/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/vinodkv/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/hadoop-common-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/vinodkv/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/vinodkv/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/vinodkv/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/vinodkv/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/vinodkv/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/vinodkv/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/vinodkv/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/vinodkv/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/vinodkv/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/vinodkv/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/vinodkv/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_40.jdk/Contents/Home/lib/tools.jar:/Users/vinodkv/.m2/repository/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/Users/vinodkv/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/vinodkv/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/vinodkv/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/vinodkv/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/vinodkv/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/vinodkv/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/vinodkv/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar -sourcepath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/jdiff.jar -apidir /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/site/jdiff/xml -apiname hadoop-mapreduce-client-core 2.7.2 -->
 <package name="org.apache.hadoop.filecache">
   <!-- start class org.apache.hadoop.filecache.DistributedCache -->
   <class name="DistributedCache" extends="org.apache.hadoop.mapreduce.filecache.DistributedCache"
@@ -591,70 +591,6 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.mapred.ClusterStatus -->
-  <!-- start class org.apache.hadoop.mapred.ClusterStatus.BlackListInfo -->
-  <class name="ClusterStatus.BlackListInfo" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.io.Writable"/>
-    <method name="getTrackerName" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Gets the blacklisted tasktracker's name.
-
- @return tracker's name.]]>
-      </doc>
-    </method>
-    <method name="getReasonForBlackListing" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Gets the reason for which the tasktracker was blacklisted.
-
- @return reason which tracker was blacklisted]]>
-      </doc>
-    </method>
-    <method name="getBlackListReport" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Gets a descriptive report about why the tasktracker was blacklisted.
-
- @return report describing why the tasktracker was blacklisted.]]>
-      </doc>
-    </method>
-    <method name="readFields"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="in" type="java.io.DataInput"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="write"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="out" type="java.io.DataOutput"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="toString" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <doc>
-    <![CDATA[Class which encapsulates information about a blacklisted tasktracker.
-
- The information includes the tasktracker's name and reasons for
- getting blacklisted. The toString method of the class will print
- the information in a whitespace separated fashion to enable parsing.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.ClusterStatus.BlackListInfo -->
   <!-- start class org.apache.hadoop.mapred.Counters -->
   <class name="Counters" extends="org.apache.hadoop.mapreduce.counters.AbstractCounters"
     abstract="false"
@@ -843,7 +779,7 @@
     static="true" final="false" visibility="public"
     deprecated="not deprecated">
     <implements name="org.apache.hadoop.mapreduce.Counter"/>
-    <constructor name="Counters.Counter"
+    <constructor name="Counter"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </constructor>
@@ -945,31 +881,13 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.mapred.Counters.Counter -->
-  <!-- start class org.apache.hadoop.mapred.Counters.CountersExceededException -->
-  <class name="Counters.CountersExceededException" extends="java.lang.RuntimeException"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="Counters.CountersExceededException" type="java.lang.String"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <constructor name="Counters.CountersExceededException" type="org.apache.hadoop.mapred.Counters.CountersExceededException"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <doc>
-    <![CDATA[Counter exception thrown when the number of counters exceed the limit]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.Counters.CountersExceededException -->
   <!-- start class org.apache.hadoop.mapred.Counters.Group -->
   <class name="Counters.Group" extends="java.lang.Object"
     abstract="false"
     static="true" final="false" visibility="public"
     deprecated="not deprecated">
     <implements name="org.apache.hadoop.mapreduce.counters.CounterGroupBase"/>
-    <constructor name="Counters.Group"
+    <constructor name="Group"
       static="false" final="false" visibility="protected"
       deprecated="not deprecated">
     </constructor>
@@ -1422,24 +1340,6 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.mapred.FileInputFormat -->
-  <!-- start class org.apache.hadoop.mapred.FileInputFormat.Counter -->
-  <class name="FileInputFormat.Counter" extends="java.lang.Enum"
-    abstract="false"
-    static="true" final="true" visibility="public"
-    deprecated="not deprecated">
-    <method name="values" return="org.apache.hadoop.mapred.FileInputFormat.Counter[]"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="valueOf" return="org.apache.hadoop.mapred.FileInputFormat.Counter"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.FileInputFormat.Counter -->
   <!-- start class org.apache.hadoop.mapred.FileOutputCommitter -->
   <class name="FileOutputCommitter" extends="org.apache.hadoop.mapred.OutputCommitter"
     abstract="false"
@@ -1449,13 +1349,6 @@
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </constructor>
-    <method name="getTaskAttemptPath" return="org.apache.hadoop.fs.Path"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
     <method name="getWorkPath" return="org.apache.hadoop.fs.Path"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
@@ -1660,23 +1553,6 @@
  the map-reduce job.]]>
       </doc>
     </method>
-    <method name="setWorkOutputPath"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
-      <param name="outputDir" type="org.apache.hadoop.fs.Path"/>
-      <doc>
-      <![CDATA[Set the {@link Path} of the task's temporary output directory
- for the map-reduce job.
-
- <p><i>Note</i>: Task output path is set by the framework.
- </p>
- @param conf The configuration of the job.
- @param outputDir the {@link Path} of the output directory
- for the map-reduce job.]]>
-      </doc>
-    </method>
     <method name="getOutputPath" return="org.apache.hadoop.fs.Path"
       abstract="false" native="false" synchronized="false"
       static="true" final="false" visibility="public"
@@ -1813,24 +1689,6 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.mapred.FileOutputFormat -->
-  <!-- start class org.apache.hadoop.mapred.FileOutputFormat.Counter -->
-  <class name="FileOutputFormat.Counter" extends="java.lang.Enum"
-    abstract="false"
-    static="true" final="true" visibility="public"
-    deprecated="not deprecated">
-    <method name="values" return="org.apache.hadoop.mapred.FileOutputFormat.Counter[]"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="valueOf" return="org.apache.hadoop.mapred.FileOutputFormat.Counter"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.FileOutputFormat.Counter -->
   <!-- start class org.apache.hadoop.mapred.FileSplit -->
   <class name="FileSplit" extends="org.apache.hadoop.mapreduce.InputSplit"
     abstract="false"
@@ -2047,7 +1905,7 @@
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
     <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
@@ -2069,7 +1927,7 @@
       </doc>
     </method>
     <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
@@ -2136,7 +1994,7 @@
     deprecated="not deprecated">
     <implements name="org.apache.hadoop.io.Writable"/>
     <method name="getLength" return="long"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <exception name="IOException" type="java.io.IOException"/>
@@ -2148,7 +2006,7 @@
       </doc>
     </method>
     <method name="getLocations" return="java.lang.String[]"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <exception name="IOException" type="java.io.IOException"/>
@@ -2179,7 +2037,7 @@
     deprecated="not deprecated">
     <implements name="org.apache.hadoop.mapred.InputSplit"/>
     <method name="getLocationInfo" return="org.apache.hadoop.mapred.SplitLocationInfo[]"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <exception name="IOException" type="java.io.IOException"/>
@@ -2408,14 +2266,6 @@
  @throws IOException]]>
       </doc>
     </method>
-    <method name="submitJobInternal" return="org.apache.hadoop.mapred.RunningJob"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
-      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
     <method name="getJobInner" return="org.apache.hadoop.mapred.RunningJob"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="protected"
@@ -2977,24 +2827,6 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.mapred.JobClient -->
-  <!-- start class org.apache.hadoop.mapred.JobClient.TaskStatusFilter -->
-  <class name="JobClient.TaskStatusFilter" extends="java.lang.Enum"
-    abstract="false"
-    static="true" final="true" visibility="public"
-    deprecated="not deprecated">
-    <method name="values" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter[]"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="valueOf" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.JobClient.TaskStatusFilter -->
   <!-- start class org.apache.hadoop.mapred.JobConf -->
   <class name="JobConf" extends="org.apache.hadoop.conf.Configuration"
     abstract="false"
@@ -3075,12 +2907,6 @@
  @return credentials for the job]]>
       </doc>
     </method>
-    <method name="setCredentials"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="credentials" type="org.apache.hadoop.security.Credentials"/>
-    </method>
     <method name="getJar" return="java.lang.String"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
@@ -4989,7 +4815,7 @@
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
     <method name="configure"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
@@ -5010,7 +4836,7 @@
     deprecated="not deprecated">
     <implements name="org.apache.hadoop.mapreduce.JobContext"/>
     <method name="getJobConf" return="org.apache.hadoop.mapred.JobConf"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <doc>
@@ -5020,7 +4846,7 @@
       </doc>
     </method>
     <method name="getProgressible" return="org.apache.hadoop.util.Progressable"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <doc>
@@ -5119,27 +4945,6 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.mapred.JobID -->
-  <!-- start class org.apache.hadoop.mapred.JobInProgress.Counter -->
-  <class name="JobInProgress.Counter" extends="java.lang.Enum"
-    abstract="false"
-    static="true" final="true" visibility="public"
-    deprecated="Provided for compatibility. Use {@link JobCounter} instead.">
-    <method name="values" return="org.apache.hadoop.mapred.JobInProgress.Counter[]"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="valueOf" return="org.apache.hadoop.mapred.JobInProgress.Counter"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
-    </method>
-    <doc>
-    <![CDATA[@deprecated Provided for compatibility. Use {@link JobCounter} instead.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.JobInProgress.Counter -->
   <!-- start class org.apache.hadoop.mapred.JobPriority -->
   <class name="JobPriority" extends="java.lang.Enum"
     abstract="false"
@@ -5185,38 +4990,6 @@
  queue]]>
       </doc>
     </constructor>
-    <method name="setQueueName"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="queueName" type="java.lang.String"/>
-      <doc>
-      <![CDATA[Set the queue name of the JobQueueInfo
-
- @param queueName Name of the job queue.]]>
-      </doc>
-    </method>
-    <method name="setSchedulingInfo"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="schedulingInfo" type="java.lang.String"/>
-      <doc>
-      <![CDATA[Set the scheduling information associated to particular job queue
-
- @param schedulingInfo]]>
-      </doc>
-    </method>
-    <method name="setQueueState"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="state" type="java.lang.String"/>
-      <doc>
-      <![CDATA[Set the state of the queue
- @param state state of the queue.]]>
-      </doc>
-    </method>
     <method name="getQueueState" return="java.lang.String"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
@@ -5225,29 +4998,11 @@
       <![CDATA[Use getState() instead]]>
       </doc>
     </method>
-    <method name="setChildren"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="children" type="java.util.List"/>
-    </method>
     <method name="getChildren" return="java.util.List"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </method>
-    <method name="setProperties"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="props" type="java.util.Properties"/>
-    </method>
-    <method name="setJobStatuses"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="stats" type="org.apache.hadoop.mapreduce.JobStatus[]"/>
-    </method>
     <doc>
     <![CDATA[Class that contains the information regarding the Job Queues which are
  maintained by the Hadoop Map/Reduce framework.]]>
@@ -5545,20 +5300,6 @@
       <![CDATA[Set the job retire flag to true.]]>
       </doc>
     </method>
-    <method name="setRunState"
-      abstract="false" native="false" synchronized="true"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="state" type="int"/>
-      <doc>
-      <![CDATA[Change the current run state of the job.
-
- The setter is public to be compatible with M/R 1.x, however, it should be
- used internally.
-
- @param state the state of the job]]>
-      </doc>
-    </method>
     <method name="getRunState" return="int"
       abstract="false" native="false" synchronized="true"
       static="false" final="false" visibility="public"
@@ -5586,20 +5327,6 @@
       <![CDATA[@param userName The username of the job]]>
       </doc>
     </method>
-    <method name="setSchedulingInfo"
-      abstract="false" native="false" synchronized="true"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="schedulingInfo" type="java.lang.String"/>
-      <doc>
-      <![CDATA[Used to set the scheduling information associated to a particular Job.
-
- The setter is public to be compatible with M/R 1.x, however, it should be
- used internally.
-
- @param schedulingInfo Scheduling information of the job]]>
-      </doc>
-    </method>
     <method name="setJobACLs"
       abstract="false" native="false" synchronized="true"
       static="false" final="false" visibility="protected"
@@ -5686,62 +5413,25 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.mapred.JobStatus -->
-  <!-- start class org.apache.hadoop.mapred.JobTracker -->
-  <class name="JobTracker" extends="java.lang.Object"
+  <!-- start class org.apache.hadoop.mapred.KeyValueLineRecordReader -->
+  <class name="KeyValueLineRecordReader" extends="java.lang.Object"
     abstract="false"
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
-    <constructor name="JobTracker"
+    <implements name="org.apache.hadoop.mapred.RecordReader"/>
+    <constructor name="KeyValueLineRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
     </constructor>
-    <doc>
-    <![CDATA[<code>JobTracker</code> is no longer used since M/R 2.x. This is a dummy
- JobTracker class, which is used to be compatible with M/R 1.x applications.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.JobTracker -->
-  <!-- start class org.apache.hadoop.mapred.JobTracker.State -->
-  <class name="JobTracker.State" extends="java.lang.Enum"
-    abstract="false"
-    static="true" final="true" visibility="public"
-    deprecated="not deprecated">
-    <method name="values" return="org.apache.hadoop.mapred.JobTracker.State[]"
+    <method name="getKeyClass" return="java.lang.Class"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </method>
-    <method name="valueOf" return="org.apache.hadoop.mapred.JobTracker.State"
+    <method name="createKey" return="org.apache.hadoop.io.Text"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
-    </method>
-    <doc>
-    <![CDATA[<code>State</code> is no longer used since M/R 2.x. It is kept in case
- that M/R 1.x applications may still use it.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.JobTracker.State -->
-  <!-- start class org.apache.hadoop.mapred.KeyValueLineRecordReader -->
-  <class name="KeyValueLineRecordReader" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.mapred.RecordReader"/>
-    <constructor name="KeyValueLineRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </constructor>
-    <method name="getKeyClass" return="java.lang.Class"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="createKey" return="org.apache.hadoop.io.Text"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </method>
     <method name="createValue" return="org.apache.hadoop.io.Text"
@@ -5835,27 +5525,6 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.mapred.KeyValueTextInputFormat -->
-  <!-- start class org.apache.hadoop.mapred.LineRecordReader.LineReader -->
-  <class name="LineRecordReader.LineReader" extends="org.apache.hadoop.util.LineReader"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="Use {@link org.apache.hadoop.util.LineReader} instead.">
-    <constructor name="LineRecordReader.LineReader" type="java.io.InputStream, org.apache.hadoop.conf.Configuration"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </constructor>
-    <constructor name="LineRecordReader.LineReader" type="java.io.InputStream, org.apache.hadoop.conf.Configuration, byte[]"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </constructor>
-    <doc>
-    <![CDATA[A class that provides a line reader from an input stream.
- @deprecated Use {@link org.apache.hadoop.util.LineReader} instead.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.LineRecordReader.LineReader -->
   <!-- start class org.apache.hadoop.mapred.MapFileOutputFormat -->
   <class name="MapFileOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat"
     abstract="false"
@@ -5912,7 +5581,7 @@
     <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
     <implements name="org.apache.hadoop.io.Closeable"/>
     <method name="map"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="key" type="K1"/>
@@ -6091,7 +5760,7 @@
     deprecated="not deprecated">
     <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
     <method name="run"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="input" type="org.apache.hadoop.mapred.RecordReader"/>
@@ -6155,177 +5824,6 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.mapred.MapRunner -->
-  <!-- start class org.apache.hadoop.mapred.MapTask.MapOutputBuffer.BlockingBuffer -->
-  <class name="MapTask.MapOutputBuffer.BlockingBuffer" extends="java.io.DataOutputStream"
-    abstract="false"
-    static="false" final="false" visibility="protected"
-    deprecated="not deprecated">
-    <constructor name="MapTask.MapOutputBuffer.BlockingBuffer"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="markRecord" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Mark end of record. Note that this is required if the buffer is to
- cut the spill in the proper place.]]>
-      </doc>
-    </method>
-    <method name="shiftBufferedKey"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Set position from last mark to end of writable buffer, then rewrite
- the data between last mark and kvindex.
- This handles a special case where the key wraps around the buffer.
- If the key is to be passed to a RawComparator, then it must be
- contiguous in the buffer. This recopies the data in the buffer back
- into itself, but starting at the beginning of the buffer. Note that
- this method should <b>only</b> be called immediately after detecting
- this condition. To call it at any other time is undefined and would
- likely result in data loss or corruption.
- @see #markRecord()]]>
-      </doc>
-    </method>
-    <doc>
-    <![CDATA[Inner class managing the spill of serialized records to disk.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.MapTask.MapOutputBuffer.BlockingBuffer -->
-  <!-- start class org.apache.hadoop.mapred.MapTask.MapOutputBuffer.Buffer -->
-  <class name="MapTask.MapOutputBuffer.Buffer" extends="java.io.OutputStream"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="MapTask.MapOutputBuffer.Buffer"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="write"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="v" type="int"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="write"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="b" type="byte[]"/>
-      <param name="off" type="int"/>
-      <param name="len" type="int"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Attempt to write a sequence of bytes to the collection buffer.
- This method will block if the spill thread is running and it
- cannot write.
- @throws MapBufferTooSmallException if record is too large to
-    deserialize into the collection buffer.]]>
-      </doc>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.MapTask.MapOutputBuffer.Buffer -->
-  <!-- start class org.apache.hadoop.mapred.MapTask.MapOutputBuffer.InMemValBytes -->
-  <class name="MapTask.MapOutputBuffer.InMemValBytes" extends="org.apache.hadoop.io.DataInputBuffer"
-    abstract="false"
-    static="false" final="false" visibility="protected"
-    deprecated="not deprecated">
-    <constructor name="MapTask.MapOutputBuffer.InMemValBytes"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="reset"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="buffer" type="byte[]"/>
-      <param name="start" type="int"/>
-      <param name="length" type="int"/>
-    </method>
-    <doc>
-    <![CDATA[Inner class wrapping valuebytes, used for appendRaw.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.MapTask.MapOutputBuffer.InMemValBytes -->
-  <!-- start class org.apache.hadoop.mapred.MapTask.MapOutputBuffer.MRResultIterator -->
-  <class name="MapTask.MapOutputBuffer.MRResultIterator" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="protected"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.mapred.RawKeyValueIterator"/>
-    <constructor name="MapTask.MapOutputBuffer.MRResultIterator" type="int, int"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="next" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getKey" return="org.apache.hadoop.io.DataInputBuffer"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getValue" return="org.apache.hadoop.io.DataInputBuffer"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getProgress" return="org.apache.hadoop.util.Progress"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="close"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.MapTask.MapOutputBuffer.MRResultIterator -->
-  <!-- start class org.apache.hadoop.mapred.MapTask.MapOutputBuffer.SpillThread -->
-  <class name="MapTask.MapOutputBuffer.SpillThread" extends="java.lang.Thread"
-    abstract="false"
-    static="false" final="false" visibility="protected"
-    deprecated="not deprecated">
-    <constructor name="MapTask.MapOutputBuffer.SpillThread"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="run"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.MapTask.MapOutputBuffer.SpillThread -->
-  <!-- start class org.apache.hadoop.mapred.Master.State -->
-  <class name="Master.State" extends="java.lang.Enum"
-    abstract="false"
-    static="true" final="true" visibility="public"
-    deprecated="not deprecated">
-    <method name="values" return="org.apache.hadoop.mapred.Master.State[]"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="valueOf" return="org.apache.hadoop.mapred.Master.State"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.Master.State -->
   <!-- start class org.apache.hadoop.mapred.MultiFileInputFormat -->
   <class name="MultiFileInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat"
     abstract="true"
@@ -6399,7 +5897,7 @@
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
     <method name="collect"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="key" type="K"/>
@@ -6798,7 +6296,7 @@
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
     <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
@@ -6818,7 +6316,7 @@
       </doc>
     </method>
     <method name="checkOutputSpecs"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
@@ -6890,7 +6388,7 @@
     deprecated="not deprecated">
     <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
     <method name="getPartition" return="int"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="key" type="K2"/>
@@ -6927,7 +6425,7 @@
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
     <method name="next" return="boolean"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="key" type="K"/>
@@ -6942,7 +6440,7 @@
       </doc>
     </method>
     <method name="createKey" return="K"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <doc>
@@ -6952,7 +6450,7 @@
       </doc>
     </method>
     <method name="createValue" return="V"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <doc>
@@ -6962,7 +6460,7 @@
       </doc>
     </method>
     <method name="getPos" return="long"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <exception name="IOException" type="java.io.IOException"/>
@@ -6974,7 +6472,7 @@
       </doc>
     </method>
     <method name="close"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <exception name="IOException" type="java.io.IOException"/>
@@ -6985,7 +6483,7 @@
       </doc>
     </method>
     <method name="getProgress" return="float"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <exception name="IOException" type="java.io.IOException"/>
@@ -7017,7 +6515,7 @@
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
     <method name="write"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="key" type="K"/>
@@ -7032,7 +6530,7 @@
       </doc>
     </method>
     <method name="close"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
@@ -7062,7 +6560,7 @@
     <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
     <implements name="org.apache.hadoop.io.Closeable"/>
     <method name="reduce"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="key" type="K2"/>
@@ -7244,7 +6742,7 @@
     deprecated="not deprecated">
     <implements name="org.apache.hadoop.util.Progressable"/>
     <method name="setStatus"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="status" type="java.lang.String"/>
@@ -7255,7 +6753,7 @@
       </doc>
     </method>
     <method name="getCounter" return="org.apache.hadoop.mapred.Counters.Counter"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="name" type="java.lang.Enum"/>
@@ -7267,7 +6765,7 @@
       </doc>
     </method>
     <method name="getCounter" return="org.apache.hadoop.mapred.Counters.Counter"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="group" type="java.lang.String"/>
@@ -7281,7 +6779,7 @@
       </doc>
     </method>
     <method name="incrCounter"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="key" type="java.lang.Enum"/>
@@ -7297,7 +6795,7 @@
       </doc>
     </method>
     <method name="incrCounter"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="group" type="java.lang.String"/>
@@ -7314,7 +6812,7 @@
       </doc>
     </method>
     <method name="getInputSplit" return="org.apache.hadoop.mapred.InputSplit"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
@@ -7326,7 +6824,7 @@
       </doc>
     </method>
     <method name="getProgress" return="float"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <doc>
@@ -7365,7 +6863,7 @@
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
     <method name="getConfiguration" return="org.apache.hadoop.conf.Configuration"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <doc>
@@ -7375,7 +6873,7 @@
       </doc>
     </method>
     <method name="getID" return="org.apache.hadoop.mapred.JobID"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <doc>
@@ -7385,7 +6883,7 @@
       </doc>
     </method>
     <method name="getJobID" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="This method is deprecated and will be removed. Applications should
  rather use {@link #getID()}.">
@@ -7395,7 +6893,7 @@
       </doc>
     </method>
     <method name="getJobName" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <doc>
@@ -7405,7 +6903,7 @@
       </doc>
     </method>
     <method name="getJobFile" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <doc>
@@ -7415,7 +6913,7 @@
       </doc>
     </method>
     <method name="getTrackingURL" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <doc>
@@ -7425,7 +6923,7 @@
       </doc>
     </method>
     <method name="mapProgress" return="float"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <exception name="IOException" type="java.io.IOException"/>
@@ -7438,7 +6936,7 @@
       </doc>
     </method>
     <method name="reduceProgress" return="float"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <exception name="IOException" type="java.io.IOException"/>
@@ -7451,7 +6949,7 @@
       </doc>
     </method>
     <method name="cleanupProgress" return="float"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <exception name="IOException" type="java.io.IOException"/>
@@ -7464,7 +6962,7 @@
       </doc>
     </method>
     <method name="setupProgress" return="float"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <exception name="IOException" type="java.io.IOException"/>
@@ -7477,7 +6975,7 @@
       </doc>
     </method>
     <method name="isComplete" return="boolean"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <exception name="IOException" type="java.io.IOException"/>
@@ -7490,7 +6988,7 @@
       </doc>
     </method>
     <method name="isSuccessful" return="boolean"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <exception name="IOException" type="java.io.IOException"/>
@@ -7502,7 +7000,7 @@
       </doc>
     </method>
     <method name="waitForCompletion"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <exception name="IOException" type="java.io.IOException"/>
@@ -7513,7 +7011,7 @@
       </doc>
     </method>
     <method name="getJobState" return="int"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <exception name="IOException" type="java.io.IOException"/>
@@ -7525,7 +7023,7 @@
       </doc>
     </method>
     <method name="getJobStatus" return="org.apache.hadoop.mapred.JobStatus"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <exception name="IOException" type="java.io.IOException"/>
@@ -7537,7 +7035,7 @@
       </doc>
     </method>
     <method name="killJob"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <exception name="IOException" type="java.io.IOException"/>
@@ -7549,7 +7047,7 @@
       </doc>
     </method>
     <method name="setJobPriority"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="priority" type="java.lang.String"/>
@@ -7561,7 +7059,7 @@
       </doc>
     </method>
     <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="startFrom" type="int"/>
@@ -7575,7 +7073,7 @@
       </doc>
     </method>
     <method name="killTask"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
@@ -7592,7 +7090,7 @@
       </doc>
     </method>
     <method name="killTask"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="Applications should rather use {@link #killTask(TaskAttemptID, boolean)}">
       <param name="taskId" type="java.lang.String"/>
@@ -7603,7 +7101,7 @@
       </doc>
     </method>
     <method name="getCounters" return="org.apache.hadoop.mapred.Counters"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <exception name="IOException" type="java.io.IOException"/>
@@ -7615,7 +7113,7 @@
       </doc>
     </method>
     <method name="getTaskDiagnostics" return="java.lang.String[]"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
@@ -7628,7 +7126,7 @@
       </doc>
     </method>
     <method name="getHistoryUrl" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <exception name="IOException" type="java.io.IOException"/>
@@ -7641,7 +7139,7 @@
       </doc>
     </method>
     <method name="isRetired" return="boolean"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <exception name="IOException" type="java.io.IOException"/>
@@ -7654,7 +7152,7 @@
       </doc>
     </method>
     <method name="getFailureInfo" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
       <exception name="IOException" type="java.io.IOException"/>
@@ -7700,95 +7198,18 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat -->
-  <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader -->
-  <class name="SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader" extends="java.lang.Object"
+  <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat -->
+  <class name="SequenceFileAsBinaryOutputFormat" extends="org.apache.hadoop.mapred.SequenceFileOutputFormat"
     abstract="false"
-    static="true" final="false" visibility="public"
+    static="false" final="false" visibility="public"
     deprecated="not deprecated">
-    <implements name="org.apache.hadoop.mapred.RecordReader"/>
-    <constructor name="SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+    <constructor name="SequenceFileAsBinaryOutputFormat"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
     </constructor>
-    <method name="createKey" return="org.apache.hadoop.io.BytesWritable"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="createValue" return="org.apache.hadoop.io.BytesWritable"
+    <method name="setSequenceFileOutputKeyClass"
       abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getKeyClassName" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Retrieve the name of the key class for this SequenceFile.
- @see org.apache.hadoop.io.SequenceFile.Reader#getKeyClassName]]>
-      </doc>
-    </method>
-    <method name="getValueClassName" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Retrieve the name of the value class for this SequenceFile.
- @see org.apache.hadoop.io.SequenceFile.Reader#getValueClassName]]>
-      </doc>
-    </method>
-    <method name="next" return="boolean"
-      abstract="false" native="false" synchronized="true"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="key" type="org.apache.hadoop.io.BytesWritable"/>
-      <param name="val" type="org.apache.hadoop.io.BytesWritable"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Read raw bytes from a SequenceFile.]]>
-      </doc>
-    </method>
-    <method name="getPos" return="long"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="close"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getProgress" return="float"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Return the progress within the input split
- @return 0.0 to 1.0 of the input byte range]]>
-      </doc>
-    </method>
-    <doc>
-    <![CDATA[Read records from a SequenceFile as binary (raw) bytes.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader -->
-  <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat -->
-  <class name="SequenceFileAsBinaryOutputFormat" extends="org.apache.hadoop.mapred.SequenceFileOutputFormat"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="SequenceFileAsBinaryOutputFormat"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="setSequenceFileOutputKeyClass"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="true" final="false" visibility="public"
       deprecated="not deprecated">
       <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
       <param name="theClass" type="java.lang.Class"/>
@@ -7862,24 +7283,6 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat -->
-  <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat.WritableValueBytes -->
-  <class name="SequenceFileAsBinaryOutputFormat.WritableValueBytes" extends="org.apache.hadoop.mapreduce.lib.output.SequenceFileAsBinaryOutputFormat.WritableValueBytes"
-    abstract="false"
-    static="true" final="false" visibility="protected"
-    deprecated="not deprecated">
-    <constructor name="SequenceFileAsBinaryOutputFormat.WritableValueBytes"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <constructor name="SequenceFileAsBinaryOutputFormat.WritableValueBytes" type="org.apache.hadoop.io.BytesWritable"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <doc>
-    <![CDATA[Inner class used for appendRaw]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat.WritableValueBytes -->
   <!-- start class org.apache.hadoop.mapred.SequenceFileAsTextInputFormat -->
   <class name="SequenceFileAsTextInputFormat" extends="org.apache.hadoop.mapred.SequenceFileInputFormat"
     abstract="false"
@@ -8007,180 +7410,6 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter -->
-  <!-- start interface org.apache.hadoop.mapred.SequenceFileInputFilter.Filter -->
-  <interface name="SequenceFileInputFilter.Filter"    abstract="true"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFilter.Filter"/>
-    <doc>
-    <![CDATA[filter interface]]>
-    </doc>
-  </interface>
-  <!-- end interface org.apache.hadoop.mapred.SequenceFileInputFilter.Filter -->
-  <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase -->
-  <class name="SequenceFileInputFilter.FilterBase" extends="org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFilter.FilterBase"
-    abstract="true"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.mapred.SequenceFileInputFilter.Filter"/>
-    <constructor name="SequenceFileInputFilter.FilterBase"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <doc>
-    <![CDATA[base class for Filters]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase -->
-  <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.MD5Filter -->
-  <class name="SequenceFileInputFilter.MD5Filter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="SequenceFileInputFilter.MD5Filter"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="setFrequency"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="frequency" type="int"/>
-      <doc>
-      <![CDATA[set the filtering frequency in configuration
-
- @param conf configuration
- @param frequency filtering frequency]]>
-      </doc>
-    </method>
-    <method name="setConf"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <doc>
-      <![CDATA[configure the filter according to configuration
-
- @param conf configuration]]>
-      </doc>
-    </method>
-    <method name="accept" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="key" type="java.lang.Object"/>
-      <doc>
-      <![CDATA[Filtering method
- If MD5(key) % frequency==0, return true; otherwise return false
- @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object)]]>
-      </doc>
-    </method>
-    <field name="MD5_LEN" type="int"
-      transient="false" volatile="false"
-      static="true" final="true" visibility="public"
-      deprecated="not deprecated">
-    </field>
-    <doc>
-    <![CDATA[This class returns a set of records by examining the MD5 digest of its
- key against a filtering frequency <i>f</i>. The filtering criterion is
- MD5(key) % f == 0.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.MD5Filter -->
-  <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.PercentFilter -->
-  <class name="SequenceFileInputFilter.PercentFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="SequenceFileInputFilter.PercentFilter"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="setFrequency"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="frequency" type="int"/>
-      <doc>
-      <![CDATA[Sets the frequency and stores it in conf.
- @param conf configuration
- @param frequency filtering frequency]]>
-      </doc>
-    </method>
-    <method name="setConf"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <doc>
-      <![CDATA[configure the filter by checking the configuration
-
- @param conf configuration]]>
-      </doc>
-    </method>
-    <method name="accept" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="key" type="java.lang.Object"/>
-      <doc>
-      <![CDATA[Filtering method
- If record# % frequency==0, return true; otherwise return false
- @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object)]]>
-      </doc>
-    </method>
-    <doc>
-    <![CDATA[This class returns a percentage of records.
- The percentage is determined by a filtering frequency <i>f</i> using
- the criteria record# % f == 0.
- For example, if the frequency is 10, one out of 10 records is returned.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.PercentFilter -->
-  <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.RegexFilter -->
-  <class name="SequenceFileInputFilter.RegexFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="SequenceFileInputFilter.RegexFilter"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="setPattern"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="regex" type="java.lang.String"/>
-      <exception name="PatternSyntaxException" type="java.util.regex.PatternSyntaxException"/>
-    </method>
-    <method name="setConf"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <doc>
-      <![CDATA[configure the Filter by checking the configuration]]>
-      </doc>
-    </method>
-    <method name="accept" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="key" type="java.lang.Object"/>
-      <doc>
-      <![CDATA[Filtering method
- If key matches the regex, return true; otherwise return false
- @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(Object)]]>
-      </doc>
-    </method>
-    <doc>
-    <![CDATA[Filters records by matching keys against a regex]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.RegexFilter -->
   <!-- start class org.apache.hadoop.mapred.SequenceFileInputFormat -->
   <class name="SequenceFileInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat"
     abstract="false"
@@ -8696,44 +7925,23 @@
     </method>
   </class>
   <!-- end class org.apache.hadoop.mapred.SplitLocationInfo -->
-  <!-- start class org.apache.hadoop.mapred.Task.Counter -->
-  <class name="Task.Counter" extends="java.lang.Enum"
-    abstract="false"
-    static="true" final="true" visibility="public"
-    deprecated="Provided for compatibility. Use {@link TaskCounter} instead.">
-    <method name="values" return="org.apache.hadoop.mapred.Task.Counter[]"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="valueOf" return="org.apache.hadoop.mapred.Task.Counter"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
-    </method>
-    <doc>
-    <![CDATA[@deprecated Provided for compatibility. Use {@link TaskCounter} instead.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.Task.Counter -->
   <!-- start interface org.apache.hadoop.mapred.TaskAttemptContext -->
   <interface name="TaskAttemptContext"    abstract="true"
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
     <implements name="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
     <method name="getTaskAttemptID" return="org.apache.hadoop.mapred.TaskAttemptID"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </method>
     <method name="getProgressible" return="org.apache.hadoop.util.Progressable"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </method>
     <method name="getJobConf" return="org.apache.hadoop.mapred.JobConf"
-      abstract="false" native="false" synchronized="false"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </method>
@@ -8925,12 +8133,6 @@
  @param taskTrackerHttp task tracker's host:port for http.]]>
       </doc>
     </constructor>
-    <method name="downgrade" return="org.apache.hadoop.mapred.TaskCompletionEvent"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="event" type="org.apache.hadoop.mapreduce.TaskCompletionEvent"/>
-    </method>
     <method name="getTaskId" return="java.lang.String"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
@@ -8991,46 +8193,6 @@
  @param taskId]]>
       </doc>
     </method>
-    <method name="setTaskStatus"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="status" type="org.apache.hadoop.mapred.TaskCompletionEvent.Status"/>
-      <doc>
-      <![CDATA[Set task status.
- @param status]]>
-      </doc>
-    </method>
-    <method name="setTaskRunTime"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="taskCompletionTime" type="int"/>
-      <doc>
-      <![CDATA[Set the task completion time
- @param taskCompletionTime time (in millisec) the task took to complete]]>
-      </doc>
-    </method>
-    <method name="setEventId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="eventId" type="int"/>
-      <doc>
-      <![CDATA[set event Id. should be assigned incrementally starting from 0.
- @param eventId]]>
-      </doc>
-    </method>
-    <method name="setTaskTrackerHttp"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="taskTrackerHttp" type="java.lang.String"/>
-      <doc>
-      <![CDATA[Set task tracker http location.
- @param taskTrackerHttp]]>
-      </doc>
-    </method>
     <field name="EMPTY_ARRAY" type="org.apache.hadoop.mapred.TaskCompletionEvent[]"
       transient="false" volatile="false"
       static="true" final="true" visibility="public"
@@ -9222,147 +8384,12 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.mapred.TaskID -->
-  <!-- start class org.apache.hadoop.mapred.TaskLog.Reader -->
-  <class name="TaskLog.Reader" extends="java.io.InputStream"
+  <!-- start class org.apache.hadoop.mapred.TaskReport -->
+  <class name="TaskReport" extends="org.apache.hadoop.mapreduce.TaskReport"
     abstract="false"
-    static="true" final="false" visibility="public"
+    static="false" final="false" visibility="public"
     deprecated="not deprecated">
-    <constructor name="TaskLog.Reader" type="org.apache.hadoop.mapred.TaskAttemptID, org.apache.hadoop.mapred.TaskLog.LogName, long, long, boolean"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Read a log file from start to end positions. The offsets may be negative,
- in which case they are relative to the end of the file. For example,
- Reader(taskid, kind, 0, -1) is the entire file and
- Reader(taskid, kind, -4197, -1) is the last 4196 bytes.
- @param taskid the id of the task to read the log file for
- @param kind the kind of log to read
- @param start the offset to read from (negative is relative to tail)
- @param end the offset to read upto (negative is relative to tail)
- @param isCleanup whether the attempt is cleanup attempt or not
- @throws IOException]]>
-      </doc>
-    </constructor>
-    <method name="read" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="read" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="buffer" type="byte[]"/>
-      <param name="offset" type="int"/>
-      <param name="length" type="int"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="available" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="close"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.TaskLog.Reader -->
-  <!-- start class org.apache.hadoop.mapred.TaskLogAppender -->
-  <class name="TaskLogAppender" extends="org.apache.log4j.FileAppender"
-    abstract="false"
-    static="false" final="fa

<TRUNCATED>

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[16/57] [abbrv] hadoop git commit: HDFS-9850. DiskBalancer: Explore removing references to FsVolumeSpi. Contributed by Manoj Govindassamy.

Posted by in...@apache.org.
HDFS-9850. DiskBalancer: Explore removing references to FsVolumeSpi. Contributed by Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03f519a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03f519a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03f519a7

Branch: refs/heads/HDFS-10467
Commit: 03f519a757ce83d76e7fc9f6aadf271e38bb9f6d
Parents: 6437ba1
Author: Anu Engineer <ae...@apache.org>
Authored: Tue Sep 27 21:35:06 2016 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Tue Sep 27 21:35:06 2016 -0700

----------------------------------------------------------------------
 .../hdfs/server/datanode/DiskBalancer.java      | 210 +++++++++++++------
 .../server/diskbalancer/TestDiskBalancer.java   | 156 ++++++++++++++
 2 files changed, 299 insertions(+), 67 deletions(-)
----------------------------------------------------------------------
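
In short, VolumePair stops holding live FsVolumeSpi references and instead
carries the volume UUIDs and base paths; the actual volume is looked up by
storage ID only at copy time, so a disk that is hot-removed mid-plan surfaces
as a failed lookup instead of a dangling reference. A minimal sketch of that
resolve-at-use pattern (the method name here is illustrative, mirroring the
patch's getFsVolume below):

    // Resolve a volume by its storage ID at use time. Returns null when
    // the volume has been removed since plan submission, letting the
    // caller fail the work item gracefully.
    private static FsVolumeSpi resolveVolume(FsDatasetSpi<?> dataset,
        String storageId) throws IOException {
      try (FsDatasetSpi.FsVolumeReferences refs =
               dataset.getFsVolumeReferences()) {
        for (int i = 0; i < refs.size(); i++) {
          if (refs.get(i).getStorageID().equals(storageId)) {
            return refs.get(i);
          }
        }
      }
      return null;
    }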


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03f519a7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
index d853ae9..e7e9105 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
@@ -22,6 +22,8 @@ import com.google.common.base.Preconditions;
 import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi
+    .FsVolumeReferences;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -41,6 +43,7 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.nio.charset.Charset;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.LinkedList;
@@ -192,7 +195,30 @@ public class DiskBalancer {
   }
 
   /**
-   * Returns the Current Work Status of a submitted Plan.
+   * Get FsVolume by volume UUID.
+   * @param fsDataset the dataset whose volume references are searched
+   * @param volUuid the storage ID (UUID) of the volume to look up
+   * @return FsVolumeSpi, or null if no matching volume is found
+   */
+  private static FsVolumeSpi getFsVolume(final FsDatasetSpi<?> fsDataset,
+      final String volUuid) {
+    FsVolumeSpi fsVolume = null;
+    try (FsVolumeReferences volumeReferences =
+           fsDataset.getFsVolumeReferences()) {
+      for (int i = 0; i < volumeReferences.size(); i++) {
+        if (volumeReferences.get(i).getStorageID().equals(volUuid)) {
+          fsVolume = volumeReferences.get(i);
+          break;
+        }
+      }
+    } catch (IOException e) {
+      LOG.warn("Disk Balancer - Error when closing volume references: ", e);
+    }
+    return fsVolume;
+  }
+
+  /**
+   * Returns the current work status of a previously submitted Plan.
    *
    * @return DiskBalancerWorkStatus.
    * @throws DiskBalancerException
@@ -214,8 +240,8 @@ public class DiskBalancer {
       for (Map.Entry<VolumePair, DiskBalancerWorkItem> entry :
           workMap.entrySet()) {
         DiskBalancerWorkEntry workEntry = new DiskBalancerWorkEntry(
-            entry.getKey().getSource().getBasePath(),
-            entry.getKey().getDest().getBasePath(),
+            entry.getKey().getSourceVolBasePath(),
+            entry.getKey().getDestVolBasePath(),
             entry.getValue());
         status.addWorkEntry(workEntry);
       }
@@ -269,12 +295,7 @@ public class DiskBalancer {
     lock.lock();
     try {
       checkDiskBalancerEnabled();
-      Map<String, String> pathMap = new HashMap<>();
-      Map<String, FsVolumeSpi> volMap = getStorageIDToVolumeMap();
-      for (Map.Entry<String, FsVolumeSpi> entry : volMap.entrySet()) {
-        pathMap.put(entry.getKey(), entry.getValue().getBasePath());
-      }
-      return JsonUtil.toJsonString(pathMap);
+      return JsonUtil.toJsonString(getStorageIDToVolumeBasePathMap());
     } catch (DiskBalancerException ex) {
       throw ex;
     } catch (IOException e) {
@@ -434,47 +455,52 @@ public class DiskBalancer {
 
     // Cleanup any residual work in the map.
     workMap.clear();
-    Map<String, FsVolumeSpi> pathMap = getStorageIDToVolumeMap();
+    Map<String, String> storageIDToVolBasePathMap =
+        getStorageIDToVolumeBasePathMap();
 
     for (Step step : plan.getVolumeSetPlans()) {
-      String sourceuuid = step.getSourceVolume().getUuid();
-      String destinationuuid = step.getDestinationVolume().getUuid();
-
-      FsVolumeSpi sourceVol = pathMap.get(sourceuuid);
-      if (sourceVol == null) {
-        LOG.error("Disk Balancer - Unable to find source volume. submitPlan " +
-            "failed.");
-        throw new DiskBalancerException("Unable to find source volume.",
+      String sourceVolUuid = step.getSourceVolume().getUuid();
+      String destVolUuid = step.getDestinationVolume().getUuid();
+
+      String sourceVolBasePath = storageIDToVolBasePathMap.get(sourceVolUuid);
+      if (sourceVolBasePath == null) {
+        final String errMsg = "Disk Balancer - Unable to find volume: "
+            + step.getSourceVolume().getPath() + ". SubmitPlan failed.";
+        LOG.error(errMsg);
+        throw new DiskBalancerException(errMsg,
             DiskBalancerException.Result.INVALID_VOLUME);
       }
 
-      FsVolumeSpi destVol = pathMap.get(destinationuuid);
-      if (destVol == null) {
-        LOG.error("Disk Balancer - Unable to find destination volume. " +
-            "submitPlan failed.");
-        throw new DiskBalancerException("Unable to find destination volume.",
+      String destVolBasePath = storageIDToVolBasePathMap.get(destVolUuid);
+      if (destVolBasePath == null) {
+        final String errMsg = "Disk Balancer - Unable to find volume: "
+            + step.getDestinationVolume().getPath() + ". SubmitPlan failed.";
+        LOG.error(errMsg);
+        throw new DiskBalancerException(errMsg,
             DiskBalancerException.Result.INVALID_VOLUME);
       }
-      createWorkPlan(sourceVol, destVol, step);
+      VolumePair volumePair = new VolumePair(sourceVolUuid,
+          sourceVolBasePath, destVolUuid, destVolBasePath);
+      createWorkPlan(volumePair, step);
     }
   }
 
   /**
-   * Returns a path to Volume Map.
+   * Returns volume UUID to volume base path map.
    *
    * @return Map
    * @throws DiskBalancerException
    */
-  private Map<String, FsVolumeSpi> getStorageIDToVolumeMap()
+  private Map<String, String> getStorageIDToVolumeBasePathMap()
       throws DiskBalancerException {
-    Map<String, FsVolumeSpi> pathMap = new HashMap<>();
+    Map<String, String> storageIDToVolBasePathMap = new HashMap<>();
     FsDatasetSpi.FsVolumeReferences references;
     try {
       try(AutoCloseableLock lock = this.dataset.acquireDatasetLock()) {
         references = this.dataset.getFsVolumeReferences();
         for (int ndx = 0; ndx < references.size(); ndx++) {
           FsVolumeSpi vol = references.get(ndx);
-          pathMap.put(vol.getStorageID(), vol);
+          storageIDToVolBasePathMap.put(vol.getStorageID(), vol.getBasePath());
         }
         references.close();
       }
@@ -483,7 +509,7 @@ public class DiskBalancer {
       throw new DiskBalancerException("Internal error", ex,
           DiskBalancerException.Result.INTERNAL_ERROR);
     }
-    return pathMap;
+    return storageIDToVolBasePathMap;
   }
 
   /**
@@ -513,26 +539,24 @@ public class DiskBalancer {
 
   /**
    * Insert work items to work map.
-   *
-   * @param source - Source vol
-   * @param dest   - destination volume
-   * @param step   - Move Step
+   * @param volumePair - VolumePair
+   * @param step - Move Step
    */
-  private void createWorkPlan(FsVolumeSpi source, FsVolumeSpi dest,
-                              Step step) throws DiskBalancerException {
-
-    if (source.getStorageID().equals(dest.getStorageID())) {
-      LOG.info("Disk Balancer - source & destination volumes are same.");
-      throw new DiskBalancerException("source and destination volumes are " +
-          "same.", DiskBalancerException.Result.INVALID_MOVE);
+  private void createWorkPlan(final VolumePair volumePair, Step step)
+      throws DiskBalancerException {
+    if (volumePair.getSourceVolUuid().equals(volumePair.getDestVolUuid())) {
+      final String errMsg = "Disk Balancer - Source and destination volumes " +
+          "are same: " + volumePair.getSourceVolUuid();
+      LOG.warn(errMsg);
+      throw new DiskBalancerException(errMsg,
+          DiskBalancerException.Result.INVALID_MOVE);
     }
-    VolumePair pair = new VolumePair(source, dest);
     long bytesToMove = step.getBytesToMove();
     // In case we have a plan with more than
-    // one line of same <source, dest>
+    // one line of same VolumePair
     // we compress that into one work order.
-    if (workMap.containsKey(pair)) {
-      bytesToMove += workMap.get(pair).getBytesToCopy();
+    if (workMap.containsKey(volumePair)) {
+      bytesToMove += workMap.get(volumePair).getBytesToCopy();
     }
 
     DiskBalancerWorkItem work = new DiskBalancerWorkItem(bytesToMove, 0);
@@ -542,7 +566,7 @@ public class DiskBalancer {
     work.setBandwidth(step.getBandwidth());
     work.setTolerancePercent(step.getTolerancePercent());
     work.setMaxDiskErrors(step.getMaxDiskErrors());
-    workMap.put(pair, work);
+    workMap.put(volumePair, work);
   }
 
   /**
@@ -591,39 +615,63 @@ public class DiskBalancer {
   }
 
   /**
-   * Holds references to actual volumes that we will be operating against.
+   * Holds the source and destination volume UUIDs and their base paths
+   * that the disk balancer will be operating against.
    */
   public static class VolumePair {
-    private final FsVolumeSpi source;
-    private final FsVolumeSpi dest;
+    private final String sourceVolUuid;
+    private final String destVolUuid;
+    private final String sourceVolBasePath;
+    private final String destVolBasePath;
 
     /**
      * Constructs a volume pair.
+     * @param sourceVolUuid     - Source Volume
+     * @param sourceVolBasePath - Source Volume Base Path
+     * @param destVolUuid       - Destination Volume
+     * @param destVolBasePath   - Destination Volume Base Path
+     */
+    public VolumePair(final String sourceVolUuid,
+        final String sourceVolBasePath, final String destVolUuid,
+        final String destVolBasePath) {
+      this.sourceVolUuid = sourceVolUuid;
+      this.sourceVolBasePath = sourceVolBasePath;
+      this.destVolUuid = destVolUuid;
+      this.destVolBasePath = destVolBasePath;
+    }
+
+    /**
+     * Gets source volume UUID.
      *
-     * @param source - Source Volume
-     * @param dest   - Destination Volume
+     * @return UUID String
      */
-    public VolumePair(FsVolumeSpi source, FsVolumeSpi dest) {
-      this.source = source;
-      this.dest = dest;
+    public String getSourceVolUuid() {
+      return sourceVolUuid;
     }
 
     /**
-     * gets source volume.
+     * Gets source volume base path.
+     * @return String
+     */
+    public String getSourceVolBasePath() {
+      return sourceVolBasePath;
+    }
+    /**
+     * Gets destination volume UUID.
      *
-     * @return volume
+     * @return UUID String
      */
-    public FsVolumeSpi getSource() {
-      return source;
+    public String getDestVolUuid() {
+      return destVolUuid;
     }
 
     /**
-     * Gets Destination volume.
+     * Gets destination volume base path.
      *
-     * @return volume.
+     * @return String
      */
-    public FsVolumeSpi getDest() {
-      return dest;
+    public String getDestVolBasePath() {
+      return destVolBasePath;
     }
 
     @Override
@@ -636,13 +684,21 @@ public class DiskBalancer {
       }
 
       VolumePair that = (VolumePair) o;
-      return source.equals(that.source) && dest.equals(that.dest);
+      return sourceVolUuid.equals(that.sourceVolUuid)
+          && sourceVolBasePath.equals(that.sourceVolBasePath)
+          && destVolUuid.equals(that.destVolUuid)
+          && destVolBasePath.equals(that.destVolBasePath);
     }
 
     @Override
     public int hashCode() {
-      int result = source.getBasePath().hashCode();
-      result = 31 * result + dest.getBasePath().hashCode();
+      final int primeNum = 31;
+      final List<String> volumeStrList = Arrays.asList(sourceVolUuid,
+          sourceVolBasePath, destVolUuid, destVolBasePath);
+      int result = 1;
+      for (String str : volumeStrList) {
+        result = (result * primeNum) + str.hashCode();
+      }
       return result;
     }
   }
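
Aside: the 31-based loop above computes exactly the combination that
java.util.Objects.hash (JDK 7+) produces, so an equivalent, more compact
form, shown only as an alternative and not what the patch uses, would be:

    @Override
    public int hashCode() {
      // Same 31-based combination over the four identifying strings;
      // Objects.hash additionally tolerates nulls, which the loop does not.
      return java.util.Objects.hash(sourceVolUuid, sourceVolBasePath,
          destVolUuid, destVolBasePath);
    }
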
@@ -932,8 +988,28 @@ public class DiskBalancer {
      */
     @Override
     public void copyBlocks(VolumePair pair, DiskBalancerWorkItem item) {
-      FsVolumeSpi source = pair.getSource();
-      FsVolumeSpi dest = pair.getDest();
+      String sourceVolUuid = pair.getSourceVolUuid();
+      String destVolUuid = pair.getDestVolUuid();
+
+      // When any of the DiskBalancerWorkItem volumes are not
+      // available, return after setting error in item.
+      FsVolumeSpi source = getFsVolume(this.dataset, sourceVolUuid);
+      if (source == null) {
+        final String errMsg = "Disk Balancer - Unable to find source volume: "
+            + pair.getSourceVolBasePath();
+        LOG.error(errMsg);
+        item.setErrMsg(errMsg);
+        return;
+      }
+      FsVolumeSpi dest = getFsVolume(this.dataset, destVolUuid);
+      if (dest == null) {
+        final String errMsg = "Disk Balancer - Unable to find dest volume: "
+            + pair.getDestVolBasePath();
+        LOG.error(errMsg);
+        item.setErrMsg(errMsg);
+        return;
+      }
+
       List<FsVolumeSpi.BlockIterator> poolIters = new LinkedList<>();
       startTime = Time.now();
       item.setStartTime(startTime);
@@ -977,7 +1053,7 @@ public class DiskBalancer {
             // we are not able to find any blocks to copy.
             if (block == null) {
               LOG.error("No source blocks, exiting the copy. Source: {}, " +
-                  "dest:{}", source.getBasePath(), dest.getBasePath());
+                  "Dest:{}", source.getBasePath(), dest.getBasePath());
               this.setExitFlag();
               continue;
             }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03f519a7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
index eb15bdc..d911e74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
@@ -20,6 +20,7 @@ import com.google.common.base.Preconditions;
 import com.google.common.base.Supplier;
 import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.ReconfigurationException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
@@ -27,9 +28,15 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DiskBalancer;
+import org.apache.hadoop.hdfs.server.datanode.DiskBalancer.DiskBalancerMover;
+import org.apache.hadoop.hdfs.server.datanode.DiskBalancer.VolumePair;
+import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkItem;
 import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus;
+import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus.Result;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
@@ -41,18 +48,30 @@ import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
+import org.junit.Assert;
 import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicInteger;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
+import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doAnswer;
 
 /**
  * Test Disk Balancer.
@@ -186,6 +205,47 @@ public class TestDiskBalancer {
   }
 
   /**
+   * Test disk balancer behavior when one of the disks involved
+   * in the balancing operation is removed after submitting the plan.
+   * @throws Exception
+   */
+  @Test
+  public void testDiskBalancerWhenRemovingVolumes() throws Exception {
+
+    Configuration conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
+
+    final int blockCount = 100;
+    final int blockSize = 1024;
+    final int diskCount = 2;
+    final int dataNodeCount = 1;
+    final int dataNodeIndex = 0;
+    final int sourceDiskIndex = 0;
+
+    MiniDFSCluster cluster = new ClusterBuilder()
+        .setBlockCount(blockCount)
+        .setBlockSize(blockSize)
+        .setDiskCount(diskCount)
+        .setNumDatanodes(dataNodeCount)
+        .setConf(conf)
+        .build();
+
+    try {
+      DataMover dataMover = new DataMover(cluster, dataNodeIndex,
+          sourceDiskIndex, conf, blockSize, blockCount);
+      dataMover.moveDataToSourceDisk();
+      NodePlan plan = dataMover.generatePlan();
+      dataMover.executePlanDuringDiskRemove(plan);
+      dataMover.verifyAllVolumesHaveData();
+      dataMover.verifyTolerance(plan, 0, sourceDiskIndex, 10);
+    } catch (Exception e) {
+      Assert.fail("Unexpected exception: " + e);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  /**
   * Sets all disks' capacity to the specified size.
    *
    * @param cluster - DiskBalancerCluster
@@ -446,6 +506,102 @@ public class TestDiskBalancer {
       }, 1000, 100000);
     }
 
+    public void executePlanDuringDiskRemove(NodePlan plan) throws
+        IOException, TimeoutException, InterruptedException {
+      CountDownLatch createWorkPlanLatch = new CountDownLatch(1);
+      CountDownLatch removeDiskLatch = new CountDownLatch(1);
+      AtomicInteger errorCount = new AtomicInteger(0);
+
+      LOG.info("FSDataSet: " + node.getFSDataset());
+      final FsDatasetSpi<?> fsDatasetSpy = Mockito.spy(node.getFSDataset());
+      doAnswer(new Answer<Object>() {
+          public Object answer(InvocationOnMock invocation) {
+            try {
+              node.getFSDataset().moveBlockAcrossVolumes(
+                  (ExtendedBlock)invocation.getArguments()[0],
+                  (FsVolumeSpi) invocation.getArguments()[1]);
+            } catch (Exception e) {
+              errorCount.incrementAndGet();
+            }
+            return null;
+          }
+        }).when(fsDatasetSpy).moveBlockAcrossVolumes(
+            any(ExtendedBlock.class), any(FsVolumeSpi.class));
+
+      DiskBalancerMover diskBalancerMover = new DiskBalancerMover(
+          fsDatasetSpy, conf);
+      diskBalancerMover.setRunnable();
+
+      DiskBalancerMover diskBalancerMoverSpy = Mockito.spy(diskBalancerMover);
+      doAnswer(new Answer<Object>() {
+          public Object answer(InvocationOnMock invocation) {
+            createWorkPlanLatch.countDown();
+            LOG.info("Waiting for the disk removal!");
+            try {
+              removeDiskLatch.await();
+            } catch (InterruptedException e) {
+              LOG.info("Encountered " + e);
+            }
+            LOG.info("Got disk removal notification, resuming copyBlocks!");
+            diskBalancerMover.copyBlocks((VolumePair)(invocation
+                .getArguments()[0]), (DiskBalancerWorkItem)(invocation
+                .getArguments()[1]));
+            return null;
+          }
+        }).when(diskBalancerMoverSpy).copyBlocks(
+            any(VolumePair.class), any(DiskBalancerWorkItem.class));
+
+      DiskBalancer diskBalancer = new DiskBalancer(node.getDatanodeUuid(),
+          conf, diskBalancerMoverSpy);
+
+      List<String> oldDirs = new ArrayList<String>(node.getConf().
+          getTrimmedStringCollection(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
+      final String newDirs = oldDirs.get(0);
+      LOG.info("Reconfigure newDirs:" + newDirs);
+      Thread reconfigThread = new Thread() {
+        public void run() {
+          try {
+            LOG.info("Waiting for work plan creation!");
+            createWorkPlanLatch.await();
+            LOG.info("Work plan created. Removing disk!");
+            assertThat(
+                "DN did not update its own config", node.
+                reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, newDirs),
+                is(node.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
+            Thread.sleep(1000);
+            LOG.info("Removed disk!");
+            removeDiskLatch.countDown();
+          } catch (ReconfigurationException | InterruptedException e) {
+            Assert.fail("Unexpected error while reconfiguring: " + e);
+          }
+        }
+      };
+      reconfigThread.start();
+
+      String planJson = plan.toJson();
+      String planID = DigestUtils.shaHex(planJson);
+      diskBalancer.submitPlan(planID, 1, PLAN_FILE, planJson, false);
+
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override
+        public Boolean get() {
+          try {
+            LOG.info("Work Status: " + diskBalancer.
+                queryWorkStatus().toJsonString());
+            Result result = diskBalancer.queryWorkStatus().getResult();
+            return (result == Result.PLAN_DONE);
+          } catch (IOException e) {
+            return false;
+          }
+        }
+      }, 1000, 100000);
+
+      assertTrue("Disk balancer operation hit max errors!", errorCount.get() <
+          DFSConfigKeys.DFS_DISK_BALANCER_MAX_DISK_ERRORS_DEFAULT);
+      createWorkPlanLatch.await();
+      removeDiskLatch.await();
+    }
+
     /**
      * Verifies the Plan Execution has been done.
      */
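
To run just the new scenario, the usual surefire test filter should work from
hadoop-hdfs-project/hadoop-hdfs in a normal dev checkout, e.g.
mvn test -Dtest=TestDiskBalancer#testDiskBalancerWhenRemovingVolumes.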


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[27/57] [abbrv] hadoop git commit: Revert "HADOOP-13584. hdoop-aliyun: merge HADOOP-12756 branch back" This reverts commit 5707f88d8550346f167e45c2f8c4161eb3957e3a

Posted by in...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/site/markdown/tools/hadoop-aliyun/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/site/markdown/tools/hadoop-aliyun/index.md b/hadoop-tools/hadoop-aliyun/src/site/markdown/tools/hadoop-aliyun/index.md
deleted file mode 100644
index 88c83b5..0000000
--- a/hadoop-tools/hadoop-aliyun/src/site/markdown/tools/hadoop-aliyun/index.md
+++ /dev/null
@@ -1,294 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-# Hadoop-Aliyun module: Integration with Aliyun Web Services
-
-<!-- MACRO{toc|fromDepth=0|toDepth=5} -->
-
-## Overview
-
-The `hadoop-aliyun` module provides support for Aliyun integration with
-[Aliyun Object Storage Service (Aliyun OSS)](https://www.aliyun.com/product/oss).
-The generated JAR file, `hadoop-aliyun.jar` also declares a transitive
-dependency on all external artifacts which are needed for this support, enabling
-downstream applications to easily use this support.
-
-To make it part of Apache Hadoop's default classpath, simply make sure
-that HADOOP_OPTIONAL_TOOLS in hadoop-env.sh has 'hadoop-aliyun' in the list.
-
-### Features
-
-* Read and write data stored in Aliyun OSS.
-* Present a hierarchical file system view by implementing the standard Hadoop
-[`FileSystem`](../api/org/apache/hadoop/fs/FileSystem.html) interface.
-* Can act as a source of data in a MapReduce job, or a sink.
-
-### Warning #1: Object Stores are not filesystems.
-
-Aliyun OSS is an example of "an object store". In order to achieve scalability
-and especially high availability, Aliyun OSS has relaxed some of the constraints
-which classic "POSIX" filesystems promise.
-
-
-
-Specifically
-
-1. Atomic operations: `delete()` and `rename()` are implemented by recursive
-file-by-file operations. They take time at least proportional to the number of files,
-during which time partial updates may be visible. `delete()` and `rename()`
-cannot guarantee atomicity. If the operations are interrupted, the filesystem
-is left in an intermediate state.
-2. File owner and group are persisted, but the permissions model is not enforced.
-Authorization occurs at the level of the entire Aliyun account via
-[Aliyun Resource Access Management (Aliyun RAM)](https://www.aliyun.com/product/ram).
-3. Directory last access time is not tracked.
-4. The append operation is not supported.
-
-### Warning #2: Directory last access time is not tracked
-Features of Hadoop relying on this can have unexpected behaviour, e.g. the
-AggregatedLogDeletionService of YARN will not remove the appropriate logfiles.
-
-### Warning #3: Your Aliyun credentials are valuable
-
-Your Aliyun credentials not only pay for services, they offer read and write
-access to the data. Anyone with the account can not only read your datasets,
-they can also delete them.
-
-Do not inadvertently share these credentials through means such as:
-1. Checking in to SCM any configuration files containing the secrets.
-2. Logging them to a console, as they invariably end up being seen.
-3. Defining filesystem URIs with the credentials in the URL, such as
-`oss://accessKeyId:accessKeySecret@directory/file`. They will end up in
-logs and error messages.
-4. Including the secrets in bug reports.
-
-If you do any of these: change your credentials immediately!
-
-### Warning #4: The Aliyun OSS client provided by Aliyun E-MapReduce is different from this implementation
-
-Specifically: on Aliyun E-MapReduce, `oss://` is also supported but with
-a different implementation. If you are using Aliyun E-MapReduce,
-follow these instructions, and be aware that all issues related to Aliyun
-OSS integration in E-MapReduce can only be addressed by Aliyun themselves:
-please raise your issues with them.
-
-## OSS
-
-### Authentication properties
-
-    <property>
-      <name>fs.oss.accessKeyId</name>
-      <description>Aliyun access key ID</description>
-    </property>
-
-    <property>
-      <name>fs.oss.accessKeySecret</name>
-      <description>Aliyun access key secret</description>
-    </property>
-
-    <property>
-      <name>fs.oss.credentials.provider</name>
-      <description>
-        Class name of a credentials provider that implements
-        com.aliyun.oss.common.auth.CredentialsProvider. Omit if using access/secret keys
-        or another authentication mechanism. The specified class must provide an
-        accessible constructor accepting java.net.URI and
-        org.apache.hadoop.conf.Configuration, or an accessible default constructor.
-      </description>
-    </property>
-
-### Other properties
-
-    <property>
-      <name>fs.oss.endpoint</name>
-      <description>Aliyun OSS endpoint to connect to. An up-to-date list is
-        provided in the Aliyun OSS Documentation.
-       </description>
-    </property>
-
-    <property>
-      <name>fs.oss.proxy.host</name>
-      <description>Hostname of the (optional) proxy server for Aliyun OSS connection</description>
-    </property>
-
-    <property>
-      <name>fs.oss.proxy.port</name>
-      <description>Proxy server port</description>
-    </property>
-
-    <property>
-      <name>fs.oss.proxy.username</name>
-      <description>Username for authenticating with proxy server</description>
-    </property>
-
-    <property>
-      <name>fs.oss.proxy.password</name>
-      <description>Password for authenticating with proxy server.</description>
-    </property>
-
-    <property>
-      <name>fs.oss.proxy.domain</name>
-      <description>Domain for authenticating with proxy server.</description>
-    </property>
-
-    <property>
-      <name>fs.oss.proxy.workstation</name>
-      <description>Workstation for authenticating with proxy server.</description>
-    </property>
-
-    <property>
-      <name>fs.oss.attempts.maximum</name>
-      <value>20</value>
-      <description>How many times we should retry commands on transient errors.</description>
-    </property>
-
-    <property>
-      <name>fs.oss.connection.establish.timeout</name>
-      <value>50000</value>
-      <description>Connection setup timeout in milliseconds.</description>
-    </property>
-
-    <property>
-      <name>fs.oss.connection.timeout</name>
-      <value>200000</value>
-      <description>Socket connection timeout in milliseconds.</description>
-    </property>
-
-    <property>
-      <name>fs.oss.paging.maximum</name>
-      <value>1000</value>
-      <description>How many keys to request at a time from Aliyun OSS when doing directory listings.
-      </description>
-    </property>
-
-    <property>
-      <name>fs.oss.multipart.upload.size</name>
-      <value>10485760</value>
-      <description>Size of each of multipart pieces in bytes.</description>
-    </property>
-
-    <property>
-      <name>fs.oss.multipart.upload.threshold</name>
-      <value>20971520</value>
-      <description>Minimum size in bytes before we start a multipart uploads or copy.</description>
-    </property>
-
-    <property>
-      <name>fs.oss.multipart.download.size</name>
-      <value>102400</value>
-      <description>Size in bytes of each request from Aliyun OSS.</description>
-    </property>
-
-    <property>
-      <name>fs.oss.buffer.dir</name>
-      <description>Comma separated list of directories to buffer OSS data before uploading to Aliyun OSS</description>
-    </property>
-
-    <property>
-      <name>fs.oss.acl.default</name>
-      <value></value>
-      <description>Set a canned ACL for bucket. Value may be private, public-read, public-read-write.
-      </description>
-    </property>
-
-    <property>
-      <name>fs.oss.server-side-encryption-algorithm</name>
-      <value></value>
-      <description>Specify a server-side encryption algorithm for oss: file system.
-         Unset by default, and the only other currently allowable value is AES256.
-      </description>
-    </property>
-
-    <property>
-      <name>fs.oss.connection.maximum</name>
-      <value>32</value>
-      <description>Number of simultaneous connections to oss.</description>
-    </property>
-
-    <property>
-      <name>fs.oss.connection.secure.enabled</name>
-      <value>true</value>
-      <description>Connect to oss over ssl or not, true by default.</description>
-    </property>
-
-## Testing the hadoop-aliyun Module
-
-To test `oss://` filesystem client, two files which pass in authentication
-details to the test runner are needed.
-
-1. `auth-keys.xml`
-2. `core-site.xml`
-
-Those two configuration files must be put into
-`hadoop-tools/hadoop-aliyun/src/test/resources`.
-
-### `core-site.xml`
-
-This file pre-exists and sources the configurations created in `auth-keys.xml`.
-
-In most cases, no modification is needed, unless a specific, non-default
-property needs to be set during testing.
-
-### `auth-keys.xml`
-
-This file triggers the testing of the Aliyun OSS module. Without this file,
-*none of the tests in this module will be executed*.
-
-It contains the access key ID/secret and proxy information needed to connect
-to Aliyun OSS; an OSS bucket URL should also be provided.
-
-1. `test.fs.oss.name` : the URL of the bucket for Aliyun OSS tests
-
-The contents of the bucket will be cleaned during the testing process, so
-do not use the bucket for any purpose other than testing.
-
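-A minimal sketch of `auth-keys.xml` (the credential property names below,
-`fs.oss.accessKeyId` and `fs.oss.accessKeySecret`, are assumed to match the
-keys this module reads, and the bucket URL is only a placeholder):
-
-    <?xml version="1.0"?>
-    <configuration>
-      <!-- Bucket URL and credential property names are illustrative;
-           verify them against the Constants class of this module. -->
-      <property>
-        <name>test.fs.oss.name</name>
-        <value>oss://test-aliyun-oss-bucket</value>
-      </property>
-
-      <property>
-        <name>fs.oss.accessKeyId</name>
-        <value>YOUR_ACCESS_KEY_ID</value>
-      </property>
-
-      <property>
-        <name>fs.oss.accessKeySecret</name>
-        <value>YOUR_ACCESS_KEY_SECRET</value>
-      </property>
-    </configuration>
-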
-### Run Hadoop contract tests
-
-Create the file `contract-test-options.xml` under `/test/resources`. If the
-test path `fs.contract.test.fs.oss` is not defined in that file, the contract
-tests will be skipped. Credentials are also needed to run any of these tests;
-they can be copied from `auth-keys.xml` or pulled in through direct XInclude
-inclusion. Here is an example of `contract-test-options.xml`:
-
-    <?xml version="1.0"?>
-    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-    <configuration>
-
-      <include xmlns="http://www.w3.org/2001/XInclude"
-               href="auth-keys.xml"/>
-
-      <property>
-        <name>fs.contract.test.fs.oss</name>
-        <value>oss://spark-tests</value>
-      </property>
-
-      <property>
-        <name>fs.oss.impl</name>
-        <value>org.apache.hadoop.fs.aliyun.AliyunOSSFileSystem</value>
-      </property>
-
-      <property>
-        <name>fs.oss.endpoint</name>
-        <value>oss-cn-hangzhou.aliyuncs.com</value>
-      </property>
-
-      <property>
-        <name>fs.oss.buffer.dir</name>
-        <value>/tmp/oss</value>
-      </property>
-
-      <property>
-        <name>fs.oss.multipart.download.size</name>
-        <value>102400</value>
-      </property>
-    </configuration>
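-
-With both configuration files in place, the module's tests run in the usual
-Maven way, e.g. `mvn test` from `hadoop-tools/hadoop-aliyun` (assuming the
-rest of Hadoop has already been built).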

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSTestUtils.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSTestUtils.java
deleted file mode 100644
index 901cb2b..0000000
--- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSTestUtils.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.aliyun.oss;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.internal.AssumptionViolatedException;
-
-import java.io.IOException;
-import java.net.URI;
-
-/**
- * Utility class for Aliyun OSS Tests.
- */
-public final class AliyunOSSTestUtils {
-
-  private AliyunOSSTestUtils() {
-  }
-
-  /**
-   * Create the test filesystem.
-   *
-   * If the test.fs.oss.name property is not set,
-   * tests will fail.
-   *
-   * @param conf configuration
-   * @return the FS
-   * @throws IOException
-   */
-  public static AliyunOSSFileSystem createTestFileSystem(Configuration conf)
-      throws IOException {
-    String fsname = conf.getTrimmed(
-        TestAliyunOSSFileSystemContract.TEST_FS_OSS_NAME, "");
-
-    boolean liveTest = StringUtils.isNotEmpty(fsname);
-    URI testURI = null;
-    if (liveTest) {
-      testURI = URI.create(fsname);
-      liveTest = testURI.getScheme().equals(Constants.FS_OSS);
-    }
-
-    if (!liveTest) {
-      throw new AssumptionViolatedException("No test filesystem in "
-          + TestAliyunOSSFileSystemContract.TEST_FS_OSS_NAME);
-    }
-    AliyunOSSFileSystem ossfs = new AliyunOSSFileSystem();
-    ossfs.initialize(testURI, conf);
-    return ossfs;
-  }
-
-  /**
-   * Generate unique test path for multiple user tests.
-   *
-   * @return root test path
-   */
-  public static String generateUniqueTestPath() {
-    String testUniqueForkId = System.getProperty("test.unique.fork.id");
-    return testUniqueForkId == null ? "/test" :
-        "/" + testUniqueForkId + "/test";
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunCredentials.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunCredentials.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunCredentials.java
deleted file mode 100644
index e08a4dc..0000000
--- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunCredentials.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.aliyun.oss;
-
-import com.aliyun.oss.common.auth.Credentials;
-import com.aliyun.oss.common.auth.InvalidCredentialsException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.aliyun.oss.contract.AliyunOSSContract;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.apache.hadoop.fs.contract.AbstractFSContractTestBase;
-import org.junit.Test;
-
-import java.io.IOException;
-
-import static org.apache.hadoop.fs.aliyun.oss.Constants.ACCESS_KEY_ID;
-import static org.apache.hadoop.fs.aliyun.oss.Constants.ACCESS_KEY_SECRET;
-import static org.apache.hadoop.fs.aliyun.oss.Constants.SECURITY_TOKEN;
-
-/**
- * Tests use of temporary credentials (for example, Aliyun STS & Aliyun OSS).
- * This test extends a class that "does things to the root directory", and
- * should only be used against transient filesystems where you don't care about
- * the data.
- */
-public class TestAliyunCredentials extends AbstractFSContractTestBase {
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-    return new AliyunOSSContract(conf);
-  }
-
-  @Test
-  public void testCredentialMissingAccessKeyId() throws Throwable {
-    Configuration conf = new Configuration();
-    conf.set(ACCESS_KEY_ID, "");
-    conf.set(ACCESS_KEY_SECRET, "accessKeySecret");
-    conf.set(SECURITY_TOKEN, "token");
-    validateCredential(conf);
-  }
-
-  @Test
-  public void testCredentialMissingAccessKeySecret() throws Throwable {
-    Configuration conf = new Configuration();
-    conf.set(ACCESS_KEY_ID, "accessKeyId");
-    conf.set(ACCESS_KEY_SECRET, "");
-    conf.set(SECURITY_TOKEN, "token");
-    validateCredential(conf);
-  }
-
-  private void validateCredential(Configuration conf) {
-    try {
-      AliyunCredentialsProvider provider
-          = new AliyunCredentialsProvider(conf);
-      Credentials credentials = provider.getCredentials();
-      fail("Expected a CredentialInitializationException, got " + credentials);
-    } catch (InvalidCredentialsException expected) {
-      // expected
-    } catch (IOException e) {
-      fail("Unexpected exception.");
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
deleted file mode 100644
index ad8ef6e..0000000
--- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
+++ /dev/null
@@ -1,239 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.aliyun.oss;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.FileSystemContractBaseTest;
-import org.apache.hadoop.fs.Path;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-
-/**
- * Tests a live Aliyun OSS system.
- *
- * This uses BlockJUnit4ClassRunner because FileSystemContractBaseTest is
- * derived from TestCase, which uses the old JUnit 3 runner. That runner
- * doesn't honor assumptions properly, making it impossible to skip the tests
- * if we don't have a valid bucket.
- */
-public class TestAliyunOSSFileSystemContract
-    extends FileSystemContractBaseTest {
-  public static final String TEST_FS_OSS_NAME = "test.fs.oss.name";
-  private static String testRootPath =
-      AliyunOSSTestUtils.generateUniqueTestPath();
-
-  @Override
-  public void setUp() throws Exception {
-    Configuration conf = new Configuration();
-    fs = AliyunOSSTestUtils.createTestFileSystem(conf);
-    super.setUp();
-  }
-
-  @Override
-  public void tearDown() throws Exception {
-    if (fs != null) {
-      fs.delete(super.path(testRootPath), true);
-    }
-    super.tearDown();
-  }
-
-  @Override
-  protected Path path(String path) {
-    if (path.startsWith("/")) {
-      return super.path(testRootPath + path);
-    } else {
-      return super.path(testRootPath + "/" + path);
-    }
-  }
-
-  @Override
-  public void testMkdirsWithUmask() throws Exception {
-    // not supported
-  }
-
-  @Override
-  public void testRootDirAlwaysExists() throws Exception {
-    //this will throw an exception if the path is not found
-    fs.getFileStatus(super.path("/"));
-    //this catches overrides of the base exists() method that don't
-    //use getFileStatus() as an existence probe
-    assertTrue("FileSystem.exists() fails for root",
-        fs.exists(super.path("/")));
-  }
-
-  @Override
-  public void testRenameRootDirForbidden() throws Exception {
-    if (!renameSupported()) {
-      return;
-    }
-    rename(super.path("/"),
-           super.path("/test/newRootDir"),
-           false, true, false);
-  }
-
-  public void testDeleteSubdir() throws IOException {
-    Path parentDir = this.path("/test/hadoop");
-    Path file = this.path("/test/hadoop/file");
-    Path subdir = this.path("/test/hadoop/subdir");
-    this.createFile(file);
-
-    assertTrue("Created subdir", this.fs.mkdirs(subdir));
-    assertTrue("File exists", this.fs.exists(file));
-    assertTrue("Parent dir exists", this.fs.exists(parentDir));
-    assertTrue("Subdir exists", this.fs.exists(subdir));
-
-    assertTrue("Deleted subdir", this.fs.delete(subdir, true));
-    assertTrue("Parent should exist", this.fs.exists(parentDir));
-
-    assertTrue("Deleted file", this.fs.delete(file, false));
-    assertTrue("Parent should exist", this.fs.exists(parentDir));
-  }
-
-
-  @Override
-  protected boolean renameSupported() {
-    return true;
-  }
-
-  @Override
-  public void testRenameNonExistentPath() throws Exception {
-    if (this.renameSupported()) {
-      Path src = this.path("/test/hadoop/path");
-      Path dst = this.path("/test/new/newpath");
-      try {
-        super.rename(src, dst, false, false, false);
-        fail("Should throw FileNotFoundException!");
-      } catch (FileNotFoundException e) {
-        // expected
-      }
-    }
-  }
-
-  @Override
-  public void testRenameFileMoveToNonExistentDirectory() throws Exception {
-    if (this.renameSupported()) {
-      Path src = this.path("/test/hadoop/file");
-      this.createFile(src);
-      Path dst = this.path("/test/new/newfile");
-      try {
-        super.rename(src, dst, false, true, false);
-        fail("Should throw FileNotFoundException!");
-      } catch (FileNotFoundException e) {
-        // expected
-      }
-    }
-  }
-
-  @Override
-  public void testRenameDirectoryMoveToNonExistentDirectory() throws Exception {
-    if (this.renameSupported()) {
-      Path src = this.path("/test/hadoop/dir");
-      this.fs.mkdirs(src);
-      Path dst = this.path("/test/new/newdir");
-      try {
-        super.rename(src, dst, false, true, false);
-        fail("Should throw FileNotFoundException!");
-      } catch (FileNotFoundException e) {
-        // expected
-      }
-    }
-  }
-
-  @Override
-  public void testRenameFileMoveToExistingDirectory() throws Exception {
-    super.testRenameFileMoveToExistingDirectory();
-  }
-
-  @Override
-  public void testRenameFileAsExistingFile() throws Exception {
-    if (this.renameSupported()) {
-      Path src = this.path("/test/hadoop/file");
-      this.createFile(src);
-      Path dst = this.path("/test/new/newfile");
-      this.createFile(dst);
-      try {
-        super.rename(src, dst, false, true, true);
-        fail("Should throw FileAlreadyExistsException");
-      } catch (FileAlreadyExistsException e) {
-        // expected
-      }
-    }
-  }
-
-  @Override
-  public void testRenameDirectoryAsExistingFile() throws Exception {
-    if (this.renameSupported()) {
-      Path src = this.path("/test/hadoop/dir");
-      this.fs.mkdirs(src);
-      Path dst = this.path("/test/new/newfile");
-      this.createFile(dst);
-      try {
-        super.rename(src, dst, false, true, true);
-        fail("Should throw FileAlreadyExistsException");
-      } catch (FileAlreadyExistsException e) {
-        // expected
-      }
-    }
-  }
-
-  public void testGetFileStatusFileAndDirectory() throws Exception {
-    Path filePath = this.path("/test/oss/file1");
-    this.createFile(filePath);
-    assertTrue("Should be file", this.fs.getFileStatus(filePath).isFile());
-    assertFalse("Should not be directory",
-        this.fs.getFileStatus(filePath).isDirectory());
-
-    Path dirPath = this.path("/test/oss/dir");
-    this.fs.mkdirs(dirPath);
-    assertTrue("Should be directory",
-        this.fs.getFileStatus(dirPath).isDirectory());
-    assertFalse("Should not be file", this.fs.getFileStatus(dirPath).isFile());
-  }
-
-  public void testMkdirsForExistingFile() throws Exception {
-    Path testFile = this.path("/test/hadoop/file");
-    assertFalse(this.fs.exists(testFile));
-    this.createFile(testFile);
-    assertTrue(this.fs.exists(testFile));
-    try {
-      this.fs.mkdirs(testFile);
-      fail("Should throw FileAlreadyExistsException!");
-    } catch (FileAlreadyExistsException e) {
-      // expected
-    }
-  }
-
-  public void testWorkingDirectory() throws Exception {
-    Path workDir = super.path(this.getDefaultWorkingDirectory());
-    assertEquals(workDir, this.fs.getWorkingDirectory());
-    this.fs.setWorkingDirectory(super.path("."));
-    assertEquals(workDir, this.fs.getWorkingDirectory());
-    this.fs.setWorkingDirectory(super.path(".."));
-    assertEquals(workDir.getParent(), this.fs.getWorkingDirectory());
-    Path relativeDir = super.path("hadoop");
-    this.fs.setWorkingDirectory(relativeDir);
-    assertEquals(relativeDir, this.fs.getWorkingDirectory());
-    Path absoluteDir = super.path("/test/hadoop");
-    this.fs.setWorkingDirectory(absoluteDir);
-    assertEquals(absoluteDir, this.fs.getWorkingDirectory());
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemStore.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemStore.java
deleted file mode 100644
index 7f4bac2..0000000
--- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemStore.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.aliyun.oss;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.URI;
-import java.security.DigestInputStream;
-import java.security.DigestOutputStream;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeNotNull;
-
-/**
- * Test the bridging logic between Hadoop's abstract filesystem and
- * Aliyun OSS.
- */
-public class TestAliyunOSSFileSystemStore {
-  private Configuration conf;
-  private AliyunOSSFileSystemStore store;
-  private AliyunOSSFileSystem fs;
-
-  @Before
-  public void setUp() throws Exception {
-    conf = new Configuration();
-    fs = new AliyunOSSFileSystem();
-    fs.initialize(URI.create(conf.get("test.fs.oss.name")), conf);
-    store = fs.getStore();
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    try {
-      store.purge("test");
-    } catch (Exception e) {
-      e.printStackTrace();
-      throw e;
-    }
-  }
-
-  @BeforeClass
-  public static void checkSettings() throws Exception {
-    Configuration conf = new Configuration();
-    assumeNotNull(conf.get(Constants.ACCESS_KEY_ID));
-    assumeNotNull(conf.get(Constants.ACCESS_KEY_SECRET));
-    assumeNotNull(conf.get("test.fs.oss.name"));
-  }
-
-  protected void writeRenameReadCompare(Path path, long len)
-      throws IOException, NoSuchAlgorithmException {
-    // If len > fs.oss.multipart.upload.threshold,
-    // we'll use a multipart upload copy
-    MessageDigest digest = MessageDigest.getInstance("MD5");
-    OutputStream out = new BufferedOutputStream(
-        new DigestOutputStream(fs.create(path, false), digest));
-    for (long i = 0; i < len; i++) {
-      out.write('Q');
-    }
-    out.flush();
-    out.close();
-
-    assertTrue("Exists", fs.exists(path));
-
-    Path copyPath = path.suffix(".copy");
-    fs.rename(path, copyPath);
-
-    assertTrue("Copy exists", fs.exists(copyPath));
-
-    // Download file from Aliyun OSS and compare the digest against the original
-    MessageDigest digest2 = MessageDigest.getInstance("MD5");
-    InputStream in = new BufferedInputStream(
-        new DigestInputStream(fs.open(copyPath), digest2));
-    long copyLen = 0;
-    while (in.read() != -1) {
-      copyLen++;
-    }
-    in.close();
-
-    assertEquals("Copy length matches original", len, copyLen);
-    assertArrayEquals("Digests match", digest.digest(), digest2.digest());
-  }
-
-  @Test
-  public void testSmallUpload() throws IOException, NoSuchAlgorithmException {
-    // Regular upload, regular copy
-    writeRenameReadCompare(new Path("/test/small"), 16384);
-  }
-
-  @Test
-  public void testLargeUpload()
-      throws IOException, NoSuchAlgorithmException {
-    // Multipart upload, multipart copy
-    writeRenameReadCompare(new Path("/test/xlarge"), 52428800L); // 50MB byte
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSInputStream.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSInputStream.java
deleted file mode 100644
index 37af28f..0000000
--- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSInputStream.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.aliyun.oss;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.apache.hadoop.io.IOUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Random;
-
-import static org.junit.Assert.assertTrue;
-
-/**
- * Tests basic functionality for AliyunOSSInputStream, including seeking and
- * reading files.
- */
-public class TestAliyunOSSInputStream {
-
-  private FileSystem fs;
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestAliyunOSSInputStream.class);
-
-  private static String testRootPath =
-      AliyunOSSTestUtils.generateUniqueTestPath();
-
-  @Rule
-  public Timeout testTimeout = new Timeout(30 * 60 * 1000);
-
-  @Before
-  public void setUp() throws Exception {
-    Configuration conf = new Configuration();
-    fs = AliyunOSSTestUtils.createTestFileSystem(conf);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    if (fs != null) {
-      fs.delete(new Path(testRootPath), true);
-    }
-  }
-
-  private Path setPath(String path) {
-    if (path.startsWith("/")) {
-      return new Path(testRootPath + path);
-    } else {
-      return new Path(testRootPath + "/" + path);
-    }
-  }
-
-  @Test
-  public void testSeekFile() throws Exception {
-    Path smallSeekFile = setPath("/test/smallSeekFile.txt");
-    long size = 5 * 1024 * 1024;
-
-    ContractTestUtils.generateTestFile(this.fs, smallSeekFile, size, 256, 255);
-    LOG.info("5MB file created: smallSeekFile.txt");
-
-    FSDataInputStream instream = this.fs.open(smallSeekFile);
-    int seekTimes = 5;
-    LOG.info("multiple fold position seeking test...:");
-    for (int i = 0; i < seekTimes; i++) {
-      long pos = size / (seekTimes - i) - 1;
-      LOG.info("begin seeking for pos: " + pos);
-      instream.seek(pos);
-      assertTrue("expected position at:" + pos + ", but got:"
-          + instream.getPos(), instream.getPos() == pos);
-      LOG.info("completed seeking at pos: " + instream.getPos());
-    }
-    LOG.info("random position seeking test...:");
-    Random rand = new Random();
-    for (int i = 0; i < seekTimes; i++) {
-      long pos = Math.abs(rand.nextLong()) % size;
-      LOG.info("begin seeking for pos: " + pos);
-      instream.seek(pos);
-      assertTrue("expected position at:" + pos + ", but got:"
-          + instream.getPos(), instream.getPos() == pos);
-      LOG.info("completed seeking at pos: " + instream.getPos());
-    }
-    IOUtils.closeStream(instream);
-  }
-
-  @Test
-  public void testReadFile() throws Exception {
-    final int bufLen = 256;
-    final int sizeFlag = 5;
-    String filename = "readTestFile_" + sizeFlag + ".txt";
-    Path readTestFile = setPath("/test/" + filename);
-    long size = sizeFlag * 1024 * 1024;
-
-    ContractTestUtils.generateTestFile(this.fs, readTestFile, size, 256, 255);
-    LOG.info(sizeFlag + "MB file created: /test/" + filename);
-
-    FSDataInputStream instream = this.fs.open(readTestFile);
-    byte[] buf = new byte[bufLen];
-    long bytesRead = 0;
-    while (bytesRead < size) {
-      int bytes;
-      if (size - bytesRead < bufLen) {
-        int remaining = (int)(size - bytesRead);
-        bytes = instream.read(buf, 0, remaining);
-      } else {
-        bytes = instream.read(buf, 0, bufLen);
-      }
-      bytesRead += bytes;
-
-      if (bytesRead % (1024 * 1024) == 0) {
-        int available = instream.available();
-        int remaining = (int)(size - bytesRead);
-        assertTrue("expected remaining:" + remaining + ", but got:" + available,
-            remaining == available);
-        LOG.info("Bytes read: " + Math.round((double)bytesRead / (1024 * 1024))
-            + " MB");
-      }
-    }
-    assertTrue(instream.available() == 0);
-    IOUtils.closeStream(instream);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSOutputStream.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSOutputStream.java
deleted file mode 100644
index 6b87d9c..0000000
--- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSOutputStream.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.aliyun.oss;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-import java.io.IOException;
-
-/**
- * Tests regular and multi-part upload functionality for AliyunOSSOutputStream.
- */
-public class TestAliyunOSSOutputStream {
-  private FileSystem fs;
-  private static String testRootPath =
-      AliyunOSSTestUtils.generateUniqueTestPath();
-
-  @Rule
-  public Timeout testTimeout = new Timeout(30 * 60 * 1000);
-
-  @Before
-  public void setUp() throws Exception {
-    Configuration conf = new Configuration();
-    conf.setLong(Constants.MIN_MULTIPART_UPLOAD_THRESHOLD_KEY, 5 * 1024 * 1024);
-    conf.setInt(Constants.MULTIPART_UPLOAD_SIZE_KEY, 5 * 1024 * 1024);
-    fs = AliyunOSSTestUtils.createTestFileSystem(conf);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    if (fs != null) {
-      fs.delete(new Path(testRootPath), true);
-    }
-  }
-
-  protected Path getTestPath() {
-    return new Path(testRootPath + "/test-aliyun-oss");
-  }
-
-  @Test
-  public void testRegularUpload() throws IOException {
-    ContractTestUtils.createAndVerifyFile(fs, getTestPath(), 1024 * 1024);
-  }
-
-  @Test
-  public void testMultiPartUpload() throws IOException {
-    ContractTestUtils.createAndVerifyFile(fs, getTestPath(), 6 * 1024 * 1024);
-  }
-
-  @Test
-  public void testMultiPartUploadLimit() throws IOException {
-    long partSize1 = AliyunOSSUtils.calculatePartSize(10 * 1024, 100 * 1024);
-    assert(10 * 1024 / partSize1 < Constants.MULTIPART_UPLOAD_PART_NUM_LIMIT);
-
-    long partSize2 = AliyunOSSUtils.calculatePartSize(200 * 1024, 100 * 1024);
-    assert(200 * 1024 / partSize2 < Constants.MULTIPART_UPLOAD_PART_NUM_LIMIT);
-
-    long partSize3 = AliyunOSSUtils.calculatePartSize(10000 * 100 * 1024,
-        100 * 1024);
-    assert(10000 * 100 * 1024 / partSize3
-        < Constants.MULTIPART_UPLOAD_PART_NUM_LIMIT);
-
-    long partSize4 = AliyunOSSUtils.calculatePartSize(10001 * 100 * 1024,
-        100 * 1024);
-    assert(10001 * 100 * 1024 / partSize4
-        < Constants.MULTIPART_UPLOAD_PART_NUM_LIMIT);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/AliyunOSSContract.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/AliyunOSSContract.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/AliyunOSSContract.java
deleted file mode 100644
index 624c606..0000000
--- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/AliyunOSSContract.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.aliyun.oss.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.contract.AbstractBondedFSContract;
-
-/**
- * The contract of Aliyun OSS: only enabled if the test bucket is provided.
- */
-public class AliyunOSSContract extends AbstractBondedFSContract {
-
-  public static final String CONTRACT_XML = "contract/aliyun-oss.xml";
-
-  public AliyunOSSContract(Configuration conf) {
-    super(conf);
-    //insert the base features
-    addConfResource(CONTRACT_XML);
-  }
-
-  @Override
-  public String getScheme() {
-    return "oss";
-  }
-
-  @Override
-  public Path getTestPath() {
-    String testUniqueForkId = System.getProperty("test.unique.fork.id");
-    return testUniqueForkId == null ? super.getTestPath() :
-        new Path("/" + testUniqueForkId, "test");
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractCreate.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractCreate.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractCreate.java
deleted file mode 100644
index 88dd8cd..0000000
--- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractCreate.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.aliyun.oss.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-/**
- * Aliyun OSS contract creating tests.
- */
-public class TestAliyunOSSContractCreate extends AbstractContractCreateTest {
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-    return new AliyunOSSContract(conf);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractDelete.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractDelete.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractDelete.java
deleted file mode 100644
index 1658d80..0000000
--- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractDelete.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.aliyun.oss.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-/**
- * Aliyun OSS contract deleting tests.
- */
-public class TestAliyunOSSContractDelete extends AbstractContractDeleteTest {
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-    return new AliyunOSSContract(conf);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractDistCp.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractDistCp.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractDistCp.java
deleted file mode 100644
index 18d09d5..0000000
--- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractDistCp.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.aliyun.oss.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.tools.contract.AbstractContractDistCpTest;
-
-import static org.apache.hadoop.fs.aliyun.oss.Constants.*;
-
-/**
- * Contract test suite covering Aliyun OSS integration with DistCp.
- */
-public class TestAliyunOSSContractDistCp extends AbstractContractDistCpTest {
-
-  private static final long MULTIPART_SETTING = 8 * 1024 * 1024; // 8 MB
-
-  @Override
-  protected Configuration createConfiguration() {
-    Configuration newConf = super.createConfiguration();
-    newConf.setLong(MIN_MULTIPART_UPLOAD_THRESHOLD_KEY, MULTIPART_SETTING);
-    newConf.setLong(MULTIPART_UPLOAD_SIZE_KEY, MULTIPART_SETTING);
-    return newConf;
-  }
-
-  @Override
-  protected AliyunOSSContract createContract(Configuration conf) {
-    return new AliyunOSSContract(conf);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractGetFileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractGetFileStatus.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractGetFileStatus.java
deleted file mode 100644
index c69124d..0000000
--- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractGetFileStatus.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.aliyun.oss.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-/**
- * Test getFileStatus and related listing operations.
- */
-public class TestAliyunOSSContractGetFileStatus
-    extends AbstractContractGetFileStatusTest {
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-    return new AliyunOSSContract(conf);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractMkdir.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractMkdir.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractMkdir.java
deleted file mode 100644
index 6cb7549..0000000
--- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractMkdir.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.aliyun.oss.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-/**
- * Aliyun OSS contract directory tests.
- */
-public class TestAliyunOSSContractMkdir extends AbstractContractMkdirTest {
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-    return new AliyunOSSContract(conf);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractOpen.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractOpen.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractOpen.java
deleted file mode 100644
index 099aba6..0000000
--- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractOpen.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.aliyun.oss.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-/**
- * Aliyun OSS contract opening file tests.
- */
-public class TestAliyunOSSContractOpen extends AbstractContractOpenTest {
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-    return new AliyunOSSContract(conf);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractRename.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractRename.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractRename.java
deleted file mode 100644
index e15b3ba..0000000
--- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractRename.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.aliyun.oss.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-/**
- * Aliyun OSS contract renaming tests.
- */
-public class TestAliyunOSSContractRename extends AbstractContractRenameTest {
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-    return new AliyunOSSContract(conf);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractRootDir.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractRootDir.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractRootDir.java
deleted file mode 100644
index 9faae37..0000000
--- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractRootDir.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *       http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.fs.aliyun.oss.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-
-/**
- * Root dir operations against an Aliyun OSS bucket.
- */
-public class TestAliyunOSSContractRootDir extends
-    AbstractContractRootDirectoryTest {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestAliyunOSSContractRootDir.class);
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-    return new AliyunOSSContract(conf);
-  }
-
-  @Override
-  public void testListEmptyRootDirectory() throws IOException {
-    for (int attempt = 1, maxAttempts = 10; attempt <= maxAttempts; ++attempt) {
-      try {
-        super.testListEmptyRootDirectory();
-        break;
-      } catch (AssertionError | FileNotFoundException e) {
-        if (attempt < maxAttempts) {
-          LOG.info("Attempt {} of {} for empty root directory test failed.  "
-              + "Attempting retry.", attempt, maxAttempts);
-          try {
-            Thread.sleep(1000);
-          } catch (InterruptedException e2) {
-            Thread.currentThread().interrupt();
-            fail("Test interrupted.");
-            break;
-          }
-        } else {
-          LOG.error(
-              "Empty root directory test failed {} attempts.  Failing test.",
-              maxAttempts);
-          throw e;
-        }
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractSeek.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractSeek.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractSeek.java
deleted file mode 100644
index b247ab1..0000000
--- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractSeek.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.aliyun.oss.contract;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-/**
- * Aliyun OSS contract seeking tests.
- */
-public class TestAliyunOSSContractSeek extends AbstractContractSeekTest {
-
-  @Override
-  protected AbstractFSContract createContract(Configuration conf) {
-    return new AliyunOSSContract(conf);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/test/resources/contract/aliyun-oss.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/resources/contract/aliyun-oss.xml b/hadoop-tools/hadoop-aliyun/src/test/resources/contract/aliyun-oss.xml
deleted file mode 100644
index 7bbbf46..0000000
--- a/hadoop-tools/hadoop-aliyun/src/test/resources/contract/aliyun-oss.xml
+++ /dev/null
@@ -1,115 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  ~ Licensed to the Apache Software Foundation (ASF) under one
-  ~  or more contributor license agreements.  See the NOTICE file
-  ~  distributed with this work for additional information
-  ~  regarding copyright ownership.  The ASF licenses this file
-  ~  to you under the Apache License, Version 2.0 (the
-  ~  "License"); you may not use this file except in compliance
-  ~  with the License.  You may obtain a copy of the License at
-  ~
-  ~       http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~  Unless required by applicable law or agreed to in writing, software
-  ~  distributed under the License is distributed on an "AS IS" BASIS,
-  ~  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  ~  See the License for the specific language governing permissions and
-  ~  limitations under the License.
-  -->
-<configuration>
-    <property>
-        <name>fs.contract.test.random-seek-count</name>
-        <value>10</value>
-    </property>
-
-    <property>
-        <name>fs.contract.is-blobstore</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>fs.contract.is-case-sensitive</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>fs.contract.rename-returns-false-if-source-missing</name>
-        <value>false</value>
-    </property>
-
-    <property>
-        <name>fs.contract.rename-remove-dest-if-empty-dir</name>
-        <value>false</value>
-    </property>
-
-    <property>
-        <name>fs.contract.supports-append</name>
-        <value>false</value>
-    </property>
-
-    <property>
-        <name>fs.contract.supports-atomic-directory-delete</name>
-        <value>false</value>
-    </property>
-
-    <property>
-        <name>fs.contract.supports-atomic-rename</name>
-        <value>false</value>
-    </property>
-
-    <property>
-        <name>fs.contract.supports-block-locality</name>
-        <value>false</value>
-    </property>
-
-    <property>
-        <name>fs.contract.supports-concat</name>
-        <value>false</value>
-    </property>
-
-    <property>
-        <name>fs.contract.supports-seek</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>fs.contract.supports-seek-on-closed-file</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>fs.contract.rejects-seek-past-eof</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>fs.contract.supports-strict-exceptions</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>fs.contract.supports-unix-permissions</name>
-        <value>false</value>
-    </property>
-
-    <property>
-        <name>fs.contract.rename-overwrites-dest</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>fs.contract.test.root-tests-enabled</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>fs.contract.supports-getfilestatus</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>fs.oss.multipart.download.size</name>
-        <value>102400</value>
-    </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/test/resources/core-site.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/resources/core-site.xml b/hadoop-tools/hadoop-aliyun/src/test/resources/core-site.xml
deleted file mode 100644
index fa4118c..0000000
--- a/hadoop-tools/hadoop-aliyun/src/test/resources/core-site.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  ~ Licensed to the Apache Software Foundation (ASF) under one
-  ~  or more contributor license agreements.  See the NOTICE file
-  ~  distributed with this work for additional information
-  ~  regarding copyright ownership.  The ASF licenses this file
-  ~  to you under the Apache License, Version 2.0 (the
-  ~  "License"); you may not use this file except in compliance
-  ~  with the License.  You may obtain a copy of the License at
-  ~
-  ~       http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~  Unless required by applicable law or agreed to in writing, software
-  ~  distributed under the License is distributed on an "AS IS" BASIS,
-  ~  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  ~  See the License for the specific language governing permissions and
-  ~  limitations under the License.
-  -->
-<configuration>
-
-    <property>
-        <name>hadoop.tmp.dir</name>
-        <value>target/build/test</value>
-        <description>A base for other temporary directories.</description>
-        <final>true</final>
-    </property>
-
-    <!-- Turn security off for tests by default -->
-    <property>
-        <name>hadoop.security.authentication</name>
-        <value>simple</value>
-    </property>
-
-    <!--
-    To run these tests.
-
-    # Create a file auth-keys.xml  - DO NOT ADD TO REVISION CONTROL
-    # add the property test.fs.oss.name to point to an OSS filesystem URL
-    # Add the credentials for the service you are testing against
-    -->
-    <include xmlns="http://www.w3.org/2001/XInclude" href="auth-keys.xml">
-        <fallback/>
-    </include>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-aliyun/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/resources/log4j.properties b/hadoop-tools/hadoop-aliyun/src/test/resources/log4j.properties
deleted file mode 100644
index bb5cbe5..0000000
--- a/hadoop-tools/hadoop-aliyun/src/test/resources/log4j.properties
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-#   Licensed to the Apache Software Foundation (ASF) under one or more
-#   contributor license agreements.  See the NOTICE file distributed with
-#   this work for additional information regarding copyright ownership.
-#   The ASF licenses this file to You under the Apache License, Version 2.0
-#   (the "License"); you may not use this file except in compliance with
-#   the License.  You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
-#
-# log4j configuration used during build and unit tests
-
-log4j.rootLogger=INFO,stdout
-log4j.threshold=ALL
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/hadoop-tools-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-tools-dist/pom.xml b/hadoop-tools/hadoop-tools-dist/pom.xml
index 14fa9f0..899a945 100644
--- a/hadoop-tools/hadoop-tools-dist/pom.xml
+++ b/hadoop-tools/hadoop-tools-dist/pom.xml
@@ -102,12 +102,6 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-aliyun</artifactId>
-      <scope>compile</scope>
-      <version>${project.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-sls</artifactId>
       <scope>compile</scope>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1443988/hadoop-tools/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml
index e7e876b..db002f4 100644
--- a/hadoop-tools/pom.xml
+++ b/hadoop-tools/pom.xml
@@ -47,7 +47,6 @@
     <module>hadoop-aws</module>
     <module>hadoop-kafka</module>
     <module>hadoop-azure-datalake</module>
-    <module>hadoop-aliyun</module>
   </modules>
 
   <build>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[20/57] [abbrv] hadoop git commit: HDFS-10824. MiniDFSCluster#storageCapacities has no effect on real capacity. Contributed by Xiaobing Zhou.

Posted by in...@apache.org.
HDFS-10824. MiniDFSCluster#storageCapacities has no effect on real capacity. Contributed by Xiaobing Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c3b235e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c3b235e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c3b235e5

Branch: refs/heads/HDFS-10467
Commit: c3b235e56597d55387b4003e376faee10b473d55
Parents: e19b37e
Author: Arpit Agarwal <ar...@apache.org>
Authored: Wed Sep 28 11:47:37 2016 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Wed Sep 28 11:47:37 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  | 103 ++++++++++++----
 .../apache/hadoop/hdfs/TestMiniDFSCluster.java  | 119 +++++++++++++++++++
 2 files changed, 199 insertions(+), 23 deletions(-)
----------------------------------------------------------------------
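
The fix works by waiting until each (re)started datanode reports itself fully started before overriding its volume capacities, and then triggering a heartbeat so the new values take effect. A minimal sketch of the polling step, following the GenericTestUtils.waitFor pattern used in the patch (the class and method names here are illustrative):

    import java.util.concurrent.TimeoutException;

    import com.google.common.base.Supplier;

    import org.apache.hadoop.hdfs.server.datanode.DataNode;
    import org.apache.hadoop.test.GenericTestUtils;

    public class WaitForDataNodeSketch {
      /** Poll every 100 ms until the datanode is fully started, up to 60 s. */
      static void waitFullyStarted(final DataNode dn)
          throws TimeoutException, InterruptedException {
        GenericTestUtils.waitFor(new Supplier<Boolean>() {
          @Override
          public Boolean get() {
            return dn.isDatanodeFullyStarted();
          }
        }, 100, 60000);
      }
    }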


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3b235e5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 3bb3a10..cf02a8d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -56,6 +56,7 @@ import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
@@ -547,6 +548,8 @@ public class MiniDFSCluster implements AutoCloseable {
   protected final int storagesPerDatanode;
   private Set<FileSystem> fileSystems = Sets.newHashSet();
 
+  private List<long[]> storageCap = Lists.newLinkedList();
+
   /**
    * A unique instance identifier for the cluster. This
    * is used to disambiguate HA filesystems in the case where
@@ -1648,31 +1651,64 @@ public class MiniDFSCluster implements AutoCloseable {
     }
     this.numDataNodes += numDataNodes;
     waitActive();
-    
+
+    setDataNodeStorageCapacities(
+        curDatanodesNum,
+        numDataNodes,
+        dns,
+        storageCapacities);
+
+    /* memorize storage capacities */
+    if (storageCapacities != null) {
+      storageCap.addAll(Arrays.asList(storageCapacities));
+    }
+  }
+
+  private synchronized void setDataNodeStorageCapacities(
+      final int curDatanodesNum,
+      final int numDNs,
+      final DataNode[] dns,
+      long[][] storageCapacities) throws IOException {
     if (storageCapacities != null) {
-      for (int i = curDatanodesNum; i < curDatanodesNum+numDataNodes; ++i) {
+      for (int i = curDatanodesNum; i < curDatanodesNum + numDNs; ++i) {
         final int index = i - curDatanodesNum;
-        try (FsDatasetSpi.FsVolumeReferences volumes =
-            dns[index].getFSDataset().getFsVolumeReferences()) {
-          assert storageCapacities[index].length == storagesPerDatanode;
-          assert volumes.size() == storagesPerDatanode;
-
-          int j = 0;
-          for (FsVolumeSpi fvs : volumes) {
-            FsVolumeImpl volume = (FsVolumeImpl) fvs;
-            LOG.info("setCapacityForTesting " + storageCapacities[index][j]
-                + " for [" + volume.getStorageType() + "]" + volume
-                .getStorageID());
-            volume.setCapacityForTesting(storageCapacities[index][j]);
-            j++;
-          }
-        }
+        setDataNodeStorageCapacities(index, dns[index], storageCapacities);
       }
     }
   }
-  
-  
-  
+
+  private synchronized void setDataNodeStorageCapacities(
+      final int curDnIdx,
+      final DataNode curDn,
+      long[][] storageCapacities) throws IOException {
+
+    if (storageCapacities == null || storageCapacities.length == 0) {
+      return;
+    }
+
+    try {
+      waitDataNodeFullyStarted(curDn);
+    } catch (TimeoutException | InterruptedException e) {
+      throw new IOException(e);
+    }
+
+    try (FsDatasetSpi.FsVolumeReferences volumes = curDn.getFSDataset()
+        .getFsVolumeReferences()) {
+      assert storageCapacities[curDnIdx].length == storagesPerDatanode;
+      assert volumes.size() == storagesPerDatanode;
+
+      int j = 0;
+      for (FsVolumeSpi fvs : volumes) {
+        FsVolumeImpl volume = (FsVolumeImpl) fvs;
+        LOG.info("setCapacityForTesting " + storageCapacities[curDnIdx][j]
+            + " for [" + volume.getStorageType() + "]" + volume.getStorageID());
+        volume.setCapacityForTesting(storageCapacities[curDnIdx][j]);
+        j++;
+      }
+    }
+    DataNodeTestUtils.triggerHeartbeat(curDn);
+  }
+
   /**
    * Modify the config and start up the DataNodes.  The info port for
    * DataNodes is guaranteed to use a free port.
@@ -2236,6 +2272,16 @@ public class MiniDFSCluster implements AutoCloseable {
     return restartDataNode(dnprop, false);
   }
 
+  private void waitDataNodeFullyStarted(final DataNode dn)
+      throws TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return dn.isDatanodeFullyStarted();
+      }
+    }, 100, 60000);
+  }
+
   /**
    * Restart a datanode, on the same port if requested
    * @param dnprop the datanode to restart
@@ -2256,10 +2302,21 @@ public class MiniDFSCluster implements AutoCloseable {
       conf.set(DFS_DATANODE_IPC_ADDRESS_KEY,
           addr.getAddress().getHostAddress() + ":" + dnprop.ipcPort); 
     }
-    DataNode newDn = DataNode.createDataNode(args, conf, secureResources);
-    dataNodes.add(new DataNodeProperties(
-        newDn, newconf, args, secureResources, newDn.getIpcPort()));
+    final DataNode newDn = DataNode.createDataNode(args, conf, secureResources);
+
+    final DataNodeProperties dnp = new DataNodeProperties(
+        newDn,
+        newconf,
+        args,
+        secureResources,
+        newDn.getIpcPort());
+    dataNodes.add(dnp);
     numDataNodes++;
+
+    setDataNodeStorageCapacities(
+        dataNodes.lastIndexOf(dnp),
+        newDn,
+        storageCap.toArray(new long[][]{}));
     return true;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3b235e5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
index 4d027dc..3d4cc72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
@@ -25,16 +25,25 @@ import static org.junit.Assume.assumeTrue;
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Random;
+import java.util.UUID;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
 import org.apache.hadoop.test.PathUtils;
 import org.junit.Before;
 import org.junit.Test;
 
+import com.google.common.base.Preconditions;
+
 /**
  * Tests MiniDFS cluster setup/teardown and isolation.
  * Every instance is brought up with a new data dir, to ensure that
@@ -78,6 +87,116 @@ public class TestMiniDFSCluster {
     }
   }
 
+  /**
+   * Tests that storage capacity settings remain effective after a cluster restart.
+   */
+  @Test(timeout=100000)
+  public void testClusterSetStorageCapacity() throws Throwable {
+
+    final Configuration conf = new HdfsConfiguration();
+    final int numDatanodes = 1;
+    final int defaultBlockSize = 1024;
+    final int blocks = 100;
+    final int blocksSize = 1024;
+    final int fileLen = blocks * blocksSize;
+    final long capacity = defaultBlockSize * 2 * fileLen;
+    final long[] capacities = new long[] {capacity, 2 * capacity};
+
+    final MiniDFSCluster cluster = newCluster(
+            conf,
+            numDatanodes,
+            capacities,
+            defaultBlockSize,
+            fileLen);
+    verifyStorageCapacity(cluster, capacities);
+
+    /* restart all data nodes */
+    cluster.restartDataNodes();
+    cluster.waitActive();
+    verifyStorageCapacity(cluster, capacities);
+
+    /* restart all name nodes */
+    cluster.restartNameNodes();
+    cluster.waitActive();
+    verifyStorageCapacity(cluster, capacities);
+
+    /* restart all name nodes firstly and data nodes then */
+    cluster.restartNameNodes();
+    cluster.restartDataNodes();
+    cluster.waitActive();
+    verifyStorageCapacity(cluster, capacities);
+
+    /* restart all data nodes firstly and name nodes then */
+    cluster.restartDataNodes();
+    cluster.restartNameNodes();
+    cluster.waitActive();
+    verifyStorageCapacity(cluster, capacities);
+  }
+
+  private void verifyStorageCapacity(
+      final MiniDFSCluster cluster,
+      final long[] capacities) throws IOException {
+
+    FsVolumeImpl source = null;
+    FsVolumeImpl dest = null;
+
+    /* verify capacity */
+    for (int i = 0; i < cluster.getDataNodes().size(); i++) {
+      final DataNode dnNode = cluster.getDataNodes().get(i);
+      try (FsDatasetSpi.FsVolumeReferences refs = dnNode.getFSDataset()
+          .getFsVolumeReferences()) {
+        source = (FsVolumeImpl) refs.get(0);
+        dest = (FsVolumeImpl) refs.get(1);
+        assertEquals(capacities[0], source.getCapacity());
+        assertEquals(capacities[1], dest.getCapacity());
+      }
+    }
+  }
+
+  private MiniDFSCluster newCluster(
+      final Configuration conf,
+      final int numDatanodes,
+      final long[] storageCapacities,
+      final int defaultBlockSize,
+      final int fileLen)
+      throws IOException, InterruptedException, TimeoutException {
+
+    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
+    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
+
+    final String fileName = "/" + UUID.randomUUID().toString();
+    final Path filePath = new Path(fileName);
+
+    Preconditions.checkNotNull(storageCapacities);
+    Preconditions.checkArgument(
+        storageCapacities.length == 2,
+        "need to specify capacities for two storages.");
+
+    /* Write a file and restart the cluster */
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(numDatanodes)
+        .storageCapacities(storageCapacities)
+        .storageTypes(new StorageType[]{StorageType.DISK, StorageType.DISK})
+        .storagesPerDatanode(2)
+        .build();
+    cluster.waitActive();
+
+    final short replicationFactor = (short) 1;
+    final Random r = new Random();
+    FileSystem fs = cluster.getFileSystem(0);
+    DFSTestUtil.createFile(
+        fs,
+        filePath,
+        fileLen,
+        replicationFactor,
+        r.nextLong());
+    DFSTestUtil.waitReplication(fs, filePath, replicationFactor);
+
+    return cluster;
+  }
+
   @Test(timeout=100000)
   public void testIsClusterUpAfterShutdown() throws Throwable {
     Configuration conf = new HdfsConfiguration();


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[26/57] [abbrv] hadoop git commit: HADOOP-13599. s3a close() to be non-synchronized, to avoid the risk of deadlock on shutdown. Contributed by Steve Loughran.

Posted by in...@apache.org.
HADOOP-13599. s3a close() to be non-synchronized, to avoid the risk of deadlock on shutdown. Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47f80922
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47f80922
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47f80922

Branch: refs/heads/HDFS-10467
Commit: 47f80922dc7cb2fa6d084e6fb1f354c4ec1d4c69
Parents: 84c6264
Author: Chris Nauroth <cn...@apache.org>
Authored: Wed Sep 28 15:53:17 2016 -0700
Committer: Chris Nauroth <cn...@apache.org>
Committed: Wed Sep 28 16:19:32 2016 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java    | 8 +++++++-
 .../java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java | 8 ++++++++
 2 files changed, 15 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
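
The change replaces the synchronized close() with an atomic closed flag: the first caller flips the flag and runs the cleanup, and any later (or concurrent) call returns immediately instead of contending for the filesystem lock, which is what could deadlock on shutdown. A minimal sketch of the pattern on a hypothetical class:

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicBoolean;

    public class IdempotentCloseSketch implements Closeable {
      private final AtomicBoolean closed = new AtomicBoolean(false);

      @Override
      public void close() throws IOException {
        // getAndSet returns the previous value atomically, so exactly
        // one caller observes false and performs the cleanup.
        if (closed.getAndSet(true)) {
          return;  // already closed
        }
        releaseResources();
      }

      private void releaseResources() throws IOException {
        // free thread pools, connections, etc.
      }
    }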


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47f80922/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 6a6c26b..dffef15 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -31,6 +31,7 @@ import java.util.Map;
 import java.util.concurrent.ExecutorService;
 import java.util.Objects;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import com.amazonaws.AmazonClientException;
 import com.amazonaws.AmazonServiceException;
@@ -121,6 +122,7 @@ public class S3AFileSystem extends FileSystem {
   private S3AStorageStatistics storageStatistics;
   private long readAhead;
   private S3AInputPolicy inputPolicy;
+  private final AtomicBoolean closed = new AtomicBoolean(false);
 
   // The maximum number of entries that can be deleted in any call to s3
   private static final int MAX_ENTRIES_TO_DELETE = 1000;
@@ -1414,7 +1416,11 @@ public class S3AFileSystem extends FileSystem {
    * @throws IOException IO problem
    */
   @Override
-  public synchronized void close() throws IOException {
+  public void close() throws IOException {
+    if (closed.getAndSet(true)) {
+      // already closed
+      return;
+    }
     try {
       super.close();
     } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47f80922/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
index fca8e49..b08bfe9 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
@@ -409,6 +409,14 @@ public class ITestS3AConfiguration {
         awsConf.getUserAgent());
   }
 
+  @Test
+  public void testCloseIdempotent() throws Throwable {
+    conf = new Configuration();
+    fs = S3ATestUtils.createTestFileSystem(conf);
+    fs.close();
+    fs.close();
+  }
+
   /**
    * Reads and returns a field from an object using reflection.  If the field
    * cannot be found, is null, or is not the expected type, then this method


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[31/57] [abbrv] hadoop git commit: MAPREDUCE-6771. RMContainerAllocator sends container diagnostics event after corresponding completion event. Contributed by Haibo Chen

Posted by in...@apache.org.
MAPREDUCE-6771. RMContainerAllocator sends container diagnostics event after corresponding completion event. Contributed by Haibo Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1b8251b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1b8251b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1b8251b

Branch: refs/heads/HDFS-10467
Commit: a1b8251bf7a7e9b776c4483fa01f7d453420eba4
Parents: 2ae5a3a
Author: Jason Lowe <jl...@apache.org>
Authored: Thu Sep 29 15:27:17 2016 +0000
Committer: Jason Lowe <jl...@apache.org>
Committed: Thu Sep 29 15:27:17 2016 +0000

----------------------------------------------------------------------
 .../v2/app/rm/RMContainerAllocator.java         | 51 ++++++++++++--------
 .../v2/app/rm/TestRMContainerAllocator.java     | 46 ++++++++++++++++++
 2 files changed, 77 insertions(+), 20 deletions(-)
----------------------------------------------------------------------
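
Since the bug is purely one of event ordering, the new test relies on Mockito's InOrder verification, which fails unless the mock received the calls in the expected sequence. A standalone sketch of that verification style (the Handler interface here is hypothetical):

    import static org.mockito.Mockito.inOrder;
    import static org.mockito.Mockito.mock;

    import org.mockito.InOrder;

    public class InOrderSketch {
      interface Handler {
        void handle(String event);
      }

      public static void main(String[] args) {
        Handler handler = mock(Handler.class);
        handler.handle("diagnostics-update");
        handler.handle("container-completed");

        // Verification fails if the calls arrived in the opposite order.
        InOrder inOrder = inOrder(handler);
        inOrder.verify(handler).handle("diagnostics-update");
        inOrder.verify(handler).handle("container-completed");
        inOrder.verifyNoMoreInteractions();
      }
    }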


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1b8251b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
index ecd75db..4cb3cbe 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
@@ -150,7 +150,7 @@ public class RMContainerAllocator extends RMContainerRequestor
     new LinkedList<ContainerRequest>();
 
   //holds information about the assigned containers to task attempts
-  private final AssignedRequests assignedRequests = new AssignedRequests();
+  private final AssignedRequests assignedRequests;
   
   //holds scheduled requests to be fulfilled by RM
   private final ScheduledRequests scheduledRequests = new ScheduledRequests();
@@ -200,6 +200,11 @@ public class RMContainerAllocator extends RMContainerRequestor
     this.preemptionPolicy = preemptionPolicy;
     this.stopped = new AtomicBoolean(false);
     this.clock = context.getClock();
+    this.assignedRequests = createAssignedRequests();
+  }
+
+  protected AssignedRequests createAssignedRequests() {
+    return new AssignedRequests();
   }
 
   @Override
@@ -833,29 +838,35 @@ public class RMContainerAllocator extends RMContainerRequestor
     }
 
     for (ContainerStatus cont : finishedContainers) {
-      LOG.info("Received completed container " + cont.getContainerId());
-      TaskAttemptId attemptID = assignedRequests.get(cont.getContainerId());
-      if (attemptID == null) {
-        LOG.error("Container complete event for unknown container id "
-            + cont.getContainerId());
-      } else {
-        pendingRelease.remove(cont.getContainerId());
-        assignedRequests.remove(attemptID);
-        
-        // send the container completed event to Task attempt
-        eventHandler.handle(createContainerFinishedEvent(cont, attemptID));
-        
-        // Send the diagnostics
-        String diagnostics = StringInterner.weakIntern(cont.getDiagnostics());
-        eventHandler.handle(new TaskAttemptDiagnosticsUpdateEvent(attemptID,
-            diagnostics));
-
-        preemptionPolicy.handleCompletedContainer(attemptID);
-      }
+      processFinishedContainer(cont);
     }
     return newContainers;
   }
 
+  @SuppressWarnings("unchecked")
+  @VisibleForTesting
+  void processFinishedContainer(ContainerStatus container) {
+    LOG.info("Received completed container " + container.getContainerId());
+    TaskAttemptId attemptID = assignedRequests.get(container.getContainerId());
+    if (attemptID == null) {
+      LOG.error("Container complete event for unknown container "
+          + container.getContainerId());
+    } else {
+      pendingRelease.remove(container.getContainerId());
+      assignedRequests.remove(attemptID);
+
+      // Send the diagnostics
+      String diagnostic = StringInterner.weakIntern(container.getDiagnostics());
+      eventHandler.handle(new TaskAttemptDiagnosticsUpdateEvent(attemptID,
+          diagnostic));
+
+      // send the container completed event to Task attempt
+      eventHandler.handle(createContainerFinishedEvent(container, attemptID));
+
+      preemptionPolicy.handleCompletedContainer(attemptID);
+    }
+  }
+
   private void applyConcurrentTaskLimits() {
     int numScheduledMaps = scheduledRequests.maps.size();
     if (maxRunningMaps > 0 && numScheduledMaps > 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1b8251b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
index a115b13..38a9731 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
@@ -24,6 +24,7 @@ import static org.mockito.Matchers.anyInt;
 import static org.mockito.Matchers.isA;
 import static org.mockito.Mockito.doCallRealMethod;
 import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.inOrder;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.times;
@@ -70,11 +71,13 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobUpdatedNodesEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
 import org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl;
+import org.apache.hadoop.mapreduce.v2.app.rm.preemption.AMPreemptionPolicy;
 import org.apache.hadoop.mapreduce.v2.app.rm.preemption.NoopAMPreemptionPolicy;
 import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -144,6 +147,7 @@ import org.junit.Before;
 import org.junit.Test;
 
 import com.google.common.base.Supplier;
+import org.mockito.InOrder;
 
 @SuppressWarnings("unchecked")
 public class TestRMContainerAllocator {
@@ -3017,6 +3021,48 @@ public class TestRMContainerAllocator {
     }
   }
 
+  /**
+   * MAPREDUCE-6771. Test if RMContainerAllocator generates the events in the
+   * right order while processing finished containers.
+   */
+  @Test
+  public void testHandlingFinishedContainers() {
+    EventHandler eventHandler = mock(EventHandler.class);
+
+    AppContext context = mock(RunningAppContext.class);
+    when(context.getClock()).thenReturn(new ControlledClock());
+    when(context.getClusterInfo()).thenReturn(
+        new ClusterInfo(Resource.newInstance(10240, 1)));
+    when(context.getEventHandler()).thenReturn(eventHandler);
+    RMContainerAllocator containerAllocator =
+        new RMContainerAllocatorForFinishedContainer(null, context,
+            mock(AMPreemptionPolicy.class));
+
+    ContainerStatus finishedContainer = ContainerStatus.newInstance(
+        mock(ContainerId.class), ContainerState.COMPLETE, "", 0);
+    containerAllocator.processFinishedContainer(finishedContainer);
+
+    InOrder inOrder = inOrder(eventHandler);
+    inOrder.verify(eventHandler).handle(
+        isA(TaskAttemptDiagnosticsUpdateEvent.class));
+    inOrder.verify(eventHandler).handle(isA(TaskAttemptEvent.class));
+    inOrder.verifyNoMoreInteractions();
+  }
+
+  private static class RMContainerAllocatorForFinishedContainer
+      extends RMContainerAllocator {
+    public RMContainerAllocatorForFinishedContainer(ClientService clientService,
+        AppContext context, AMPreemptionPolicy preemptionPolicy) {
+      super(clientService, context, preemptionPolicy);
+    }
+    @Override
+    protected AssignedRequests createAssignedRequests() {
+      AssignedRequests assignedReqs = mock(AssignedRequests.class);
+      TaskAttemptId taskAttempt = mock(TaskAttemptId.class);
+      when(assignedReqs.get(any(ContainerId.class))).thenReturn(taskAttempt);
+      return assignedReqs;
+    }
+  }
 
   @Test
   public void testAvoidAskMoreReducersWhenReducerPreemptionIsRequired()


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[15/57] [abbrv] hadoop git commit: HDFS-10915. Fix time measurement bug in TestDatanodeRestart. Contributed by Xiaobing Zhou

Posted by in...@apache.org.
HDFS-10915. Fix time measurement bug in TestDatanodeRestart. Contributed by Xiaobing Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6437ba18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6437ba18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6437ba18

Branch: refs/heads/HDFS-10467
Commit: 6437ba18c5c26bc271a63aff5ea03756f43dd9a3
Parents: 9c97364
Author: Mingliang Liu <li...@apache.org>
Authored: Tue Sep 27 20:40:17 2016 -0700
Committer: Mingliang Liu <li...@apache.org>
Committed: Tue Sep 27 20:40:17 2016 -0700

----------------------------------------------------------------------
 .../datanode/fsdataset/impl/TestDatanodeRestart.java   | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
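
The underlying bug: System.currentTimeMillis() is wall-clock time, so NTP adjustments can make an elapsed-time computation come out wrong, while Time.monotonicNow() only ever moves forward and is the right basis for measuring durations. A minimal sketch:

    import org.apache.hadoop.util.Time;

    public class ElapsedTimeSketch {
      public static void main(String[] args) throws InterruptedException {
        // monotonicNow() is immune to wall-clock jumps, unlike
        // System.currentTimeMillis(), so the difference below is a
        // reliable duration in milliseconds.
        long start = Time.monotonicNow();
        Thread.sleep(250);
        long elapsedMillis = Time.monotonicNow() - start;
        System.out.println("elapsed: " + elapsedMillis + " ms");
      }
    }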


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6437ba18/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
index 40a3d9d..0afee5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.Time;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -172,17 +173,17 @@ public class TestDatanodeRestart {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
       cluster.waitActive();
 
-      start = System.currentTimeMillis();
+      start = Time.monotonicNow();
       FileSystem fileSys = cluster.getFileSystem();
       try {
         DFSTestUtil.createFile(fileSys, file, 10240L, (short)1, 0L);
         // It is a bug if this does not fail.
         throw new IOException("Did not fail!");
       } catch (org.apache.hadoop.ipc.RemoteException e) {
-        long elapsed = System.currentTimeMillis() - start;
+        long elapsed = Time.monotonicNow() - start;
         // timers have at-least semantics, so it should be at least 5 seconds.
         if (elapsed < 5000 || elapsed > 10000) {
-          throw new IOException(elapsed + " seconds passed.", e);
+          throw new IOException(elapsed + " milliseconds passed.", e);
         }
       }
       DataNodeFaultInjector.set(oldDnInjector);
@@ -195,18 +196,18 @@ public class TestDatanodeRestart {
       // back to simulating unregistered node.
       DataNodeFaultInjector.set(dnFaultInjector);
       byte[] buffer = new byte[8];
-      start = System.currentTimeMillis();
+      start = Time.monotonicNow();
       try {
         fileSys.open(file).read(0L, buffer, 0, 1);
         throw new IOException("Did not fail!");
       } catch (IOException e) {
-        long elapsed = System.currentTimeMillis() - start;
+        long elapsed = Time.monotonicNow() - start;
         if (e.getMessage().contains("readBlockLength")) {
           throw new IOException("Failed, but with unexpected exception:", e);
         }
         // timers have at-least semantics, so it should be at least 5 seconds.
         if (elapsed < 5000 || elapsed > 10000) {
-          throw new IOException(elapsed + " seconds passed.", e);
+          throw new IOException(elapsed + " milliseconds passed.", e);
         }
       }
       DataNodeFaultInjector.set(oldDnInjector);


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[05/57] [abbrv] hadoop git commit: HADOOP-13544. JDiff reports unnecessarily show unannotated APIs and cause confusion while our javadocs only show annotated and public APIs. (vinodkv via wangda)

Posted by in...@apache.org.
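
The regenerated JDiff reports below were produced with IncludePublicAnnotationsJDiffDoclet (see the new command line in the diff) instead of ExcludePrivateAnnotationsJDiffDoclet, so only classes carrying Hadoop's public audience annotations are reported, matching what the published javadocs show. A minimal sketch of such an annotated API class (the class name is illustrative):

    import org.apache.hadoop.classification.InterfaceAudience;
    import org.apache.hadoop.classification.InterfaceStability;

    // Only classes marked @InterfaceAudience.Public survive the
    // doclet's filter; unannotated or limited-private classes such
    // as ClientCache drop out of the report, as in the diff below.
    @InterfaceAudience.Public
    @InterfaceStability.Stable
    public class ExamplePublicApi {
      public void doSomething() {
      }
    }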
http://git-wip-us.apache.org/repos/asf/hadoop/blob/875062b5/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_2.7.2.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_2.7.2.xml b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_2.7.2.xml
index dec8dc4..16de0fa 100644
--- a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_2.7.2.xml
+++ b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_2.7.2.xml
@@ -17,7 +17,7 @@
 -->
 <!-- Generated by the JDiff Javadoc doclet -->
 <!-- (http://www.jdiff.org) -->
-<!-- on Mon Jun 13 20:33:25 PDT 2016 -->
+<!-- on Wed Aug 24 13:56:59 PDT 2016 -->
 
 <api
   xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
@@ -25,966 +25,8 @@
   name="hadoop-mapreduce-client-jobclient 2.7.2"
   jdversion="1.0.9">
 
-<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/jdiff.jar -verbose -classpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/classes:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/hadoop-mapreduce-client-common-2.7.2.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-2.7.2.jar:/Users/vinodkv/Wor
 kspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/hadoop-yarn-client-2.7.2.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/hadoop-mapreduce-client-core-2.7.2.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/target/hadoop-mapreduce-client-shuffle-2.7.2.jar:/Users/vinodkv/.m2/repository/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.7.2.jar:/Users/vinodkv/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/vinodkv/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/vinodkv/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/vin
 odkv/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/vinodkv/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/vinodkv/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/vinodkv/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/vinodkv/.m2/repository/com/google/
 inject/guice/3.0/guice-3.0.jar:/Users/vinodkv/.m2/repository/javax/inject/javax.inject/1/javax.inject-1.jar:/Users/vinodkv/.m2/repository/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/vinodkv/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/Users/vinodkv/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/vinodkv/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/vinodkv/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/vinodkv/.m2/repository/jline/jline/0.9.94/jline-0.9.94.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/vinodkv/.m2/repository/xml
 enc/xmlenc/0.52/xmlenc-0.52.jar:/Users/vinodkv/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/vinodkv/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/vinodkv/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/vinodkv/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/vinodkv/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/hadoop-common-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/vinodkv/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/vinodkv/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/vinodkv/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/vinodkv/.m2/repository/net/
 java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/vinodkv/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/vinodkv/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/vinodkv/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/vinodkv/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/ap
 acheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/vinodkv/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/vinodkv/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/Use
 rs/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.7.0_45.jdk/Contents/Home/lib/tools.jar:/Users/vinodkv/.m2/repository/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/Users/vinodkv/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/vinodkv/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/vinodkv/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/vinodkv/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/vinodkv/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/vinodkv/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/vinodkv/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar -sourcepath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapredu
 ce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java -apidir /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/site/jdiff/xml -apiname hadoop-mapreduce-client-jobclient 2.7.2 -->
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/jdiff.jar -verbose -classpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/classes:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/hadoop-mapreduce-client-common-2.7.2.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-2.7.2.jar:/Users/vinodkv/Work
 space/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/hadoop-yarn-client-2.7.2.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/hadoop-mapreduce-client-core-2.7.2.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/target/hadoop-mapreduce-client-shuffle-2.7.2.jar:/Users/vinodkv/.m2/repository/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.7.2.jar:/Users/vinodkv/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/vinodkv/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/vinodkv/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/vino
 dkv/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/vinodkv/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/vinodkv/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/vinodkv/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/vinodkv/.m2/repository/com/google/i
 nject/guice/3.0/guice-3.0.jar:/Users/vinodkv/.m2/repository/javax/inject/javax.inject/1/javax.inject-1.jar:/Users/vinodkv/.m2/repository/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/vinodkv/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/Users/vinodkv/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/vinodkv/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/vinodkv/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/vinodkv/.m2/repository/jline/jline/0.9.94/jline-0.9.94.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/vinodkv/.m2/repository/xmle
 nc/xmlenc/0.52/xmlenc-0.52.jar:/Users/vinodkv/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/vinodkv/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/vinodkv/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/vinodkv/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/vinodkv/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/hadoop-common-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/vinodkv/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/vinodkv/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/vinodkv/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/vinodkv/.m2/repository/net/j
 ava/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/vinodkv/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/vinodkv/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/vinodkv/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/vinodkv/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apa
 cheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/vinodkv/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/vinodkv/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/User
 s/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_40.jdk/Contents/Home/lib/tools.jar:/Users/vinodkv/.m2/repository/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/Users/vinodkv/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/vinodkv/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/vinodkv/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/vinodkv/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/vinodkv/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/vinodkv/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/vinodkv/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar -sourcepath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduc
 e-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/jdiff.jar -apidir /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/site/jdiff/xml -apiname hadoop-mapreduce-client-jobclient 2.7.2 -->
 <package name="org.apache.hadoop.mapred">
-  <!-- start class org.apache.hadoop.mapred.ClientCache -->
-  <class name="ClientCache" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="ClientCache" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.ResourceMgrDelegate"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="getClient" return="org.apache.hadoop.mapred.ClientServiceDelegate"
-      abstract="false" native="false" synchronized="true"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="jobId" type="org.apache.hadoop.mapreduce.JobID"/>
-    </method>
-    <method name="getInitializedHSProxy" return="org.apache.hadoop.mapreduce.v2.api.MRClientProtocol"
-      abstract="false" native="false" synchronized="true"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="instantiateHistoryProxy" return="org.apache.hadoop.mapreduce.v2.api.MRClientProtocol"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.ClientCache -->
-  <!-- start class org.apache.hadoop.mapred.ClientServiceDelegate -->
-  <class name="ClientServiceDelegate" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="ClientServiceDelegate" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.ResourceMgrDelegate, org.apache.hadoop.mapreduce.JobID, org.apache.hadoop.mapreduce.v2.api.MRClientProtocol"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="getMaxClientRetry" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getJobCounters" return="org.apache.hadoop.mapreduce.Counters"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="arg0" type="org.apache.hadoop.mapreduce.JobID"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="arg0" type="org.apache.hadoop.mapreduce.JobID"/>
-      <param name="arg1" type="int"/>
-      <param name="arg2" type="int"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getTaskDiagnostics" return="java.lang.String[]"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="arg0" type="org.apache.hadoop.mapreduce.TaskAttemptID"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getJobStatus" return="org.apache.hadoop.mapreduce.JobStatus"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="oldJobID" type="org.apache.hadoop.mapreduce.JobID"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getTaskReports" return="org.apache.hadoop.mapreduce.TaskReport[]"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="oldJobID" type="org.apache.hadoop.mapreduce.JobID"/>
-      <param name="taskType" type="org.apache.hadoop.mapreduce.TaskType"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="killTask" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="taskAttemptID" type="org.apache.hadoop.mapreduce.TaskAttemptID"/>
-      <param name="fail" type="boolean"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="killJob" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="oldJobID" type="org.apache.hadoop.mapreduce.JobID"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getLogFilePath" return="org.apache.hadoop.mapreduce.v2.LogParams"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="oldJobID" type="org.apache.hadoop.mapreduce.JobID"/>
-      <param name="oldTaskAttemptID" type="org.apache.hadoop.mapreduce.TaskAttemptID"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.ClientServiceDelegate -->
-  <!-- start class org.apache.hadoop.mapred.NotRunningJob -->
-  <class name="NotRunningJob" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.mapreduce.v2.api.MRClientProtocol"/>
-    <method name="failTaskAttempt" return="org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="request" type="org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getCounters" return="org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="request" type="org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getDiagnostics" return="org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="request" type="org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getJobReport" return="org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="request" type="org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getTaskAttemptCompletionEvents" return="org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsResponse"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="request" type="org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getTaskAttemptReport" return="org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportResponse"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="request" type="org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getTaskReport" return="org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportResponse"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="request" type="org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getTaskReports" return="org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsResponse"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="request" type="org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="killJob" return="org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobResponse"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="request" type="org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="killTask" return="org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskResponse"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="request" type="org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="killTaskAttempt" return="org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptResponse"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="request" type="org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getDelegationToken" return="org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="request" type="org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="renewDelegationToken" return="org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenResponse"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="request" type="org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenRequest"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="cancelDelegationToken" return="org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenResponse"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="request" type="org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenRequest"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getConnectAddress" return="java.net.InetSocketAddress"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.NotRunningJob -->
-  <!-- start class org.apache.hadoop.mapred.ResourceMgrDelegate -->
-  <class name="ResourceMgrDelegate" extends="org.apache.hadoop.yarn.client.api.YarnClient"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="ResourceMgrDelegate" type="org.apache.hadoop.yarn.conf.YarnConfiguration"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Delegate responsible for communicating with the Resource Manager's
- {@link ApplicationClientProtocol}.
- @param conf the configuration object.]]>
-      </doc>
-    </constructor>
-    <method name="serviceInit"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <exception name="Exception" type="java.lang.Exception"/>
-    </method>
-    <method name="serviceStart"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <exception name="Exception" type="java.lang.Exception"/>
-    </method>
-    <method name="serviceStop"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <exception name="Exception" type="java.lang.Exception"/>
-    </method>
-    <method name="getActiveTrackers" return="org.apache.hadoop.mapreduce.TaskTrackerInfo[]"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getAllJobs" return="org.apache.hadoop.mapreduce.JobStatus[]"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getBlacklistedTrackers" return="org.apache.hadoop.mapreduce.TaskTrackerInfo[]"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getClusterMetrics" return="org.apache.hadoop.mapreduce.ClusterMetrics"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getRMDelegationTokenService" return="org.apache.hadoop.io.Text"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="renewer" type="org.apache.hadoop.io.Text"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getFilesystemName" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getNewJobID" return="org.apache.hadoop.mapreduce.JobID"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getQueue" return="org.apache.hadoop.mapreduce.QueueInfo"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="queueName" type="java.lang.String"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getQueueAclsForCurrentUser" return="org.apache.hadoop.mapreduce.QueueAclsInfo[]"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getQueues" return="org.apache.hadoop.mapreduce.QueueInfo[]"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getRootQueues" return="org.apache.hadoop.mapreduce.QueueInfo[]"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getChildQueues" return="org.apache.hadoop.mapreduce.QueueInfo[]"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="parent" type="java.lang.String"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getStagingAreaDir" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getSystemDir" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getTaskTrackerExpiryInterval" return="long"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="setJobPriority"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="arg0" type="org.apache.hadoop.mapreduce.JobID"/>
-      <param name="arg1" type="java.lang.String"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getProtocolVersion" return="long"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="arg0" type="java.lang.String"/>
-      <param name="arg1" type="long"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getApplicationId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="createApplication" return="org.apache.hadoop.yarn.client.api.YarnClientApplication"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="submitApplication" return="org.apache.hadoop.yarn.api.records.ApplicationId"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="appContext" type="org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="killApplication"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getApplicationReport" return="org.apache.hadoop.yarn.api.records.ApplicationReport"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getAMRMToken" return="org.apache.hadoop.security.token.Token"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getApplications" return="java.util.List"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getApplications" return="java.util.List"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="applicationTypes" type="java.util.Set"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getApplications" return="java.util.List"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="applicationStates" type="java.util.EnumSet"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getApplications" return="java.util.List"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="applicationTypes" type="java.util.Set"/>
-      <param name="applicationStates" type="java.util.EnumSet"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getYarnClusterMetrics" return="org.apache.hadoop.yarn.api.records.YarnClusterMetrics"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getNodeReports" return="java.util.List"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="states" type="org.apache.hadoop.yarn.api.records.NodeState[]"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getRMDelegationToken" return="org.apache.hadoop.yarn.api.records.Token"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="renewer" type="org.apache.hadoop.io.Text"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getQueueInfo" return="org.apache.hadoop.yarn.api.records.QueueInfo"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="queueName" type="java.lang.String"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getAllQueues" return="java.util.List"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getRootQueueInfos" return="java.util.List"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getChildQueueInfos" return="java.util.List"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="parent" type="java.lang.String"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getQueueAclsInfo" return="java.util.List"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getApplicationAttemptReport" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="appAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getApplicationAttempts" return="java.util.List"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getContainerReport" return="org.apache.hadoop.yarn.api.records.ContainerReport"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getContainers" return="java.util.List"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="moveApplicationAcrossQueues"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
-      <param name="queue" type="java.lang.String"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="submitReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="updateReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="deleteReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getNodeToLabels" return="java.util.Map"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getLabelsToNodes" return="java.util.Map"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getLabelsToNodes" return="java.util.Map"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="labels" type="java.util.Set"/>
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getClusterNodeLabels" return="java.util.Set"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <field name="client" type="org.apache.hadoop.yarn.client.api.YarnClient"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </field>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.ResourceMgrDelegate -->
-  <!-- start class org.apache.hadoop.mapred.YarnClientProtocolProvider -->
-  <class name="YarnClientProtocolProvider" extends="org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="YarnClientProtocolProvider"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="create" return="org.apache.hadoop.mapreduce.protocol.ClientProtocol"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="create" return="org.apache.hadoop.mapreduce.protocol.ClientProtocol"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="addr" type="java.net.InetSocketAddress"/>
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="close"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="clientProtocol" type="org.apache.hadoop.mapreduce.protocol.ClientProtocol"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.YarnClientProtocolProvider -->
-  <!-- start class org.apache.hadoop.mapred.YARNRunner -->
-  <class name="YARNRunner" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.mapreduce.protocol.ClientProtocol"/>
-    <constructor name="YARNRunner" type="org.apache.hadoop.conf.Configuration"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[YARN runner encapsulates the client interface of
- YARN.
- @param conf the configuration object for the client]]>
-      </doc>
-    </constructor>
-    <constructor name="YARNRunner" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.ResourceMgrDelegate"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Similar to {@link #YARNRunner(Configuration)} but allowing injecting
- {@link ResourceMgrDelegate}. Enables mocking and testing.
- @param conf the configuration object for the client
- @param resMgrDelegate the resourcemanager client handle.]]>
-      </doc>
-    </constructor>
-    <constructor name="YARNRunner" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.ResourceMgrDelegate, org.apache.hadoop.mapred.ClientCache"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Similar to {@link YARNRunner#YARNRunner(Configuration, ResourceMgrDelegate)}
- but allowing injecting {@link ClientCache}. Enables mocking and testing.
- @param conf the configuration object
- @param resMgrDelegate the resource manager delegate
- @param clientCache the client cache object.]]>
-      </doc>
-    </constructor>
-    <method name="setResourceMgrDelegate"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="resMgrDelegate" type="org.apache.hadoop.mapred.ResourceMgrDelegate"/>
-    </method>
-    <method name="cancelDelegationToken"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="arg0" type="org.apache.hadoop.security.token.Token"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getActiveTrackers" return="org.apache.hadoop.mapreduce.TaskTrackerInfo[]"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getAllJobs" return="org.apache.hadoop.mapreduce.JobStatus[]"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getBlacklistedTrackers" return="org.apache.hadoop.mapreduce.TaskTrackerInfo[]"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getClusterMetrics" return="org.apache.hadoop.mapreduce.ClusterMetrics"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="renewer" type="org.apache.hadoop.io.Text"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getFilesystemName" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getNewJobID" return="org.apache.hadoop.mapreduce.JobID"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getQueue" return="org.apache.hadoop.mapreduce.QueueInfo"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="queueName" type="java.lang.String"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getQueueAclsForCurrentUser" return="org.apache.hadoop.mapreduce.QueueAclsInfo[]"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getQueues" return="org.apache.hadoop.mapreduce.QueueInfo[]"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getRootQueues" return="org.apache.hadoop.mapreduce.QueueInfo[]"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getChildQueues" return="org.apache.hadoop.mapreduce.QueueInfo[]"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="parent" type="java.lang.String"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getStagingAreaDir" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getSystemDir" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getTaskTrackerExpiryInterval" return="long"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="submitJob" return="org.apache.hadoop.mapreduce.JobStatus"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="jobId" type="org.apache.hadoop.mapreduce.JobID"/>
-      <param name="jobSubmitDir" type="java.lang.String"/>
-      <param name="ts" type="org.apache.hadoop.security.Credentials"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="createApplicationSubmissionContext" return="org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="jobConf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="jobSubmitDir" type="java.lang.String"/>
-      <param name="ts" type="org.apache.hadoop.security.Credentials"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="setJobPriority"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="arg0" type="org.apache.hadoop.mapreduce.JobID"/>
-      <param name="arg1" type="java.lang.String"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getProtocolVersion" return="long"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="arg0" type="java.lang.String"/>
-      <param name="arg1" type="long"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="renewDelegationToken" return="long"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="arg0" type="org.apache.hadoop.security.token.Token"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getJobCounters" return="org.apache.hadoop.mapreduce.Counters"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="arg0" type="org.apache.hadoop.mapreduce.JobID"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getJobHistoryDir" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getJobStatus" return="org.apache.hadoop.mapreduce.JobStatus"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="jobID" type="org.apache.hadoop.mapreduce.JobID"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapreduce.TaskCompletionEvent[]"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="arg0" type="org.apache.hadoop.mapreduce.JobID"/>
-      <param name="arg1" type="int"/>
-      <param name="arg2" type="int"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getTaskDiagnostics" return="java.lang.String[]"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="arg0" type="org.apache.hadoop.mapreduce.TaskAttemptID"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getTaskReports" return="org.apache.hadoop.mapreduce.TaskReport[]"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="jobID" type="org.apache.hadoop.mapreduce.JobID"/>
-      <param name="taskType" type="org.apache.hadoop.mapreduce.TaskType"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="killJob"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="arg0" type="org.apache.hadoop.mapreduce.JobID"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="killTask" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="arg0" type="org.apache.hadoop.mapreduce.TaskAttemptID"/>
-      <param name="arg1" type="boolean"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getQueueAdmins" return="org.apache.hadoop.security.authorize.AccessControlList"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="arg0" type="java.lang.String"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getJobTrackerStatus" return="org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
-    </method>
-    <method name="getProtocolSignature" return="org.apache.hadoop.ipc.ProtocolSignature"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="protocol" type="java.lang.String"/>
-      <param name="clientVersion" type="long"/>
-      <param name="clientMethodsHash" type="int"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getLogFileParams" return="org.apache.hadoop.mapreduce.v2.LogParams"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="jobID" type="org.apache.hadoop.mapreduce.JobID"/>
-      <param name="taskAttemptID" type="org.apache.hadoop.mapreduce.TaskAttemptID"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <doc>
-    <![CDATA[This class enables the current JobClient (0.22 hadoop) to run on YARN.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.mapred.YARNRunner -->
 </package>
 
 </api>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/875062b5/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
index f3fe68b..e71d21f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
@@ -263,7 +263,7 @@
                   <!-- Jdiff -->
                   <mkdir dir="${project.build.directory}/site/jdiff/xml"/>
                   <javadoc maxmemory="${jdiff.javadoc.maxmemory}" verbose="yes">
-                    <doclet name="org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet"
+                    <doclet name="org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet"
                             path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar">
                       <param name="-apidir" value="${project.build.directory}/site/jdiff/xml"/>
                       <param name="-apiname" value="${project.name} ${project.version}"/>
@@ -278,7 +278,7 @@
                            destdir="${project.build.directory}/site/jdiff/xml"
                            sourceFiles="${dev-support.relative.dir}/jdiff/Null.java"
                            maxmemory="${jdiff.javadoc.maxmemory}">
-                    <doclet name="org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet"
+                    <doclet name="org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet"
                             path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar:${project.build.directory}/xerces.jar">
                       <param name="-oldapi" value="${project.name} ${jdiff.stable.api}"/>
                       <param name="-newapi" value="${project.name} ${project.version}"/>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/875062b5/hadoop-project-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index 0ee9895..bf4fac7 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -281,7 +281,7 @@
                     <mkdir dir="${project.build.directory}/site/jdiff/xml"/>
 
                     <javadoc maxmemory="${jdiff.javadoc.maxmemory}" verbose="yes">
-                      <doclet name="org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet"
+                      <doclet name="org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet"
                               path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar">
                         <param name="-apidir" value="${project.build.directory}/site/jdiff/xml"/>
                         <param name="-apiname" value="${project.name} ${project.version}"/>
@@ -296,7 +296,7 @@
                              destdir="${project.build.directory}/site/jdiff/xml"
                              sourceFiles="${basedir}/dev-support/jdiff/Null.java"
                              maxmemory="${jdiff.javadoc.maxmemory}">
-                      <doclet name="org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet"
+                      <doclet name="org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet"
                               path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar:${project.build.directory}/xerces.jar">
                         <param name="-oldapi" value="${project.name} ${jdiff.stable.api}"/>
                         <param name="-newapi" value="${project.name} ${project.version}"/>

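For context, both pom changes swap in a doclet that reports only classes explicitly
annotated as public API, so the JDiff output matches what the generated javadocs
already show. A minimal sketch of the annotation convention the new doclet keys on
(the class names below are illustrative, not part of this patch):

    import org.apache.hadoop.classification.InterfaceAudience;
    import org.apache.hadoop.classification.InterfaceStability;

    // Picked up by IncludePublicAnnotationsJDiffDoclet: explicitly public API.
    @InterfaceAudience.Public
    @InterfaceStability.Stable
    public class PublicApiExample {
    }

    // Not reported: unannotated types no longer show up in the JDiff report,
    // which removes the confusing entries the old doclet let through.
    class UnannotatedHelper {
    }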



[46/57] [abbrv] hadoop git commit: HADOOP-13317. Add logs to KMS server-side to improve supportability. Contributed by Suraj Acharya.

Posted by in...@apache.org.
HADOOP-13317. Add logs to KMS server-side to improve supportability. Contributed by Suraj Acharya.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89bd6d29
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89bd6d29
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89bd6d29

Branch: refs/heads/HDFS-10467
Commit: 89bd6d29a62afd7ed8ff87bcc29d17b1cb53dcb6
Parents: 2549ee9
Author: Xiao Chen <xi...@apache.org>
Authored: Fri Sep 30 17:51:39 2016 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Fri Sep 30 17:51:39 2016 -0700

----------------------------------------------------------------------
 .../hadoop/crypto/key/kms/server/KMS.java       | 76 +++++++++++++++++---
 1 file changed, 66 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89bd6d29/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
index f069fca..371f3f5 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
@@ -28,6 +28,8 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
 import org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 
 import javax.ws.rs.Consumes;
@@ -68,6 +70,8 @@ public class KMS {
   private KeyProviderCryptoExtension provider;
   private KMSAudit kmsAudit;
 
+  private static final Logger LOG = LoggerFactory.getLogger(KMS.class);
+
   public KMS() throws Exception {
     provider = KMSWebApp.getKeyProvider();
     kmsAudit= KMSWebApp.getKMSAudit();
@@ -77,7 +81,7 @@ public class KMS {
       KMSOp operation) throws AccessControlException {
     KMSWebApp.getACLs().assertAccess(aclType, ugi, operation, null);
   }
-  
+
   private void assertAccess(KMSACLs.Type aclType, UserGroupInformation ugi,
       KMSOp operation, String key) throws AccessControlException {
     KMSWebApp.getACLs().assertAccess(aclType, ugi, operation, key);
@@ -100,6 +104,7 @@ public class KMS {
   @Produces(MediaType.APPLICATION_JSON)
   @SuppressWarnings("unchecked")
   public Response createKey(Map jsonKey) throws Exception {
+    LOG.trace("Entering createKey Method.");
     KMSWebApp.getAdminCallsMeter().mark();
     UserGroupInformation user = HttpUserGroupInformation.get();
     final String name = (String) jsonKey.get(KMSRESTConstants.NAME_FIELD);
@@ -111,6 +116,9 @@ public class KMS {
                  ? (Integer) jsonKey.get(KMSRESTConstants.LENGTH_FIELD) : 0;
     String description = (String)
         jsonKey.get(KMSRESTConstants.DESCRIPTION_FIELD);
+    LOG.debug("Creating key with name {}, cipher being used{}, " +
+            "length of key {}, description of key {}", name, cipher,
+             length, description);
     Map<String, String> attributes = (Map<String, String>)
         jsonKey.get(KMSRESTConstants.ATTRIBUTES_FIELD);
     if (material != null) {
@@ -151,6 +159,7 @@ public class KMS {
     String requestURL = KMSMDCFilter.getURL();
     int idx = requestURL.lastIndexOf(KMSRESTConstants.KEYS_RESOURCE);
     requestURL = requestURL.substring(0, idx);
+    LOG.trace("Exiting createKey Method.");
     return Response.created(getKeyURI(KMSRESTConstants.SERVICE_VERSION, name))
         .type(MediaType.APPLICATION_JSON)
         .header("Location", getKeyURI(requestURL, name)).entity(json).build();
@@ -160,11 +169,12 @@ public class KMS {
   @Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}")
   public Response deleteKey(@PathParam("name") final String name)
       throws Exception {
+    LOG.trace("Entering deleteKey method.");
     KMSWebApp.getAdminCallsMeter().mark();
     UserGroupInformation user = HttpUserGroupInformation.get();
     assertAccess(KMSACLs.Type.DELETE, user, KMSOp.DELETE_KEY, name);
     KMSClientProvider.checkNotEmpty(name, "name");
-
+    LOG.debug("Deleting key with name {}.", name);
     user.doAs(new PrivilegedExceptionAction<Void>() {
       @Override
       public Void run() throws Exception {
@@ -175,7 +185,7 @@ public class KMS {
     });
 
     kmsAudit.ok(user, KMSOp.DELETE_KEY, name, "");
-
+    LOG.trace("Exiting deleteKey method.");
     return Response.ok().build();
   }
 
@@ -185,10 +195,12 @@ public class KMS {
   @Produces(MediaType.APPLICATION_JSON)
   public Response rolloverKey(@PathParam("name") final String name,
       Map jsonMaterial) throws Exception {
+    LOG.trace("Entering rolloverKey Method.");
     KMSWebApp.getAdminCallsMeter().mark();
     UserGroupInformation user = HttpUserGroupInformation.get();
     assertAccess(KMSACLs.Type.ROLLOVER, user, KMSOp.ROLL_NEW_VERSION, name);
     KMSClientProvider.checkNotEmpty(name, "name");
+    LOG.debug("Rolling key with name {}.", name);
     final String material = (String)
         jsonMaterial.get(KMSRESTConstants.MATERIAL_FIELD);
     if (material != null) {
@@ -216,6 +228,7 @@ public class KMS {
       keyVersion = removeKeyMaterial(keyVersion);
     }
     Map json = KMSServerJSONUtils.toJSON(keyVersion);
+    LOG.trace("Exiting rolloverKey Method.");
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }
 
@@ -224,6 +237,7 @@ public class KMS {
   @Produces(MediaType.APPLICATION_JSON)
   public Response getKeysMetadata(@QueryParam(KMSRESTConstants.KEY)
       List<String> keyNamesList) throws Exception {
+    LOG.trace("Entering getKeysMetadata method.");
     KMSWebApp.getAdminCallsMeter().mark();
     UserGroupInformation user = HttpUserGroupInformation.get();
     final String[] keyNames = keyNamesList.toArray(
@@ -241,6 +255,7 @@ public class KMS {
 
     Object json = KMSServerJSONUtils.toJSON(keyNames, keysMeta);
     kmsAudit.ok(user, KMSOp.GET_KEYS_METADATA, "");
+    LOG.trace("Exiting getKeysMetadata method.");
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }
 
@@ -248,6 +263,7 @@ public class KMS {
   @Path(KMSRESTConstants.KEYS_NAMES_RESOURCE)
   @Produces(MediaType.APPLICATION_JSON)
   public Response getKeyNames() throws Exception {
+    LOG.trace("Entering getKeyNames method.");
     KMSWebApp.getAdminCallsMeter().mark();
     UserGroupInformation user = HttpUserGroupInformation.get();
     assertAccess(KMSACLs.Type.GET_KEYS, user, KMSOp.GET_KEYS);
@@ -262,6 +278,7 @@ public class KMS {
     );
 
     kmsAudit.ok(user, KMSOp.GET_KEYS, "");
+    LOG.trace("Exiting getKeyNames method.");
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }
 
@@ -269,6 +286,9 @@ public class KMS {
   @Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}")
   public Response getKey(@PathParam("name") String name)
       throws Exception {
+    LOG.trace("Entering getKey method.");
+    LOG.debug("Getting key information for key with name {}.", name);
+    LOG.trace("Exiting getKey method.");
     return getMetadata(name);
   }
 
@@ -278,10 +298,12 @@ public class KMS {
   @Produces(MediaType.APPLICATION_JSON)
   public Response getMetadata(@PathParam("name") final String name)
       throws Exception {
+    LOG.trace("Entering getMetadata method.");
     UserGroupInformation user = HttpUserGroupInformation.get();
     KMSClientProvider.checkNotEmpty(name, "name");
     KMSWebApp.getAdminCallsMeter().mark();
     assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_METADATA, name);
+    LOG.debug("Getting metadata for key with name {}.", name);
 
     KeyProvider.Metadata metadata = user.doAs(
         new PrivilegedExceptionAction<KeyProvider.Metadata>() {
@@ -294,6 +316,7 @@ public class KMS {
 
     Object json = KMSServerJSONUtils.toJSON(name, metadata);
     kmsAudit.ok(user, KMSOp.GET_METADATA, name, "");
+    LOG.trace("Exiting getMetadata method.");
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }
 
@@ -303,10 +326,12 @@ public class KMS {
   @Produces(MediaType.APPLICATION_JSON)
   public Response getCurrentVersion(@PathParam("name") final String name)
       throws Exception {
+    LOG.trace("Entering getCurrentVersion method.");
     UserGroupInformation user = HttpUserGroupInformation.get();
     KMSClientProvider.checkNotEmpty(name, "name");
     KMSWebApp.getKeyCallsMeter().mark();
     assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_CURRENT_KEY, name);
+    LOG.debug("Getting key version for key with name {}.", name);
 
     KeyVersion keyVersion = user.doAs(
         new PrivilegedExceptionAction<KeyVersion>() {
@@ -319,6 +344,7 @@ public class KMS {
 
     Object json = KMSServerJSONUtils.toJSON(keyVersion);
     kmsAudit.ok(user, KMSOp.GET_CURRENT_KEY, name, "");
+    LOG.trace("Exiting getCurrentVersion method.");
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }
 
@@ -327,10 +353,12 @@ public class KMS {
   @Produces(MediaType.APPLICATION_JSON)
   public Response getKeyVersion(
       @PathParam("versionName") final String versionName) throws Exception {
+    LOG.trace("Entering getKeyVersion method.");
     UserGroupInformation user = HttpUserGroupInformation.get();
     KMSClientProvider.checkNotEmpty(versionName, "versionName");
     KMSWebApp.getKeyCallsMeter().mark();
     assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSION);
+    LOG.debug("Getting key with version name {}.", versionName);
 
     KeyVersion keyVersion = user.doAs(
         new PrivilegedExceptionAction<KeyVersion>() {
@@ -345,6 +373,7 @@ public class KMS {
       kmsAudit.ok(user, KMSOp.GET_KEY_VERSION, keyVersion.getName(), "");
     }
     Object json = KMSServerJSONUtils.toJSON(keyVersion);
+    LOG.trace("Exiting getKeyVersion method.");
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }
 
@@ -359,12 +388,16 @@ public class KMS {
           @DefaultValue("1")
           @QueryParam(KMSRESTConstants.EEK_NUM_KEYS) final int numKeys)
           throws Exception {
+    LOG.trace("Entering generateEncryptedKeys method.");
     UserGroupInformation user = HttpUserGroupInformation.get();
     KMSClientProvider.checkNotEmpty(name, "name");
     KMSClientProvider.checkNotNull(edekOp, "eekOp");
+    LOG.debug("Generating encrypted key with name {}," +
+            " the edek Operation is {}.", name, edekOp);
 
     Object retJSON;
     if (edekOp.equals(KMSRESTConstants.EEK_GENERATE)) {
+      LOG.debug("edek Operation is Generate.");
       assertAccess(KMSACLs.Type.GENERATE_EEK, user, KMSOp.GENERATE_EEK, name);
 
       final List<EncryptedKeyVersion> retEdeks =
@@ -375,6 +408,8 @@ public class KMS {
             new PrivilegedExceptionAction<Void>() {
               @Override
               public Void run() throws Exception {
+                  LOG.debug("Generated Encrypted key for {} number of keys.",
+                          numKeys);
                 for (int i = 0; i < numKeys; i++) {
                   retEdeks.add(provider.generateEncryptedKey(name));
                 }
@@ -384,6 +419,7 @@ public class KMS {
         );
 
       } catch (Exception e) {
+        LOG.error("Exception in generateEncryptedKeys:", e);
         throw new IOException(e);
       }
       kmsAudit.ok(user, KMSOp.GENERATE_EEK, name, "");
@@ -392,11 +428,18 @@ public class KMS {
         ((ArrayList)retJSON).add(KMSServerJSONUtils.toJSON(edek));
       }
     } else {
-      throw new IllegalArgumentException("Wrong " + KMSRESTConstants.EEK_OP +
-          " value, it must be " + KMSRESTConstants.EEK_GENERATE + " or " +
-          KMSRESTConstants.EEK_DECRYPT);
+      StringBuilder error;
+      error = new StringBuilder("IllegalArgumentException Wrong ");
+      error.append(KMSRESTConstants.EEK_OP);
+      error.append(" value, it must be ");
+      error.append(KMSRESTConstants.EEK_GENERATE);
+      error.append(" or ");
+      error.append(KMSRESTConstants.EEK_DECRYPT);
+      LOG.error(error.toString());
+      throw new IllegalArgumentException(error.toString());
     }
     KMSWebApp.getGenerateEEKCallsMeter().mark();
+    LOG.trace("Exiting generateEncryptedKeys method.");
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(retJSON)
         .build();
   }
@@ -411,14 +454,17 @@ public class KMS {
       @QueryParam(KMSRESTConstants.EEK_OP) String eekOp,
       Map jsonPayload)
       throws Exception {
+    LOG.trace("Entering decryptEncryptedKey method.");
     UserGroupInformation user = HttpUserGroupInformation.get();
     KMSClientProvider.checkNotEmpty(versionName, "versionName");
     KMSClientProvider.checkNotNull(eekOp, "eekOp");
+    LOG.debug("Decrypting key for {}, the edek Operation is {}.",
+            versionName, eekOp);
 
     final String keyName = (String) jsonPayload.get(
         KMSRESTConstants.NAME_FIELD);
     String ivStr = (String) jsonPayload.get(KMSRESTConstants.IV_FIELD);
-    String encMaterialStr = 
+    String encMaterialStr =
         (String) jsonPayload.get(KMSRESTConstants.MATERIAL_FIELD);
     Object retJSON;
     if (eekOp.equals(KMSRESTConstants.EEK_DECRYPT)) {
@@ -445,11 +491,18 @@ public class KMS {
       retJSON = KMSServerJSONUtils.toJSON(retKeyVersion);
       kmsAudit.ok(user, KMSOp.DECRYPT_EEK, keyName, "");
     } else {
-      throw new IllegalArgumentException("Wrong " + KMSRESTConstants.EEK_OP +
-          " value, it must be " + KMSRESTConstants.EEK_GENERATE + " or " +
-          KMSRESTConstants.EEK_DECRYPT);
+      StringBuilder error;
+      error = new StringBuilder("IllegalArgumentException Wrong ");
+      error.append(KMSRESTConstants.EEK_OP);
+      error.append(" value, it must be ");
+      error.append(KMSRESTConstants.EEK_GENERATE);
+      error.append(" or ");
+      error.append(KMSRESTConstants.EEK_DECRYPT);
+      LOG.error(error.toString());
+      throw new IllegalArgumentException(error.toString());
     }
     KMSWebApp.getDecryptEEKCallsMeter().mark();
+    LOG.trace("Exiting decryptEncryptedKey method.");
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(retJSON)
         .build();
   }
@@ -460,10 +513,12 @@ public class KMS {
   @Produces(MediaType.APPLICATION_JSON)
   public Response getKeyVersions(@PathParam("name") final String name)
       throws Exception {
+    LOG.trace("Entering getKeyVersions method.");
     UserGroupInformation user = HttpUserGroupInformation.get();
     KMSClientProvider.checkNotEmpty(name, "name");
     KMSWebApp.getKeyCallsMeter().mark();
     assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSIONS, name);
+    LOG.debug("Getting key versions for key {}", name);
 
     List<KeyVersion> ret = user.doAs(
         new PrivilegedExceptionAction<List<KeyVersion>>() {
@@ -476,6 +531,7 @@ public class KMS {
 
     Object json = KMSServerJSONUtils.toJSON(ret);
     kmsAudit.ok(user, KMSOp.GET_KEY_VERSIONS, name, "");
+    LOG.trace("Exiting getKeyVersions method.");
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }
 

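The patch applies the same shape to every REST entry point: a TRACE line on entry
and exit, plus a DEBUG line carrying the request parameters. A self-contained
sketch of that SLF4J idiom (the class and method names here are illustrative,
not from the patch):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class TracedService {
      private static final Logger LOG =
          LoggerFactory.getLogger(TracedService.class);

      public String lookup(String name) {
        LOG.trace("Entering lookup method.");
        // {} placeholders defer message formatting until the level is
        // enabled, so disabled DEBUG/TRACE lines stay cheap in production.
        LOG.debug("Looking up entry with name {}.", name);
        String result = "value-for-" + name;
        LOG.trace("Exiting lookup method.");
        return result;
      }
    }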



[25/57] [abbrv] hadoop git commit: HDFS-10892. Add unit tests for HDFS command 'dfs -tail' and 'dfs -stat'. Contributed by Mingliang Liu

Posted by in...@apache.org.
HDFS-10892. Add unit tests for HDFS command 'dfs -tail' and 'dfs -stat'. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84c62640
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84c62640
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84c62640

Branch: refs/heads/HDFS-10467
Commit: 84c626407925e03ee2ef11faba9324d5c55b8e93
Parents: 92e5e91
Author: Mingliang Liu <li...@apache.org>
Authored: Fri Sep 23 15:41:07 2016 -0700
Committer: Mingliang Liu <li...@apache.org>
Committed: Wed Sep 28 16:03:51 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/TestDFSShell.java    | 187 ++++++++++++++++++-
 1 file changed, 182 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84c62640/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index 73ebf28..fc90db5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -20,18 +20,28 @@ package org.apache.hadoop.hdfs;
 import java.io.*;
 import java.security.Permission;
 import java.security.PrivilegedExceptionAction;
+import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Date;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.Scanner;
+import java.util.TimeZone;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.zip.DeflaterOutputStream;
 import java.util.zip.GZIPOutputStream;
 
+import com.google.common.base.Supplier;
+import com.google.common.collect.Lists;
+import org.apache.commons.lang.RandomStringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.log4j.Level;
+import org.junit.Test;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -51,13 +61,11 @@ import org.apache.hadoop.io.compress.BZip2Codec;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
-import org.junit.Test;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
 import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
@@ -70,8 +78,6 @@ import static org.hamcrest.CoreMatchers.not;
 import static org.junit.Assert.*;
 import static org.hamcrest.core.StringContains.containsString;
 
-import com.google.common.collect.Lists;
-
 /**
  * This class tests commands from DFSShell.
  */
@@ -118,6 +124,7 @@ public class TestDFSShell {
     assertFalse(fs.exists(p));
   }
 
+  /** Create a local file whose content contains its full path. */
   static File createLocalFile(File f) throws IOException {
     assertTrue(!f.exists());
     PrintWriter out = new PrintWriter(f);
@@ -914,6 +921,97 @@ public class TestDFSShell {
     }
   }
 
+  /**
+   * Test that -tail displays last kilobyte of the file to stdout.
+   */
+  @Test (timeout = 30000)
+  public void testTail() throws Exception {
+    final int blockSize = 1024;
+    final int fileLen = 5 * blockSize;
+    final Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+
+    try (MiniDFSCluster cluster =
+             new MiniDFSCluster.Builder(conf).numDataNodes(3).build()) {
+      cluster.waitActive();
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+
+      // create a text file with multiple KB bytes (and multiple blocks)
+      final Path testFile = new Path("testTail", "file1");
+      final String text = RandomStringUtils.randomAscii(fileLen);
+      try (OutputStream pout = dfs.create(testFile)) {
+        pout.write(text.getBytes());
+      }
+      final ByteArrayOutputStream out = new ByteArrayOutputStream();
+      System.setOut(new PrintStream(out));
+      final String[] argv = new String[]{"-tail", testFile.toString()};
+      final int ret = ToolRunner.run(new FsShell(conf), argv);
+
+      assertEquals(Arrays.toString(argv) + " returned " + ret, 0, ret);
+      assertEquals("-tail returned " + out.size() + " bytes data, expected 1KB",
+          1024, out.size());
+      // tailed out last 1KB of the file content
+      assertArrayEquals("Tail output doesn't match input",
+          text.substring(fileLen - 1024).getBytes(), out.toByteArray());
+      out.reset();
+    }
+  }
+
+  /**
+   * Test that -tail -f outputs appended data as the file grows.
+   */
+  @Test(timeout = 30000)
+  public void testTailWithFresh() throws Exception {
+    final int blockSize = 1024;
+    final Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+
+    try (MiniDFSCluster cluster =
+             new MiniDFSCluster.Builder(conf).numDataNodes(3).build()) {
+      cluster.waitActive();
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      final Path testFile = new Path("testTailWithFresh", "file1");
+      dfs.create(testFile);
+
+      final ByteArrayOutputStream out = new ByteArrayOutputStream();
+      System.setOut(new PrintStream(out));
+      final Thread tailer = new Thread() {
+        @Override
+        public void run() {
+          final String[] argv = new String[]{"-tail", "-f",
+              testFile.toString()};
+          try {
+            ToolRunner.run(new FsShell(conf), argv);
+          } catch (Exception e) {
+            LOG.error("Client that tails the test file fails", e);
+          } finally {
+            out.reset();
+          }
+        }
+      };
+      tailer.start();
+      // wait till the tailer is sleeping
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override
+        public Boolean get() {
+          return tailer.getState() == Thread.State.TIMED_WAITING;
+        }
+      }, 100, 10000);
+
+      final String text = RandomStringUtils.randomAscii(blockSize / 2);
+      try (OutputStream pout = dfs.create(testFile)) {
+        pout.write(text.getBytes());
+      }
+      // The tailer should eventually show the file contents
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override
+        public Boolean get() {
+          return Arrays.equals(text.getBytes(), out.toByteArray());
+        }
+      }, 100, 10000);
+    }
+  }
+
   @Test (timeout = 30000)
   public void testText() throws Exception {
     Configuration conf = new HdfsConfiguration();
@@ -2010,6 +2108,85 @@ public class TestDFSShell {
     }
   }
 
+  /**
+   * Test -stat [format] <path>... prints statistics about the file/directory
+   * at <path> in the specified format.
+   */
+  @Test (timeout = 30000)
+  public void testStat() throws Exception {
+    final int blockSize = 1024;
+    final Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+
+    try (MiniDFSCluster cluster =
+             new MiniDFSCluster.Builder(conf).numDataNodes(3).build()) {
+      cluster.waitActive();
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+
+      final SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
+      fmt.setTimeZone(TimeZone.getTimeZone("UTC"));
+      final Path testDir1 = new Path("testStat", "dir1");
+      dfs.mkdirs(testDir1);
+      final FileStatus status1 = dfs.getFileStatus(testDir1);
+      final String mtime1 = fmt.format(new Date(status1.getModificationTime()));
+      final Path testFile2 = new Path(testDir1, "file2");
+      DFSTestUtil.createFile(dfs, testFile2, 2 * blockSize, (short) 3, 0);
+      final FileStatus status2 = dfs.getFileStatus(testFile2);
+      final String mtime2 = fmt.format(new Date(status2.getModificationTime()));
+
+      final ByteArrayOutputStream out = new ByteArrayOutputStream();
+      System.setOut(new PrintStream(out));
+
+      doFsStat(conf, null);
+
+      out.reset();
+      doFsStat(conf, null, testDir1);
+      assertEquals("Unexpected -stat output: " + out,
+          out.toString(), String.format("%s%n", mtime1));
+
+      out.reset();
+      doFsStat(conf, null, testDir1, testFile2);
+      assertEquals("Unexpected -stat output: " + out,
+          out.toString(), String.format("%s%n%s%n", mtime1, mtime2));
+
+      doFsStat(conf, "%F %u:%g %b %y %n");
+
+      out.reset();
+      doFsStat(conf, "%F %u:%g %b %y %n", testDir1);
+      assertTrue(out.toString(), out.toString().contains(mtime1));
+      assertTrue(out.toString(), out.toString().contains("directory"));
+      assertTrue(out.toString(), out.toString().contains(status1.getGroup()));
+
+      out.reset();
+      doFsStat(conf, "%F %u:%g %b %y %n", testDir1, testFile2);
+      assertTrue(out.toString(), out.toString().contains(mtime1));
+      assertTrue(out.toString(), out.toString().contains("regular file"));
+      assertTrue(out.toString(), out.toString().contains(mtime2));
+    }
+  }
+
+  private static void doFsStat(Configuration conf, String format, Path... files)
+      throws Exception {
+    if (files == null || files.length == 0) {
+      final String[] argv = (format == null ? new String[] {"-stat"} :
+          new String[] {"-stat", format});
+      assertEquals("Should have failed with missing arguments",
+          -1, ToolRunner.run(new FsShell(conf), argv));
+    } else {
+      List<String> argv = new LinkedList<>();
+      argv.add("-stat");
+      if (format != null) {
+        argv.add(format);
+      }
+      for (Path f : files) {
+        argv.add(f.toString());
+      }
+
+      int ret = ToolRunner.run(new FsShell(conf), argv.toArray(new String[0]));
+      assertEquals(argv + " returned non-zero status " + ret, 0, ret);
+    }
+  }
+
   @Test (timeout = 30000)
   public void testLsr() throws Exception {
     final Configuration conf = new HdfsConfiguration();

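Both new tests capture shell output by pointing System.out at an in-memory
buffer. A standalone sketch of that idiom (the class name is hypothetical; the
try/finally restore is an extra precaution, where the tests above instead clear
the buffer with out.reset()):

    import java.io.ByteArrayOutputStream;
    import java.io.PrintStream;

    public class StdoutCaptureDemo {
      public static void main(String[] args) {
        final PrintStream original = System.out;
        final ByteArrayOutputStream captured = new ByteArrayOutputStream();
        System.setOut(new PrintStream(captured));
        try {
          // Anything printed here lands in the buffer, not on the console.
          System.out.println("hello from the shell under test");
        } finally {
          System.setOut(original); // always hand the real stdout back
        }
        original.println("captured: " + captured.toString().trim());
      }
    }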



[47/57] [abbrv] hadoop git commit: YARN-5384. Expose priority in ReservationSystem submission APIs. (Sean Po via Subru).

Posted by in...@apache.org.
YARN-5384. Expose priority in ReservationSystem submission APIs. (Sean Po via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a3697de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a3697de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a3697de

Branch: refs/heads/HDFS-10467
Commit: 3a3697deab3e3397082222deb66fb613d86ff9ae
Parents: 89bd6d2
Author: Subru Krishnan <su...@apache.org>
Authored: Fri Sep 30 19:41:43 2016 -0700
Committer: Subru Krishnan <su...@apache.org>
Committed: Fri Sep 30 19:41:43 2016 -0700

----------------------------------------------------------------------
 .../yarn/api/records/ReservationDefinition.java | 44 ++++++++++++++++----
 .../src/main/proto/yarn_protos.proto            |  1 +
 .../impl/pb/ReservationDefinitionPBImpl.java    | 31 ++++++++++++++
 .../webapp/dao/ReservationDefinitionInfo.java   | 11 +++++
 .../reservation/ReservationSystemTestUtil.java  | 10 ++++-
 .../src/site/markdown/ResourceManagerRest.md    |  3 ++
 6 files changed, 91 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a3697de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
index 8ef881b..bb9bca2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.api.records;
 
 import org.apache.hadoop.classification.InterfaceAudience.Public;
-import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.util.Records;
 
@@ -38,7 +37,7 @@ public abstract class ReservationDefinition {
   @Unstable
   public static ReservationDefinition newInstance(long arrival, long deadline,
       ReservationRequests reservationRequests, String name,
-      String recurrenceExpression) {
+      String recurrenceExpression, Priority priority) {
     ReservationDefinition rDefinition =
         Records.newRecord(ReservationDefinition.class);
     rDefinition.setArrival(arrival);
@@ -46,6 +45,7 @@ public abstract class ReservationDefinition {
     rDefinition.setReservationRequests(reservationRequests);
     rDefinition.setReservationName(name);
     rDefinition.setRecurrenceExpression(recurrenceExpression);
+    rDefinition.setPriority(priority);
     return rDefinition;
   }
 
@@ -53,8 +53,8 @@ public abstract class ReservationDefinition {
   @Unstable
   public static ReservationDefinition newInstance(long arrival, long deadline,
       ReservationRequests reservationRequests, String name) {
-    ReservationDefinition rDefinition =
-        newInstance(arrival, deadline, reservationRequests, name, "0");
+    ReservationDefinition rDefinition = newInstance(arrival, deadline,
+        reservationRequests, name, "0", Priority.UNDEFINED);
     return rDefinition;
   }
 
@@ -130,7 +130,7 @@ public abstract class ReservationDefinition {
    *         allocation in the scheduler
    */
   @Public
-  @Evolving
+  @Unstable
   public abstract String getReservationName();
 
   /**
@@ -142,7 +142,7 @@ public abstract class ReservationDefinition {
    *          allocation in the scheduler
    */
   @Public
-  @Evolving
+  @Unstable
   public abstract void setReservationName(String name);
 
   /**
@@ -160,7 +160,7 @@ public abstract class ReservationDefinition {
    * @return recurrence of this reservation
    */
   @Public
-  @Evolving
+  @Unstable
   public abstract String getRecurrenceExpression();
 
   /**
@@ -178,7 +178,35 @@ public abstract class ReservationDefinition {
    * @param recurrenceExpression recurrence interval of this reservation
    */
   @Public
-  @Evolving
+  @Unstable
   public abstract void setRecurrenceExpression(String recurrenceExpression);
 
+  /**
+   * Get the priority for this reservation. A lower number indicates a
+   * higher-priority reservation. Recurring reservations always have higher
+   * priority than non-recurring reservations. The priority of a
+   * non-recurring reservation is only compared with that of other
+   * non-recurring reservations, and likewise for recurring reservations.
+   *
+   * @return the priority of the reserved resource allocation in the
+   *         scheduler
+   */
+  @Public
+  @Unstable
+  public abstract Priority getPriority();
+
+  /**
+   * Set the priority for this reservation. A lower number indicates a
+   * higher-priority reservation. Recurring reservations always have higher
+   * priority than non-recurring reservations. The priority of a
+   * non-recurring reservation is only compared with that of other
+   * non-recurring reservations, and likewise for recurring reservations.
+   *
+   * @param priority the priority of the reserved resource allocation in
+   *          the scheduler
+   */
+  @Public
+  @Unstable
+  public abstract void setPriority(Priority priority);
+
 }

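A hedged sketch of calling the extended factory method above; the helper class,
method, and reservation name are invented here, and the ReservationRequests
argument is assumed to be built by the caller:

    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.api.records.ReservationDefinition;
    import org.apache.hadoop.yarn.api.records.ReservationRequests;

    public final class ReservationExamples {
      // Lower numbers mean higher priority; Priority.UNDEFINED preserves the
      // old, priority-less behavior of the four-argument newInstance.
      public static ReservationDefinition highPriorityDefinition(
          long arrivalMs, long deadlineMs, ReservationRequests requests) {
        return ReservationDefinition.newInstance(arrivalMs, deadlineMs,
            requests, "nightly-etl-window", "0", Priority.newInstance(1));
      }
    }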
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a3697de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index f788295..9c746fd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -489,6 +489,7 @@ message ReservationDefinitionProto {
   optional int64 deadline = 3;
   optional string reservation_name = 4;
   optional string recurrence_expression = 5 [default = "0"];
+  optional PriorityProto priority = 6;
 }
 
 message ResourceAllocationRequestProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a3697de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationDefinitionPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationDefinitionPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationDefinitionPBImpl.java
index b30cd2a..49aef11 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationDefinitionPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationDefinitionPBImpl.java
@@ -18,8 +18,10 @@
 
 package org.apache.hadoop.yarn.api.records.impl.pb;
 
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.ReservationDefinition;
 import org.apache.hadoop.yarn.api.records.ReservationRequests;
+import org.apache.hadoop.yarn.proto.YarnProtos;
 import org.apache.hadoop.yarn.proto.YarnProtos.ReservationDefinitionProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ReservationDefinitionProtoOrBuilder;
 import org.apache.hadoop.yarn.proto.YarnProtos.ReservationRequestsProto;
@@ -32,6 +34,7 @@ public class ReservationDefinitionPBImpl extends ReservationDefinition {
   boolean viaProto = false;
 
   private ReservationRequests reservationReqs;
+  private Priority priority = null;
 
   public ReservationDefinitionPBImpl() {
     builder = ReservationDefinitionProto.newBuilder();
@@ -150,6 +153,33 @@ public class ReservationDefinitionPBImpl extends ReservationDefinition {
     builder.setReservationName(name);
   }
 
+  @Override
+  public Priority getPriority() {
+    ReservationDefinitionProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.priority != null) {
+      return this.priority;
+    }
+    if (!p.hasPriority()) {
+      return Priority.UNDEFINED;
+    }
+    this.priority = convertFromProtoFormat(p.getPriority());
+    return this.priority;
+  }
+
+  @Override
+  public void setPriority(Priority priority) {
+    maybeInitBuilder();
+    if (priority == null) {
+      priority = Priority.UNDEFINED;
+    }
+    this.priority = priority;
+  }
+
+  private PriorityPBImpl convertFromProtoFormat(
+      YarnProtos.PriorityProto p) {
+    return new PriorityPBImpl(p);
+  }
+
   private ReservationRequestsPBImpl convertFromProtoFormat(
       ReservationRequestsProto p) {
     return new ReservationRequestsPBImpl(p);
@@ -164,6 +194,7 @@ public class ReservationDefinitionPBImpl extends ReservationDefinition {
     return "{Arrival: " + getArrival() + ", Deadline: " + getDeadline()
         + ", Reservation Name: " + getReservationName()
         + ", Recurrence expression: " + getRecurrenceExpression()
+        + ", Priority: " + getPriority().toString()
         + ", Resources: " + getReservationRequests() + "}";
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a3697de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ReservationDefinitionInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ReservationDefinitionInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ReservationDefinitionInfo.java
index 71ee924..42a07af 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ReservationDefinitionInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ReservationDefinitionInfo.java
@@ -44,6 +44,9 @@ public class ReservationDefinitionInfo {
   @XmlElement(name = "reservation-name")
   private String reservationName;
 
+  @XmlElement(name = "priority")
+  private int priority;
+
   public ReservationDefinitionInfo() {
 
   }
@@ -89,4 +92,12 @@ public class ReservationDefinitionInfo {
     this.reservationName = reservationName;
   }
 
+  public int getPriority() {
+    return priority;
+  }
+
+  public void setPriority(int priority) {
+    this.priority = priority;
+  }
+
 }

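The DAO change follows the usual JAXB pattern in the RM webapp: each @XmlElement
name becomes a field in the XML/JSON REST payload. A minimal sketch under that
assumption (the class below is illustrative, not part of the patch):

    import javax.xml.bind.annotation.XmlAccessType;
    import javax.xml.bind.annotation.XmlAccessorType;
    import javax.xml.bind.annotation.XmlElement;
    import javax.xml.bind.annotation.XmlRootElement;

    @XmlRootElement(name = "example-definition")
    @XmlAccessorType(XmlAccessType.FIELD)
    public class ExampleDefinitionInfo {
      // Serialized as <priority> in XML or "priority" in JSON responses.
      @XmlElement(name = "priority")
      private int priority;

      public int getPriority() { return priority; }
      public void setPriority(int priority) { this.priority = priority; }
    }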
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a3697de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java
index 24c386a..1ff6a1a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java
@@ -31,6 +31,7 @@ import java.util.TreeMap;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.ReservationDefinition;
 import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.ReservationRequest;
@@ -199,6 +200,13 @@ public class ReservationSystemTestUtil {
   public static ReservationSubmissionRequest createSimpleReservationRequest(
       ReservationId reservationId, int numContainers, long arrival,
       long deadline, long duration) {
+    return createSimpleReservationRequest(reservationId, numContainers,
+        arrival, deadline, duration, Priority.UNDEFINED);
+  }
+
+  public static ReservationSubmissionRequest createSimpleReservationRequest(
+      ReservationId reservationId, int numContainers, long arrival,
+      long deadline, long duration, Priority priority) {
     // create a request with a single atomic ask
     ReservationRequest r =
         ReservationRequest.newInstance(Resource.newInstance(1024, 1),
@@ -208,7 +216,7 @@ public class ReservationSystemTestUtil {
             ReservationRequestInterpreter.R_ALL);
     ReservationDefinition rDef =
         ReservationDefinition.newInstance(arrival, deadline, reqs,
-            "testClientRMService#reservation");
+            "testClientRMService#reservation", "0", priority);
     ReservationSubmissionRequest request =
         ReservationSubmissionRequest.newInstance(rDef,
             reservationQ, reservationId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a3697de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
index 5862506..051509c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
@@ -3237,6 +3237,7 @@ The Cluster Reservation API can be used to list reservations. When listing reser
 | deadline | long | The UTC time representation of the latest time within which this reservation can be allocated. |
 | reservation-name | string | A mnemonic name of the reservation (not a valid identifier). |
 | reservation-requests | object | A list of "stages" or phases of this reservation, each describing resource requirements and duration |
+| priority | int | An integer representing the priority of the reservation. A lower number indicates a higher-priority reservation. Recurring reservations always have higher priority than non-recurring reservations. The priority of a non-recurring reservation is only compared with that of other non-recurring reservations, and likewise for recurring reservations. |
 
 ### Elements of the *reservation-requests* object
 
@@ -3500,6 +3501,7 @@ Elements of the *reservation-definition* object
 | deadline | long | The UTC time representation of the latest time within which this reservation can be allocated. |
 | reservation-name | string | A mnemonic name of the reservation (not a valid identifier). |
 | reservation-requests | object | A list of "stages" or phases of this reservation, each describing resource requirements and duration |
+| priority | int | An integer representing the priority of the reservation. A lower number indicates a higher-priority reservation. Recurring reservations always have higher priority than non-recurring reservations. The priority of a non-recurring reservation is only compared with that of other non-recurring reservations, and likewise for recurring reservations. |
 
 Elements of the *reservation-requests* object
 
@@ -3675,6 +3677,7 @@ Elements of the *reservation-definition* object
 | deadline | long | The UTC time representation of the latest time within which this reservation can be allocated. |
 | reservation-name | string | A mnemonic name of the reservation (not a valid identifier). |
 | reservation-requests | object | A list of "stages" or phases of this reservation, each describing resource requirements and duration |
+| priority | int | An integer representing the priority of the reservation. A lower number indicates a higher-priority reservation. Recurring reservations always have higher priority than non-recurring reservations. The priority of a non-recurring reservation is only compared with that of other non-recurring reservations, and likewise for recurring reservations. |
 
 Elements of the *reservation-requests* object
 




[48/57] [abbrv] hadoop git commit: HDFS-10923. Make InstrumentedLock require ReentrantLock.

Posted by in...@apache.org.
HDFS-10923. Make InstrumentedLock require ReentrantLock.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7ce6fdc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7ce6fdc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7ce6fdc

Branch: refs/heads/HDFS-10467
Commit: c7ce6fdc20fe053f0bb3bcf900ffc0e1db6feee5
Parents: 3a3697d
Author: Arpit Agarwal <ar...@apache.org>
Authored: Fri Sep 30 23:00:06 2016 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Fri Sep 30 23:00:06 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdfs/InstrumentedLock.java    | 185 ------------------
 .../hadoop/hdfs/InstrumentedReentrantLock.java  | 195 +++++++++++++++++++
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |   4 +-
 .../hadoop/hdfs/TestInstrumentedLock.java       | 166 ----------------
 .../hdfs/TestInstrumentedReentrantLock.java     | 177 +++++++++++++++++
 5 files changed, 374 insertions(+), 353 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ce6fdc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedLock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedLock.java
deleted file mode 100644
index 6279e95..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedLock.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.Condition;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Timer;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * This is a debugging class that can be used by callers to track
- * whether a specifc lock is being held for too long and periodically
- * log a warning and stack trace, if so.
- *
- * The logged warnings are throttled so that logs are not spammed.
- *
- * A new instance of InstrumentedLock can be created for each object
- * that needs to be instrumented.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class InstrumentedLock implements Lock {
-
-  private final Lock lock;
-  private final Log logger;
-  private final String name;
-  private final Timer clock;
-
-  /** Minimum gap between two lock warnings. */
-  private final long minLoggingGap;
-  /** Threshold for detecting long lock held time. */
-  private final long lockWarningThreshold;
-
-  // Tracking counters for lock statistics.
-  private volatile long lockAcquireTimestamp;
-  private final AtomicLong lastLogTimestamp;
-  private final AtomicLong warningsSuppressed = new AtomicLong(0);
-
-  /**
-   * Create a instrumented lock instance which logs a warning message
-   * when lock held time is above given threshold.
-   *
-   * @param name the identifier of the lock object
-   * @param logger this class does not have its own logger, will log to the
-   *               given logger instead
-   * @param minLoggingGapMs  the minimum time gap between two log messages,
-   *                         this is to avoid spamming to many logs
-   * @param lockWarningThresholdMs the time threshold to view lock held
-   *                               time as being "too long"
-   */
-  public InstrumentedLock(String name, Log logger, long minLoggingGapMs,
-      long lockWarningThresholdMs) {
-    this(name, logger, new ReentrantLock(),
-        minLoggingGapMs, lockWarningThresholdMs);
-  }
-
-  public InstrumentedLock(String name, Log logger, Lock lock,
-      long minLoggingGapMs, long lockWarningThresholdMs) {
-    this(name, logger, lock,
-        minLoggingGapMs, lockWarningThresholdMs, new Timer());
-  }
-
-  @VisibleForTesting
-  InstrumentedLock(String name, Log logger, Lock lock,
-      long minLoggingGapMs, long lockWarningThresholdMs, Timer clock) {
-    this.name = name;
-    this.lock = lock;
-    this.clock = clock;
-    this.logger = logger;
-    minLoggingGap = minLoggingGapMs;
-    lockWarningThreshold = lockWarningThresholdMs;
-    lastLogTimestamp = new AtomicLong(
-      clock.monotonicNow() - Math.max(minLoggingGap, lockWarningThreshold));
-  }
-
-  @Override
-  public void lock() {
-    lock.lock();
-    lockAcquireTimestamp = clock.monotonicNow();
-  }
-
-  @Override
-  public void lockInterruptibly() throws InterruptedException {
-    lock.lockInterruptibly();
-    lockAcquireTimestamp = clock.monotonicNow();
-  }
-
-  @Override
-  public boolean tryLock() {
-    if (lock.tryLock()) {
-      lockAcquireTimestamp = clock.monotonicNow();
-      return true;
-    }
-    return false;
-  }
-
-  @Override
-  public boolean tryLock(long time, TimeUnit unit) throws InterruptedException {
-    if (lock.tryLock(time, unit)) {
-      lockAcquireTimestamp = clock.monotonicNow();
-      return true;
-    }
-    return false;
-  }
-
-  @Override
-  public void unlock() {
-    long localLockReleaseTime = clock.monotonicNow();
-    long localLockAcquireTime = lockAcquireTimestamp;
-    lock.unlock();
-    check(localLockAcquireTime, localLockReleaseTime);
-  }
-
-  @Override
-  public Condition newCondition() {
-    return lock.newCondition();
-  }
-
-  @VisibleForTesting
-  void logWarning(long lockHeldTime, long suppressed) {
-    logger.warn(String.format("Lock held time above threshold: " +
-        "lock identifier: %s " +
-        "lockHeldTimeMs=%d ms. Suppressed %d lock warnings. " +
-        "The stack trace is: %s" ,
-        name, lockHeldTime, suppressed,
-        StringUtils.getStackTrace(Thread.currentThread())));
-  }
-
-  /**
-   * Log a warning if the lock was held for too long.
-   *
-   * Should be invoked by the caller immediately AFTER releasing the lock.
-   *
-   * @param acquireTime  - timestamp just after acquiring the lock.
-   * @param releaseTime - timestamp just before releasing the lock.
-   */
-  private void check(long acquireTime, long releaseTime) {
-    if (!logger.isWarnEnabled()) {
-      return;
-    }
-
-    final long lockHeldTime = releaseTime - acquireTime;
-    if (lockWarningThreshold - lockHeldTime < 0) {
-      long now;
-      long localLastLogTs;
-      do {
-        now = clock.monotonicNow();
-        localLastLogTs = lastLogTimestamp.get();
-        long deltaSinceLastLog = now - localLastLogTs;
-        // check should print log or not
-        if (deltaSinceLastLog - minLoggingGap < 0) {
-          warningsSuppressed.incrementAndGet();
-          return;
-        }
-      } while (!lastLogTimestamp.compareAndSet(localLastLogTs, now));
-      long suppressed = warningsSuppressed.getAndSet(0);
-      logWarning(lockHeldTime, suppressed);
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ce6fdc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedReentrantLock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedReentrantLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedReentrantLock.java
new file mode 100644
index 0000000..010571e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedReentrantLock.java
@@ -0,0 +1,195 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Timer;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * This is a debugging class that can be used by callers to track
+ * whether a specific lock is being held for too long and periodically
+ * log a warning and stack trace, if so.
+ *
+ * The logged warnings are throttled so that logs are not spammed.
+ *
+ * A new instance of InstrumentedReentrantLock can be created for each object
+ * that needs to be instrumented.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class InstrumentedReentrantLock implements Lock {
+
+  @VisibleForTesting
+  final ReentrantLock lock;
+  private final Log logger;
+  private final String name;
+  private final Timer clock;
+
+  /** Minimum gap between two lock warnings. */
+  private final long minLoggingGap;
+  /** Threshold for detecting long lock held time. */
+  private final long lockWarningThreshold;
+
+  // Tracking counters for lock statistics.
+  private volatile long lockAcquireTimestamp;
+  private final AtomicLong lastLogTimestamp;
+  private final AtomicLong warningsSuppressed = new AtomicLong(0);
+
+  /**
+   * Create an instrumented lock instance which logs a warning message
+   * when lock held time is above given threshold.
+   *
+   * @param name the identifier of the lock object
+   * @param logger this class does not have its own logger, will log to the
+   *               given logger instead
+   * @param minLoggingGapMs  the minimum time gap between two log messages,
+   *                         this is to avoid spamming too many logs
+   * @param lockWarningThresholdMs the time threshold to view lock held
+   *                               time as being "too long"
+   */
+  public InstrumentedReentrantLock(
+      String name, Log logger, long minLoggingGapMs,
+      long lockWarningThresholdMs) {
+    this(name, logger, new ReentrantLock(),
+        minLoggingGapMs, lockWarningThresholdMs);
+  }
+
+  public InstrumentedReentrantLock(
+      String name, Log logger, ReentrantLock lock,
+      long minLoggingGapMs, long lockWarningThresholdMs) {
+    this(name, logger, lock,
+        minLoggingGapMs, lockWarningThresholdMs, new Timer());
+  }
+
+  @VisibleForTesting
+  InstrumentedReentrantLock(
+      String name, Log logger, ReentrantLock lock,
+      long minLoggingGapMs, long lockWarningThresholdMs, Timer clock) {
+    this.name = name;
+    this.lock = lock;
+    this.clock = clock;
+    this.logger = logger;
+    minLoggingGap = minLoggingGapMs;
+    lockWarningThreshold = lockWarningThresholdMs;
+    lastLogTimestamp = new AtomicLong(
+      clock.monotonicNow() - Math.max(minLoggingGap, lockWarningThreshold));
+  }
+
+  @Override
+  public void lock() {
+    lock.lock();
+    if (lock.getHoldCount() == 1) {
+      lockAcquireTimestamp = clock.monotonicNow();
+    }
+  }
+
+  @Override
+  public void lockInterruptibly() throws InterruptedException {
+    lock.lockInterruptibly();
+    if (lock.getHoldCount() == 1) {
+      lockAcquireTimestamp = clock.monotonicNow();
+    }
+  }
+
+  @Override
+  public boolean tryLock() {
+    if (lock.tryLock() && lock.getHoldCount() == 1) {
+      lockAcquireTimestamp = clock.monotonicNow();
+      return true;
+    }
+    return false;
+  }
+
+  @Override
+  public boolean tryLock(long time, TimeUnit unit) throws InterruptedException {
+    if (lock.tryLock(time, unit) && lock.getHoldCount() == 1) {
+      lockAcquireTimestamp = clock.monotonicNow();
+      return true;
+    }
+    return false;
+  }
+
+  @Override
+  public void unlock() {
+    final boolean needReport = (lock.getHoldCount() == 1);
+    long localLockReleaseTime = clock.monotonicNow();
+    long localLockAcquireTime = lockAcquireTimestamp;
+    lock.unlock();
+    if (needReport) {
+      check(localLockAcquireTime, localLockReleaseTime);
+    }
+  }
+
+  @Override
+  public Condition newCondition() {
+    return lock.newCondition();
+  }
+
+  @VisibleForTesting
+  void logWarning(long lockHeldTime, long suppressed) {
+    logger.warn(String.format("Lock held time above threshold: " +
+        "lock identifier: %s " +
+        "lockHeldTimeMs=%d ms. Suppressed %d lock warnings. " +
+        "The stack trace is: %s" ,
+        name, lockHeldTime, suppressed,
+        StringUtils.getStackTrace(Thread.currentThread())));
+  }
+
+  /**
+   * Log a warning if the lock was held for too long.
+   *
+   * Should be invoked by the caller immediately AFTER releasing the lock.
+   *
+   * @param acquireTime  - timestamp just after acquiring the lock.
+   * @param releaseTime - timestamp just before releasing the lock.
+   */
+  private void check(long acquireTime, long releaseTime) {
+    if (!logger.isWarnEnabled()) {
+      return;
+    }
+
+    final long lockHeldTime = releaseTime - acquireTime;
+    if (lockWarningThreshold - lockHeldTime < 0) {
+      long now;
+      long localLastLogTs;
+      do {
+        now = clock.monotonicNow();
+        localLastLogTs = lastLogTimestamp.get();
+        long deltaSinceLastLog = now - localLastLogTs;
+        // check whether a warning should be logged or suppressed
+        if (deltaSinceLastLog - minLoggingGap < 0) {
+          warningsSuppressed.incrementAndGet();
+          return;
+        }
+      } while (!lastLogTimestamp.compareAndSet(localLastLogTs, now));
+      long suppressed = warningsSuppressed.getAndSet(0);
+      logWarning(lockHeldTime, suppressed);
+    }
+  }
+}

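For context, a minimal usage sketch of the new class (not part of the patch; the 2000 ms logging gap and 300 ms warning threshold are illustrative values echoing the tests below):

    import java.util.concurrent.locks.Lock;
    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.hdfs.InstrumentedReentrantLock;

    public class LockUsageSketch {
      private static final Log LOG = LogFactory.getLog(LockUsageSketch.class);
      // Warn when the outermost hold exceeds 300 ms; log at most once per 2 s.
      private final Lock lock =
          new InstrumentedReentrantLock("sketch", LOG, 2000, 300);

      void doWork() {
        lock.lock();
        try {
          // critical section; unlock() below logs a warning with the current
          // stack trace if the lock was held longer than the threshold
        } finally {
          lock.unlock();
        }
      }
    }
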
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ce6fdc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 26a2e9f..ab31f25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -58,7 +58,7 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.ExtendedBlockId;
-import org.apache.hadoop.hdfs.InstrumentedLock;
+import org.apache.hadoop.hdfs.InstrumentedReentrantLock;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -266,7 +266,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     this.conf = conf;
     this.smallBufferSize = DFSUtilClient.getSmallBufferSize(conf);
     this.datasetLock = new AutoCloseableLock(
-        new InstrumentedLock(getClass().getName(), LOG,
+        new InstrumentedReentrantLock(getClass().getName(), LOG,
           conf.getTimeDuration(
             DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY,
             DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_DEFAULT,

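Since the dataset lock is an AutoCloseableLock wrapping the instrumented lock, callers can hold it with try-with-resources. A hedged sketch (AutoCloseableLock.acquire() as exercised in the tests below; LOG and the thresholds are illustrative):

    // assuming: static final Log LOG = LogFactory.getLog(...);
    AutoCloseableLock datasetLock = new AutoCloseableLock(
        new InstrumentedReentrantLock("FsDatasetImpl", LOG, 2000, 300));

    try (AutoCloseableLock l = datasetLock.acquire()) {
      // mutate the dataset while holding the instrumented lock
    }  // close() unlocks here and may log a held-too-long warning
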
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ce6fdc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInstrumentedLock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInstrumentedLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInstrumentedLock.java
deleted file mode 100644
index f470688..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInstrumentedLock.java
+++ /dev/null
@@ -1,166 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.concurrent.locks.Lock;
-
-import org.apache.hadoop.util.AutoCloseableLock;
-import org.apache.hadoop.util.Timer;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
-import static org.mockito.Mockito.*;
-import static org.junit.Assert.*;
-
-/**
- * A test class for InstrumentedLock.
- */
-public class TestInstrumentedLock {
-
-  static final Log LOG = LogFactory.getLog(TestInstrumentedLock.class);
-
-  @Rule public TestName name = new TestName();
-
-  /**
-   * Test exclusive access of the lock.
-   * @throws Exception
-   */
-  @Test(timeout=10000)
-  public void testMultipleThread() throws Exception {
-    String testname = name.getMethodName();
-    InstrumentedLock lock = new InstrumentedLock(testname, LOG, 0, 300);
-    lock.lock();
-    try {
-      Thread competingThread = new Thread() {
-        @Override
-        public void run() {
-          assertFalse(lock.tryLock());
-        }
-      };
-      competingThread.start();
-      competingThread.join();
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  /**
-   * Test the correctness with try-with-resource syntax.
-   * @throws Exception
-   */
-  @Test(timeout=10000)
-  public void testTryWithResourceSyntax() throws Exception {
-    String testname = name.getMethodName();
-    final AtomicReference<Thread> lockThread = new AtomicReference<>(null);
-    Lock lock = new InstrumentedLock(testname, LOG, 0, 300) {
-      @Override
-      public void lock() {
-        super.lock();
-        lockThread.set(Thread.currentThread());
-      }
-      @Override
-      public void unlock() {
-        super.unlock();
-        lockThread.set(null);
-      }
-    };
-    AutoCloseableLock acl = new AutoCloseableLock(lock);
-    try (AutoCloseable localLock = acl.acquire()) {
-      assertEquals(acl, localLock);
-      Thread competingThread = new Thread() {
-        @Override
-        public void run() {
-          assertNotEquals(Thread.currentThread(), lockThread.get());
-          assertFalse(lock.tryLock());
-        }
-      };
-      competingThread.start();
-      competingThread.join();
-      assertEquals(Thread.currentThread(), lockThread.get());
-    }
-    assertNull(lockThread.get());
-  }
-
-  /**
-   * Test the lock logs warning when lock held time is greater than threshold
-   * and not log warning otherwise.
-   * @throws Exception
-   */
-  @Test(timeout=10000)
-  public void testLockLongHoldingReport() throws Exception {
-    String testname = name.getMethodName();
-    final AtomicLong time = new AtomicLong(0);
-    Timer mclock = new Timer() {
-      @Override
-      public long monotonicNow() {
-        return time.get();
-      }
-    };
-    Lock mlock = mock(Lock.class);
-
-    final AtomicLong wlogged = new AtomicLong(0);
-    final AtomicLong wsuppresed = new AtomicLong(0);
-    InstrumentedLock lock = new InstrumentedLock(
-        testname, LOG, mlock, 2000, 300, mclock) {
-      @Override
-      void logWarning(long lockHeldTime, long suppressed) {
-        wlogged.incrementAndGet();
-        wsuppresed.set(suppressed);
-      }
-    };
-
-    // do not log warning when the lock held time is short
-    lock.lock();   // t = 0
-    time.set(200);
-    lock.unlock(); // t = 200
-    assertEquals(0, wlogged.get());
-    assertEquals(0, wsuppresed.get());
-
-    lock.lock();   // t = 200
-    time.set(700);
-    lock.unlock(); // t = 700
-    assertEquals(1, wlogged.get());
-    assertEquals(0, wsuppresed.get());
-
-    // despite the lock held time is greater than threshold
-    // suppress the log warning due to the logging gap
-    // (not recorded in wsuppressed until next log message)
-    lock.lock();   // t = 700
-    time.set(1100);
-    lock.unlock(); // t = 1100
-    assertEquals(1, wlogged.get());
-    assertEquals(0, wsuppresed.get());
-
-    // log a warning message when the lock held time is greater the threshold
-    // and the logging time gap is satisfied. Also should display suppressed
-    // previous warnings.
-    time.set(2400);
-    lock.lock();   // t = 2400
-    time.set(2800);
-    lock.unlock(); // t = 2800
-    assertEquals(2, wlogged.get());
-    assertEquals(1, wsuppresed.get());
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ce6fdc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInstrumentedReentrantLock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInstrumentedReentrantLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInstrumentedReentrantLock.java
new file mode 100644
index 0000000..3374b8a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInstrumentedReentrantLock.java
@@ -0,0 +1,177 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.apache.hadoop.util.AutoCloseableLock;
+import org.apache.hadoop.util.FakeTimer;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+import static org.mockito.Mockito.*;
+import static org.junit.Assert.*;
+
+/**
+ * A test class for {@link InstrumentedReentrantLock}.
+ */
+public class TestInstrumentedReentrantLock {
+
+  static final Log LOG = LogFactory.getLog(TestInstrumentedReentrantLock.class);
+
+  @Rule public TestName name = new TestName();
+
+  /**
+   * Test exclusive access of the lock.
+   * @throws Exception
+   */
+  @Test(timeout=10000)
+  public void testMultipleThread() throws Exception {
+    String testname = name.getMethodName();
+    InstrumentedReentrantLock lock =
+        new InstrumentedReentrantLock(testname, LOG, 0, 300);
+    lock.lock();
+    try {
+      Thread competingThread = new Thread() {
+        @Override
+        public void run() {
+          assertFalse(lock.tryLock());
+        }
+      };
+      competingThread.start();
+      competingThread.join();
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  /**
+   * Test the correctness with try-with-resource syntax.
+   * @throws Exception
+   */
+  @Test(timeout=10000)
+  public void testTryWithResourceSyntax() throws Exception {
+    String testname = name.getMethodName();
+    final AtomicReference<Thread> lockThread = new AtomicReference<>(null);
+    Lock lock = new InstrumentedReentrantLock(testname, LOG, 0, 300) {
+      @Override
+      public void lock() {
+        super.lock();
+        lockThread.set(Thread.currentThread());
+      }
+      @Override
+      public void unlock() {
+        super.unlock();
+        lockThread.set(null);
+      }
+    };
+    AutoCloseableLock acl = new AutoCloseableLock(lock);
+    try (AutoCloseable localLock = acl.acquire()) {
+      assertEquals(acl, localLock);
+      Thread competingThread = new Thread() {
+        @Override
+        public void run() {
+          assertNotEquals(Thread.currentThread(), lockThread.get());
+          assertFalse(lock.tryLock());
+        }
+      };
+      competingThread.start();
+      competingThread.join();
+      assertEquals(Thread.currentThread(), lockThread.get());
+    }
+    assertNull(lockThread.get());
+  }
+
+  /**
+   * Test that the lock logs a warning when the lock held time exceeds the
+   * threshold, and does not log a warning otherwise.
+   * @throws Exception
+   */
+  @Test(timeout=10000)
+  public void testLockLongHoldingReport() throws Exception {
+    String testname = name.getMethodName();
+    FakeTimer mclock = new FakeTimer();
+    final int warningThreshold = 500;
+    final int minLoggingGap = warningThreshold * 10;
+
+    final AtomicLong wlogged = new AtomicLong(0);
+    final AtomicLong wsuppresed = new AtomicLong(0);
+    InstrumentedReentrantLock lock = new InstrumentedReentrantLock(
+        testname, LOG, new ReentrantLock(), minLoggingGap,
+        warningThreshold, mclock) {
+      @Override
+      void logWarning(long lockHeldTime, long suppressed) {
+        wlogged.incrementAndGet();
+        wsuppresed.set(suppressed);
+      }
+    };
+
+    // do not log warning when the lock held time is <= warningThreshold.
+    lock.lock();
+    mclock.advance(warningThreshold);
+    lock.unlock();
+    assertEquals(0, wlogged.get());
+    assertEquals(0, wsuppresed.get());
+
+    // log a warning when the lock held time exceeds the threshold.
+    lock.lock();
+    mclock.advance(warningThreshold + 1);
+    assertEquals(1, lock.lock.getHoldCount());
+    lock.unlock();
+    assertEquals(1, wlogged.get());
+    assertEquals(0, wsuppresed.get());
+
+    // although the lock held time is greater than the threshold,
+    // suppress the log warning due to the logging gap
+    // (not recorded in wsuppressed until next log message)
+    lock.lock();
+    mclock.advance(warningThreshold + 1);
+    lock.unlock();
+    assertEquals(1, wlogged.get());
+    assertEquals(0, wsuppresed.get());
+
+    // log a warning message when the lock held time is greater than the
+    // threshold and the logging time gap is satisfied. Also should display
+    // suppressed previous warnings.
+    lock.lock();
+    mclock.advance(minLoggingGap + 1);
+    lock.unlock();
+    assertEquals(2, wlogged.get());
+    assertEquals(1, wsuppresed.get());
+
+    // Ensure that nested acquisitions do not log.
+    wlogged.set(0);
+    wsuppresed.set(0);
+    lock.lock();
+    lock.lock();
+    mclock.advance(minLoggingGap + 1);
+    lock.unlock();
+    assertEquals(0, wlogged.get());    // No warnings on nested release.
+    assertEquals(0, wsuppresed.get());
+    lock.unlock();
+    assertEquals(1, wlogged.get());    // Last release immediately logs.
+    assertEquals(0, wsuppresed.get());
+  }
+}


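The rewritten test replaces the old version's mocked Timer with FakeTimer, which advances a virtual clock deterministically. A small sketch of the pattern (assuming, as the test above does, that FakeTimer.advance() moves monotonicNow() forward by the given milliseconds):

    FakeTimer clock = new FakeTimer();
    long t0 = clock.monotonicNow();
    clock.advance(500);                       // no real sleeping involved
    assert clock.monotonicNow() - t0 == 500;  // virtual time moved 500 ms
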


[29/57] [abbrv] hadoop git commit: HADOOP-13663 Index out of range in SysInfoWindows. Contributed by Inigo Goiri

Posted by in...@apache.org.
HADOOP-13663 Index out of range in SysInfoWindows. Contributed by Inigo Goiri


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1518cb95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1518cb95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1518cb95

Branch: refs/heads/HDFS-10467
Commit: 1518cb9532cbedeada1b3d880f4ef1059301e828
Parents: 47f8092
Author: Steve Loughran <st...@apache.org>
Authored: Thu Sep 29 11:35:00 2016 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Thu Sep 29 11:35:00 2016 +0100

----------------------------------------------------------------------
 .../org/apache/hadoop/util/SysInfoWindows.java  | 58 +++++++++++---------
 .../apache/hadoop/util/TestSysInfoWindows.java  |  7 ++-
 2 files changed, 37 insertions(+), 28 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1518cb95/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
index 490c127..e21adac 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
@@ -100,36 +100,40 @@ public class SysInfoWindows extends SysInfo {
       String sysInfoStr = getSystemInfoInfoFromShell();
       if (sysInfoStr != null) {
         final int sysInfoSplitCount = 11;
-        String[] sysInfo = sysInfoStr.substring(0, sysInfoStr.indexOf("\r\n"))
-            .split(",");
-        if (sysInfo.length == sysInfoSplitCount) {
-          try {
-            vmemSize = Long.parseLong(sysInfo[0]);
-            memSize = Long.parseLong(sysInfo[1]);
-            vmemAvailable = Long.parseLong(sysInfo[2]);
-            memAvailable = Long.parseLong(sysInfo[3]);
-            numProcessors = Integer.parseInt(sysInfo[4]);
-            cpuFrequencyKhz = Long.parseLong(sysInfo[5]);
-            cumulativeCpuTimeMs = Long.parseLong(sysInfo[6]);
-            storageBytesRead = Long.parseLong(sysInfo[7]);
-            storageBytesWritten = Long.parseLong(sysInfo[8]);
-            netBytesRead = Long.parseLong(sysInfo[9]);
-            netBytesWritten = Long.parseLong(sysInfo[10]);
-            if (lastCumCpuTimeMs != -1) {
-              /**
-               * This number will be the aggregated usage across all cores in
-               * [0.0, 100.0]. For example, it will be 400.0 if there are 8
-               * cores and each of them is running at 50% utilization.
-               */
-              cpuUsage = (cumulativeCpuTimeMs - lastCumCpuTimeMs)
-                  * 100F / refreshInterval;
+        int index = sysInfoStr.indexOf("\r\n");
+        if (index >= 0) {
+          String[] sysInfo = sysInfoStr.substring(0, index).split(",");
+          if (sysInfo.length == sysInfoSplitCount) {
+            try {
+              vmemSize = Long.parseLong(sysInfo[0]);
+              memSize = Long.parseLong(sysInfo[1]);
+              vmemAvailable = Long.parseLong(sysInfo[2]);
+              memAvailable = Long.parseLong(sysInfo[3]);
+              numProcessors = Integer.parseInt(sysInfo[4]);
+              cpuFrequencyKhz = Long.parseLong(sysInfo[5]);
+              cumulativeCpuTimeMs = Long.parseLong(sysInfo[6]);
+              storageBytesRead = Long.parseLong(sysInfo[7]);
+              storageBytesWritten = Long.parseLong(sysInfo[8]);
+              netBytesRead = Long.parseLong(sysInfo[9]);
+              netBytesWritten = Long.parseLong(sysInfo[10]);
+              if (lastCumCpuTimeMs != -1) {
+                /**
+                 * This number will be the aggregated usage across all cores in
+                 * [0.0, 100.0]. For example, it will be 400.0 if there are 8
+                 * cores and each of them is running at 50% utilization.
+                 */
+                cpuUsage = (cumulativeCpuTimeMs - lastCumCpuTimeMs)
+                    * 100F / refreshInterval;
+              }
+            } catch (NumberFormatException nfe) {
+              LOG.warn("Error parsing sysInfo", nfe);
             }
-          } catch (NumberFormatException nfe) {
-            LOG.warn("Error parsing sysInfo", nfe);
+          } else {
+            LOG.warn("Expected split length of sysInfo to be "
+                + sysInfoSplitCount + ". Got " + sysInfo.length);
           }
         } else {
-          LOG.warn("Expected split length of sysInfo to be "
-              + sysInfoSplitCount + ". Got " + sysInfo.length);
+          LOG.warn("Wrong output from sysInfo: " + sysInfoStr);
         }
       }
     }

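The crux of the fix: String.indexOf returns -1 when the output lacks a \r\n terminator, and substring(0, -1) then throws StringIndexOutOfBoundsException, so the index is now checked first. A standalone sketch of the guarded pattern (input value illustrative):

    String sysInfoStr = "output-with-no-crlf";
    int index = sysInfoStr.indexOf("\r\n");
    if (index >= 0) {
      String[] sysInfo = sysInfoStr.substring(0, index).split(",");
      // parse the comma-separated fields ...
    } else {
      // previously this input crashed the refresh; now it is only logged
      System.err.println("Wrong output from sysInfo: " + sysInfoStr);
    }
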
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1518cb95/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoWindows.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoWindows.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoWindows.java
index 5551576..fc99aeb 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoWindows.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoWindows.java
@@ -141,10 +141,15 @@ public class TestSysInfoWindows {
   @Test(timeout = 10000)
   public void errorInGetSystemInfo() {
     SysInfoWindowsMock tester = new SysInfoWindowsMock();
-    // info str derived from windows shell command has \r\n termination
+    // info str derived from windows shell command is null
     tester.setSysinfoString(null);
     // call a method to refresh values
     tester.getAvailablePhysicalMemorySize();
+
+    // info str derived from windows shell command with no \r\n termination
+    tester.setSysinfoString("");
+    // call a method to refresh values
+    tester.getAvailablePhysicalMemorySize();
   }
 
 }




[34/57] [abbrv] hadoop git commit: Revert "HADOOP-13081. add the ability to create multiple UGIs/subjects from one kerberos login. Contributed by Sergey Shelukhin."

Posted by in...@apache.org.
Revert "HADOOP-13081. add the ability to create multiple UGIs/subjects from one kerberos login. Contributed by Sergey Shelukhin."

This reverts commit 0458a2af6e925d023882714e8b7b0568eca7a775.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e0ea27e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e0ea27e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e0ea27e

Branch: refs/heads/HDFS-10467
Commit: 1e0ea27e9602efba102b2145d0240ecc9d5845a1
Parents: 236ac77
Author: Chris Nauroth <cn...@apache.org>
Authored: Thu Sep 29 13:59:09 2016 -0700
Committer: Chris Nauroth <cn...@apache.org>
Committed: Thu Sep 29 13:59:09 2016 -0700

----------------------------------------------------------------------
 .../hadoop/security/UserGroupInformation.java   | 29 +-------------------
 .../security/TestUserGroupInformation.java      | 27 ------------------
 2 files changed, 1 insertion(+), 55 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e0ea27e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index ed3a9d0..bcdfd53 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -38,7 +38,6 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -652,33 +651,7 @@ public class UserGroupInformation {
     }
     this.isKrbTkt = KerberosUtil.hasKerberosTicket(subject);
   }
-
-  /**
-   * Copies the Subject of this UGI and creates a new UGI with the new subject.
-   * This can be used to add credentials (e.g. tokens) to different copies of
-   * the same UGI, allowing multiple users with different tokens to reuse the
-   * UGI without re-authenticating with Kerberos.
-   * @return clone of the UGI with a new subject.
-   */
-  @InterfaceAudience.Public
-  @InterfaceStability.Evolving
-  public UserGroupInformation copySubjectAndUgi() {
-    Subject subj = getSubject();
-    // The ctor will set other fields automatically from the principals.
-    return new UserGroupInformation(new Subject(false, subj.getPrincipals(),
-        cloneCredentials(subj.getPublicCredentials()),
-        cloneCredentials(subj.getPrivateCredentials())));
-  }
-
-  private static Set<Object> cloneCredentials(Set<Object> old) {
-    Set<Object> set = new HashSet<>();
-    // Make sure Hadoop credentials objects do not reuse the maps.
-    for (Object o : old) {
-      set.add(o instanceof Credentials ? new Credentials((Credentials)o) : o);
-    }
-    return set;
-  }
-
+  
   /**
    * checks if logged in using kerberos
    * @return true if the subject logged via keytab or has a Kerberos TGT

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e0ea27e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
index e45d70d..09a5807 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
@@ -50,7 +50,6 @@ import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
 import java.util.ConcurrentModificationException;
 import java.util.LinkedHashSet;
-import java.util.List;
 import java.util.Set;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS;
@@ -899,32 +898,6 @@ public class TestUserGroupInformation {
     assertEquals(1, tokens.size());
   }
 
-  @Test(timeout = 30000)
-  public void testCopySubjectAndUgi() throws IOException {
-    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.SIMPLE, conf);
-    UserGroupInformation.setConfiguration(conf);
-    UserGroupInformation u1 = UserGroupInformation.getLoginUser();
-    assertNotNull(u1);
-    @SuppressWarnings("unchecked")
-    Token<? extends TokenIdentifier> tmpToken = mock(Token.class);
-    u1.addToken(tmpToken);
-
-    UserGroupInformation u2 = u1.copySubjectAndUgi();
-    assertEquals(u1.getAuthenticationMethod(), u2.getAuthenticationMethod());
-    assertNotSame(u1.getSubject(), u2.getSubject());
-    Credentials c1 = u1.getCredentials(), c2 = u2.getCredentials();
-    List<Text> sc1 = c1.getAllSecretKeys(), sc2 = c2.getAllSecretKeys();
-    assertArrayEquals(sc1.toArray(new Text[0]), sc2.toArray(new Text[0]));
-    Collection<Token<? extends TokenIdentifier>> ts1 = c1.getAllTokens(),
-        ts2 = c2.getAllTokens();
-    assertArrayEquals(ts1.toArray(new Token[0]), ts2.toArray(new Token[0]));
-    @SuppressWarnings("unchecked")
-    Token<? extends TokenIdentifier> token = mock(Token.class);
-    u2.addToken(token);
-    assertTrue(u2.getCredentials().getAllTokens().contains(token));
-    assertFalse(u1.getCredentials().getAllTokens().contains(token));
-  }
-
   /**
    * This test checks a race condition between getting and adding tokens for
    * the current user.  Calling UserGroupInformation.getCurrentUser() returns




[51/57] [abbrv] hadoop git commit: YARN-4855. Should check if node exists when replacing nodelabels. Contributed by Tao Jie

Posted by in...@apache.org.
YARN-4855. Should check if node exists when replacing nodelabels. Contributed by Tao Jie


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e130c30
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e130c30
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e130c30

Branch: refs/heads/HDFS-10467
Commit: 6e130c308cf1b97e8386b6a43c26d72d2850119c
Parents: 8285703
Author: Naganarasimha <na...@apache.org>
Authored: Mon Oct 3 02:02:26 2016 -0400
Committer: Naganarasimha <na...@apache.org>
Committed: Mon Oct 3 02:02:26 2016 -0400

----------------------------------------------------------------------
 .../ReplaceLabelsOnNodeRequest.java             |   8 ++
 ..._server_resourcemanager_service_protos.proto |   2 +-
 .../hadoop/yarn/client/cli/RMAdminCLI.java      |  39 ++++---
 .../hadoop/yarn/client/cli/TestRMAdminCLI.java  |   3 +-
 .../pb/ReplaceLabelsOnNodeRequestPBImpl.java    |  14 ++-
 .../server/resourcemanager/AdminService.java    |  46 +++++++++
 .../resourcemanager/TestRMAdminService.java     | 103 ++++++++++++++++++-
 7 files changed, 197 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e130c30/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReplaceLabelsOnNodeRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReplaceLabelsOnNodeRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReplaceLabelsOnNodeRequest.java
index 28e261a..1b8e687 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReplaceLabelsOnNodeRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReplaceLabelsOnNodeRequest.java
@@ -44,4 +44,12 @@ public abstract class ReplaceLabelsOnNodeRequest {
   @Public
   @Evolving
   public abstract Map<NodeId, Set<String>> getNodeToLabels();
+
+  @Public
+  @Evolving
+  public abstract void setFailOnUnknownNodes(boolean failOnUnknownNodes);
+
+  @Public
+  @Evolving
+  public abstract boolean getFailOnUnknownNodes();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e130c30/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
index b9f30db..16d8097 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
@@ -99,10 +99,10 @@ message RemoveFromClusterNodeLabelsResponseProto {
 
 message ReplaceLabelsOnNodeRequestProto {
   repeated NodeIdToLabelsNameProto nodeToLabels = 1;
+  optional bool failOnUnknownNodes = 2;
 }
 
 message ReplaceLabelsOnNodeResponseProto {
-  
 }
 
 message UpdateNodeLabelsResponseProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e130c30/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
index 7a898a1..640f8e3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
@@ -130,11 +130,13 @@ public class RMAdminCLI extends HAAdmin {
               new UsageInfo("<label1,label2,label3> (label splitted by \",\")",
                   "remove from cluster node labels"))
           .put("-replaceLabelsOnNode",
-              new UsageInfo(
+              new UsageInfo("[-failOnUnknownNodes] " +
                   "<\"node1[:port]=label1,label2 node2[:port]=label1,label2\">",
-                  "replace labels on nodes"
-                      + " (please note that we do not support specifying multiple"
-                      + " labels on a single host for now.)"))
+              "replace labels on nodes"
+                  + " (please note that we do not support specifying multiple"
+                  + " labels on a single host for now.)\n\t\t"
+                  + "[-failOnUnknownNodes] is optional, when we set this"
+                  + " option, it will fail if specified nodes are unknown."))
           .put("-directlyAccessNodeLabelStore",
               new UsageInfo("", "This is DEPRECATED, will be removed in future releases. Directly access node label store, "
                   + "with this option, all node label related operations"
@@ -246,8 +248,8 @@ public class RMAdminCLI extends HAAdmin {
         " [-addToClusterNodeLabels <\"label1(exclusive=true),"
             + "label2(exclusive=false),label3\">]" +
         " [-removeFromClusterNodeLabels <label1,label2,label3>]" +
-        " [-replaceLabelsOnNode <\"node1[:port]=label1,label2" +
-        " node2[:port]=label1\">]" +
+        " [-replaceLabelsOnNode [-failOnUnknownNodes] "
+            + "<\"node1[:port]=label1,label2 node2[:port]=label1\">]" +
         " [-directlyAccessNodeLabelStore]" +
         " [-refreshClusterMaxPriority]" +
         " [-updateNodeResource [NodeID] [MemSize] [vCores]" +
@@ -302,7 +304,7 @@ public class RMAdminCLI extends HAAdmin {
     return ClientRMProxy.createRMProxy(conf,
         ResourceManagerAdministrationProtocol.class);
   }
-  
+
   private int refreshQueues() throws IOException, YarnException {
     // Refresh the queue properties
     ResourceManagerAdministrationProtocol adminProtocol = createAdminProtocol();
@@ -657,14 +659,14 @@ public class RMAdminCLI extends HAAdmin {
     return map;
   }
 
-  private int replaceLabelsOnNodes(String args) throws IOException,
-      YarnException {
+  private int replaceLabelsOnNodes(String args, boolean failOnUnknownNodes)
+      throws IOException, YarnException {
     Map<NodeId, Set<String>> map = buildNodeLabelsMapFromStr(args);
-    return replaceLabelsOnNodes(map);
+    return replaceLabelsOnNodes(map, failOnUnknownNodes);
   }
 
-  private int replaceLabelsOnNodes(Map<NodeId, Set<String>> map)
-      throws IOException, YarnException {
+  private int replaceLabelsOnNodes(Map<NodeId, Set<String>> map,
+      boolean failOnUnknownNodes) throws IOException, YarnException {
     if (directlyAccessNodeLabelStore) {
       getNodeLabelManagerInstance(getConf()).replaceLabelsOnNode(map);
     } else {
@@ -672,11 +674,12 @@ public class RMAdminCLI extends HAAdmin {
           createAdminProtocol();
       ReplaceLabelsOnNodeRequest request =
           ReplaceLabelsOnNodeRequest.newInstance(map);
+      request.setFailOnUnknownNodes(failOnUnknownNodes);
       adminProtocol.replaceLabelsOnNode(request);
     }
     return 0;
   }
-  
+
   @Override
   public int run(String[] args) throws Exception {
     // -directlyAccessNodeLabelStore is a additional option for node label
@@ -783,8 +786,16 @@ public class RMAdminCLI extends HAAdmin {
           System.err.println(NO_MAPPING_ERR_MSG);
           printUsage("", isHAEnabled);
           exitCode = -1;
+        } else if ("-failOnUnknownNodes".equals(args[i])) {
+          if (i + 1 >= args.length) {
+            System.err.println(NO_MAPPING_ERR_MSG);
+            printUsage("", isHAEnabled);
+            exitCode = -1;
+          } else {
+            exitCode = replaceLabelsOnNodes(args[i + 1], true);
+          }
         } else {
-          exitCode = replaceLabelsOnNodes(args[i]);
+          exitCode = replaceLabelsOnNodes(args[i], false);
         }
       } else {
         exitCode = -1;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e130c30/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
index bea6e39..9e20a43 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
@@ -469,7 +469,7 @@ public class TestRMAdminCLI {
               "[username]] [-addToClusterNodeLabels " +
               "<\"label1(exclusive=true),label2(exclusive=false),label3\">] " +
               "[-removeFromClusterNodeLabels <label1,label2,label3>] " +
-              "[-replaceLabelsOnNode " +
+              "[-replaceLabelsOnNode [-failOnUnknownNodes] " +
               "<\"node1[:port]=label1,label2 node2[:port]=label1\">] " +
               "[-directlyAccessNodeLabelStore] [-refreshClusterMaxPriority] " +
               "[-updateNodeResource [NodeID] [MemSize] [vCores] " +
@@ -564,6 +564,7 @@ public class TestRMAdminCLI {
               + " [username]] [-addToClusterNodeLabels <\"label1(exclusive=true),"
                   + "label2(exclusive=false),label3\">]"
               + " [-removeFromClusterNodeLabels <label1,label2,label3>] [-replaceLabelsOnNode "
+              + "[-failOnUnknownNodes] "
               + "<\"node1[:port]=label1,label2 node2[:port]=label1\">] [-directlyAccessNodeLabelStore] "
               + "[-refreshClusterMaxPriority] "
               + "[-updateNodeResource [NodeID] [MemSize] [vCores] "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e130c30/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeRequestPBImpl.java
index 22e561c..3b15b27 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeRequestPBImpl.java
@@ -146,10 +146,22 @@ public class ReplaceLabelsOnNodeRequestPBImpl extends
     nodeIdToLabels.putAll(map);
   }
 
+  @Override
+  public boolean getFailOnUnknownNodes() {
+    ReplaceLabelsOnNodeRequestProtoOrBuilder p = viaProto ? proto : builder;
+    return p.getFailOnUnknownNodes();
+  }
+
+  @Override
+  public void setFailOnUnknownNodes(boolean failOnUnknownNodes) {
+    maybeInitBuilder();
+    builder.setFailOnUnknownNodes(failOnUnknownNodes);
+  }
+
   private NodeIdProto convertToProtoFormat(NodeId t) {
     return ((NodeIdPBImpl) t).getProto();
   }
-  
+
   @Override
   public int hashCode() {
     assert false : "hashCode not designed";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e130c30/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
index db55264..33daf7f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
@@ -21,6 +21,9 @@ package org.apache.hadoop.yarn.server.resourcemanager;
 import java.io.IOException;
 import java.io.InputStream;
 import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
@@ -806,6 +809,49 @@ public class AdminService extends CompositeService implements
 
     ReplaceLabelsOnNodeResponse response =
         recordFactory.newRecordInstance(ReplaceLabelsOnNodeResponse.class);
+
+    if (request.getFailOnUnknownNodes()) {
+      // verify if nodes have registered to RM
+      List<NodeId> unknownNodes = new ArrayList<>();
+      for (NodeId requestedNode : request.getNodeToLabels().keySet()) {
+        boolean isKnown = false;
+        // both active and inactive nodes are recognized as known nodes
+        if (requestedNode.getPort() != 0) {
+          if (rmContext.getRMNodes().containsKey(requestedNode)
+              || rmContext.getInactiveRMNodes().containsKey(requestedNode)) {
+            isKnown = true;
+          }
+        } else {
+          for (NodeId knownNode : rmContext.getRMNodes().keySet()) {
+            if (knownNode.getHost().equals(requestedNode.getHost())) {
+              isKnown = true;
+              break;
+            }
+          }
+          if (!isKnown) {
+            for (NodeId knownNode : rmContext.getInactiveRMNodes().keySet()) {
+              if (knownNode.getHost().equals(requestedNode.getHost())) {
+                isKnown = true;
+                break;
+              }
+            }
+          }
+        }
+        if (!isKnown) {
+          unknownNodes.add(requestedNode);
+        }
+      }
+
+      if (!unknownNodes.isEmpty()) {
+        RMAuditLogger.logFailure(user.getShortUserName(), operation, "",
+            "AdminService",
+            "Failed to replace labels as there are unknown nodes:"
+                + Arrays.toString(unknownNodes.toArray()));
+        throw RPCUtil.getRemoteException(new IOException(
+            "Failed to replace labels as there are unknown nodes:"
+                + Arrays.toString(unknownNodes.toArray())));
+      }
+    }
     try {
       rmContext.getNodeLabelManager().replaceLabelsOnNode(
           request.getNodeToLabels());

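Putting the pieces together, a client opts into strict validation through the new flag on the request. A sketch mirroring the new tests below (host name and label are illustrative; adminProtocol is the ResourceManagerAdministrationProtocol proxy created as in the CLI change above, and the Guava ImmutableMap/ImmutableSet and YARN NodeId imports match the test code). On the command line the same behavior is enabled with the new -failOnUnknownNodes flag:

    ReplaceLabelsOnNodeRequest request = ReplaceLabelsOnNodeRequest.newInstance(
        ImmutableMap.of(NodeId.newInstance("host1", 0),
            (Set<String>) ImmutableSet.of("x")));
    // fail fast if host1 has not registered with the RM (active or inactive)
    request.setFailOnUnknownNodes(true);
    adminProtocol.replaceLabelsOnNode(request);
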
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e130c30/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
index 0b65c0b..a3022f7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
@@ -28,6 +28,7 @@ import java.io.IOException;
 import java.io.PrintWriter;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
@@ -64,9 +65,9 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequ
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.DynamicResourceConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -1086,6 +1087,106 @@ public class TestRMAdminService {
   }
 
   @Test
+  public void testModifyLabelsOnUnknownNodes() throws IOException,
+      YarnException {
+    // create RM, set it ACTIVE, and register some active and inactive nodes
+    rm = new MockRM();
+
+    ((RMContextImpl) rm.getRMContext())
+        .setHAServiceState(HAServiceState.ACTIVE);
+    Map<NodeId, RMNode> rmNodes = rm.getRMContext().getRMNodes();
+    rmNodes.put(NodeId.newInstance("host1", 1111),
+        new RMNodeImpl(null, rm.getRMContext(), "host1", 0, 0, null, null,
+                null));
+    rmNodes.put(NodeId.newInstance("host2", 2222),
+            new RMNodeImpl(null, rm.getRMContext(), "host2", 0, 0, null, null,
+                null));
+    rmNodes.put(NodeId.newInstance("host3", 3333),
+            new RMNodeImpl(null, rm.getRMContext(), "host3", 0, 0, null, null,
+                null));
+    Map<NodeId, RMNode> rmInactiveNodes = rm.getRMContext()
+        .getInactiveRMNodes();
+    rmInactiveNodes.put(NodeId.newInstance("host4", 4444),
+        new RMNodeImpl(null, rm.getRMContext(), "host4", 0, 0, null, null,
+                null));
+    RMNodeLabelsManager labelMgr = rm.rmContext.getNodeLabelManager();
+
+    // by default, distributed configuration for node labels is disabled, so
+    // this should pass
+    labelMgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x",
+        "y"));
+    // replace known node
+    ReplaceLabelsOnNodeRequest request1 = ReplaceLabelsOnNodeRequest
+        .newInstance(ImmutableMap.of(NodeId.newInstance("host1", 1111),
+            (Set<String>) ImmutableSet.of("x")));
+    request1.setFailOnUnknownNodes(true);
+    try {
+      rm.adminService.replaceLabelsOnNode(request1);
+    } catch (Exception ex) {
+      fail("should not fail on known node");
+    }
+
+    // replace known node with wildcard port
+    ReplaceLabelsOnNodeRequest request2 = ReplaceLabelsOnNodeRequest
+        .newInstance(ImmutableMap.of(NodeId.newInstance("host1", 0),
+            (Set<String>) ImmutableSet.of("x")));
+    request2.setFailOnUnknownNodes(true);
+    try {
+      rm.adminService.replaceLabelsOnNode(request2);
+    } catch (Exception ex) {
+      fail("should not fail on known node");
+    }
+
+    // replace unknown node
+    ReplaceLabelsOnNodeRequest request3 = ReplaceLabelsOnNodeRequest
+        .newInstance(ImmutableMap.of(NodeId.newInstance("host5", 0),
+            (Set<String>) ImmutableSet.of("x")));
+    request3.setFailOnUnknownNodes(true);
+    try {
+      rm.adminService.replaceLabelsOnNode(request3);
+      fail("Should fail on unknown node");
+    } catch (Exception ex) {
+    }
+
+    // replace known node but wrong port
+    ReplaceLabelsOnNodeRequest request4 = ReplaceLabelsOnNodeRequest
+        .newInstance(ImmutableMap.of(NodeId.newInstance("host2", 1111),
+            (Set<String>) ImmutableSet.of("x")));
+    request4.setFailOnUnknownNodes(true);
+    try {
+      rm.adminService.replaceLabelsOnNode(request4);
+      fail("Should fail on node with wrong port");
+    } catch (Exception ex) {
+    }
+
+    // replace a non-existent node without checking
+    ReplaceLabelsOnNodeRequest request5 = ReplaceLabelsOnNodeRequest
+        .newInstance(ImmutableMap.of(NodeId.newInstance("host5", 0),
+            (Set<String>) ImmutableSet.of("x")));
+    request5.setFailOnUnknownNodes(false);
+    try {
+      rm.adminService.replaceLabelsOnNode(request5);
+    } catch (Exception ex) {
+      fail("Should not fail on unknown node when "
+          + "fail-on-unkown-nodes is set false");
+    }
+
+    // replace on inactive node
+    ReplaceLabelsOnNodeRequest request6 = ReplaceLabelsOnNodeRequest
+        .newInstance(ImmutableMap.of(NodeId.newInstance("host4", 0),
+            (Set<String>) ImmutableSet.of("x")));
+    request6.setFailOnUnknownNodes(true);
+    try {
+      rm.adminService.replaceLabelsOnNode(request6);
+    } catch (Exception ex) {
+      fail("should not fail on inactive node");
+    }
+
+    rm.close();
+  }
+
+  @Test
   public void testRemoveClusterNodeLabelsWithCentralizedConfigurationDisabled()
       throws IOException, YarnException {
     // create RM and set it's ACTIVE




[09/57] [abbrv] hadoop git commit: HADOOP-13544. JDiff reports unnecessarily show unannotated APIs and cause confusion while our javadocs only show annotated and public APIs. (vinodkv via wangda)

Posted by in...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/875062b5/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.7.2.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.7.2.xml b/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.7.2.xml
index 5ef99b2..47e64d8 100644
--- a/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.7.2.xml
+++ b/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.7.2.xml
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
 <!-- Generated by the JDiff Javadoc doclet -->
 <!-- (http://www.jdiff.org) -->
-<!-- on Thu Aug 18 16:00:16 PDT 2016 -->
+<!-- on Wed Aug 24 13:50:51 PDT 2016 -->
 
 <api
   xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
@@ -9,7 +9,7 @@
   name="Apache Hadoop Common 2.7.2"
   jdversion="1.0.9">
 
-<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet -docletpath /Users/wtan/project/github/hadoop-common-trunk/hadoop-common-project/hadoop-common/target/hadoop-annotations.jar:/Users/wtan/project/github/hadoop-common-trunk/hadoop-common-project/hadoop-common/target/jdiff.jar -verbose -classpath /Users/wtan/project/github/hadoop-common-trunk/hadoop-common-project/hadoop-common/target/classes:/Users/wtan/project/github/hadoop-common-trunk/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_92.jdk/Contents/Home/lib/tools.jar:/Users/wtan/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/wtan/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/wtan/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/wtan/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/wtan/.m2/repository/commons-
 httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/wtan/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/wtan/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/wtan/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/wtan/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/Users/wtan/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/wtan/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/wtan/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/wtan/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/wtan/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/wtan/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/wtan/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/wtan/.m2/repository/javax/xml/bind/jaxb-ap
 i/2.2.2/jaxb-api-2.2.2.jar:/Users/wtan/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/wtan/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/wtan/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/wtan/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/wtan/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/wtan/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/wtan/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/wtan/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/wtan/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/wtan/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/wtan/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/wtan/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.ja
 r:/Users/wtan/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/wtan/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/wtan/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/wtan/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/wtan/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/wtan/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/wtan/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/wtan/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/wtan/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/wtan/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/wtan/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1
 .jar:/Users/wtan/.m2/repository/org/apache/ant/ant/1.8.1/ant-1.8.1.jar:/Users/wtan/.m2/repository/org/apache/ant/ant-launcher/1.8.1/ant-launcher-1.8.1.jar:/Users/wtan/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/wtan/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/wtan/project/github/hadoop-common-trunk/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/wtan/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/wtan/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/wtan/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/wtan/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/wtan/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/wtan/.m2/repository/com/jcraft/j
 sch/0.1.42/jsch-0.1.42.jar:/Users/wtan/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/wtan/.m2/repository/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/wtan/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/wtan/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/wtan/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/wtan/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/wtan/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/wtan/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar -sourcepath /Users/wtan/project/github/hadoop-common-trunk/hadoop-common-project/hadoop-common/src/main/java -doclet org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet -docletpath /Users/wtan/project/github/hadoop-common-trunk/hadoop-common-project/hadoop-c
 ommon/target/hadoop-annotations.jar:/Users/wtan/project/github/hadoop-common-trunk/hadoop-common-project/hadoop-common/target/jdiff.jar -apidir /Users/wtan/project/github/hadoop-common-trunk/hadoop-common-project/hadoop-common/target/site/jdiff/xml -apiname Apache Hadoop Common 2.7.2 -->
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/jdiff.jar -verbose -classpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/classes:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_40.jdk/Contents/Home/lib/tools.jar:/Users/vinodkv/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/vinodkv/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/vinodkv/.m2/repository/xml
 enc/xmlenc/0.52/xmlenc-0.52.jar:/Users/vinodkv/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/vinodkv/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/vinodkv/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/vinodkv/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/vinodkv/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/Users/vinodkv/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/vinodkv/.m2/repository/com
 /sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/vinodkv/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/vinodkv/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/vinodkv/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/vinodkv/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/vinodkv/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/vinodkv/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/vinodkv/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/vinodkv/.m2/repository/org/apache/
 httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/vinodkv/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/vinodkv/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/vinodkv/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/vinodkv/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/
 Users/vinodkv/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/vinodkv/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/vinodkv/.m2/repository/org/apache/ant/ant/1.8.1/ant-1.8.1.jar:/Users/vinodkv/.m2/repository/org/apache/ant/ant-launcher/1.8.1/ant-launcher-1.8.1.jar:/Users/vinodkv/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/vinodkv/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/vinodkv/.m2/re
 pository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/vinodkv/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/vinodkv/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/vinodkv/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/vinodkv/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/vinodkv/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/vinodkv/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar -sourcepath /Users/vinodkv/Workspace/ec
 lipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/src/main/java -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/jdiff.jar -apidir /Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/site/jdiff/xml -apiname Apache Hadoop Common 2.7.2 -->
 <package name="org.apache.hadoop">
   <!-- start class org.apache.hadoop.HadoopIllegalArgumentException -->
   <class name="HadoopIllegalArgumentException" extends="java.lang.IllegalArgumentException"
@@ -1547,91 +1547,6 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.conf.Configuration -->
-  <!-- start class org.apache.hadoop.conf.Configuration.DeprecationDelta -->
-  <class name="Configuration.DeprecationDelta" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="DeprecationDelta" type="java.lang.String, java.lang.String, java.lang.String"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <constructor name="DeprecationDelta" type="java.lang.String, java.lang.String"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="getKey" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getNewKeys" return="java.lang.String[]"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getCustomMessage" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <doc>
-    <![CDATA[A pending addition to the global set of deprecated keys.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.conf.Configuration.DeprecationDelta -->
-  <!-- start class org.apache.hadoop.conf.Configuration.IntegerRanges -->
-  <class name="Configuration.IntegerRanges" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="java.lang.Iterable"/>
-    <constructor name="IntegerRanges"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <constructor name="IntegerRanges" type="java.lang.String"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="isIncluded" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="value" type="int"/>
-      <doc>
-      <![CDATA[Is the given value in the set of ranges?
- @param value the value to check
- @return true if the value falls in one of the ranges]]>
-      </doc>
-    </method>
-    <method name="isEmpty" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[@return true if there are no values in this range, else false.]]>
-      </doc>
-    </method>
-    <method name="toString" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="iterator" return="java.util.Iterator"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <doc>
-    <![CDATA[A class that represents a set of positive integer ranges. It parses
- strings of the form: "2-3,5,7-" where ranges are separated by comma and
- the lower/upper bounds are separated by dash. Either the lower or upper
- bound may be omitted meaning all values up to or over. So the string
- above means 2, 3, 5, and 7, 8, 9, ...]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.conf.Configuration.IntegerRanges -->
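A minimal sketch of the range grammar documented in the CDATA above, against the 2.7.x Configuration.IntegerRanges API; the range string and the checked values are illustrative:

    import org.apache.hadoop.conf.Configuration;

    public class RangeDemo {
      public static void main(String[] args) {
        // "2-3,5,7-" means 2, 3, 5, and everything from 7 upward
        Configuration.IntegerRanges ranges =
            new Configuration.IntegerRanges("2-3,5,7-");
        System.out.println(ranges.isIncluded(3)); // true: inside "2-3"
        System.out.println(ranges.isIncluded(4)); // false: in no range
        System.out.println(ranges.isIncluded(9)); // true: "7-" is open-ended
        System.out.println(ranges.isEmpty());     // false: ranges were given
      }
    }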
   <!-- start class org.apache.hadoop.conf.Configured -->
   <class name="Configured" extends="java.lang.Object"
     abstract="false"
@@ -1668,285 +1583,6 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.conf.Configured -->
-  <!-- start class org.apache.hadoop.conf.ConfServlet.BadFormatException -->
-  <class name="ConfServlet.BadFormatException" extends="java.lang.Exception"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="BadFormatException" type="java.lang.String"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-  </class>
-  <!-- end class org.apache.hadoop.conf.ConfServlet.BadFormatException -->
-  <!-- start interface org.apache.hadoop.conf.Reconfigurable -->
-  <interface name="Reconfigurable"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.conf.Configurable"/>
-    <method name="reconfigureProperty" return="java.lang.String"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="property" type="java.lang.String"/>
-      <param name="newVal" type="java.lang.String"/>
-      <exception name="ReconfigurationException" type="org.apache.hadoop.conf.ReconfigurationException"/>
-      <doc>
-      <![CDATA[Change a configuration property on this object to the value specified
- and return the previous value that the configuration property was set to
- (or null if it was not previously set). If newVal is null, set the property
- to its default value.
-
- If the property cannot be changed, throw a
- {@link ReconfigurationException}.]]>
-      </doc>
-    </method>
-    <method name="isPropertyReconfigurable" return="boolean"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="property" type="java.lang.String"/>
-      <doc>
-      <![CDATA[Return whether a given property is changeable at run time.
-
- If isPropertyReconfigurable returns true for a property,
- then reconfigureProperty should not throw an exception when changing
- this property.]]>
-      </doc>
-    </method>
-    <method name="getReconfigurableProperties" return="java.util.Collection"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Return all the properties that can be changed at run time.]]>
-      </doc>
-    </method>
-    <doc>
-    <![CDATA[Something whose {@link Configuration} can be changed at run time.]]>
-    </doc>
-  </interface>
-  <!-- end interface org.apache.hadoop.conf.Reconfigurable -->
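A short usage sketch of the Reconfigurable contract documented above; the wrapper class, method, and property name are hypothetical:

    import org.apache.hadoop.conf.Reconfigurable;
    import org.apache.hadoop.conf.ReconfigurationException;

    public class ReconfigureDemo {
      // 'node' is any Reconfigurable implementation; the property name is
      // hypothetical and must be one the implementation declares reconfigurable
      public static String changeIfAllowed(Reconfigurable node, String newVal) {
        String property = "service.some.tunable";
        try {
          if (node.isPropertyReconfigurable(property)) {
            // returns the previous value, or null if previously unset
            return node.reconfigureProperty(property, newVal);
          }
        } catch (ReconfigurationException e) {
          // the change was rejected at run time
        }
        return null;
      }
    }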
-  <!-- start class org.apache.hadoop.conf.ReconfigurableBase -->
-  <class name="ReconfigurableBase" extends="org.apache.hadoop.conf.Configured"
-    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.conf.Reconfigurable"/>
-    <constructor name="ReconfigurableBase"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Construct a ReconfigurableBase.]]>
-      </doc>
-    </constructor>
-    <constructor name="ReconfigurableBase" type="org.apache.hadoop.conf.Configuration"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Construct a ReconfigurableBase with the {@link Configuration}
- conf.]]>
-      </doc>
-    </constructor>
-    <method name="setReconfigurationUtil"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="ru" type="org.apache.hadoop.conf.ReconfigurationUtil"/>
-    </method>
-    <method name="getChangedProperties" return="java.util.Collection"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="newConf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="oldConf" type="org.apache.hadoop.conf.Configuration"/>
-    </method>
-    <method name="startReconfigurationTask"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Start a reconfiguration task to reload configuration in background.]]>
-      </doc>
-    </method>
-    <method name="getReconfigurationTaskStatus" return="org.apache.hadoop.conf.ReconfigurationTaskStatus"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="shutdownReconfigurationTask"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="reconfigureProperty" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="true" visibility="public"
-      deprecated="not deprecated">
-      <param name="property" type="java.lang.String"/>
-      <param name="newVal" type="java.lang.String"/>
-      <exception name="ReconfigurationException" type="org.apache.hadoop.conf.ReconfigurationException"/>
-      <doc>
-      <![CDATA[{@inheritDoc}
-
- This method makes the change to this object's {@link Configuration}
- and calls reconfigurePropertyImpl to update internal data structures.
- This method cannot be overridden; subclasses should instead override
- reconfigurePropertyImpl.]]>
-      </doc>
-    </method>
-    <method name="getReconfigurableProperties" return="java.util.Collection"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[{@inheritDoc}
-
- Subclasses must override this.]]>
-      </doc>
-    </method>
-    <method name="isPropertyReconfigurable" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="property" type="java.lang.String"/>
-      <doc>
-      <![CDATA[{@inheritDoc}
-
- Subclasses may wish to override this with a more efficient implementation.]]>
-      </doc>
-    </method>
-    <method name="reconfigurePropertyImpl"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="property" type="java.lang.String"/>
-      <param name="newVal" type="java.lang.String"/>
-      <exception name="ReconfigurationException" type="org.apache.hadoop.conf.ReconfigurationException"/>
-      <doc>
-      <![CDATA[Change a configuration property.
-
- Subclasses must override this. This method applies the change to
- all internal data structures derived from the configuration property
- that is being changed. If this object owns other Reconfigurable objects,
- reconfigureProperty should be called recursively to make sure that
- the configuration of these objects is updated.]]>
-      </doc>
-    </method>
-    <doc>
-    <![CDATA[Utility base class for implementing the Reconfigurable interface.
-
- Subclasses should override reconfigurePropertyImpl to change individual
- properties and getReconfigurableProperties to get all properties that
- can be changed at run time.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.conf.ReconfigurableBase -->
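A minimal subclass sketch of the ReconfigurableBase contract above; the class name, property key, and field are hypothetical:

    import java.util.Arrays;
    import java.util.Collection;
    import org.apache.hadoop.conf.ReconfigurableBase;
    import org.apache.hadoop.conf.ReconfigurationException;

    public class ThrottledService extends ReconfigurableBase {
      // hypothetical runtime-tunable setting
      private volatile int bandwidthMbps = 10;

      @Override
      public Collection<String> getReconfigurableProperties() {
        return Arrays.asList("service.bandwidth.mbps");
      }

      @Override
      protected void reconfigurePropertyImpl(String property, String newVal)
          throws ReconfigurationException {
        // apply the change to the internal state derived from the property
        bandwidthMbps = Integer.parseInt(newVal);
      }
    }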
-  <!-- start class org.apache.hadoop.conf.ReconfigurationException -->
-  <class name="ReconfigurationException" extends="java.lang.Exception"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="ReconfigurationException"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Create a new instance of {@link ReconfigurationException}.]]>
-      </doc>
-    </constructor>
-    <constructor name="ReconfigurationException" type="java.lang.String, java.lang.String, java.lang.String, java.lang.Throwable"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Create a new instance of {@link ReconfigurationException}.]]>
-      </doc>
-    </constructor>
-    <constructor name="ReconfigurationException" type="java.lang.String, java.lang.String, java.lang.String"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Create a new instance of {@link ReconfigurationException}.]]>
-      </doc>
-    </constructor>
-    <method name="getProperty" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Get property that cannot be changed.]]>
-      </doc>
-    </method>
-    <method name="getNewValue" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Get value to which property was supposed to be changed.]]>
-      </doc>
-    </method>
-    <method name="getOldValue" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Get old value of property that cannot be changed.]]>
-      </doc>
-    </method>
-    <doc>
-    <![CDATA[Exception indicating that a configuration property cannot be changed
- at run time.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.conf.ReconfigurationException -->
-  <!-- start class org.apache.hadoop.conf.ReconfigurationServlet -->
-  <class name="ReconfigurationServlet" extends="javax.servlet.http.HttpServlet"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="ReconfigurationServlet"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="init"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="ServletException" type="javax.servlet.ServletException"/>
-    </method>
-    <method name="doGet"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="req" type="javax.servlet.http.HttpServletRequest"/>
-      <param name="resp" type="javax.servlet.http.HttpServletResponse"/>
-      <exception name="ServletException" type="javax.servlet.ServletException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="doPost"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-      <param name="req" type="javax.servlet.http.HttpServletRequest"/>
-      <param name="resp" type="javax.servlet.http.HttpServletResponse"/>
-      <exception name="ServletException" type="javax.servlet.ServletException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <field name="CONF_SERVLET_RECONFIGURABLE_PREFIX" type="java.lang.String"
-      transient="false" volatile="false"
-      static="true" final="true" visibility="public"
-      deprecated="not deprecated">
-    </field>
-    <doc>
-    <![CDATA[A servlet for changing a node's configuration.
-
- Reloads the configuration file, verifies whether changes are
- possible and asks the admin to approve the change.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.conf.ReconfigurationServlet -->
   <!-- start class org.apache.hadoop.conf.ReconfigurationTaskStatus -->
   <class name="ReconfigurationTaskStatus" extends="java.lang.Object"
     abstract="false"
@@ -1992,221 +1628,13 @@
     </method>
   </class>
   <!-- end class org.apache.hadoop.conf.ReconfigurationTaskStatus -->
-  <!-- start class org.apache.hadoop.conf.ReconfigurationUtil -->
-  <class name="ReconfigurationUtil" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="ReconfigurationUtil"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="getChangedProperties" return="java.util.Collection"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="newConf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="oldConf" type="org.apache.hadoop.conf.Configuration"/>
-    </method>
-    <method name="parseChangedProperties" return="java.util.Collection"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="newConf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="oldConf" type="org.apache.hadoop.conf.Configuration"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.conf.ReconfigurationUtil -->
-  <!-- start class org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange -->
-  <class name="ReconfigurationUtil.PropertyChange" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="PropertyChange" type="java.lang.String, java.lang.String, java.lang.String"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <field name="prop" type="java.lang.String"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </field>
-    <field name="oldVal" type="java.lang.String"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </field>
-    <field name="newVal" type="java.lang.String"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </field>
-  </class>
-  <!-- end class org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange -->
   <doc>
   <![CDATA[Configuration of system parameters.]]>
   </doc>
 </package>
 <package name="org.apache.hadoop.crypto">
-  <!-- start class org.apache.hadoop.crypto.UnsupportedCodecException -->
-  <class name="UnsupportedCodecException" extends="java.lang.RuntimeException"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="UnsupportedCodecException"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Default constructor]]>
-      </doc>
-    </constructor>
-    <constructor name="UnsupportedCodecException" type="java.lang.String"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Constructs an UnsupportedCodecException with the specified
- detail message.
-
- @param message the detail message]]>
-      </doc>
-    </constructor>
-    <constructor name="UnsupportedCodecException" type="java.lang.String, java.lang.Throwable"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Constructs a new exception with the specified detail message and
- cause.
-
- @param message the detail message
- @param cause the cause]]>
-      </doc>
-    </constructor>
-    <constructor name="UnsupportedCodecException" type="java.lang.Throwable"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[Constructs a new exception with the specified cause.
-
- @param cause the cause]]>
-      </doc>
-    </constructor>
-    <doc>
-    <![CDATA[Thrown to indicate that the specific codec is not supported.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.crypto.UnsupportedCodecException -->
 </package>
 <package name="org.apache.hadoop.crypto.key">
-  <!-- start class org.apache.hadoop.crypto.key.CachingKeyProvider -->
-  <class name="CachingKeyProvider" extends="org.apache.hadoop.crypto.key.KeyProviderExtension"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="CachingKeyProvider" type="org.apache.hadoop.crypto.key.KeyProvider, long, long"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="getCurrentKey" return="org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getKeyVersion" return="org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="versionName" type="java.lang.String"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="deleteKey"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="rollNewVersion" return="org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
-      <param name="material" type="byte[]"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="rollNewVersion" return="org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
-      <exception name="NoSuchAlgorithmException" type="java.security.NoSuchAlgorithmException"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getMetadata" return="org.apache.hadoop.crypto.key.KeyProvider.Metadata"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <doc>
-    <![CDATA[A <code>KeyProviderExtension</code> implementation providing a short-lived
- cache for <code>KeyVersions</code> and <code>Metadata</code> to avoid bursts
- of requests hitting the underlying <code>KeyProvider</code>.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.crypto.key.CachingKeyProvider -->
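A brief sketch of wrapping an existing provider in the CachingKeyProvider removed above; the two long constructor arguments are assumed to be cache timeouts in milliseconds, and the key name is hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.crypto.key.CachingKeyProvider;
    import org.apache.hadoop.crypto.key.KeyProvider;

    public class CachedLookup {
      public static KeyProvider.KeyVersion currentOf(KeyProvider underlying)
          throws IOException {
        // repeated lookups within the timeout are served from the cache
        KeyProvider cached =
            new CachingKeyProvider(underlying, 10 * 60 * 1000, 10 * 60 * 1000);
        return cached.getCurrentKey("myKeyName");
      }
    }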
-  <!-- start class org.apache.hadoop.crypto.key.JavaKeyStoreProvider.Factory -->
-  <class name="JavaKeyStoreProvider.Factory" extends="org.apache.hadoop.crypto.key.KeyProviderFactory"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="Factory"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="createProvider" return="org.apache.hadoop.crypto.key.KeyProvider"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="providerName" type="java.net.URI"/>
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <doc>
-    <![CDATA[The factory to create JksProviders, which is used by the ServiceLoader.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.crypto.key.JavaKeyStoreProvider.Factory -->
-  <!-- start class org.apache.hadoop.crypto.key.JavaKeyStoreProvider.KeyMetadata -->
-  <class name="JavaKeyStoreProvider.KeyMetadata" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="java.security.Key"/>
-    <implements name="java.io.Serializable"/>
-    <method name="getAlgorithm" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getFormat" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getEncoded" return="byte[]"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <doc>
-    <![CDATA[An adapter between a KeyStore Key and our Metadata. This is used to store
- the metadata in a KeyStore even though it isn't really a key.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.crypto.key.JavaKeyStoreProvider.KeyMetadata -->
   <!-- start class org.apache.hadoop.crypto.key.KeyProvider -->
   <class name="KeyProvider" extends="java.lang.Object"
     abstract="true"
@@ -2523,1648 +1951,1558 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.crypto.key.KeyProvider -->
-  <!-- start class org.apache.hadoop.crypto.key.KeyProvider.KeyVersion -->
-  <class name="KeyProvider.KeyVersion" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="public"
+  <!-- start class org.apache.hadoop.crypto.key.KeyProviderFactory -->
+  <class name="KeyProviderFactory" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
     deprecated="not deprecated">
-    <constructor name="KeyVersion" type="java.lang.String, java.lang.String, byte[]"
-      static="false" final="false" visibility="protected"
+    <constructor name="KeyProviderFactory"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </constructor>
-    <method name="getName" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
+    <method name="createProvider" return="org.apache.hadoop.crypto.key.KeyProvider"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
+      <param name="providerName" type="java.net.URI"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
     </method>
-    <method name="getVersionName" return="java.lang.String"
+    <method name="getProviders" return="java.util.List"
       abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
+      static="true" final="false" visibility="public"
       deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
     </method>
-    <method name="getMaterial" return="byte[]"
+    <method name="get" return="org.apache.hadoop.crypto.key.KeyProvider"
       abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
+      static="true" final="false" visibility="public"
       deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a KeyProvider based on a provided URI.
+
+ @param uri key provider URI
+ @param conf configuration to initialize the key provider
+ @return the key provider for the specified URI, or <code>null</code> if
+         a provider for the specified URI scheme could not be found.
+ @throws IOException thrown if the provider failed to initialize.]]>
+      </doc>
     </method>
-    <method name="toString" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
+    <field name="KEY_PROVIDER_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
       deprecated="not deprecated">
-    </method>
+    </field>
     <doc>
-    <![CDATA[The combination of both the key version name and the key material.]]>
+    <![CDATA[A factory to create a list of KeyProviders based on the path given in a
+ Configuration. It uses a service loader interface to find the available
+ KeyProviders and create them based on the list of URIs.]]>
     </doc>
   </class>
-  <!-- end class org.apache.hadoop.crypto.key.KeyProvider.KeyVersion -->
-  <!-- start class org.apache.hadoop.crypto.key.KeyProvider.Metadata -->
-  <class name="KeyProvider.Metadata" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="public"
+  <!-- end class org.apache.hadoop.crypto.key.KeyProviderFactory -->
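A usage sketch for the factory method documented above; the jceks URI and file path are illustrative:

    import java.io.IOException;
    import java.net.URI;
    import java.net.URISyntaxException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderFactory;

    public class KeyProviderLookup {
      public static void main(String[] args)
          throws IOException, URISyntaxException {
        Configuration conf = new Configuration();
        KeyProvider provider = KeyProviderFactory.get(
            new URI("jceks://file/tmp/test.jceks"), conf);
        if (provider == null) {
          System.err.println("no provider registered for this URI scheme");
        }
      }
    }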
+</package>
+<package name="org.apache.hadoop.crypto.key.kms">
+</package>
+<package name="org.apache.hadoop.crypto.random">
+</package>
+<package name="org.apache.hadoop.fs">
+  <!-- start class org.apache.hadoop.fs.AbstractFileSystem -->
+  <class name="AbstractFileSystem" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
     deprecated="not deprecated">
-    <constructor name="Metadata" type="java.lang.String, int, java.lang.String, java.util.Map, java.util.Date, int"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </constructor>
-    <constructor name="Metadata" type="byte[]"
-      static="false" final="false" visibility="protected"
+    <constructor name="AbstractFileSystem" type="java.net.URI, java.lang.String, boolean, int"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="URISyntaxException" type="java.net.URISyntaxException"/>
       <doc>
-      <![CDATA[Deserialize a new metadata object from a set of bytes.
- @param bytes the serialized metadata
- @throws IOException]]>
+      <![CDATA[Constructor to be called by subclasses.
+
+ @param uri for this file system.
+ @param supportedScheme the scheme supported by the implementor
+ @param authorityNeeded if true then the URI must have authority, if false
+          then the URI must have null authority.
+
+ @throws URISyntaxException <code>uri</code> has a syntax error]]>
       </doc>
     </constructor>
-    <method name="toString" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getDescription" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getCreated" return="java.util.Date"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getCipher" return="java.lang.String"
+    <method name="getStatistics" return="org.apache.hadoop.fs.FileSystem.Statistics"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </method>
-    <method name="getAttributes" return="java.util.Map"
+    <method name="isValidName" return="boolean"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
+      <param name="src" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns true if the specified string is considered valid in the path part
+ of a URI by this file system.  The default implementation enforces the rules
+ of HDFS, but subclasses may override this method to implement specific
+ validation rules for specific file systems.
+
+ @param src String source filename to check, path part of the URI
+ @return boolean true if the specified string is considered valid]]>
+      </doc>
     </method>
-    <method name="getAlgorithm" return="java.lang.String"
+    <method name="createFileSystem" return="org.apache.hadoop.fs.AbstractFileSystem"
       abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
+      static="true" final="false" visibility="public"
       deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
       <doc>
-      <![CDATA[Get the algorithm from the cipher.
- @return the algorithm name]]>
+      <![CDATA[Create a file system instance for the specified uri using the conf. The
+ conf is used to find the class name that implements the file system. The
+ conf is also passed to the file system for its configuration.
+
+ @param uri URI of the file system
+ @param conf Configuration for the file system
+
+ @return Returns the file system for the given URI
+
+ @throws UnsupportedFileSystemException file system for <code>uri</code> is
+           not found]]>
       </doc>
     </method>
-    <method name="getBitLength" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getVersions" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
+    <method name="getStatistics" return="org.apache.hadoop.fs.FileSystem.Statistics"
+      abstract="false" native="false" synchronized="true"
+      static="true" final="false" visibility="protected"
       deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+      <doc>
+      <![CDATA[Get the statistics for a particular file system.
+
+ @param uri
+          used as key to lookup STATISTICS_TABLE. Only scheme and authority
+          part of the uri are used.
+ @return a statistics object]]>
+      </doc>
     </method>
-    <method name="addVersion" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
+    <method name="clearStatistics"
+      abstract="false" native="false" synchronized="true"
+      static="true" final="false" visibility="public"
       deprecated="not deprecated">
     </method>
-    <method name="serialize" return="byte[]"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
+    <method name="printStatistics"
+      abstract="false" native="false" synchronized="true"
+      static="true" final="false" visibility="public"
       deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Serialize the metadata to a set of bytes.
- @return the serialized bytes
- @throws IOException]]>
+      <![CDATA[Prints statistics for all file systems.]]>
       </doc>
     </method>
-    <doc>
-    <![CDATA[Key metadata that is associated with the key.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.crypto.key.KeyProvider.Metadata -->
-  <!-- start class org.apache.hadoop.crypto.key.KeyProvider.Options -->
-  <class name="KeyProvider.Options" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="Options" type="org.apache.hadoop.conf.Configuration"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="setCipher" return="org.apache.hadoop.crypto.key.KeyProvider.Options"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
+    <method name="getAllStatistics" return="java.util.Map"
+      abstract="false" native="false" synchronized="true"
+      static="true" final="false" visibility="protected"
       deprecated="not deprecated">
-      <param name="cipher" type="java.lang.String"/>
     </method>
-    <method name="setBitLength" return="org.apache.hadoop.crypto.key.KeyProvider.Options"
+    <method name="get" return="org.apache.hadoop.fs.AbstractFileSystem"
       abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
+      static="true" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="bitLength" type="int"/>
+      <param name="uri" type="java.net.URI"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <doc>
+      <![CDATA[The main factory method for creating a file system. Get a file system for
+ the URI's scheme and authority. The scheme of the <code>uri</code>
+ determines a configuration property name,
+ <tt>fs.AbstractFileSystem.<i>scheme</i>.impl</tt> whose value names the
+ AbstractFileSystem class.
+
+ The entire URI and conf are passed to the AbstractFileSystem factory method.
+
+ @param uri for the file system to be created.
+ @param conf which is passed to the file system impl.
+
+ @return file system for the given URI.
+
+ @throws UnsupportedFileSystemException if the file system for
+           <code>uri</code> is not supported.]]>
+      </doc>
     </method>
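A sketch of the factory call documented above; the namenode authority is hypothetical:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.AbstractFileSystem;

    public class AbstractFsLookup {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // the "hdfs" scheme selects the property
        // fs.AbstractFileSystem.hdfs.impl, whose value names the
        // AbstractFileSystem implementation class to instantiate
        AbstractFileSystem afs = AbstractFileSystem.get(
            new URI("hdfs://namenode.example.com:8020/"), conf);
        System.out.println(afs.getUri());
      }
    }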
-    <method name="setDescription" return="org.apache.hadoop.crypto.key.KeyProvider.Options"
+    <method name="checkScheme"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="description" type="java.lang.String"/>
+      <param name="uri" type="java.net.URI"/>
+      <param name="supportedScheme" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Check that the URI's scheme matches the supported scheme.
+ @param uri
+ @param supportedScheme]]>
+      </doc>
     </method>
-    <method name="setAttributes" return="org.apache.hadoop.crypto.key.KeyProvider.Options"
-      abstract="false" native="false" synchronized="false"
+    <method name="getUriDefaultPort" return="int"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="attributes" type="java.util.Map"/>
+      <doc>
+      <![CDATA[The default port of this file system.
+
+ @return the default port of this file system's URI scheme;
+         a URI with a port of -1 implies the default port]]>
+      </doc>
     </method>
-    <method name="getCipher" return="java.lang.String"
+    <method name="getUri" return="java.net.URI"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns a URI whose scheme and authority identify this FileSystem.
+
+ @return the uri of this file system.]]>
+      </doc>
     </method>
-    <method name="getBitLength" return="int"
+    <method name="checkPath"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Check that a Path belongs to this FileSystem.
+
+ If the path is a fully qualified URI, then its scheme and authority
+ must match those of this file system. Otherwise the path must be a
+ slash-relative name.
+
+ @throws InvalidPathException if the path is invalid]]>
+      </doc>
     </method>
-    <method name="getDescription" return="java.lang.String"
+    <method name="getUriPath" return="java.lang.String"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Get the path-part of a pathname. Checks that the URI matches this file system
+ and that the path-part is a valid name.
+
+ @param p path
+
+ @return path-part of the Path p]]>
+      </doc>
     </method>
-    <method name="getAttributes" return="java.util.Map"
+    <method name="makeQualified" return="org.apache.hadoop.fs.Path"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Make the path fully qualified with respect to this file system.
+ @param path
+ @return the qualified path]]>
+      </doc>
     </method>
-    <method name="toString" return="java.lang.String"
+    <method name="getInitialWorkingDirectory" return="org.apache.hadoop.fs.Path"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-    </method>
-    <doc>
-    <![CDATA[Options when creating key objects.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.crypto.key.KeyProvider.Options -->
-  <!-- start interface org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension -->
-  <interface name="KeyProviderCryptoExtension.CryptoExtension"    abstract="true"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.crypto.key.KeyProviderExtension.Extension"/>
-    <method name="warmUpEncryptedKeys"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="keyNames" type="java.lang.String[]"/>
-      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Calls to this method allow the underlying KeyProvider to warm up any
- implementation-specific caches used to store the Encrypted Keys.
- @param keyNames Array of Key Names]]>
+      <![CDATA[Some file systems like LocalFileSystem have an initial workingDir
+ that is used as the starting workingDir. For other file systems
+ like HDFS there is no built-in notion of an initial workingDir.
+
+ @return the initial workingDir if the file system has such a notion,
+         otherwise null.]]>
       </doc>
     </method>
-    <method name="drain"
-      abstract="true" native="false" synchronized="false"
+    <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="keyName" type="java.lang.String"/>
       <doc>
-      <![CDATA[Drains the Queue for the provided key.
+      <![CDATA[Return the current user's home directory in this file system.
+ The default implementation returns "/user/$USER/".
 
- @param keyName the key to drain the Queue for]]>
+ @return current user's home directory.]]>
       </doc>
     </method>
-    <method name="generateEncryptedKey" return="org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion"
+    <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
       abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="encryptionKeyName" type="java.lang.String"/>
       <exception name="IOException" type="java.io.IOException"/>
-      <exception name="GeneralSecurityException" type="java.security.GeneralSecurityException"/>
       <doc>
-      <![CDATA[Generates key material and encrypts it using the given key version name
- and initialization vector. The generated key material is of the same
- length as the <code>KeyVersion</code> material of the latest key version
- of the key and is encrypted using the same cipher.
- <p/>
- NOTE: The generated key is not stored by the <code>KeyProvider</code>
+      <![CDATA[Return a set of server default configuration values.
 
- @param encryptionKeyName
-          The latest KeyVersion of this key's material will be encrypted.
- @return EncryptedKeyVersion with the generated key material, the version
-         name is 'EEK' (for Encrypted Encryption Key)
- @throws IOException
-           thrown if the key material could not be generated
- @throws GeneralSecurityException
-           thrown if the key material could not be encrypted because of a
-           cryptographic issue.]]>
+ @return server default configuration values
+
+ @throws IOException an I/O error occurred]]>
       </doc>
     </method>
-    <method name="decryptEncryptedKey" return="org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
-      abstract="true" native="false" synchronized="false"
+    <method name="resolvePath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="encryptedKeyVersion" type="org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion"/>
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
       <exception name="IOException" type="java.io.IOException"/>
-      <exception name="GeneralSecurityException" type="java.security.GeneralSecurityException"/>
       <doc>
-      <![CDATA[Decrypts an encrypted byte[] key material using the given a key version
- name and initialization vector.
-
- @param encryptedKeyVersion
-          contains keyVersionName and IV to decrypt the encrypted key
-          material
- @return a KeyVersion with the decrypted key material, the version name is
-         'EK' (For Encryption Key)
- @throws IOException
-           thrown if the key material could not be decrypted
- @throws GeneralSecurityException
-           thrown if the key material could not be decrypted because of a
-           cryptographic issue.]]>
+      <![CDATA[Return the fully-qualified path of path f, resolving the path
+ through any internal symlinks or mount points.
+ @param p path to be resolved
+ @return fully qualified path
+ @throws FileNotFoundException, AccessControlException, IOException,
+         UnresolvedLinkException if a symbolic link on the path cannot be
+         resolved internally]]>
       </doc>
     </method>
-    <doc>
-    <![CDATA[CryptoExtension is a type of Extension that exposes methods to generate
- EncryptedKeys and to decrypt the same.]]>
-    </doc>
-  </interface>
-  <!-- end interface org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension -->
-  <!-- start class org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion -->
-  <class name="KeyProviderCryptoExtension.EncryptedKeyVersion" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="EncryptedKeyVersion" type="java.lang.String, java.lang.String, byte[], org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
-      static="false" final="false" visibility="protected"
+    <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
       deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="createFlag" type="java.util.EnumSet"/>
+      <param name="opts" type="org.apache.hadoop.fs.Options.CreateOpts[]"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="ParentNotDirectoryException" type="org.apache.hadoop.fs.ParentNotDirectoryException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Create a new EncryptedKeyVersion.
-
- @param keyName                  Name of the encryption key used to
-                                 encrypt the encrypted key.
- @param encryptionKeyVersionName Version name of the encryption key used
-                                 to encrypt the encrypted key.
- @param encryptedKeyIv           Initialization vector of the encrypted
-                                 key. The IV of the encryption key used to
-                                 encrypt the encrypted key is derived from
-                                 this IV.
- @param encryptedKeyVersion      The encrypted encryption key version.]]>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#create(Path, EnumSet, Options.CreateOpts...)} except
+ that the Path f must be fully qualified and the permission is absolute
+ (i.e. umask has been applied).]]>
       </doc>
-    </constructor>
-    <method name="createForDecryption" return="org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+    </method>
+    <method name="createInternal" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="keyName" type="java.lang.String"/>
-      <param name="encryptionKeyVersionName" type="java.lang.String"/>
-      <param name="encryptedKeyIv" type="byte[]"/>
-      <param name="encryptedKeyMaterial" type="byte[]"/>
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="flag" type="java.util.EnumSet"/>
+      <param name="absolutePermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <param name="checksumOpt" type="org.apache.hadoop.fs.Options.ChecksumOpt"/>
+      <param name="createParent" type="boolean"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="ParentNotDirectoryException" type="org.apache.hadoop.fs.ParentNotDirectoryException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Factory method to create a new EncryptedKeyVersion that can then be
- passed into {@link #decryptEncryptedKey}. Note that the fields of the
- returned EncryptedKeyVersion will only partially be populated; it is not
- necessarily suitable for operations besides decryption.
-
- @param keyName Key name of the encryption key use to encrypt the
-                encrypted key.
- @param encryptionKeyVersionName Version name of the encryption key used
-                                 to encrypt the encrypted key.
- @param encryptedKeyIv           Initialization vector of the encrypted
-                                 key. The IV of the encryption key used to
-                                 encrypt the encrypted key is derived from
-                                 this IV.
- @param encryptedKeyMaterial     Key material of the encrypted key.
- @return EncryptedKeyVersion suitable for decryption.]]>
+      <![CDATA[The specification of this method matches that of
+ {@link #create(Path, EnumSet, Options.CreateOpts...)} except that the opts
+ have been declared explicitly.]]>
       </doc>
     </method>
-    <method name="getEncryptionKeyName" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
+    <method name="mkdir"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
+      <param name="dir" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <param name="createParent" type="boolean"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[@return Name of the encryption key used to encrypt the encrypted key.]]>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#mkdir(Path, FsPermission, boolean)} except that the Path
+ f must be fully qualified and the permission is absolute (i.e.
+ umask has been applied).]]>
       </doc>
     </method>
-    <method name="getEncryptionKeyVersionName" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
+    <method name="delete" return="boolean"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="recursive" type="boolean"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[@return Version name of the encryption key used to encrypt the encrypted
- key.]]>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#delete(Path, boolean)} except that Path f must be for
+ this file system.]]>
       </doc>
     </method>
-    <method name="getEncryptedKeyIv" return="byte[]"
+    <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[@return Initialization vector of the encrypted key. The IV of the
- encryption key used to encrypt the encrypted key is derived from this
- IV.]]>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#open(Path)} except that Path f must be for this
+ file system.]]>
       </doc>
     </method>
-    <method name="getEncryptedKeyVersion" return="org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
-      abstract="false" native="false" synchronized="false"
+    <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="bufferSize" type="int"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[@return The encrypted encryption key version.]]>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#open(Path, int)} except that Path f must be for this
+ file system.]]>
       </doc>
     </method>
-    <method name="deriveIV" return="byte[]"
+    <method name="truncate" return="boolean"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="protected"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="encryptedKeyIV" type="byte[]"/>
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="newLength" type="long"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Derive the initialization vector (IV) for the encryption key from the IV
- of the encrypted key. This derived IV is used with the encryption key to
- decrypt the encrypted key.
- <p/>
- The alternative to this is using the same IV for both the encryption key
- and the encrypted key. Even a simple symmetric transformation like this
- improves security by avoiding IV re-use. IVs will also be fairly unique
- among different EEKs.
-
- @param encryptedKeyIV of the encrypted key (i.e. {@link
- #getEncryptedKeyIv()})
- @return IV for the encryption key]]>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#truncate(Path, long)} except that Path f must be for
+ this file system.]]>
       </doc>
     </method>
-    <doc>
-    <![CDATA[An encrypted encryption key (EEK) and related information. An EEK must be
- decrypted using the key's encryption key before it can be used.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion -->
-  <!-- start class org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension -->
-  <class name="KeyProviderDelegationTokenExtension" extends="org.apache.hadoop.crypto.key.KeyProviderExtension"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="addDelegationTokens" return="org.apache.hadoop.security.token.Token[]"
-      abstract="false" native="false" synchronized="false"
+    <method name="setReplication" return="boolean"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="renewer" type="java.lang.String"/>
-      <param name="credentials" type="org.apache.hadoop.security.Credentials"/>
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="replication" type="short"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
       <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Passes the renewer and Credentials object to the underlying
- {@link DelegationTokenExtension}
- @param renewer the user allowed to renew the delegation tokens
- @param credentials cache in which to add new delegation tokens
- @return list of new delegation tokens
- @throws IOException thrown if IOException if an IO error occurs.]]>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#setReplication(Path, short)} except that Path f must be
+ for this file system.]]>
       </doc>
     </method>
-    <method name="createKeyProviderDelegationTokenExtension" return="org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension"
+    <method name="rename"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="true" visibility="public"
       deprecated="not deprecated">
-      <param name="keyProvider" type="org.apache.hadoop.crypto.key.KeyProvider"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <param name="options" type="org.apache.hadoop.fs.Options.Rename[]"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="ParentNotDirectoryException" type="org.apache.hadoop.fs.ParentNotDirectoryException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Creates a <code>KeyProviderDelegationTokenExtension</code> using a given
- {@link KeyProvider}.
- <p/>
- If the given <code>KeyProvider</code> implements the
- {@link DelegationTokenExtension} interface the <code>KeyProvider</code>
- itself will provide the extension functionality, otherwise a default
- extension implementation will be used.
-
- @param keyProvider <code>KeyProvider</code> to use to create the
- <code>KeyProviderDelegationTokenExtension</code> extension.
- @return a <code>KeyProviderDelegationTokenExtension</code> instance
- using the given <code>KeyProvider</code>.]]>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
+ f must be for this file system.]]>
       </doc>
     </method>
-    <doc>
-    <![CDATA[A KeyProvider extension with the ability to add a renewer's Delegation
- Tokens to the provided Credentials.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension -->
-  <!-- start interface org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension.DelegationTokenExtension -->
-  <interface name="KeyProviderDelegationTokenExtension.DelegationTokenExtension"    abstract="true"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.crypto.key.KeyProviderExtension.Extension"/>
-    <method name="addDelegationTokens" return="org.apache.hadoop.security.token.Token[]"
+    <method name="renameInternal"
       abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="renewer" type="java.lang.String"/>
-      <param name="credentials" type="org.apache.hadoop.security.Credentials"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="ParentNotDirectoryException" type="org.apache.hadoop.fs.ParentNotDirectoryException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
       <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[The implementer of this class will take a renewer and add all
- delegation tokens associated with the renewer to the
- <code>Credentials</code> object if it is not already present,
- @param renewer the user allowed to renew the delegation tokens
- @param credentials cache in which to add new delegation tokens
- @return list of new delegation tokens
- @throws IOException thrown if IOException if an IO error occurs.]]>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
+ f must be for this file system and NO OVERWRITE is performed.
+
+ File systems that do not have a built-in overwrite need to implement only
+ this method and can take advantage of the default implementation of the
+ other {@link #renameInternal(Path, Path, boolean)}.]]>
       </doc>
     </method>
-    <doc>
-    <![CDATA[DelegationTokenExtension is a type of Extension that exposes methods to
- needed to work with Delegation Tokens.]]>
-    </doc>
-  </interface>
-  <!-- end interface org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension.DelegationTokenExtension -->
-  <!-- start class org.apache.hadoop.crypto.key.KeyProviderExtension -->
-  <class name="KeyProviderExtension" extends="org.apache.hadoop.crypto.key.KeyProvider"
-    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="KeyProviderExtension" type="org.apache.hadoop.crypto.key.KeyProvider, E"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="getExtension" return="E"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
-      deprecated="not deprecated">
-    </method>
-    <method name="getKeyProvider" return="org.apache.hadoop.crypto.key.KeyProvider"
+    <method name="renameInternal"
       abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="protected"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <param name="overwrite" type="boolean"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="ParentNotDirectoryException" type="org.apache.hadoop.fs.ParentNotDirectoryException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
+ f must be for this file system.]]>
+      </doc>
     </method>
-    <method name="isTransient" return="boolean"
+    <method name="supportsSymlinks" return="boolean"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns true if the file system supports symlinks, false otherwise.
+ @return true if filesystem supports symlinks]]>
+      </doc>
     </method>
-    <method name="getKeysMetadata" return="org.apache.hadoop.crypto.key.KeyProvider.Metadata[]"
+    <method name="createSymlink"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="names" type="java.lang.String[]"/>
+      <param name="target" type="org.apache.hadoop.fs.Path"/>
+      <param name="link" type="org.apache.hadoop.fs.Path"/>
+      <param name="createParent" type="boolean"/>
       <exception name="IOException" type="java.io.IOException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#createSymlink(Path, Path, boolean)}.]]>
+      </doc>
     </method>
-    <method name="getCurrentKey" return="org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
+    <method name="getLinkTarget" return="org.apache.hadoop.fs.Path"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
       <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Partially resolves the path. This is used during symlink resolution in
+ {@link FSLinkResolver}, and differs from the similarly named method
+ {@link FileContext#getLinkTarget(Path)}.
+ @throws IOException subclass implementations may throw IOException]]>
+      </doc>
     </method>
-    <method name="createKey" return="org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
-      abstract="false" native="false" synchronized="false"
+    <method name="setPermission"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
-      <param name="options" type="org.apache.hadoop.crypto.key.KeyProvider.Options"/>
-      <exception name="NoSuchAlgorithmException" type="java.security.NoSuchAlgorithmException"/>
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
       <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#setPermission(Path, FsPermission)} except that Path f
+ must be for this file system.]]>
+      </doc>
     </method>
-    <method name="rollNewVersion" return="org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
-      abstract="false" native="false" synchronized="false"
+    <method name="setOwner"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
-      <exception name="NoSuchAlgorithmException" type="java.security.NoSuchAlgorithmException"/>
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="username" type="java.lang.String"/>
+      <param name="groupname" type="java.lang.String"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
       <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#setOwner(Path, String, String)} except that Path f must
+ be for this file system.]]>
+      </doc>
     </method>
-    <method name="getKeyVersion" return="org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
-      abstract="false" native="false" synchronized="false"
+    <method name="setTimes"
+      abstract="true" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="versionName" type="java.lang.String"/>
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+    

<TRUNCATED>
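
The JDiff listing above tracks the AbstractFileSystem methods whose
specifications defer to FileContext: create, mkdir, delete, open, rename,
setPermission, setOwner and the symlink calls. For orientation, here is a
minimal sketch of a client exercising that contract through FileContext; the
paths, permission values and class name are illustrative assumptions, not
part of the recorded API diff.

import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class FileContextSketch {
  public static void main(String[] args) throws IOException {
    // Bind to the default file system named in the client configuration.
    FileContext fc = FileContext.getFileContext();

    // FileContext#mkdir applies the umask before delegating to
    // AbstractFileSystem#mkdir, which expects an absolute permission.
    Path dir = new Path("/tmp/fc-sketch");
    fc.mkdir(dir, FsPermission.getDirDefault(), true /* createParent */);

    // create takes CreateFlags plus optional Options.CreateOpts varargs.
    Path file = new Path(dir, "data.txt");
    try (FSDataOutputStream out =
        fc.create(file, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE))) {
      out.writeUTF("hello");
    }

    // open, rename and delete follow the matching FileContext specifications.
    try (FSDataInputStream in = fc.open(file)) {
      System.out.println(in.readUTF());
    }
    fc.rename(file, new Path(dir, "renamed.txt"), Options.Rename.OVERWRITE);
    fc.delete(dir, true /* recursive */);

    System.out.println("home = " + fc.getHomeDirectory());
  }
}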


[50/57] [abbrv] hadoop git commit: YARN-5678. Log demand as demand in FSLeafQueue and FSParentQueue. (Yufei Gu via kasha)

Posted by in...@apache.org.
YARN-5678. Log demand as demand in FSLeafQueue and FSParentQueue. (Yufei Gu via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82857037
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82857037
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82857037

Branch: refs/heads/HDFS-10467
Commit: 82857037b6e960dccdaf9e6b1b238411498a0dfe
Parents: fe9ebe2
Author: Karthik Kambatla <ka...@apache.org>
Authored: Sun Oct 2 22:09:43 2016 -0700
Committer: Karthik Kambatla <ka...@apache.org>
Committed: Sun Oct 2 22:09:43 2016 -0700

----------------------------------------------------------------------
 .../server/resourcemanager/scheduler/fair/FSLeafQueue.java     | 2 +-
 .../server/resourcemanager/scheduler/fair/FSParentQueue.java   | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82857037/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index a6adb47..9d5bbe5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -295,7 +295,7 @@ public class FSLeafQueue extends FSQueue {
     Resource toAdd = sched.getDemand();
     if (LOG.isDebugEnabled()) {
       LOG.debug("Counting resource from " + sched.getName() + " " + toAdd
-          + "; Total resource consumption for " + getName() + " now "
+          + "; Total resource demand for " + getName() + " now "
           + demand);
     }
     demand = Resources.add(demand, toAdd);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82857037/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
index e58c3f1..d05390b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
@@ -158,13 +158,13 @@ public class FSParentQueue extends FSQueue {
       for (FSQueue childQueue : childQueues) {
         childQueue.updateDemand();
         Resource toAdd = childQueue.getDemand();
+        demand = Resources.add(demand, toAdd);
+        demand = Resources.componentwiseMin(demand, maxShare);
         if (LOG.isDebugEnabled()) {
           LOG.debug("Counting resource from " + childQueue.getName() + " " +
-              toAdd + "; Total resource consumption for " + getName() +
+              toAdd + "; Total resource demand for " + getName() +
               " now " + demand);
         }
-        demand = Resources.add(demand, toAdd);
-        demand = Resources.componentwiseMin(demand, maxShare);
         if (Resources.equals(demand, maxShare)) {
           break;
         }
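
The reordering above is what makes the debug line truthful: demand is
aggregated and clamped to maxShare before the message prints, so the log now
reports the value the scheduler actually uses instead of the previous
iteration's total. A minimal standalone sketch of the corrected loop shape,
with hypothetical child demands:

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

public class DemandAggregationSketch {
  public static void main(String[] args) {
    List<Resource> childDemands = Arrays.asList(
        Resource.newInstance(4096, 4),    // hypothetical child queue demands
        Resource.newInstance(8192, 8));
    Resource maxShare = Resource.newInstance(10240, 10);

    Resource demand = Resources.createResource(0, 0);
    for (Resource toAdd : childDemands) {
      // Update and clamp first, then log, mirroring the patched order.
      demand = Resources.add(demand, toAdd);
      demand = Resources.componentwiseMin(demand, maxShare);
      System.out.println("Counting resource " + toAdd
          + "; total demand now " + demand);
      if (Resources.equals(demand, maxShare)) {
        break;  // demand is capped at the configured max share
      }
    }
  }
}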



[12/57] [abbrv] hadoop git commit: HDFS-10376. Enhance setOwner testing. (John Zhuge via Yongjun Zhang)

Posted by in...@apache.org.
HDFS-10376. Enhance setOwner testing. (John Zhuge via Yongjun Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2acfb1e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2acfb1e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2acfb1e1

Branch: refs/heads/HDFS-10467
Commit: 2acfb1e1e4355246ef707b7c17964871b5dc7a73
Parents: 1831be8
Author: Yongjun Zhang <yz...@cloudera.com>
Authored: Tue Sep 27 14:55:28 2016 -0700
Committer: Yongjun Zhang <yz...@cloudera.com>
Committed: Tue Sep 27 14:55:28 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/security/TestPermission.java  | 131 +++++++++++++++++--
 1 file changed, 117 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2acfb1e1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
index 7efa255..e505642 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.security;
 
+import static org.hamcrest.CoreMatchers.startsWith;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -24,6 +25,7 @@ import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Random;
 
@@ -60,6 +62,11 @@ public class TestPermission {
   final private static Random RAN = new Random();
   final private static String USER_NAME = "user" + RAN.nextInt();
   final private static String[] GROUP_NAMES = {"group1", "group2"};
+  final private static String NOUSER = "nouser";
+  final private static String NOGROUP = "nogroup";
+
+  private FileSystem nnfs;
+  private FileSystem userfs;
 
   static FsPermission checkPermission(FileSystem fs,
       String path, FsPermission expected) throws IOException {
@@ -73,6 +80,12 @@ public class TestPermission {
     return s.getPermission();
   }
 
+  static Path createFile(FileSystem fs, String filename) throws IOException {
+    Path path = new Path(ROOT_PATH, filename);
+    fs.create(path);
+    return path;
+  }
+
   /**
    * Tests backward compatibility. Configuration can be
    * either set with old param dfs.umask that takes decimal umasks
@@ -190,17 +203,10 @@ public class TestPermission {
     cluster.waitActive();
 
     try {
-      FileSystem nnfs = FileSystem.get(conf);
+      nnfs = FileSystem.get(conf);
       // test permissions on files that do not exist
       assertFalse(nnfs.exists(CHILD_FILE1));
       try {
-        nnfs.setOwner(CHILD_FILE1, "foo", "bar");
-        assertTrue(false);
-      }
-      catch(java.io.FileNotFoundException e) {
-        LOG.info("GOOD: got " + e);
-      }
-      try {
         nnfs.setPermission(CHILD_FILE1, new FsPermission((short)0777));
         assertTrue(false);
       }
@@ -262,7 +268,7 @@ public class TestPermission {
       UserGroupInformation userGroupInfo = 
         UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES );
       
-      FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);
+      userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);
 
       // make sure mkdir of a existing directory that is not owned by 
       // this user does not throw an exception.
@@ -286,20 +292,117 @@ public class TestPermission {
       // test permissions on files that do not exist
       assertFalse(userfs.exists(CHILD_FILE3));
       try {
-        userfs.setOwner(CHILD_FILE3, "foo", "bar");
-        fail("setOwner should fail for non-exist file");
-      } catch (java.io.FileNotFoundException ignored) {
-      }
-      try {
         userfs.setPermission(CHILD_FILE3, new FsPermission((short) 0777));
         fail("setPermission should fail for non-exist file");
       } catch (java.io.FileNotFoundException ignored) {
       }
+
+      // Make sure any user can create file in root.
+      nnfs.setPermission(ROOT_PATH, new FsPermission("777"));
+
+      testSuperCanChangeOwnerGroup();
+      testNonSuperCanChangeToOwnGroup();
+      testNonSuperCannotChangeToOtherGroup();
+      testNonSuperCannotChangeGroupForOtherFile();
+      testNonSuperCannotChangeGroupForNonExistentFile();
+      testNonSuperCannotChangeOwner();
+      testNonSuperCannotChangeOwnerForOtherFile();
+      testNonSuperCannotChangeOwnerForNonExistentFile();
     } finally {
       cluster.shutdown();
     }
   }
 
+  private void testSuperCanChangeOwnerGroup() throws Exception {
+    Path file = createFile(userfs, "testSuperCanChangeOwnerGroup");
+    nnfs.setOwner(file, NOUSER, NOGROUP);
+    FileStatus status = nnfs.getFileStatus(file);
+    assertThat("A super user can change owner", status.getOwner(),
+        is(NOUSER));
+    assertThat("A super user can change group", status.getGroup(),
+        is(NOGROUP));
+  }
+
+  private void testNonSuperCanChangeToOwnGroup() throws Exception {
+    Path file = createFile(userfs, "testNonSuperCanChangeToOwnGroup");
+    userfs.setOwner(file, null, GROUP_NAMES[1]);
+    assertThat("A non-super user can change a file to own group",
+        nnfs.getFileStatus(file).getGroup(), is(GROUP_NAMES[1]));
+  }
+
+  private void testNonSuperCannotChangeToOtherGroup() throws Exception {
+    Path file = createFile(userfs, "testNonSuperCannotChangeToOtherGroup");
+    try {
+      userfs.setOwner(file, null, NOGROUP);
+      fail("Expect ACE when a non-super user tries to change a file to a " +
+          "group where the user does not belong.");
+    } catch (AccessControlException e) {
+      assertThat(e.getMessage(), startsWith("User does not belong to"));
+    }
+  }
+
+  private void testNonSuperCannotChangeGroupForOtherFile() throws Exception {
+    Path file = createFile(nnfs, "testNonSuperCannotChangeGroupForOtherFile");
+    nnfs.setPermission(file, new FsPermission("777"));
+    try {
+      userfs.setOwner(file, null, GROUP_NAMES[1]);
+      fail("Expect ACE when a non-super user tries to set group for a file " +
+          "not owned");
+    } catch (AccessControlException e) {
+      assertThat(e.getMessage(), startsWith("Permission denied"));
+    }
+  }
+
+  private void testNonSuperCannotChangeGroupForNonExistentFile()
+      throws Exception {
+    Path file = new Path(ROOT_PATH,
+        "testNonSuperCannotChangeGroupForNonExistentFile");
+    try {
+      userfs.setOwner(file, null, GROUP_NAMES[1]);
+      fail("Expect FNFE when a non-super user tries to change group for a " +
+          "non-existent file");
+    } catch (FileNotFoundException e) {
+    }
+  }
+
+  private void testNonSuperCannotChangeOwner() throws Exception {
+    Path file = createFile(userfs, "testNonSuperCannotChangeOwner");
+    try {
+      userfs.setOwner(file, NOUSER, null);
+      fail("Expect ACE when a non-super user tries to change owner");
+    } catch (AccessControlException e) {
+      assertThat(e.getMessage(), startsWith(
+          "Non-super user cannot change owner"));
+    }
+  }
+
+  private void testNonSuperCannotChangeOwnerForOtherFile() throws Exception {
+    Path file = createFile(nnfs, "testNonSuperCannotChangeOwnerForOtherFile");
+    nnfs.setPermission(file, new FsPermission("777"));
+    try {
+      userfs.setOwner(file, USER_NAME, null);
+      fail("Expect ACE when a non-super user tries to own a file");
+    } catch (AccessControlException e) {
+      assertThat(e.getMessage(), startsWith("Permission denied"));
+    }
+  }
+
+  private void testNonSuperCannotChangeOwnerForNonExistentFile()
+      throws Exception {
+    Path file = new Path(ROOT_PATH,
+        "testNonSuperCannotChangeOwnerForNonExistentFile");
+    assertFalse(userfs.exists(file));
+    try {
+      userfs.setOwner(file, NOUSER, null);
+      fail("Expect ACE or FNFE when a non-super user tries to change owner " +
+          "for a non-existent file");
+    } catch (AccessControlException e) {
+      assertThat(e.getMessage(), startsWith(
+          "Non-super user cannot change owner"));
+    } catch (FileNotFoundException e) {
+    }
+  }
+
   static boolean canMkdirs(FileSystem fs, Path p) throws IOException {
     try {
       fs.mkdirs(p);
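
The new helpers pin down the FileSystem#setOwner rules the test asserts: only
the superuser may change a file's owner, a non-super user may move a file
only into a group it belongs to, and a null owner or group argument means
"leave that field unchanged". A minimal sketch of the two call shapes,
assuming fs is already connected; the path and names are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SetOwnerSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path f = new Path("/tmp/f");  // assumed to exist

    // Change group only; null owner leaves the owner unchanged. For a
    // non-super user this succeeds only if the caller belongs to "mygroup".
    fs.setOwner(f, null, "mygroup");

    // Change owner only; null group leaves the group unchanged. This throws
    // AccessControlException unless the caller is the superuser.
    fs.setOwner(f, "newowner", null);
  }
}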



[55/57] [abbrv] hadoop git commit: MAPREDUCE-6638. Do not attempt to recover progress from previous job attempts if spill encryption is enabled. (Haibo Chen via kasha)

Posted by in...@apache.org.
MAPREDUCE-6638. Do not attempt to recover progress from previous job attempts if spill encryption is enabled. (Haibo Chen via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de7a0a92
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de7a0a92
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de7a0a92

Branch: refs/heads/HDFS-10467
Commit: de7a0a92ca1983b35ca4beb7ab712fd700a9e6e0
Parents: 7442084
Author: Karthik Kambatla <ka...@cloudera.com>
Authored: Mon Oct 3 10:30:22 2016 -0700
Committer: Karthik Kambatla <ka...@cloudera.com>
Committed: Mon Oct 3 10:30:22 2016 -0700

----------------------------------------------------------------------
 .../hadoop/mapreduce/v2/app/MRAppMaster.java    | 90 ++++++++++++++------
 .../hadoop/mapreduce/v2/app/TestRecovery.java   | 66 ++++++++++++++
 2 files changed, 129 insertions(+), 27 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de7a0a92/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
index d94f8a5..4a8a90e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
@@ -149,7 +149,6 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager;
 import org.apache.hadoop.yarn.util.Clock;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.SystemClock;
 import org.apache.log4j.LogManager;
 
@@ -1303,44 +1302,77 @@ public class MRAppMaster extends CompositeService {
   }
 
   private void processRecovery() throws IOException{
-    if (appAttemptID.getAttemptId() == 1) {
-      return;  // no need to recover on the first attempt
+    boolean attemptRecovery = shouldAttemptRecovery();
+    boolean recoverySucceeded = true;
+    if (attemptRecovery) {
+      LOG.info("Attempting to recover.");
+      try {
+        parsePreviousJobHistory();
+      } catch (IOException e) {
+        LOG.warn("Unable to parse prior job history, aborting recovery", e);
+        recoverySucceeded = false;
+      }
+    }
+
+    if (!isFirstAttempt() && (!attemptRecovery || !recoverySucceeded)) {
+      amInfos.addAll(readJustAMInfos());
+    }
+  }
+
+  private boolean isFirstAttempt() {
+    return appAttemptID.getAttemptId() == 1;
+  }
+
+  /**
+   * Check if the current job attempt should try to recover from previous
+   * job attempts if any.
+   */
+  private boolean shouldAttemptRecovery() throws IOException {
+    if (isFirstAttempt()) {
+      return false;  // no need to recover on the first attempt
     }
 
     boolean recoveryEnabled = getConfig().getBoolean(
         MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,
         MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE_DEFAULT);
+    if (!recoveryEnabled) {
+      LOG.info("Not attempting to recover. Recovery disabled. To enable " +
+          "recovery, set " + MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE);
+      return false;
+    }
 
     boolean recoverySupportedByCommitter = isRecoverySupported();
+    if (!recoverySupportedByCommitter) {
+      LOG.info("Not attempting to recover. Recovery is not supported by " +
+          committer.getClass() + ". Use an OutputCommitter that supports" +
+              " recovery.");
+      return false;
+    }
 
-    // If a shuffle secret was not provided by the job client then this app
-    // attempt will generate one.  However that disables recovery if there
-    // are reducers as the shuffle secret would be app attempt specific.
-    int numReduceTasks = getConfig().getInt(MRJobConfig.NUM_REDUCES, 0);
+    int reducerCount = getConfig().getInt(MRJobConfig.NUM_REDUCES, 0);
+
+    // If a shuffle secret was not provided by the job client, one will be
+    // generated in this job attempt. However, that disables recovery if
+    // there are reducers as the shuffle secret would be job attempt specific.
     boolean shuffleKeyValidForRecovery =
         TokenCache.getShuffleSecretKey(jobCredentials) != null;
+    if (reducerCount > 0 && !shuffleKeyValidForRecovery) {
+      LOG.info("Not attempting to recover. The shuffle key is invalid for " +
+          "recovery.");
+      return false;
+    }
 
-    if (recoveryEnabled && recoverySupportedByCommitter
-        && (numReduceTasks <= 0 || shuffleKeyValidForRecovery)) {
-      LOG.info("Recovery is enabled. "
-          + "Will try to recover from previous life on best effort basis.");
-      try {
-        parsePreviousJobHistory();
-      } catch (IOException e) {
-        LOG.warn("Unable to parse prior job history, aborting recovery", e);
-        // try to get just the AMInfos
-        amInfos.addAll(readJustAMInfos());
-      }
-    } else {
-      LOG.info("Will not try to recover. recoveryEnabled: "
-            + recoveryEnabled + " recoverySupportedByCommitter: "
-            + recoverySupportedByCommitter + " numReduceTasks: "
-            + numReduceTasks + " shuffleKeyValidForRecovery: "
-            + shuffleKeyValidForRecovery + " ApplicationAttemptID: "
-            + appAttemptID.getAttemptId());
-      // Get the amInfos anyways whether recovery is enabled or not
-      amInfos.addAll(readJustAMInfos());
+    // If the intermediate data is encrypted, recovering the job requires the
+    // access to the key. Until the encryption key is persisted, we should
+    // avoid attempts to recover.
+    boolean spillEncrypted = CryptoUtils.isEncryptedSpillEnabled(getConfig());
+    if (reducerCount > 0 && spillEncrypted) {
+      LOG.info("Not attempting to recover. Intermediate spill encryption" +
+          " is enabled.");
+      return false;
     }
+
+    return true;
   }
 
   private static FSDataInputStream getPreviousJobHistoryStream(
@@ -1440,6 +1472,10 @@ public class MRAppMaster extends CompositeService {
     return amInfos;
   }
 
+  public boolean recovered() {
+    return recoveredJobStartTime > 0;
+  }
+
   /**
    * This can be overridden to instantiate multiple jobs and create a 
    * workflow.
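
The refactoring replaces one compound condition with guard clauses that each
log why recovery is skipped; the newly added gate is the encrypted-spill
check. A minimal sketch of reading that gate from a job configuration, with
illustrative values:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.CryptoUtils;
import org.apache.hadoop.mapreduce.MRJobConfig;

public class RecoveryGateSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);
    conf.setInt(MRJobConfig.NUM_REDUCES, 1);

    int reducerCount = conf.getInt(MRJobConfig.NUM_REDUCES, 0);
    boolean spillEncrypted = CryptoUtils.isEncryptedSpillEnabled(conf);

    // Mirrors the new guard: with reducers present and spill encryption on,
    // the AM must skip recovery because the per-attempt spill key is not
    // persisted anywhere a second attempt could read it.
    if (reducerCount > 0 && spillEncrypted) {
      System.out.println("Not attempting to recover.");
    }
  }
}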

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de7a0a92/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
index 9d5f0ae..071575a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
@@ -579,6 +579,72 @@ public class TestRecovery {
     app.verifyCompleted();
   }
 
+  @Test
+  public void testRecoveryWithSpillEncryption() throws Exception {
+    int runCount = 0;
+    MRApp app = new MRAppWithHistory(1, 1, false, this.getClass().getName(),
+        true, ++runCount) {
+    };
+    Configuration conf = new Configuration();
+    conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
+    conf.setBoolean("mapred.mapper.new-api", true);
+    conf.setBoolean("mapred.reducer.new-api", true);
+    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
+    conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
+    conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);
+
+    // run the MR job at the first attempt
+    Job jobAttempt1 = app.submit(conf);
+    app.waitForState(jobAttempt1, JobState.RUNNING);
+
+    Iterator<Task> tasks = jobAttempt1.getTasks().values().iterator();
+
+    // finish the map task but not the reduce task
+    Task mapper = tasks.next();
+    app.waitForState(mapper, TaskState.RUNNING);
+    TaskAttempt mapAttempt = mapper.getAttempts().values().iterator().next();
+    app.waitForState(mapAttempt, TaskAttemptState.RUNNING);
+    app.getContext().getEventHandler().handle(
+        new TaskAttemptEvent(mapAttempt.getID(), TaskAttemptEventType.TA_DONE));
+    app.waitForState(mapper, TaskState.SUCCEEDED);
+
+    // crash the first attempt of the MR job
+    app.stop();
+
+    // run the MR job again at the second attempt
+    app = new MRAppWithHistory(1, 1, false, this.getClass().getName(), false,
+        ++runCount);
+    Job jobAttempt2 = app.submit(conf);
+    Assert.assertTrue("Recovery from previous job attempt is processed even " +
+        "though intermediate data encryption is enabled.", !app.recovered());
+
+    // The map task succeeded from previous job attempt will not be recovered
+    // because the data spill encryption is enabled.
+    // Let's finish the job at the second attempt and verify its completion.
+    app.waitForState(jobAttempt2, JobState.RUNNING);
+    tasks = jobAttempt2.getTasks().values().iterator();
+    mapper = tasks.next();
+    Task reducer = tasks.next();
+
+    // finish the map task first
+    app.waitForState(mapper, TaskState.RUNNING);
+    mapAttempt = mapper.getAttempts().values().iterator().next();
+    app.waitForState(mapAttempt, TaskAttemptState.RUNNING);
+    app.getContext().getEventHandler().handle(
+        new TaskAttemptEvent(mapAttempt.getID(), TaskAttemptEventType.TA_DONE));
+    app.waitForState(mapper, TaskState.SUCCEEDED);
+
+    // then finish the reduce task
+    TaskAttempt redAttempt = reducer.getAttempts().values().iterator().next();
+    app.waitForState(redAttempt, TaskAttemptState.RUNNING);
+    app.getContext().getEventHandler().handle(
+        new TaskAttemptEvent(redAttempt.getID(), TaskAttemptEventType.TA_DONE));
+    app.waitForState(reducer, TaskState.SUCCEEDED);
+
+    // verify that the job succeeds at the second attempt
+    app.waitForState(jobAttempt2, JobState.SUCCEEDED);
+  }
+
   /**
    * This test case primarily verifies if the recovery is controlled through config
    * property. In this case, recover is turned OFF. AM with 3 maps and 0 reduce.
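
For reference, the job configuration the new test drives: recovery explicitly
enabled, ubertask off, spill encryption on, which together force the second
attempt to rerun the finished map instead of recovering it. A trimmed sketch
of that setup (the printed check is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;

public class SpillEncryptionJobConf {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);

    // With at least one reducer, these settings make the AM skip recovery
    // on attempt 2 even though recovery itself is enabled.
    System.out.println("spill encryption on: " + conf.getBoolean(
        MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, false));
  }
}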



[13/57] [abbrv] hadoop git commit: HADOOP-13658. Replace config key literal strings with names I: hadoop common. Contributed by Chen Liang

Posted by in...@apache.org.
HADOOP-13658. Replace config key literal strings with names I: hadoop common. Contributed by Chen Liang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a44a832
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a44a832
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a44a832

Branch: refs/heads/HDFS-10467
Commit: 9a44a832a99eb967aa4e34338dfa75baf35f9845
Parents: 2acfb1e
Author: Mingliang Liu <li...@apache.org>
Authored: Tue Sep 27 17:36:14 2016 -0700
Committer: Mingliang Liu <li...@apache.org>
Committed: Tue Sep 27 17:36:53 2016 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/fs/FileSystem.java  |  3 ++-
 .../org/apache/hadoop/fs/ftp/FTPFileSystem.java     |  6 +++---
 .../hadoop/fs/shell/CommandWithDestination.java     |  5 ++++-
 .../java/org/apache/hadoop/io/BloomMapFile.java     | 11 +++++++++--
 .../src/main/java/org/apache/hadoop/io/IOUtils.java |  9 +++++++--
 .../src/main/java/org/apache/hadoop/io/MapFile.java |  6 +++++-
 .../java/org/apache/hadoop/io/SequenceFile.java     | 16 +++++++++++++---
 .../org/apache/hadoop/io/compress/BZip2Codec.java   |  9 +++++++--
 .../org/apache/hadoop/io/compress/DefaultCodec.java |  9 +++++++--
 .../org/apache/hadoop/io/compress/GzipCodec.java    |  9 ++++++---
 .../apache/hadoop/io/file/tfile/Compression.java    | 14 ++++++++++----
 .../org/apache/hadoop/net/SocksSocketFactory.java   |  4 +++-
 .../java/org/apache/hadoop/util/LineReader.java     |  6 ++++--
 .../main/java/org/apache/hadoop/util/hash/Hash.java |  6 +++++-
 .../hadoop/fs/contract/ContractTestUtils.java       |  9 +++++----
 15 files changed, 90 insertions(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a44a832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 5939f97..c36598f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -2944,7 +2944,8 @@ public abstract class FileSystem extends Configured implements Closeable {
         }
         fs.key = key;
         map.put(key, fs);
-        if (conf.getBoolean("fs.automatic.close", true)) {
+        if (conf.getBoolean(
+            FS_AUTOMATIC_CLOSE_KEY, FS_AUTOMATIC_CLOSE_DEFAULT)) {
           toAutoClose.add(key);
         }
         return fs;
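
fs.automatic.close (FS_AUTOMATIC_CLOSE_KEY above) decides whether cached
FileSystem instances are closed by the JVM shutdown hook. A hedged sketch of
a caller opting out and owning the lifecycle itself; the class and method
names here are made up:

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
  import org.apache.hadoop.fs.FileSystem;

  public class ManualCloseExample {
    static void useFileSystem(Configuration conf) throws IOException {
      // Opt out of the shutdown-hook close; closing becomes the caller's job.
      conf.setBoolean(CommonConfigurationKeysPublic.FS_AUTOMATIC_CLOSE_KEY, false);
      try (FileSystem fs = FileSystem.get(conf)) {
        // ... use fs; try-with-resources closes it explicitly on exit.
      }
    }
  }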

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a44a832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
index f1afacd..28b07e8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
@@ -105,13 +105,13 @@ public class FTPFileSystem extends FileSystem {
     // get port information from uri, (overrides info in conf)
     int port = uri.getPort();
     port = (port == -1) ? FTP.DEFAULT_PORT : port;
-    conf.setInt("fs.ftp.host.port", port);
+    conf.setInt(FS_FTP_HOST_PORT, port);
 
     // get user/password information from URI (overrides info in conf)
     String userAndPassword = uri.getUserInfo();
     if (userAndPassword == null) {
-      userAndPassword = (conf.get("fs.ftp.user." + host, null) + ":" + conf
-          .get("fs.ftp.password." + host, null));
+      userAndPassword = (conf.get(FS_FTP_USER_PREFIX + host, null) + ":" + conf
+          .get(FS_FTP_PASSWORD_PREFIX + host, null));
     }
     String[] userPasswdInfo = userAndPassword.split(":");
     Preconditions.checkState(userPasswdInfo.length > 1,
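
The two prefixes make the per-host credential lookup explicit: the host name
is appended to FS_FTP_USER_PREFIX ("fs.ftp.user.") and FS_FTP_PASSWORD_PREFIX
("fs.ftp.password."). A short sketch of supplying credentials through the
Configuration instead of the URI; the host and credentials are made up:

  Configuration conf = new Configuration();
  // Consulted when the ftp:// URI carries no user info; together these are
  // equivalent to ftp://user:secret@ftp.example.com/.
  conf.set("fs.ftp.user.ftp.example.com", "user");
  conf.set("fs.ftp.password.ftp.example.com", "secret");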

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a44a832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
index 5fcfdf8..2ab0a26 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
@@ -46,6 +46,8 @@ import org.apache.hadoop.fs.permission.AclUtil;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.IOUtils;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
 import static org.apache.hadoop.fs.CreateFlag.CREATE;
 import static org.apache.hadoop.fs.CreateFlag.LAZY_PERSIST;
 
@@ -497,7 +499,8 @@ abstract class CommandWithDestination extends FsCommand {
                         FsPermission.getFileDefault().applyUMask(
                             FsPermission.getUMask(getConf())),
                         createFlags,
-                        getConf().getInt("io.file.buffer.size", 4096),
+                        getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
+                            IO_FILE_BUFFER_SIZE_DEFAULT),
                         lazyPersist ? 1 : getDefaultReplication(item.path),
                         getDefaultBlockSize(),
                         null,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a44a832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BloomMapFile.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BloomMapFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BloomMapFile.java
index 1b3857e..d4514c6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BloomMapFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BloomMapFile.java
@@ -37,6 +37,11 @@ import org.apache.hadoop.util.bloom.Filter;
 import org.apache.hadoop.util.bloom.Key;
 import org.apache.hadoop.util.hash.Hash;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAPFILE_BLOOM_ERROR_RATE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAPFILE_BLOOM_ERROR_RATE_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAPFILE_BLOOM_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAPFILE_BLOOM_SIZE_KEY;
+
 /**
  * This class extends {@link MapFile} and provides very much the same
  * functionality. However, it uses dynamic Bloom filters to provide
@@ -159,13 +164,15 @@ public class BloomMapFile {
     }
 
     private synchronized void initBloomFilter(Configuration conf) {
-      numKeys = conf.getInt("io.mapfile.bloom.size", 1024 * 1024);
+      numKeys = conf.getInt(
+          IO_MAPFILE_BLOOM_SIZE_KEY, IO_MAPFILE_BLOOM_SIZE_DEFAULT);
       // vector size should be <code>-kn / (ln(1 - c^(1/k)))</code> bits for
       // a single key, where <code>k</code> is the number of hash functions,
       // <code>n</code> is the number of keys and <code>c</code> is the desired
       // max. error rate.
       // Our desired error rate is by default 0.005, i.e. 0.5%
-      float errorRate = conf.getFloat("io.mapfile.bloom.error.rate", 0.005f);
+      float errorRate = conf.getFloat(
+          IO_MAPFILE_BLOOM_ERROR_RATE_KEY, IO_MAPFILE_BLOOM_ERROR_RATE_DEFAULT);
       vectorSize = (int)Math.ceil((double)(-HASH_COUNT * numKeys) /
           Math.log(1.0 - Math.pow(errorRate, 1.0/HASH_COUNT)));
       bloomFilter = new DynamicBloomFilter(vectorSize, HASH_COUNT,
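
For reference, the sizing above is the standard Bloom filter bound,
vectorSize = ceil(-k*n / ln(1 - c^(1/k))). A standalone restatement of the
same arithmetic, with a worked number that assumes HASH_COUNT (k) is 5:

  static int bloomVectorSize(int numKeys, float errorRate, int hashCount) {
    // Identical arithmetic to initBloomFilter above.
    return (int) Math.ceil((double) (-hashCount * numKeys)
        / Math.log(1.0 - Math.pow(errorRate, 1.0 / hashCount)));
  }

With the named defaults, bloomVectorSize(1024 * 1024, 0.005f, 5) comes to
roughly 12.3 million bits, i.e. about 1.5 MB of filter.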

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a44a832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index e6749b7..0d2e797 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -38,6 +38,9 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.ChunkedArrayList;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+
 /**
 * A utility class for I/O-related functionality.
  */
@@ -105,7 +108,8 @@ public class IOUtils {
    */
   public static void copyBytes(InputStream in, OutputStream out, Configuration conf)
     throws IOException {
-    copyBytes(in, out, conf.getInt("io.file.buffer.size", 4096), true);
+    copyBytes(in, out, conf.getInt(
+        IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT), true);
   }
   
   /**
@@ -119,7 +123,8 @@ public class IOUtils {
    */
   public static void copyBytes(InputStream in, OutputStream out, Configuration conf, boolean close)
     throws IOException {
-    copyBytes(in, out, conf.getInt("io.file.buffer.size", 4096),  close);
+    copyBytes(in, out, conf.getInt(
+        IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT),  close);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a44a832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
index 96a4189..908a893 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
@@ -38,6 +38,9 @@ import org.apache.hadoop.util.Options;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ReflectionUtils;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAP_INDEX_SKIP_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAP_INDEX_SKIP_KEY;
+
 /** A file-based map from keys to values.
  * 
  * <p>A map is a directory containing two files, the <code>data</code> file,
@@ -395,7 +398,8 @@ public class MapFile {
         Options.getOption(ComparatorOption.class, opts);
       WritableComparator comparator =
         comparatorOption == null ? null : comparatorOption.getValue();
-      INDEX_SKIP = conf.getInt("io.map.index.skip", 0);
+      INDEX_SKIP = conf.getInt(
+          IO_MAP_INDEX_SKIP_KEY, IO_MAP_INDEX_SKIP_DEFAULT);
       open(dir, comparator, conf, opts);
     }
  

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a44a832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
index d8bec48..2ac1389 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
@@ -51,6 +51,13 @@ import org.apache.hadoop.util.MergeSort;
 import org.apache.hadoop.util.PriorityQueue;
 import org.apache.hadoop.util.Time;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SEQFILE_COMPRESS_BLOCKSIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SEQFILE_COMPRESS_BLOCKSIZE_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSUM_ERRORS_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSUM_ERRORS_KEY;
+
 /** 
  * <code>SequenceFile</code>s are flat files consisting of binary key/value 
  * pairs.
@@ -1513,7 +1520,9 @@ public class SequenceFile {
                         Option... options) throws IOException {
       super(conf, options);
       compressionBlockSize = 
-        conf.getInt("io.seqfile.compress.blocksize", 1000000);
+        conf.getInt(IO_SEQFILE_COMPRESS_BLOCKSIZE_KEY,
+            IO_SEQFILE_COMPRESS_BLOCKSIZE_DEFAULT
+        );
       keySerializer.close();
       keySerializer.open(keyBuffer);
       uncompressedValSerializer.close();
@@ -1637,7 +1646,7 @@ public class SequenceFile {
 
   /** Get the configured buffer size */
   private static int getBufferSize(Configuration conf) {
-    return conf.getInt("io.file.buffer.size", 4096);
+    return conf.getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT);
   }
 
   /** Reads key/value pairs from a sequence-format file. */
@@ -2655,7 +2664,8 @@ public class SequenceFile {
 
     private void handleChecksumException(ChecksumException e)
       throws IOException {
-      if (this.conf.getBoolean("io.skip.checksum.errors", false)) {
+      if (this.conf.getBoolean(
+          IO_SKIP_CHECKSUM_ERRORS_KEY, IO_SKIP_CHECKSUM_ERRORS_DEFAULT)) {
         LOG.warn("Bad checksum at "+getPosition()+". Skipping entries.");
         sync(getPosition()+this.conf.getInt("io.bytes.per.checksum", 512));
       } else {
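
The renamed io.skip.checksum.errors switch is what lets a Reader trade data
loss for progress over corrupt stretches; when it is off, the else branch
(cut off above) lets the exception propagate instead. A minimal sketch of
enabling it:

  Configuration conf = new Configuration();
  // Log and skip entries with bad checksums instead of failing the read.
  conf.setBoolean(IO_SKIP_CHECKSUM_ERRORS_KEY, true);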

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a44a832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
index bf78e0c..08b4d4d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
@@ -35,6 +35,9 @@ import org.apache.hadoop.io.compress.bzip2.CBZip2InputStream;
 import org.apache.hadoop.io.compress.bzip2.CBZip2OutputStream;
 import org.apache.hadoop.io.compress.bzip2.Bzip2Factory;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+
 /**
  * This class provides output and input streams for bzip2 compression
  * and decompression.  It uses the native bzip2 library on the system
@@ -120,7 +123,8 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
       Compressor compressor) throws IOException {
     return Bzip2Factory.isNativeBzip2Loaded(conf) ?
       new CompressorStream(out, compressor, 
-                           conf.getInt("io.file.buffer.size", 4*1024)) :
+                           conf.getInt(IO_FILE_BUFFER_SIZE_KEY,
+                                   IO_FILE_BUFFER_SIZE_DEFAULT)) :
       new BZip2CompressionOutputStream(out);
   }
 
@@ -174,7 +178,8 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
       Decompressor decompressor) throws IOException {
     return Bzip2Factory.isNativeBzip2Loaded(conf) ? 
       new DecompressorStream(in, decompressor,
-                             conf.getInt("io.file.buffer.size", 4*1024)) :
+                             conf.getInt(IO_FILE_BUFFER_SIZE_KEY,
+                                 IO_FILE_BUFFER_SIZE_DEFAULT)) :
       new BZip2CompressionInputStream(in);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a44a832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
index 0e6f02c..31196cc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
@@ -31,6 +31,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.zlib.ZlibDecompressor;
 import org.apache.hadoop.io.compress.zlib.ZlibFactory;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class DefaultCodec implements Configurable, CompressionCodec, DirectDecompressionCodec {
@@ -60,7 +63,8 @@ public class DefaultCodec implements Configurable, CompressionCodec, DirectDecom
                                                     Compressor compressor) 
   throws IOException {
     return new CompressorStream(out, compressor, 
-                                conf.getInt("io.file.buffer.size", 4*1024));
+                                conf.getInt(IO_FILE_BUFFER_SIZE_KEY,
+                                        IO_FILE_BUFFER_SIZE_DEFAULT));
   }
 
   @Override
@@ -85,7 +89,8 @@ public class DefaultCodec implements Configurable, CompressionCodec, DirectDecom
                                                   Decompressor decompressor) 
   throws IOException {
     return new DecompressorStream(in, decompressor, 
-                                  conf.getInt("io.file.buffer.size", 4*1024));
+                                  conf.getInt(IO_FILE_BUFFER_SIZE_KEY,
+                                      IO_FILE_BUFFER_SIZE_DEFAULT));
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a44a832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
index c493f17..01b6434 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
@@ -27,6 +27,8 @@ import org.apache.hadoop.io.compress.DefaultCodec;
 import org.apache.hadoop.io.compress.zlib.*;
 import org.apache.hadoop.io.compress.zlib.ZlibDecompressor.ZlibDirectDecompressor;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
 import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
 
 /**
@@ -172,8 +174,8 @@ public class GzipCodec extends DefaultCodec {
   throws IOException {
     return (compressor != null) ?
                new CompressorStream(out, compressor,
-                                    conf.getInt("io.file.buffer.size", 
-                                                4*1024)) :
+                                    conf.getInt(IO_FILE_BUFFER_SIZE_KEY,
+                                            IO_FILE_BUFFER_SIZE_DEFAULT)) :
                createOutputStream(out);
   }
 
@@ -206,7 +208,8 @@ public class GzipCodec extends DefaultCodec {
       decompressor = createDecompressor();  // always succeeds (or throws)
     }
     return new DecompressorStream(in, decompressor,
-                                  conf.getInt("io.file.buffer.size", 4*1024));
+                                  conf.getInt(IO_FILE_BUFFER_SIZE_KEY,
+                                      IO_FILE_BUFFER_SIZE_DEFAULT));
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a44a832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
index da3fe34..f7ec7ac 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
@@ -36,6 +36,10 @@ import org.apache.hadoop.io.compress.Decompressor;
 import org.apache.hadoop.io.compress.DefaultCodec;
 import org.apache.hadoop.util.ReflectionUtils;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+
 /**
  * Compression related stuff.
  */
@@ -124,7 +128,8 @@ final class Compression {
         } else {
           bis1 = downStream;
         }
-        conf.setInt("io.compression.codec.lzo.buffersize", 64 * 1024);
+        conf.setInt(IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_KEY,
+            IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_DEFAULT);
         CompressionInputStream cis =
             codec.createInputStream(bis1, decompressor);
         BufferedInputStream bis2 = new BufferedInputStream(cis, DATA_IBUF_SIZE);
@@ -146,7 +151,8 @@ final class Compression {
         } else {
           bos1 = downStream;
         }
-        conf.setInt("io.compression.codec.lzo.buffersize", 64 * 1024);
+        conf.setInt(IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_KEY,
+            IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_DEFAULT);
         CompressionOutputStream cos =
             codec.createOutputStream(bos1, compressor);
         BufferedOutputStream bos2 =
@@ -175,7 +181,7 @@ final class Compression {
           int downStreamBufferSize) throws IOException {
         // Set the internal buffer size to read from down stream.
         if (downStreamBufferSize > 0) {
-          codec.getConf().setInt("io.file.buffer.size", downStreamBufferSize);
+          codec.getConf().setInt(IO_FILE_BUFFER_SIZE_KEY, downStreamBufferSize);
         }
         CompressionInputStream cis =
             codec.createInputStream(downStream, decompressor);
@@ -193,7 +199,7 @@ final class Compression {
         } else {
           bos1 = downStream;
         }
-        codec.getConf().setInt("io.file.buffer.size", 32 * 1024);
+        codec.getConf().setInt(IO_FILE_BUFFER_SIZE_KEY, 32 * 1024);
         CompressionOutputStream cos =
             codec.createOutputStream(bos1, compressor);
         BufferedOutputStream bos2 =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a44a832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocksSocketFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocksSocketFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocksSocketFactory.java
index 6b84f9d..ba9e815 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocksSocketFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocksSocketFactory.java
@@ -31,6 +31,8 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SOCKS_SERVER_KEY;
+
 /**
  * Specialized SocketFactory to create sockets with a SOCKS proxy
  */
@@ -133,7 +135,7 @@ public class SocksSocketFactory extends SocketFactory implements
   @Override
   public void setConf(Configuration conf) {
     this.conf = conf;
-    String proxyStr = conf.get("hadoop.socks.server");
+    String proxyStr = conf.get(HADOOP_SOCKS_SERVER_KEY);
     if ((proxyStr != null) && (proxyStr.length() > 0)) {
       setProxy(proxyStr);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a44a832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
index e20a7c1..a1cf709 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
@@ -27,6 +27,8 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+
 /**
  * A class that provides a line reader from an input stream.
  * Depending on the constructor used, lines will either be terminated by:
@@ -89,7 +91,7 @@ public class LineReader implements Closeable {
    * @throws IOException
    */
   public LineReader(InputStream in, Configuration conf) throws IOException {
-    this(in, conf.getInt("io.file.buffer.size", DEFAULT_BUFFER_SIZE));
+    this(in, conf.getInt(IO_FILE_BUFFER_SIZE_KEY, DEFAULT_BUFFER_SIZE));
   }
 
   /**
@@ -136,7 +138,7 @@ public class LineReader implements Closeable {
   public LineReader(InputStream in, Configuration conf,
       byte[] recordDelimiterBytes) throws IOException {
     this.in = in;
-    this.bufferSize = conf.getInt("io.file.buffer.size", DEFAULT_BUFFER_SIZE);
+    this.bufferSize = conf.getInt(IO_FILE_BUFFER_SIZE_KEY, DEFAULT_BUFFER_SIZE);
     this.buffer = new byte[this.bufferSize];
     this.recordDelimiterBytes = recordDelimiterBytes;
   }
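
Both constructors now take their buffer size from the same named key. A brief
usage sketch with a custom record delimiter (the NUL delimiter and the input
stream 'in' are arbitrary examples):

  LineReader reader = new LineReader(in, conf, new byte[] { 0 });
  Text line = new Text();
  while (reader.readLine(line) > 0) {
    // process one NUL-delimited record held in 'line'
  }
  reader.close();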

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a44a832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/hash/Hash.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/hash/Hash.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/hash/Hash.java
index 9f0ea16..50f6091 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/hash/Hash.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/hash/Hash.java
@@ -22,6 +22,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_UTIL_HASH_TYPE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_UTIL_HASH_TYPE_KEY;
+
 /**
  * This class represents a common API for hashing functions.
  */
@@ -59,7 +62,8 @@ public abstract class Hash {
    * @return one of the predefined constants
    */
   public static int getHashType(Configuration conf) {
-    String name = conf.get("hadoop.util.hash.type", "murmur");
+    String name = conf.get(HADOOP_UTIL_HASH_TYPE_KEY,
+        HADOOP_UTIL_HASH_TYPE_DEFAULT);
     return parseHashType(name);
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a44a832/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
index b8ca6b2..0a1ca49 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
@@ -47,6 +47,9 @@ import java.util.Properties;
 import java.util.Set;
 import java.util.UUID;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+
 /**
  * Utilities used across test cases.
  */
@@ -55,8 +58,6 @@ public class ContractTestUtils extends Assert {
   private static final Logger LOG =
       LoggerFactory.getLogger(ContractTestUtils.class);
 
-  public static final String IO_FILE_BUFFER_SIZE = "io.file.buffer.size";
-
   // For scale testing, we can repeatedly write small chunk data to generate
   // a large file.
   public static final String IO_CHUNK_BUFFER_SIZE = "io.chunk.buffer.size";
@@ -150,8 +151,8 @@ public class ContractTestUtils extends Assert {
     FSDataOutputStream out = fs.create(path,
                                        overwrite,
                                        fs.getConf()
-                                         .getInt(IO_FILE_BUFFER_SIZE,
-                                                 4096),
+                                         .getInt(IO_FILE_BUFFER_SIZE_KEY,
+                                             IO_FILE_BUFFER_SIZE_DEFAULT),
                                        (short) 1,
                                        buffersize);
     out.write(src, 0, len);




[40/57] [abbrv] hadoop git commit: HDFS-10908. Improve StripedBlockReader#createBlockReader error logging. Contributed by Manoj Govindassamy.

Posted by in...@apache.org.
HDFS-10908. Improve StripedBlockReader#createBlockReader error logging. Contributed by Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ab1ef15
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ab1ef15
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ab1ef15

Branch: refs/heads/HDFS-10467
Commit: 2ab1ef15c5e0b05fed5106d6bbecb3ead2b25f9a
Parents: d6afcf3
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Fri Sep 30 12:35:47 2016 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Fri Sep 30 12:35:47 2016 -0700

----------------------------------------------------------------------
 .../hdfs/server/datanode/erasurecode/StripedBlockReader.java       | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ab1ef15/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
index 8f976c2..a27de9b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
@@ -122,7 +122,7 @@ class StripedBlockReader {
           "", newConnectedPeer(block, dnAddr, blockToken, source), source,
           null, stripedReader.getCachingStrategy(), datanode.getTracer(), -1);
     } catch (IOException e) {
-      LOG.debug("Exception while creating remote block reader, datanode {}",
+      LOG.info("Exception while creating remote block reader, datanode {}",
           source, e);
       return null;
     }

