Posted to common-commits@hadoop.apache.org by ka...@apache.org on 2014/12/15 19:35:52 UTC

[01/50] [abbrv] hadoop git commit: HADOOP-11367. Fix warnings from findbugs 3.0 in hadoop-streaming. Contributed by Li Lu.

Repository: hadoop
Updated Branches:
  refs/heads/YARN-2139 ddffcd8fa -> 6e13fc62e


HADOOP-11367. Fix warnings from findbugs 3.0 in hadoop-streaming. Contributed by Li Lu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7bceb13b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7bceb13b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7bceb13b

Branch: refs/heads/YARN-2139
Commit: 7bceb13ba9634123a92a091f93b3b04c14473678
Parents: b557693
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Dec 9 10:41:35 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Tue Dec 9 10:41:35 2014 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                  | 2 ++
 .../src/main/java/org/apache/hadoop/streaming/Environment.java   | 4 +++-
 2 files changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bceb13b/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index a0a10b8..425cab7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -534,6 +534,8 @@ Release 2.7.0 - UNRELEASED
     HADOOP-10134 [JDK8] Fix Javadoc errors caused by incorrect or illegal tags in doc
     comments. (apurtell via stevel)
 
+    HADOOP-11367. Fix warnings from findbugs 3.0 in hadoop-streaming. (Li Lu via wheat9)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bceb13b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/Environment.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/Environment.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/Environment.java
index bd76c31..98d8aa03 100644
--- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/Environment.java
+++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/Environment.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.streaming;
 
 import java.io.*;
 import java.net.InetAddress;
+import java.nio.charset.Charset;
 import java.util.*;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -62,7 +63,8 @@ public class Environment extends Properties {
     // Read the environment variables
 
     Process pid = Runtime.getRuntime().exec(command);
-    BufferedReader in = new BufferedReader(new InputStreamReader(pid.getInputStream()));
+    BufferedReader in = new BufferedReader(
+        new InputStreamReader(pid.getInputStream(), Charset.forName("UTF-8")));
     try {
       while (true) {
         String line = in.readLine();
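
The substance of the fix: findbugs 3.0 flags an InputStreamReader built without a charset as relying on the platform default encoding (presumably its DM_DEFAULT_ENCODING pattern), and the patch clears the warning by naming UTF-8 explicitly. Below is a minimal standalone sketch of the same pattern; it is not Hadoop code, the "env" command is only illustrative, and StandardCharsets.UTF_8 is used where the patch spells Charset.forName("UTF-8").

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

public class ExplicitCharsetSketch {
  public static void main(String[] args) throws IOException {
    // Spawn a child process whose stdout we want to read ("env" is illustrative).
    Process p = Runtime.getRuntime().exec(new String[] {"env"});
    // Passing the charset explicitly removes the dependency on the JVM's
    // platform default encoding, which is what findbugs objects to.
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(p.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);
      }
    }
  }
}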


[23/50] [abbrv] hadoop git commit: HDFS-5578. [JDK8] Fix Javadoc errors caused by incorrect or illegal tags in doc comments. Contributed by Andrew Purtell.

Posted by ka...@apache.org.
HDFS-5578. [JDK8] Fix Javadoc errors caused by incorrect or illegal tags in doc comments. Contributed by Andrew Purtell.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/92916ae4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/92916ae4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/92916ae4

Branch: refs/heads/YARN-2139
Commit: 92916ae4876e4e24f85b22ccf9418f347d8d9666
Parents: 84d5000
Author: Haohui Mai <wh...@apache.org>
Authored: Wed Dec 10 14:54:08 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Wed Dec 10 14:54:37 2014 -0800

----------------------------------------------------------------------
 .../hadoop/fs/http/client/HttpFSFileSystem.java | 24 +++++------
 .../fs/http/client/HttpsFSFileSystem.java       |  8 ++--
 .../server/CheckUploadContentTypeFilter.java    |  4 +-
 .../http/server/HttpFSAuthenticationFilter.java |  2 +-
 .../fs/http/server/HttpFSExceptionProvider.java |  1 -
 .../hadoop/fs/http/server/HttpFSServer.java     |  4 +-
 .../fs/http/server/HttpFSServerWebApp.java      |  4 +-
 .../org/apache/hadoop/lib/lang/XException.java  |  4 +-
 .../apache/hadoop/lib/server/BaseService.java   | 16 ++++----
 .../org/apache/hadoop/lib/server/Server.java    | 42 ++++++++++----------
 .../lib/servlet/FileSystemReleaseFilter.java    |  6 +--
 .../hadoop/lib/servlet/HostnameFilter.java      |  6 +--
 .../apache/hadoop/lib/servlet/MDCFilter.java    |  6 +--
 .../apache/hadoop/lib/servlet/ServerWebApp.java | 20 +++++-----
 .../java/org/apache/hadoop/lib/util/Check.java  |  2 +-
 .../hadoop/lib/util/ConfigurationUtils.java     |  2 +-
 .../org/apache/hadoop/lib/wsrs/Parameters.java  |  4 +-
 .../nfs/nfs3/PrivilegedNfsGatewayStarter.java   |  2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 19 files changed, 81 insertions(+), 79 deletions(-)
----------------------------------------------------------------------
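
For context: javadoc under JDK8 enables doclint by default and treats doc comments as strict HTML, so the self-closing <p/> used throughout these files is rejected as a self-closing element error, and raw generics such as List<AclEntry> inside @param or @return text are read as unknown HTML tags. The hunks below all apply the same two fixes: replace <p/> with <p> (or drop it), and escape or remove the literal angle brackets. A minimal illustration, not taken from the patch, of a doc comment that doclint accepts:

import java.util.Collections;
import java.util.List;

/**
 * Sketch of the doc-comment style the patch moves to.
 * <p>
 * Plain {@code <p>} separates paragraphs; the self-closing {@code <p/>} form
 * is reported by doclint as a self-closing element error.
 */
public class DoclintSketch {
  /**
   * Returns the values of the parameter.
   * The earlier form, {@code @return List<V> the values}, would fail doclint
   * because the raw angle brackets are parsed as an HTML tag.
   *
   * @param name parameter name.
   * @return the values of the parameter.
   */
  public <V> List<V> getValues(String name) {
    return Collections.<V>emptyList();
  }
}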


http://git-wip-us.apache.org/repos/asf/hadoop/blob/92916ae4/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 367308d..5b079e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -78,7 +78,7 @@ import java.util.Map;
 
 /**
  * HttpFSServer implementation of the FileSystemAccess FileSystem.
- * <p/>
+ * <p>
  * This implementation allows a user to access HDFS over HTTP via a HttpFSServer server.
  */
 @InterfaceAudience.Private
@@ -223,7 +223,7 @@ public class HttpFSFileSystem extends FileSystem
   /**
    * Convenience method that creates a <code>HttpURLConnection</code> for the
    * HttpFSServer file system operations.
-   * <p/>
+   * <p>
    * This methods performs and injects any needed authentication credentials
    * via the {@link #getConnection(URL, String)} method
    *
@@ -289,7 +289,7 @@ public class HttpFSFileSystem extends FileSystem
 
   /**
    * Convenience method that creates a <code>HttpURLConnection</code> for the specified URL.
-   * <p/>
+   * <p>
    * This methods performs and injects any needed authentication credentials.
    *
    * @param url url to connect to.
@@ -371,7 +371,7 @@ public class HttpFSFileSystem extends FileSystem
 
   /**
    * HttpFSServer subclass of the <code>FSDataInputStream</code>.
-   * <p/>
+   * <p>
    * This implementation does not support the
    * <code>PositionReadable</code> and <code>Seekable</code> methods.
    */
@@ -414,8 +414,8 @@ public class HttpFSFileSystem extends FileSystem
 
   /**
    * Opens an FSDataInputStream at the indicated Path.
-   * </p>
-   * IMPORTANT: the returned <code><FSDataInputStream/code> does not support the
+   * <p>
+   * IMPORTANT: the returned <code>FSDataInputStream</code> does not support the
    * <code>PositionReadable</code> and <code>Seekable</code> methods.
    *
    * @param f the file name to open
@@ -434,7 +434,7 @@ public class HttpFSFileSystem extends FileSystem
 
   /**
    * HttpFSServer subclass of the <code>FSDataOutputStream</code>.
-   * <p/>
+   * <p>
    * This implementation closes the underlying HTTP connection validating the Http connection status
    * at closing time.
    */
@@ -516,7 +516,7 @@ public class HttpFSFileSystem extends FileSystem
   /**
    * Opens an FSDataOutputStream at the indicated Path with write-progress
    * reporting.
-   * <p/>
+   * <p>
    * IMPORTANT: The <code>Progressable</code> parameter is not used.
    *
    * @param f the file name to open.
@@ -549,7 +549,7 @@ public class HttpFSFileSystem extends FileSystem
 
   /**
    * Append to an existing file (optional operation).
-   * <p/>
+   * <p>
    * IMPORTANT: The <code>Progressable</code> parameter is not used.
    *
    * @param f the existing file to be appended.
@@ -838,7 +838,7 @@ public class HttpFSFileSystem extends FileSystem
    * Modify the ACL entries for a file.
    *
    * @param path Path to modify
-   * @param aclSpec List<AclEntry> describing modifications
+   * @param aclSpec describing modifications
    * @throws IOException
    */
   @Override
@@ -855,7 +855,7 @@ public class HttpFSFileSystem extends FileSystem
   /**
    * Remove the specified ACL entries from a file
    * @param path Path to modify
-   * @param aclSpec List<AclEntry> describing entries to remove
+   * @param aclSpec describing entries to remove
    * @throws IOException
    */
   @Override
@@ -900,7 +900,7 @@ public class HttpFSFileSystem extends FileSystem
   /**
    * Set the ACLs for the given file
    * @param path Path to modify
-   * @param aclSpec List<AclEntry> describing modifications, must include
+   * @param aclSpec describing modifications, must include
    *                entries for user, group, and others for compatibility
    *                with permission bits.
    * @throws IOException

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92916ae4/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpsFSFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpsFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpsFSFileSystem.java
index a696cd8..0a2e08d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpsFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpsFSFileSystem.java
@@ -18,10 +18,10 @@
 package org.apache.hadoop.fs.http.client;
 
 /**
- * HttpFSServer implementation of the FileSystemAccess FileSystem for SSL.
- * <p/>
- * This implementation allows a user to access HDFS over HTTPS via a
- * HttpFSServer server.
+ * <p>HttpFSServer implementation of the FileSystemAccess FileSystem for SSL.
+ * </p>
+ * <p>This implementation allows a user to access HDFS over HTTPS via a
+ * HttpFSServer server.</p>
  */
 public class HttpsFSFileSystem extends HttpFSFileSystem {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92916ae4/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/CheckUploadContentTypeFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/CheckUploadContentTypeFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/CheckUploadContentTypeFilter.java
index 67df9a8..836b4ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/CheckUploadContentTypeFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/CheckUploadContentTypeFilter.java
@@ -50,7 +50,7 @@ public class CheckUploadContentTypeFilter implements Filter {
 
   /**
    * Initializes the filter.
-   * <p/>
+   * <p>
    * This implementation is a NOP.
    *
    * @param config filter configuration.
@@ -103,7 +103,7 @@ public class CheckUploadContentTypeFilter implements Filter {
 
   /**
    * Destroys the filter.
-   * <p/>
+   * <p>
    * This implementation is a NOP.
    */
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92916ae4/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java
index 8b332fc..f0fe4c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java
@@ -44,7 +44,7 @@ public class HttpFSAuthenticationFilter
 
   /**
    * Returns the hadoop-auth configuration from HttpFSServer's configuration.
-   * <p/>
+   * <p>
    * It returns all HttpFSServer's configuration properties prefixed with
    * <code>httpfs.authentication</code>. The <code>httpfs.authentication</code>
    * prefix is removed from the returned property names.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92916ae4/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java
index 3a8d9ad..aed6343 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java
@@ -43,7 +43,6 @@ public class HttpFSExceptionProvider extends ExceptionProvider {
 
   /**
    * Maps different exceptions thrown by HttpFSServer to HTTP status codes.
-   * <p/>
    * <ul>
    * <li>SecurityException : HTTP UNAUTHORIZED</li>
    * <li>FileNotFoundException : HTTP NOT_FOUND</li>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92916ae4/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index f9eb454..9103718 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -85,7 +85,7 @@ import java.util.Map;
 
 /**
  * Main class of HttpFSServer server.
- * <p/>
+ * <p>
  * The <code>HttpFSServer</code> class uses Jersey JAX-RS to binds HTTP requests to the
  * different operations.
  */
@@ -117,7 +117,7 @@ public class HttpFSServer {
   /**
    * Returns a filesystem instance. The fileystem instance is wired for release at the completion of
    * the current Servlet request via the {@link FileSystemReleaseFilter}.
-   * <p/>
+   * <p>
    * If a do-as user is specified, the current user must be a valid proxyuser, otherwise an
    * <code>AccessControlException</code> will be thrown.
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92916ae4/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java
index b7ae301..66438b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java
@@ -34,9 +34,9 @@ import java.io.IOException;
  * HttpFSServer server, it is a <code>javax.servlet.ServletContextListener
  * </code> implementation that is wired in HttpFSServer's WAR
  * <code>WEB-INF/web.xml</code>.
- * <p/>
+ * <p>
  * It provides acces to the server context via the singleton {@link #get}.
- * <p/>
+ * <p>
  * All the configuration is loaded from configuration properties prefixed
  * with <code>httpfs.</code>.
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92916ae4/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/lang/XException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/lang/XException.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/lang/XException.java
index f974159..467ca23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/lang/XException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/lang/XException.java
@@ -61,7 +61,7 @@ public class XException extends Exception {
 
   /**
    * Creates an XException using another XException as cause.
-   * <p/>
+   * <p>
    * The error code and error message are extracted from the cause.
    *
    * @param cause exception cause.
@@ -95,7 +95,7 @@ public class XException extends Exception {
 
   /**
    * Creates a message using a error message template and arguments.
-   * <p/>
+   * <p>
    * The template must be in JDK <code>MessageFormat</code> syntax
    * (using {#} positional parameters).
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92916ae4/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/BaseService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/BaseService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/BaseService.java
index 088f900..9d9ce7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/BaseService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/BaseService.java
@@ -44,11 +44,11 @@ public abstract class BaseService implements Service {
 
   /**
    * Initializes the service.
-   * <p/>
+   * <p>
    * It collects all service properties (properties having the
    * <code>#SERVER#.#SERVICE#.</code> prefix). The property names are then
    * trimmed from the <code>#SERVER#.#SERVICE#.</code> prefix.
-   * <p/>
+   * <p>
    * After collecting  the service properties it delegates to the
    * {@link #init()} method.
    *
@@ -75,7 +75,7 @@ public abstract class BaseService implements Service {
   /**
    * Post initializes the service. This method is called by the
    * {@link Server} after all services of the server have been initialized.
-   * <p/>
+   * <p>
    * This method does a NOP.
    *
    * @throws ServiceException thrown if the service could not be
@@ -88,7 +88,7 @@ public abstract class BaseService implements Service {
   /**
    * Destroy the services.  This method is called once, when the
    * {@link Server} owning the service is being destroyed.
-   * <p/>
+   * <p>
    * This method does a NOP.
    */
   @Override
@@ -98,7 +98,7 @@ public abstract class BaseService implements Service {
   /**
    * Returns the service dependencies of this service. The service will be
    * instantiated only if all the service dependencies are already initialized.
-   * <p/>
+   * <p>
    * This method returns an empty array (size 0)
    *
    * @return an empty array (size 0).
@@ -110,7 +110,7 @@ public abstract class BaseService implements Service {
 
   /**
    * Notification callback when the server changes its status.
-   * <p/>
+   * <p>
    * This method returns an empty array (size 0)
    *
    * @param oldStatus old server status.
@@ -154,7 +154,7 @@ public abstract class BaseService implements Service {
   /**
    * Returns the service configuration properties. Property
    * names are trimmed off from its prefix.
-   * <p/>
+   * <p>
    * The sevice configuration properties are all properties
    * with names starting with <code>#SERVER#.#SERVICE#.</code>
    * in the server configuration.
@@ -169,7 +169,7 @@ public abstract class BaseService implements Service {
 
   /**
    * Initializes the server.
-   * <p/>
+   * <p>
    * This method is called by {@link #init(Server)} after all service properties
    * (properties prefixed with
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92916ae4/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java
index d083831..5c1bb4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java
@@ -42,40 +42,39 @@ import java.util.Properties;
 /**
  * A Server class provides standard configuration, logging and {@link Service}
  * lifecyle management.
- * <p/>
+ * <p>
  * A Server normally has a home directory, a configuration directory, a temp
  * directory and logs directory.
- * <p/>
+ * <p>
  * The Server configuration is loaded from 2 overlapped files,
  * <code>#SERVER#-default.xml</code> and <code>#SERVER#-site.xml</code>. The
  * default file is loaded from the classpath, the site file is laoded from the
  * configuration directory.
- * <p/>
+ * <p>
  * The Server collects all configuration properties prefixed with
  * <code>#SERVER#</code>. The property names are then trimmed from the
  * <code>#SERVER#</code> prefix.
- * <p/>
+ * <p>
  * The Server log configuration is loaded from the
  * <code>#SERVICE#-log4j.properties</code> file in the configuration directory.
- * <p/>
+ * <p>
  * The lifecycle of server is defined in by {@link Server.Status} enum.
  * When a server is create, its status is UNDEF, when being initialized it is
  * BOOTING, once initialization is complete by default transitions to NORMAL.
  * The <code>#SERVER#.startup.status</code> configuration property can be used
  * to specify a different startup status (NORMAL, ADMIN or HALTED).
- * <p/>
+ * <p>
  * Services classes are defined in the <code>#SERVER#.services</code> and
  * <code>#SERVER#.services.ext</code> properties. They are loaded in order
  * (services first, then services.ext).
- * <p/>
+ * <p>
  * Before initializing the services, they are traversed and duplicate service
  * interface are removed from the service list. The last service using a given
  * interface wins (this enables a simple override mechanism).
- * <p/>
+ * <p>
  * After the services have been resoloved by interface de-duplication they are
  * initialized in order. Once all services are initialized they are
  * post-initialized (this enables late/conditional service bindings).
- * <p/>
  */
 @InterfaceAudience.Private
 public class Server {
@@ -152,7 +151,7 @@ public class Server {
 
   /**
    * Creates a server instance.
-   * <p/>
+   * <p>
    * The config, log and temp directories are all under the specified home directory.
    *
    * @param name server name.
@@ -177,9 +176,9 @@ public class Server {
 
   /**
    * Creates a server instance.
-   * <p/>
+   * <p>
    * The config, log and temp directories are all under the specified home directory.
-   * <p/>
+   * <p>
    * It uses the provided configuration instead loading it from the config dir.
    *
    * @param name server name.
@@ -192,7 +191,7 @@ public class Server {
 
   /**
    * Creates a server instance.
-   * <p/>
+   * <p>
    * It uses the provided configuration instead loading it from the config dir.
    *
    * @param name server name.
@@ -250,9 +249,9 @@ public class Server {
 
   /**
    * Sets a new server status.
-   * <p/>
+   * <p>
    * The status must be settable.
-   * <p/>
+   * <p>
    * All services will be notified o the status change via the
    * {@link Service#serverStatusChange(Server.Status, Server.Status)} method. If a service
    * throws an exception during the notification, the server will be destroyed.
@@ -299,7 +298,7 @@ public class Server {
   /**
    * Convenience method that returns a resource as inputstream from the
    * classpath.
-   * <p/>
+   * <p>
    * It first attempts to use the Thread's context classloader and if not
    * set it uses the <code>ClassUtils</code> classloader.
    *
@@ -319,7 +318,7 @@ public class Server {
 
   /**
    * Initializes the Server.
-   * <p/>
+   * <p>
    * The initialization steps are:
    * <ul>
    * <li>It verifies the service home and temp directories exist</li>
@@ -335,6 +334,7 @@ public class Server {
    * <li>Initializes the services</li>
    * <li>Post-initializes the services</li>
    * <li>Sets the server startup status</li>
+   * </ul>
    *
    * @throws ServerException thrown if the server could not be initialized.
    */
@@ -625,7 +625,7 @@ public class Server {
 
   /**
    * Destroys the server.
-   * <p/>
+   * <p>
    * All services are destroyed in reverse order of initialization, then the
    * Log4j framework is shutdown.
    */
@@ -651,7 +651,7 @@ public class Server {
 
   /**
    * Returns the server prefix for server configuration properties.
-   * <p/>
+   * <p>
    * By default it is the server name.
    *
    * @return the prefix for server configuration properties.
@@ -733,10 +733,10 @@ public class Server {
 
   /**
    * Adds a service programmatically.
-   * <p/>
+   * <p>
    * If a service with the same interface exists, it will be destroyed and
    * removed before the given one is initialized and added.
-   * <p/>
+   * <p>
    * If an exception is thrown the server is destroyed.
    *
    * @param klass service class to add.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92916ae4/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/FileSystemReleaseFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/FileSystemReleaseFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/FileSystemReleaseFilter.java
index 827bcff..cf73979 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/FileSystemReleaseFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/FileSystemReleaseFilter.java
@@ -33,7 +33,7 @@ import java.io.IOException;
 /**
  * The <code>FileSystemReleaseFilter</code> releases back to the
  * {@link FileSystemAccess} service a <code>FileSystem</code> instance.
- * <p/>
+ * <p>
  * This filter is useful in situations where a servlet request
  * is streaming out HDFS data and the corresponding filesystem
  * instance have to be closed after the streaming completes.
@@ -44,7 +44,7 @@ public abstract class FileSystemReleaseFilter implements Filter {
 
   /**
    * Initializes the filter.
-   * <p/>
+   * <p>
    * This implementation is a NOP.
    *
    * @param filterConfig filter configuration.
@@ -83,7 +83,7 @@ public abstract class FileSystemReleaseFilter implements Filter {
 
   /**
    * Destroys the filter.
-   * <p/>
+   * <p>
    * This implementation is a NOP.
    */
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92916ae4/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/HostnameFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/HostnameFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/HostnameFilter.java
index dd395f6..64f4926 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/HostnameFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/HostnameFilter.java
@@ -43,7 +43,7 @@ public class HostnameFilter implements Filter {
 
   /**
    * Initializes the filter.
-   * <p/>
+   * <p>
    * This implementation is a NOP.
    *
    * @param config filter configuration.
@@ -56,7 +56,7 @@ public class HostnameFilter implements Filter {
 
   /**
    * Resolves the requester hostname and delegates the request to the chain.
-   * <p/>
+   * <p>
    * The requester hostname is available via the {@link #get} method.
    *
    * @param request servlet request.
@@ -101,7 +101,7 @@ public class HostnameFilter implements Filter {
 
   /**
    * Destroys the filter.
-   * <p/>
+   * <p>
    * This implementation is a NOP.
    */
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92916ae4/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/MDCFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/MDCFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/MDCFilter.java
index 07b552d..156cf64 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/MDCFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/MDCFilter.java
@@ -33,7 +33,7 @@ import java.security.Principal;
 
 /**
  * Filter that sets request contextual information for the slf4j MDC.
- * <p/>
+ * <p>
  * It sets the following values:
  * <ul>
  * <li>hostname: if the {@link HostnameFilter} is present and configured
@@ -48,7 +48,7 @@ public class MDCFilter implements Filter {
 
   /**
    * Initializes the filter.
-   * <p/>
+   * <p>
    * This implementation is a NOP.
    *
    * @param config filter configuration.
@@ -93,7 +93,7 @@ public class MDCFilter implements Filter {
 
   /**
    * Destroys the filter.
-   * <p/>
+   * <p>
    * This implementation is a NOP.
    */
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92916ae4/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java
index 9b0ea2a..cd16593 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java
@@ -75,21 +75,21 @@ public abstract class ServerWebApp extends Server implements ServletContextListe
   /**
    * Constructor. Subclasses must have a default constructor specifying
    * the server name.
-   * <p/>
+   * <p>
    * The server name is used to resolve the Java System properties that define
    * the server home, config, log and temp directories.
-   * <p/>
+   * <p>
    * The home directory is looked in the Java System property
    * <code>#SERVER_NAME#.home.dir</code>.
-   * <p/>
+   * <p>
    * The config directory is looked in the Java System property
    * <code>#SERVER_NAME#.config.dir</code>, if not defined it resolves to
    * the <code>#SERVER_HOME_DIR#/conf</code> directory.
-   * <p/>
+   * <p>
    * The log directory is looked in the Java System property
    * <code>#SERVER_NAME#.log.dir</code>, if not defined it resolves to
    * the <code>#SERVER_HOME_DIR#/log</code> directory.
-   * <p/>
+   * <p>
    * The temp directory is looked in the Java System property
    * <code>#SERVER_NAME#.temp.dir</code>, if not defined it resolves to
    * the <code>#SERVER_HOME_DIR#/temp</code> directory.
@@ -105,7 +105,7 @@ public abstract class ServerWebApp extends Server implements ServletContextListe
 
   /**
    * Returns the server home directory.
-   * <p/>
+   * <p>
    * It is looked up in the Java System property
    * <code>#SERVER_NAME#.home.dir</code>.
    *
@@ -159,15 +159,15 @@ public abstract class ServerWebApp extends Server implements ServletContextListe
   }
 
   /**
-   * Resolves the host & port InetSocketAddress the web server is listening to.
-   * <p/>
+   * Resolves the host and port InetSocketAddress the web server is listening to.
+   * <p>
    * This implementation looks for the following 2 properties:
    * <ul>
    *   <li>#SERVER_NAME#.http.hostname</li>
    *   <li>#SERVER_NAME#.http.port</li>
    * </ul>
    *
-   * @return the host & port InetSocketAddress the web server is listening to.
+   * @return the host and port InetSocketAddress the web server is listening to.
    * @throws ServerException thrown if any of the above 2 properties is not defined.
    */
   protected InetSocketAddress resolveAuthority() throws ServerException {
@@ -217,7 +217,7 @@ public abstract class ServerWebApp extends Server implements ServletContextListe
 
   /**
    * Sets an alternate hostname:port InetSocketAddress to use.
-   * <p/>
+   * <p>
    * For testing purposes.
    * 
    * @param authority alterante authority.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92916ae4/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/util/Check.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/util/Check.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/util/Check.java
index a398e75..31666e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/util/Check.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/util/Check.java
@@ -26,7 +26,7 @@ import java.util.regex.Pattern;
 
 /**
  * Utility methods to check preconditions.
- * <p/>
+ * <p>
  * Commonly used for method arguments preconditions.
  */
 @InterfaceAudience.Private

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92916ae4/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/util/ConfigurationUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/util/ConfigurationUtils.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/util/ConfigurationUtils.java
index 660eae0..6611dd2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/util/ConfigurationUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/util/ConfigurationUtils.java
@@ -90,7 +90,7 @@ public abstract class ConfigurationUtils {
 
   /**
    * Create a configuration from an InputStream.
-   * <p/>
+   * <p>
    * ERROR canibalized from <code>Configuration.loadResource()</code>.
    *
    * @param is inputstream to read the configuration from.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92916ae4/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java
index 0f16a9b..e0f6200 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java
@@ -26,7 +26,7 @@ import java.util.Map;
 
 /**
  * Class that contains all parsed JAX-RS parameters.
- * <p/>
+ * <p>
  * Instances are created by the {@link ParametersProvider} class.
  */
 @InterfaceAudience.Private
@@ -63,7 +63,7 @@ public class Parameters {
    *
    * @param name parameter name.
    * @param klass class of the parameter, used for value casting.
-   * @return List<V> the values of the parameter.
+   * @return the values of the parameter.
    */
   @SuppressWarnings("unchecked")
   public <V, T extends Param<V>> List<V> getValues(String name, Class<T> klass) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92916ae4/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java
index 98862ed..3934d7c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 
 /**
  * This class is used to allow the initial registration of the NFS gateway with
- * the system portmap daemon to come from a privileged (< 1024) port. This is
+ * the system portmap daemon to come from a privileged (&lt; 1024) port. This is
  * necessary on certain operating systems to work around this bug in rpcbind:
  * 
  * Red Hat: https://bugzilla.redhat.com/show_bug.cgi?id=731542

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92916ae4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9f3f9ee..1db358f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -564,6 +564,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7502. Fix findbugs warning in hdfs-nfs project.
     (Brandon Li via wheat9)
 
+    HDFS-5578. [JDK8] Fix Javadoc errors caused by incorrect or illegal tags
+    in doc comments. (Andrew Purtell via wheat9)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES


[34/50] [abbrv] hadoop git commit: HDFS-7497. Inconsistent report of decommissioning DataNodes between dfsadmin and NameNode webui. Contributed by Yongjun Zhang.

Posted by ka...@apache.org.
HDFS-7497. Inconsistent report of decommissioning DataNodes between dfsadmin and NameNode webui. Contributed by Yongjun Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b437f5ee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b437f5ee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b437f5ee

Branch: refs/heads/YARN-2139
Commit: b437f5eef40874287d4fbf9d8e43f1a857b5621f
Parents: 5b9fced
Author: Andrew Wang <wa...@apache.org>
Authored: Thu Dec 11 18:12:47 2014 -0800
Committer: Andrew Wang <wa...@apache.org>
Committed: Thu Dec 11 18:12:47 2014 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt             |  3 +++
 .../hdfs/server/blockmanagement/DatanodeManager.java    | 12 ++----------
 .../hdfs/server/namenode/TestDecommissioningStatus.java |  7 ++++++-
 3 files changed, 11 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b437f5ee/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5e75424..5977ed7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -576,6 +576,9 @@ Release 2.7.0 - UNRELEASED
 
     HDFS-7515. Fix new findbugs warnings in hadoop-hdfs. (wheat9)
 
+    HDFS-7497. Inconsistent report of decommissioning DataNodes between
+    dfsadmin and NameNode webui. (Yongjun Zhang via wang)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b437f5ee/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 356a4a3..0ff469a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1112,16 +1112,8 @@ public class DatanodeManager {
   public List<DatanodeDescriptor> getDecommissioningNodes() {
     // There is no need to take namesystem reader lock as
     // getDatanodeListForReport will synchronize on datanodeMap
-    final List<DatanodeDescriptor> decommissioningNodes
-        = new ArrayList<DatanodeDescriptor>();
-    final List<DatanodeDescriptor> results = getDatanodeListForReport(
-        DatanodeReportType.LIVE);
-    for(DatanodeDescriptor node : results) {
-      if (node.isDecommissionInProgress()) {
-        decommissioningNodes.add(node);
-      }
-    }
-    return decommissioningNodes;
+    // A decommissioning DN may be "alive" or "dead".
+    return getDatanodeListForReport(DatanodeReportType.DECOMMISSIONING);
   }
   
   /* Getter and Setter for stale DataNodes related attributes */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b437f5ee/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
index 28f5eb4..a9aba86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
@@ -239,10 +239,10 @@ public class TestDecommissioningStatus {
       System.setOut(oldOut);
     }
   }
+
   /**
    * Tests Decommissioning Status in DFS.
    */
-
   @Test
   public void testDecommissionStatus() throws IOException, InterruptedException {
     InetSocketAddress addr = new InetSocketAddress("localhost", cluster
@@ -351,6 +351,11 @@ public class TestDecommissioningStatus {
     assertTrue("the node should be DECOMMISSION_IN_PROGRESSS",
         dead.get(0).isDecommissionInProgress());
 
+    // Check DatanodeManager#getDecommissionNodes, make sure it returns
+    // the node as decommissioning, even if it's dead
+    List<DatanodeDescriptor> decomlist = dm.getDecommissioningNodes();
+    assertTrue("The node should be be decommissioning", decomlist.size() == 1);
+    
     // Delete the under-replicated file, which should let the 
     // DECOMMISSION_IN_PROGRESS node become DECOMMISSIONED
     cleanupFile(fileSys, f);
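
The DatanodeManager change above is the whole fix: the old code built the "decommissioning" list by filtering the LIVE report on isDecommissionInProgress(), so a DataNode that died while decommissioning was counted in one view but not the other; the new code asks for the DECOMMISSIONING report directly, which keys on decommission state regardless of liveness, and the added test pins down that a dead decommissioning node still shows up. A self-contained sketch of that selection logic follows; Node is a stand-in type, not the real DatanodeDescriptor, and the two methods only mirror the before/after filtering, not the actual report API.

import java.util.ArrayList;
import java.util.List;

public class DecommissioningSelectionSketch {

  /** Stand-in for DatanodeDescriptor: just the two flags that matter here. */
  static class Node {
    final boolean alive;
    final boolean decommissionInProgress;
    Node(boolean alive, boolean decommissionInProgress) {
      this.alive = alive;
      this.decommissionInProgress = decommissionInProgress;
    }
  }

  // Old behaviour: start from live nodes only, so a node that dies while
  // decommissioning silently drops out of the list.
  static List<Node> oldStyle(List<Node> all) {
    List<Node> result = new ArrayList<Node>();
    for (Node n : all) {
      if (n.alive && n.decommissionInProgress) {
        result.add(n);
      }
    }
    return result;
  }

  // New behaviour: select on decommission state alone, alive or dead.
  static List<Node> newStyle(List<Node> all) {
    List<Node> result = new ArrayList<Node>();
    for (Node n : all) {
      if (n.decommissionInProgress) {
        result.add(n);
      }
    }
    return result;
  }

  public static void main(String[] args) {
    List<Node> cluster = new ArrayList<Node>();
    cluster.add(new Node(true, false));   // healthy node
    cluster.add(new Node(false, true));   // died while decommissioning
    System.out.println("old count: " + oldStyle(cluster).size()); // 0
    System.out.println("new count: " + newStyle(cluster).size()); // 1
  }
}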


[42/50] [abbrv] hadoop git commit: HADOOP-11238. Update the NameNode's Group Cache in the background when possible (Chris Li via Colin P. McCabe)

Posted by ka...@apache.org.
HADOOP-11238. Update the NameNode's Group Cache in the background when possible (Chris Li via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e5a69251
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e5a69251
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e5a69251

Branch: refs/heads/YARN-2139
Commit: e5a692519956aefb3a540ed0137b63cf598ac10d
Parents: c78e3a7
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Fri Dec 12 16:30:52 2014 -0800
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Fri Dec 12 16:30:52 2014 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../java/org/apache/hadoop/security/Groups.java | 193 ++++++++-------
 .../hadoop/security/TestGroupsCaching.java      | 236 +++++++++++++++++++
 3 files changed, 342 insertions(+), 90 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5a69251/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 45f226f..1e59395 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -437,6 +437,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11323. WritableComparator#compare keeps reference to byte array.
     (Wilfred Spiegelenburg via wang)
 
+    HADOOP-11238. Update the NameNode's Group Cache in the background when
+    possible (Chris Li via Colin P. McCabe)
+
   BUG FIXES
 
     HADOOP-11236. NFS: Fix javadoc warning in RpcProgram.java (Abhiraj Butala via harsh)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5a69251/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
index c500419..f3c5094 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
@@ -24,7 +24,13 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
 
+import com.google.common.base.Ticker;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -52,10 +58,11 @@ public class Groups {
   private static final Log LOG = LogFactory.getLog(Groups.class);
   
   private final GroupMappingServiceProvider impl;
-  
-  private final Map<String, CachedGroups> userToGroupsMap = 
-    new ConcurrentHashMap<String, CachedGroups>();
-  private final Map<String, List<String>> staticUserToGroupsMap = 
+
+  private final LoadingCache<String, List<String>> cache;
+  private final ConcurrentHashMap<String, Long> negativeCacheMask =
+    new ConcurrentHashMap<String, Long>();
+  private final Map<String, List<String>> staticUserToGroupsMap =
       new HashMap<String, List<String>>();
   private final long cacheTimeout;
   private final long negativeCacheTimeout;
@@ -66,7 +73,7 @@ public class Groups {
     this(conf, new Timer());
   }
 
-  public Groups(Configuration conf, Timer timer) {
+  public Groups(Configuration conf, final Timer timer) {
     impl = 
       ReflectionUtils.newInstance(
           conf.getClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, 
@@ -86,6 +93,11 @@ public class Groups {
     parseStaticMapping(conf);
 
     this.timer = timer;
+    this.cache = CacheBuilder.newBuilder()
+      .refreshAfterWrite(cacheTimeout, TimeUnit.MILLISECONDS)
+      .ticker(new TimerToTickerAdapter(timer))
+      .expireAfterWrite(10 * cacheTimeout, TimeUnit.MILLISECONDS)
+      .build(new GroupCacheLoader());
 
     if(LOG.isDebugEnabled())
       LOG.debug("Group mapping impl=" + impl.getClass().getName() + 
@@ -123,78 +135,112 @@ public class Groups {
     }
   }
 
-  /**
-   * Determine whether the CachedGroups is expired.
-   * @param groups cached groups for one user.
-   * @return true if groups is expired from useToGroupsMap.
-   */
-  private boolean hasExpired(CachedGroups groups, long startMs) {
-    if (groups == null) {
-      return true;
-    }
-    long timeout = cacheTimeout;
-    if (isNegativeCacheEnabled() && groups.getGroups().isEmpty()) {
-      // This CachedGroups is in the negative cache, thus it should expire
-      // sooner.
-      timeout = negativeCacheTimeout;
-    }
-    return groups.getTimestamp() + timeout <= startMs;
-  }
-  
   private boolean isNegativeCacheEnabled() {
     return negativeCacheTimeout > 0;
   }
 
+  private IOException noGroupsForUser(String user) {
+    return new IOException("No groups found for user " + user);
+  }
+
   /**
    * Get the group memberships of a given user.
+   * If the user's group is not cached, this method may block.
    * @param user User's name
    * @return the group memberships of the user
-   * @throws IOException
+   * @throws IOException if user does not exist
    */
-  public List<String> getGroups(String user) throws IOException {
+  public List<String> getGroups(final String user) throws IOException {
     // No need to lookup for groups of static users
     List<String> staticMapping = staticUserToGroupsMap.get(user);
     if (staticMapping != null) {
       return staticMapping;
     }
-    // Return cached value if available
-    CachedGroups groups = userToGroupsMap.get(user);
-    long startMs = timer.monotonicNow();
-    if (!hasExpired(groups, startMs)) {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Returning cached groups for '" + user + "'");
-      }
-      if (groups.getGroups().isEmpty()) {
-        // Even with enabling negative cache, getGroups() has the same behavior
-        // that throws IOException if the groups for the user is empty.
-        throw new IOException("No groups found for user " + user);
+
+    // Check the negative cache first
+    if (isNegativeCacheEnabled()) {
+      Long expirationTime = negativeCacheMask.get(user);
+      if (expirationTime != null) {
+        if (timer.monotonicNow() < expirationTime) {
+          throw noGroupsForUser(user);
+        } else {
+          negativeCacheMask.remove(user, expirationTime);
+        }
       }
-      return groups.getGroups();
     }
 
-    // Create and cache user's groups
-    List<String> groupList = impl.getGroups(user);
-    long endMs = timer.monotonicNow();
-    long deltaMs = endMs - startMs ;
-    UserGroupInformation.metrics.addGetGroups(deltaMs);
-    if (deltaMs > warningDeltaMs) {
-      LOG.warn("Potential performance problem: getGroups(user=" + user +") " +
-          "took " + deltaMs + " milliseconds.");
+    try {
+      return cache.get(user);
+    } catch (ExecutionException e) {
+      throw (IOException)e.getCause();
     }
-    groups = new CachedGroups(groupList, endMs);
-    if (groups.getGroups().isEmpty()) {
-      if (isNegativeCacheEnabled()) {
-        userToGroupsMap.put(user, groups);
+  }
+
+  /**
+   * Convert millisecond times from hadoop's timer to guava's nanosecond ticker.
+   */
+  private static class TimerToTickerAdapter extends Ticker {
+    private Timer timer;
+
+    public TimerToTickerAdapter(Timer timer) {
+      this.timer = timer;
+    }
+
+    @Override
+    public long read() {
+      final long NANOSECONDS_PER_MS = 1000000;
+      return timer.monotonicNow() * NANOSECONDS_PER_MS;
+    }
+  }
+
+  /**
+   * Deals with loading data into the cache.
+   */
+  private class GroupCacheLoader extends CacheLoader<String, List<String>> {
+    /**
+     * This method will block if a cache entry doesn't exist, and
+     * any subsequent requests for the same user will wait on this
+     * request to return. If a user already exists in the cache,
+     * this will be run in the background.
+     * @param user key of cache
+     * @return List of groups belonging to user
+     * @throws IOException to prevent caching negative entries
+     */
+    @Override
+    public List<String> load(String user) throws Exception {
+      List<String> groups = fetchGroupList(user);
+
+      if (groups.isEmpty()) {
+        if (isNegativeCacheEnabled()) {
+          long expirationTime = timer.monotonicNow() + negativeCacheTimeout;
+          negativeCacheMask.put(user, expirationTime);
+        }
+
+        // We throw here to prevent Cache from retaining an empty group
+        throw noGroupsForUser(user);
       }
-      throw new IOException("No groups found for user " + user);
+
+      return groups;
     }
-    userToGroupsMap.put(user, groups);
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Returning fetched groups for '" + user + "'");
+
+    /**
+     * Queries impl for groups belonging to the user. This could involve I/O and take awhile.
+     */
+    private List<String> fetchGroupList(String user) throws IOException {
+      long startMs = timer.monotonicNow();
+      List<String> groupList = impl.getGroups(user);
+      long endMs = timer.monotonicNow();
+      long deltaMs = endMs - startMs ;
+      UserGroupInformation.metrics.addGetGroups(deltaMs);
+      if (deltaMs > warningDeltaMs) {
+        LOG.warn("Potential performance problem: getGroups(user=" + user +") " +
+          "took " + deltaMs + " milliseconds.");
+      }
+
+      return groupList;
     }
-    return groups.getGroups();
   }
-  
+
   /**
    * Refresh all user-to-groups mappings.
    */
@@ -205,7 +251,8 @@ public class Groups {
     } catch (IOException e) {
       LOG.warn("Error refreshing groups cache", e);
     }
-    userToGroupsMap.clear();
+    cache.invalidateAll();
+    negativeCacheMask.clear();
   }
 
   /**
@@ -221,40 +268,6 @@ public class Groups {
     }
   }
 
-  /**
-   * Class to hold the cached groups
-   */
-  private static class CachedGroups {
-    final long timestamp;
-    final List<String> groups;
-    
-    /**
-     * Create and initialize group cache
-     */
-    CachedGroups(List<String> groups, long timestamp) {
-      this.groups = groups;
-      this.timestamp = timestamp;
-    }
-
-    /**
-     * Returns time of last cache update
-     *
-     * @return time of last cache update
-     */
-    public long getTimestamp() {
-      return timestamp;
-    }
-
-    /**
-     * Get list of cached groups
-     *
-     * @return cached groups
-     */
-    public List<String> getGroups() {
-      return groups;
-    }
-  }
-
   private static Groups GROUPS = null;
   
   /**
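
The core of this change is the move from the hand-rolled CachedGroups map to a Guava LoadingCache: refreshAfterWrite(cacheTimeout) lets the first lookup after the timeout reload the entry while other callers keep getting the previously cached groups, expireAfterWrite(10 * cacheTimeout) is the hard bound after which a lookup blocks on a fresh load, and negative results are kept out of the cache by throwing from load() and tracking them in the separate negativeCacheMask. Below is a minimal self-contained sketch of that pattern; it is not the Hadoop class, fetchGroupsFromBackend and the timeout values are placeholders, and the negative-cache and Ticker wiring are omitted.

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

public class RefreshingGroupCacheSketch {

  private final LoadingCache<String, List<String>> cache =
      CacheBuilder.newBuilder()
          // After 5 minutes the next lookup triggers a reload; with the default
          // reload() only the triggering thread pays for it, while other callers
          // keep receiving the old value until the reload completes.
          .refreshAfterWrite(5, TimeUnit.MINUTES)
          // Hard expiry: past this point the entry is dropped and a lookup
          // blocks until load() returns.
          .expireAfterWrite(50, TimeUnit.MINUTES)
          .build(new CacheLoader<String, List<String>>() {
            @Override
            public List<String> load(String user) throws IOException {
              return fetchGroupsFromBackend(user);
            }
          });

  public List<String> getGroups(String user) throws IOException {
    try {
      return cache.get(user);
    } catch (ExecutionException e) {
      // load() only throws IOException, so the cause can be rethrown as such.
      throw (IOException) e.getCause();
    }
  }

  /** Placeholder for the real GroupMappingServiceProvider lookup. */
  private List<String> fetchGroupsFromBackend(String user) throws IOException {
    return Arrays.asList(user, "users");
  }
}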

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5a69251/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
index a814b0d..89e5b2d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
@@ -51,6 +51,9 @@ public class TestGroupsCaching {
 
   @Before
   public void setup() {
+    FakeGroupMapping.resetRequestCount();
+    ExceptionalGroupMapping.resetRequestCount();
+
     conf = new Configuration();
     conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
       FakeGroupMapping.class,
@@ -61,16 +64,32 @@ public class TestGroupsCaching {
     // any to n mapping
     private static Set<String> allGroups = new HashSet<String>();
     private static Set<String> blackList = new HashSet<String>();
+    private static int requestCount = 0;
+    private static long getGroupsDelayMs = 0;
 
     @Override
     public List<String> getGroups(String user) throws IOException {
       LOG.info("Getting groups for " + user);
+      requestCount++;
+
+      delayIfNecessary();
+
       if (blackList.contains(user)) {
         return new LinkedList<String>();
       }
       return new LinkedList<String>(allGroups);
     }
 
+    private void delayIfNecessary() {
+      if (getGroupsDelayMs > 0) {
+        try {
+          Thread.sleep(getGroupsDelayMs);
+        } catch (InterruptedException e) {
+          throw new RuntimeException(e);
+        }
+      }
+    }
+
     @Override
     public void cacheGroupsRefresh() throws IOException {
       LOG.info("Cache is being refreshed.");
@@ -93,6 +112,36 @@ public class TestGroupsCaching {
       LOG.info("Adding " + user + " to the blacklist");
       blackList.add(user);
     }
+
+    public static int getRequestCount() {
+      return requestCount;
+    }
+
+    public static void resetRequestCount() {
+      requestCount = 0;
+    }
+
+    public static void setGetGroupsDelayMs(long delayMs) {
+      getGroupsDelayMs = delayMs;
+    }
+  }
+
+  public static class ExceptionalGroupMapping extends ShellBasedUnixGroupsMapping {
+    private static int requestCount = 0;
+
+    @Override
+    public List<String> getGroups(String user) throws IOException {
+      requestCount++;
+      throw new IOException("For test");
+    }
+
+    public static int getRequestCount() {
+      return requestCount;
+    }
+
+    public static void resetRequestCount() {
+      requestCount = 0;
+    }
   }
 
   @Test
@@ -219,4 +268,191 @@ public class TestGroupsCaching {
     // groups for the user is fetched.
     assertEquals(Arrays.asList(myGroups), groups.getGroups(user));
   }
+
+  @Test
+  public void testCachePreventsImplRequest() throws Exception {
+    // Disable negative cache.
+    conf.setLong(
+      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 0);
+    Groups groups = new Groups(conf);
+    groups.cacheGroupsAdd(Arrays.asList(myGroups));
+    groups.refresh();
+    FakeGroupMapping.clearBlackList();
+
+    assertEquals(0, FakeGroupMapping.getRequestCount());
+
+    // First call hits the wire
+    assertTrue(groups.getGroups("me").size() == 2);
+    assertEquals(1, FakeGroupMapping.getRequestCount());
+
+    // Second call hits the cache
+    assertTrue(groups.getGroups("me").size() == 2);
+    assertEquals(1, FakeGroupMapping.getRequestCount());
+  }
+
+  @Test
+  public void testExceptionsFromImplNotCachedInNegativeCache() {
+    conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
+      ExceptionalGroupMapping.class,
+      ShellBasedUnixGroupsMapping.class);
+    conf.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 10000);
+    Groups groups = new Groups(conf);
+    groups.cacheGroupsAdd(Arrays.asList(myGroups));
+    groups.refresh();
+
+    assertEquals(0, ExceptionalGroupMapping.getRequestCount());
+
+    // First call should hit the wire
+    try {
+      groups.getGroups("anything");
+      fail("Should have thrown");
+    } catch (IOException e) {
+      // okay
+    }
+    assertEquals(1, ExceptionalGroupMapping.getRequestCount());
+
+    // Second call should hit the wire (no negative caching)
+    try {
+      groups.getGroups("anything");
+      fail("Should have thrown");
+    } catch (IOException e) {
+      // okay
+    }
+    assertEquals(2, ExceptionalGroupMapping.getRequestCount());
+  }
+
+  @Test
+  public void testOnlyOneRequestWhenNoEntryIsCached() throws Exception {
+    // Disable negative cache.
+    conf.setLong(
+      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 0);
+    final Groups groups = new Groups(conf);
+    groups.cacheGroupsAdd(Arrays.asList(myGroups));
+    groups.refresh();
+    FakeGroupMapping.clearBlackList();
+    FakeGroupMapping.setGetGroupsDelayMs(100);
+
+    ArrayList<Thread> threads = new ArrayList<Thread>();
+    for (int i = 0; i < 10; i++) {
+      threads.add(new Thread() {
+        public void run() {
+          try {
+            assertEquals(2, groups.getGroups("me").size());
+          } catch (IOException e) {
+            fail("Should not happen");
+          }
+        }
+      });
+    }
+
+    // We start a bunch of threads that all see no cached value
+    for (Thread t : threads) {
+      t.start();
+    }
+
+    for (Thread t : threads) {
+      t.join();
+    }
+
+    // But only one thread should have made the request
+    assertEquals(1, FakeGroupMapping.getRequestCount());
+  }
+
+  @Test
+  public void testOnlyOneRequestWhenExpiredEntryExists() throws Exception {
+    conf.setLong(
+      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 1);
+    FakeTimer timer = new FakeTimer();
+    final Groups groups = new Groups(conf, timer);
+    groups.cacheGroupsAdd(Arrays.asList(myGroups));
+    groups.refresh();
+    FakeGroupMapping.clearBlackList();
+    FakeGroupMapping.setGetGroupsDelayMs(100);
+
+    // We make an initial request to populate the cache
+    groups.getGroups("me");
+    int startingRequestCount = FakeGroupMapping.getRequestCount();
+
+    // Then expire that entry
+    timer.advance(400 * 1000);
+    Thread.sleep(100);
+
+    ArrayList<Thread> threads = new ArrayList<Thread>();
+    for (int i = 0; i < 10; i++) {
+      threads.add(new Thread() {
+        public void run() {
+          try {
+            assertEquals(2, groups.getGroups("me").size());
+          } catch (IOException e) {
+            fail("Should not happen");
+          }
+        }
+      });
+    }
+
+    // We start a bunch of threads that all see the cached value
+    for (Thread t : threads) {
+      t.start();
+    }
+
+    for (Thread t : threads) {
+      t.join();
+    }
+
+    // Only one extra request is made
+    assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
+  }
+
+  @Test
+  public void testCacheEntriesExpire() throws Exception {
+    conf.setLong(
+      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 1);
+    FakeTimer timer = new FakeTimer();
+    final Groups groups = new Groups(conf, timer);
+    groups.cacheGroupsAdd(Arrays.asList(myGroups));
+    groups.refresh();
+    FakeGroupMapping.clearBlackList();
+
+    // We make an entry
+    groups.getGroups("me");
+    int startingRequestCount = FakeGroupMapping.getRequestCount();
+
+    timer.advance(20 * 1000);
+
+    // Cache entry has expired so it results in a new fetch
+    groups.getGroups("me");
+    assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
+  }
+
+  @Test
+  public void testNegativeCacheClearedOnRefresh() throws Exception {
+    conf.setLong(
+      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 100);
+    final Groups groups = new Groups(conf);
+    groups.cacheGroupsAdd(Arrays.asList(myGroups));
+    groups.refresh();
+    FakeGroupMapping.clearBlackList();
+    FakeGroupMapping.addToBlackList("dne");
+
+    try {
+      groups.getGroups("dne");
+      fail("Should have failed to find this group");
+    } catch (IOException e) {
+      // pass
+    }
+
+    int startingRequestCount = FakeGroupMapping.getRequestCount();
+
+    groups.refresh();
+    FakeGroupMapping.addToBlackList("dne");
+
+    try {
+      List<String> g = groups.getGroups("dne");
+      fail("Should have failed to find this group");
+    } catch (IOException e) {
+      // pass
+    }
+
+    assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
+  }
 }

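The expiry tests above avoid real sleeps by driving a FakeTimer by hand. The general idea, an injected clock that the test advances, is sketched below with hypothetical names (ManualClock, ExpiringEntry); this is not the Hadoop FakeTimer API, just the same pattern in miniature:

    import java.util.concurrent.TimeUnit;

    public class ManualClockDemo {
      /** Hypothetical monotonic clock that tests can advance by hand. */
      static class ManualClock {
        private long nowMs = 0;
        long monotonicNowMs()      { return nowMs; }
        void advanceMs(long delta) { nowMs += delta; }
      }

      /** A cached value that knows when it should expire. */
      static class ExpiringEntry<T> {
        final T value;
        final long expiresAtMs;
        ExpiringEntry(T value, long ttlMs, ManualClock clock) {
          this.value = value;
          this.expiresAtMs = clock.monotonicNowMs() + ttlMs;
        }
        boolean isExpired(ManualClock clock) {
          return clock.monotonicNowMs() >= expiresAtMs;
        }
      }

      public static void main(String[] args) {
        ManualClock clock = new ManualClock();
        ExpiringEntry<String> entry =
            new ExpiringEntry<>("groups-for-me", TimeUnit.SECONDS.toMillis(1), clock);

        System.out.println(entry.isExpired(clock));   // false: clock has not moved
        clock.advanceMs(TimeUnit.SECONDS.toMillis(20));
        System.out.println(entry.isExpired(clock));   // true: 20s past a 1s TTL
      }
    }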

[33/50] [abbrv] hadoop git commit: HADOOP-11389. Clean up byte to string encoding issues in hadoop-common. Contributed by Haohui Mai.

Posted by ka...@apache.org.
HADOOP-11389. Clean up byte to string encoding issues in hadoop-common. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b9fcedb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b9fcedb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b9fcedb

Branch: refs/heads/YARN-2139
Commit: 5b9fcedb4d116d91d70aaad6cbf59093eeee36df
Parents: f6f2a3f
Author: Haohui Mai <wh...@apache.org>
Authored: Thu Dec 11 16:41:30 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Thu Dec 11 16:42:38 2014 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt  |  3 +++
 .../org/apache/hadoop/conf/Configuration.java    |  3 ++-
 .../apache/hadoop/crypto/key/KeyProvider.java    |  6 ++++--
 .../hadoop/crypto/key/kms/KMSClientProvider.java |  3 ++-
 .../java/org/apache/hadoop/fs/shell/Display.java |  8 +++++---
 .../java/org/apache/hadoop/ha/StreamPumper.java  |  4 +++-
 .../java/org/apache/hadoop/http/HtmlQuoting.java | 19 +++++++++++--------
 .../java/org/apache/hadoop/http/HttpServer2.java | 11 +++++++----
 .../org/apache/hadoop/io/DefaultStringifier.java |  3 ++-
 .../java/org/apache/hadoop/io/SequenceFile.java  |  4 +++-
 .../apache/hadoop/io/compress/BZip2Codec.java    |  5 +++--
 .../apache/hadoop/io/file/tfile/TFileDumper.java |  3 ++-
 .../java/org/apache/hadoop/ipc/RpcConstants.java |  4 +++-
 .../main/java/org/apache/hadoop/ipc/Server.java  |  5 +++--
 .../java/org/apache/hadoop/log/LogLevel.java     |  3 ++-
 .../hadoop/metrics/ganglia/GangliaContext.java   |  3 ++-
 .../hadoop/metrics2/impl/MetricsConfig.java      |  8 ++++----
 .../apache/hadoop/metrics2/sink/FileSink.java    | 12 ++++++------
 .../hadoop/metrics2/sink/GraphiteSink.java       |  4 +++-
 .../sink/ganglia/AbstractGangliaSink.java        |  3 ++-
 .../java/org/apache/hadoop/net/TableMapping.java | 19 +++++++------------
 .../AuthenticationFilterInitializer.java         | 11 +++++++----
 .../org/apache/hadoop/security/Credentials.java  |  4 +++-
 .../hadoop/security/LdapGroupsMapping.java       | 10 +++++++---
 .../apache/hadoop/security/SaslRpcServer.java    |  8 +++++---
 .../hadoop/security/ShellBasedIdMapping.java     |  8 ++++++--
 .../security/alias/JavaKeyStoreProvider.java     |  3 ++-
 .../hadoop/security/alias/UserProvider.java      |  4 +++-
 .../apache/hadoop/tracing/SpanReceiverHost.java  |  4 +++-
 .../org/apache/hadoop/tracing/TraceAdmin.java    |  9 ++++++---
 .../org/apache/hadoop/util/FileBasedIPList.java  |  8 +++++++-
 .../org/apache/hadoop/util/HostsFileReader.java  |  4 +++-
 .../org/apache/hadoop/util/ReflectionUtils.java  | 14 ++++++++++----
 .../main/java/org/apache/hadoop/util/Shell.java  |  9 +++++----
 .../hadoop/hdfs/TestDataTransferKeepalive.java   |  4 +---
 35 files changed, 147 insertions(+), 86 deletions(-)
----------------------------------------------------------------------

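Every hunk in this patch applies the same rule: a byte/char conversion that silently used the JVM's default charset (String.getBytes(), new String(byte[]), FileReader, a bare InputStreamReader or PrintStream) now names its charset explicitly, usually UTF-8 via the Guava or commons-io Charsets constants already on the classpath. A tiny stand-alone sketch of the before/after, written against the JDK's StandardCharsets (equivalent for UTF-8) so it runs with no extra dependencies:

    import java.io.BufferedReader;
    import java.io.ByteArrayInputStream;
    import java.io.InputStreamReader;
    import java.nio.charset.StandardCharsets;

    public class CharsetExample {
      public static void main(String[] args) throws Exception {
        String text = "héllo";                       // non-ASCII on purpose

        // Before: the result depends on the file.encoding of the JVM running this.
        byte[] platformBytes = text.getBytes();

        // After: the same bytes on every platform.
        byte[] utf8Bytes = text.getBytes(StandardCharsets.UTF_8);

        // The same rule applies when reading: name the charset of the stream.
        BufferedReader in = new BufferedReader(new InputStreamReader(
            new ByteArrayInputStream(utf8Bytes), StandardCharsets.UTF_8));
        System.out.println(in.readLine());           // prints "héllo" everywhere
        System.out.println(platformBytes.length + " vs " + utf8Bytes.length);
      }
    }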

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index d923b87..45f226f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -574,6 +574,9 @@ Release 2.7.0 - UNRELEASED
 
     HADOOP-11211. mapreduce.job.classloader.system.classes semantics should be
     be order-independent. (Yitong Zhou via gera)
+
+    HADOOP-11389. Clean up byte to string encoding issues in hadoop-common.
+    (wheat9)
     
 Release 2.6.0 - 2014-11-18
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index d2c8052..c71f35a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -67,6 +67,7 @@ import javax.xml.transform.TransformerFactory;
 import javax.xml.transform.dom.DOMSource;
 import javax.xml.transform.stream.StreamResult;
 
+import com.google.common.base.Charsets;
 import org.apache.commons.collections.map.UnmodifiableMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -2263,7 +2264,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
         LOG.info("found resource " + name + " at " + url);
       }
 
-      return new InputStreamReader(url.openStream());
+      return new InputStreamReader(url.openStream(), Charsets.UTF_8);
     } catch (Exception e) {
       return null;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
index dd2d5b9..a0675c2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
@@ -32,6 +32,7 @@ import java.util.Map;
 
 import com.google.gson.stream.JsonReader;
 import com.google.gson.stream.JsonWriter;
+import org.apache.commons.io.Charsets;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -207,7 +208,8 @@ public abstract class KeyProvider {
      */
     protected byte[] serialize() throws IOException {
       ByteArrayOutputStream buffer = new ByteArrayOutputStream();
-      JsonWriter writer = new JsonWriter(new OutputStreamWriter(buffer));
+      JsonWriter writer = new JsonWriter(
+          new OutputStreamWriter(buffer, Charsets.UTF_8));
       try {
         writer.beginObject();
         if (cipher != null) {
@@ -251,7 +253,7 @@ public abstract class KeyProvider {
       String description = null;
       Map<String, String> attributes = null;
       JsonReader reader = new JsonReader(new InputStreamReader
-        (new ByteArrayInputStream(bytes)));
+        (new ByteArrayInputStream(bytes), Charsets.UTF_8));
       try {
         reader.beginObject();
         while (reader.hasNext()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 50dd1ad..0464f55 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.crypto.key.kms;
 
 import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.io.Charsets;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
@@ -209,7 +210,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
   }
 
   private static void writeJson(Map map, OutputStream os) throws IOException {
-    Writer writer = new OutputStreamWriter(os);
+    Writer writer = new OutputStreamWriter(os, Charsets.UTF_8);
     ObjectMapper jsonMapper = new ObjectMapper();
     jsonMapper.writerWithDefaultPrettyPrinter().writeValue(writer, map);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
index ba65cd2..f0d7b8d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
@@ -32,6 +32,7 @@ import org.apache.avro.generic.GenericDatumWriter;
 import org.apache.avro.io.DatumWriter;
 import org.apache.avro.io.EncoderFactory;
 import org.apache.avro.io.JsonEncoder;
+import org.apache.commons.io.Charsets;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -234,10 +235,10 @@ class Display extends FsCommand {
         if (!r.next(key, val)) {
           return -1;
         }
-        byte[] tmp = key.toString().getBytes();
+        byte[] tmp = key.toString().getBytes(Charsets.UTF_8);
         outbuf.write(tmp, 0, tmp.length);
         outbuf.write('\t');
-        tmp = val.toString().getBytes();
+        tmp = val.toString().getBytes(Charsets.UTF_8);
         outbuf.write(tmp, 0, tmp.length);
         outbuf.write('\n');
         inbuf.reset(outbuf.getData(), outbuf.getLength());
@@ -299,7 +300,8 @@ class Display extends FsCommand {
       encoder.flush();
       if (!fileReader.hasNext()) {
         // Write a new line after the last Avro record.
-        output.write(System.getProperty("line.separator").getBytes());
+        output.write(System.getProperty("line.separator")
+                         .getBytes(Charsets.UTF_8));
         output.flush();
       }
       pos = 0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java
index 8bc16af..00c6401 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 
+import org.apache.commons.io.Charsets;
 import org.apache.commons.logging.Log;
 
 /**
@@ -76,7 +77,8 @@ class StreamPumper {
   }
 
   protected void pump() throws IOException {
-    InputStreamReader inputStreamReader = new InputStreamReader(stream);
+    InputStreamReader inputStreamReader = new InputStreamReader(
+        stream, Charsets.UTF_8);
     BufferedReader br = new BufferedReader(inputStreamReader);
     String line = null;
     while ((line = br.readLine()) != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HtmlQuoting.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HtmlQuoting.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HtmlQuoting.java
index 99befee..57acebd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HtmlQuoting.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HtmlQuoting.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.http;
 
+import org.apache.commons.io.Charsets;
+
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
@@ -25,11 +27,11 @@ import java.io.OutputStream;
  * This class is responsible for quoting HTML characters.
  */
 public class HtmlQuoting {
-  private static final byte[] ampBytes = "&amp;".getBytes();
-  private static final byte[] aposBytes = "&apos;".getBytes();
-  private static final byte[] gtBytes = "&gt;".getBytes();
-  private static final byte[] ltBytes = "&lt;".getBytes();
-  private static final byte[] quotBytes = "&quot;".getBytes();
+  private static final byte[] ampBytes = "&amp;".getBytes(Charsets.UTF_8);
+  private static final byte[] aposBytes = "&apos;".getBytes(Charsets.UTF_8);
+  private static final byte[] gtBytes = "&gt;".getBytes(Charsets.UTF_8);
+  private static final byte[] ltBytes = "&lt;".getBytes(Charsets.UTF_8);
+  private static final byte[] quotBytes = "&quot;".getBytes(Charsets.UTF_8);
 
   /**
    * Does the given string need to be quoted?
@@ -63,7 +65,7 @@ public class HtmlQuoting {
     if (str == null) {
       return false;
     }
-    byte[] bytes = str.getBytes();
+    byte[] bytes = str.getBytes(Charsets.UTF_8);
     return needsQuoting(bytes, 0 , bytes.length);
   }
 
@@ -98,15 +100,16 @@ public class HtmlQuoting {
     if (item == null) {
       return null;
     }
-    byte[] bytes = item.getBytes();
+    byte[] bytes = item.getBytes(Charsets.UTF_8);
     if (needsQuoting(bytes, 0, bytes.length)) {
       ByteArrayOutputStream buffer = new ByteArrayOutputStream();
       try {
         quoteHtmlChars(buffer, bytes, 0, bytes.length);
+        return buffer.toString("UTF-8");
       } catch (IOException ioe) {
         // Won't happen, since it is a bytearrayoutputstream
+        return null;
       }
-      return buffer.toString();
     } else {
       return item;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 45b6419..63a32fb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.http;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InterruptedIOException;
+import java.io.OutputStream;
+import java.io.PrintStream;
 import java.io.PrintWriter;
 import java.net.BindException;
 import java.net.InetSocketAddress;
@@ -1065,13 +1067,14 @@ public final class HttpServer2 implements FilterContainer {
     public void doGet(HttpServletRequest request, HttpServletResponse response)
       throws ServletException, IOException {
       if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
-                                                     request, response)) {
+                                                      request, response)) {
         return;
       }
       response.setContentType("text/plain; charset=UTF-8");
-      PrintWriter out = response.getWriter();
-      ReflectionUtils.printThreadInfo(out, "");
-      out.close();
+      try (PrintStream out = new PrintStream(
+          response.getOutputStream(), false, "UTF-8")) {
+        ReflectionUtils.printThreadInfo(out, "");
+      }
       ReflectionUtils.logThreadInfo(LOG, "jsp requested", 1);
     }
   }

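The HttpServer2 hunk above also switches from the container-supplied PrintWriter to a PrintStream bound explicitly to UTF-8 and closed by try-with-resources. A small sketch of that construction, writing to a ByteArrayOutputStream instead of a servlet response so it can run stand-alone:

    import java.io.ByteArrayOutputStream;
    import java.io.PrintStream;

    public class Utf8PrintStreamExample {
      public static void main(String[] args) throws Exception {
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        // autoFlush=false, charset named explicitly; try-with-resources closes and flushes.
        try (PrintStream out = new PrintStream(sink, false, "UTF-8")) {
          out.println("thread dump would go here");
        }
        System.out.println(sink.toString("UTF-8"));
      }
    }
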
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java
index d32d58b..3ba577f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java
@@ -23,6 +23,7 @@ import java.nio.charset.UnsupportedCharsetException;
 import java.util.ArrayList;
 
 import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.io.Charsets;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -90,7 +91,7 @@ public class DefaultStringifier<T> implements Stringifier<T> {
     serializer.serialize(obj);
     byte[] buf = new byte[outBuf.getLength()];
     System.arraycopy(outBuf.getData(), 0, buf, 0, buf.length);
-    return new String(Base64.encodeBase64(buf));
+    return new String(Base64.encodeBase64(buf), Charsets.UTF_8);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
index 4cda107..7a59149 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
@@ -22,6 +22,8 @@ import java.io.*;
 import java.util.*;
 import java.rmi.server.UID;
 import java.security.MessageDigest;
+
+import org.apache.commons.io.Charsets;
 import org.apache.commons.logging.*;
 import org.apache.hadoop.util.Options;
 import org.apache.hadoop.fs.*;
@@ -849,7 +851,7 @@ public class SequenceFile {
       try {                                       
         MessageDigest digester = MessageDigest.getInstance("MD5");
         long time = Time.now();
-        digester.update((new UID()+"@"+time).getBytes());
+        digester.update((new UID()+"@"+time).getBytes(Charsets.UTF_8));
         sync = digester.digest();
       } catch (Exception e) {
         throw new RuntimeException(e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
index 37b97f2..91178ec 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
@@ -23,6 +23,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 
+import org.apache.commons.io.Charsets;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 
@@ -281,7 +282,7 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
         // The compressed bzip2 stream should start with the
         // identifying characters BZ. Caller of CBZip2OutputStream
         // i.e. this class must write these characters.
-        out.write(HEADER.getBytes());
+        out.write(HEADER.getBytes(Charsets.UTF_8));
       }
     }
 
@@ -415,7 +416,7 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
         byte[] headerBytes = new byte[HEADER_LEN];
         int actualRead = bufferedIn.read(headerBytes, 0, HEADER_LEN);
         if (actualRead != -1) {
-          String header = new String(headerBytes);
+          String header = new String(headerBytes, Charsets.UTF_8);
           if (header.compareTo(HEADER) != 0) {
             bufferedIn.reset();
           } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java
index ad94c42..aabdf57 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java
@@ -24,6 +24,7 @@ import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.commons.io.Charsets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -233,7 +234,7 @@ class TFileDumper {
               out.printf("%X", b);
             }
           } else {
-            out.print(new String(key, 0, sampleLen));
+            out.print(new String(key, 0, sampleLen, Charsets.UTF_8));
           }
           if (sampleLen < key.length) {
             out.print("...");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcConstants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcConstants.java
index c457500..d5e795b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcConstants.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcConstants.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.ipc;
 
 import java.nio.ByteBuffer;
 
+import org.apache.commons.io.Charsets;
 import org.apache.hadoop.classification.InterfaceAudience;
 
 @InterfaceAudience.Private
@@ -53,7 +54,8 @@ public class RpcConstants {
   /**
    * The first four bytes of Hadoop RPC connections
    */
-  public static final ByteBuffer HEADER = ByteBuffer.wrap("hrpc".getBytes());
+  public static final ByteBuffer HEADER = ByteBuffer.wrap("hrpc".getBytes
+      (Charsets.UTF_8));
   public static final int HEADER_LEN_AFTER_HRPC_PART = 3; // 3 bytes that follow
   
   // 1 : Introduce ping and server does not throw away RPCs

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index a4d669a..e508d4e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -69,6 +69,7 @@ import javax.security.sasl.Sasl;
 import javax.security.sasl.SaslException;
 import javax.security.sasl.SaslServer;
 
+import org.apache.commons.io.Charsets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -182,7 +183,7 @@ public abstract class Server {
    * and send back a nicer response.
    */
   private static final ByteBuffer HTTP_GET_BYTES = ByteBuffer.wrap(
-      "GET ".getBytes());
+      "GET ".getBytes(Charsets.UTF_8));
   
   /**
    * An HTTP response to send back if we detect an HTTP request to our IPC
@@ -1709,7 +1710,7 @@ public abstract class Server {
     private void setupHttpRequestOnIpcPortResponse() throws IOException {
       Call fakeCall = new Call(0, RpcConstants.INVALID_RETRY_COUNT, null, this);
       fakeCall.setResponse(ByteBuffer.wrap(
-          RECEIVED_HTTP_REQ_RESPONSE.getBytes()));
+          RECEIVED_HTTP_REQ_RESPONSE.getBytes(Charsets.UTF_8)));
       responder.doRespond(fakeCall);
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
index 77f74cc..4749ce1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
@@ -24,6 +24,7 @@ import java.util.regex.Pattern;
 import javax.servlet.*;
 import javax.servlet.http.*;
 
+import com.google.common.base.Charsets;
 import org.apache.commons.logging.*;
 import org.apache.commons.logging.impl.*;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -66,7 +67,7 @@ public class LogLevel {
       connection.connect();
 
       BufferedReader in = new BufferedReader(new InputStreamReader(
-          connection.getInputStream()));
+          connection.getInputStream(), Charsets.UTF_8));
       for(String line; (line = in.readLine()) != null; )
         if (line.startsWith(MARKER)) {
           System.out.println(TAG.matcher(line).replaceAll(""));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java
index 841874f..0e70778 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext.java
@@ -29,6 +29,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.commons.io.Charsets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
@@ -225,7 +226,7 @@ public class GangliaContext extends AbstractMetricsContext {
    * a multiple of 4.
    */
   protected void xdr_string(String s) {
-    byte[] bytes = s.getBytes();
+    byte[] bytes = s.getBytes(Charsets.UTF_8);
     int len = bytes.length;
     xdr_int(len);
     System.arraycopy(bytes, 0, buffer, offset, len);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
index e4b5580..167205e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
@@ -269,14 +269,14 @@ class MetricsConfig extends SubsetConfiguration {
 
   static String toString(Configuration c) {
     ByteArrayOutputStream buffer = new ByteArrayOutputStream();
-    PrintStream ps = new PrintStream(buffer);
-    PropertiesConfiguration tmp = new PropertiesConfiguration();
-    tmp.copy(c);
     try {
+      PrintStream ps = new PrintStream(buffer, false, "UTF-8");
+      PropertiesConfiguration tmp = new PropertiesConfiguration();
+      tmp.copy(c);
       tmp.save(ps);
+      return buffer.toString("UTF-8");
     } catch (Exception e) {
       throw new MetricsConfigException(e);
     }
-    return buffer.toString();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/FileSink.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/FileSink.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/FileSink.java
index d136416..ab121bc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/FileSink.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/FileSink.java
@@ -20,9 +20,9 @@ package org.apache.hadoop.metrics2.sink;
 
 import java.io.Closeable;
 import java.io.File;
-import java.io.FileWriter;
+import java.io.FileOutputStream;
 import java.io.IOException;
-import java.io.PrintWriter;
+import java.io.PrintStream;
 
 import org.apache.commons.configuration.SubsetConfiguration;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -40,15 +40,15 @@ import org.apache.hadoop.metrics2.MetricsTag;
 @InterfaceStability.Evolving
 public class FileSink implements MetricsSink, Closeable {
   private static final String FILENAME_KEY = "filename";
-  private PrintWriter writer;
+  private PrintStream writer;
 
   @Override
   public void init(SubsetConfiguration conf) {
     String filename = conf.getString(FILENAME_KEY);
     try {
-      writer = filename == null
-          ? new PrintWriter(System.out)
-          : new PrintWriter(new FileWriter(new File(filename), true));
+      writer = filename == null ? System.out
+          : new PrintStream(new FileOutputStream(new File(filename)),
+                            true, "UTF-8");
     } catch (Exception e) {
       throw new MetricsException("Error creating "+ filename, e);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java
index 9bc3f15..e72fe24 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java
@@ -25,6 +25,7 @@ import java.io.Closeable;
 import java.net.Socket;
 
 import org.apache.commons.configuration.SubsetConfiguration;
+import org.apache.commons.io.Charsets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -64,7 +65,8 @@ public class GraphiteSink implements MetricsSink, Closeable {
         try {
             // Open an connection to Graphite server.
             socket = new Socket(serverHost, serverPort);
-            writer = new OutputStreamWriter(socket.getOutputStream());
+            writer = new OutputStreamWriter(
+                socket.getOutputStream(), Charsets.UTF_8);
         } catch (Exception e) {
             throw new MetricsException("Error creating connection, "
                     + serverHost + ":" + serverPort, e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/AbstractGangliaSink.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/AbstractGangliaSink.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/AbstractGangliaSink.java
index b3581f9..164ea08 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/AbstractGangliaSink.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/AbstractGangliaSink.java
@@ -29,6 +29,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.commons.configuration.SubsetConfiguration;
+import org.apache.commons.io.Charsets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.metrics2.MetricsSink;
@@ -223,7 +224,7 @@ public abstract class AbstractGangliaSink implements MetricsSink {
    * @param s the string to be written to buffer at offset location
    */
   protected void xdr_string(String s) {
-    byte[] bytes = s.getBytes();
+    byte[] bytes = s.getBytes(Charsets.UTF_8);
     int len = bytes.length;
     xdr_int(len);
     System.arraycopy(bytes, 0, buffer, offset, len);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
index 2662108..59c0ca9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
@@ -20,13 +20,16 @@ package org.apache.hadoop.net;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY;
 
 import java.io.BufferedReader;
+import java.io.FileInputStream;
 import java.io.FileReader;
 import java.io.IOException;
+import java.io.InputStreamReader;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.commons.io.Charsets;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -96,9 +99,10 @@ public class TableMapping extends CachedDNSToSwitchMapping {
         return null;
       }
   
-      BufferedReader reader = null;
-      try {
-        reader = new BufferedReader(new FileReader(filename));
+
+      try (BufferedReader reader =
+               new BufferedReader(new InputStreamReader(
+                   new FileInputStream(filename), Charsets.UTF_8))) {
         String line = reader.readLine();
         while (line != null) {
           line = line.trim();
@@ -115,15 +119,6 @@ public class TableMapping extends CachedDNSToSwitchMapping {
       } catch (Exception e) {
         LOG.warn(filename + " cannot be read.", e);
         return null;
-      } finally {
-        if (reader != null) {
-          try {
-            reader.close();
-          } catch (IOException e) {
-            LOG.warn(filename + " cannot be read.", e);
-            return null;
-          }
-        }
       }
       return loadMap;
     }

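The TableMapping hunk is the one place in this patch that also simplifies resource handling: the manual finally/close block disappears because try-with-resources closes the reader on every exit path, with the charset named up front. A minimal sketch of reading a mapping file line by line in that style (the file format and names here are illustrative, not the Hadoop topology format):

    import java.io.BufferedReader;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.nio.charset.StandardCharsets;
    import java.util.HashMap;
    import java.util.Map;

    public class TopologyFileReader {
      public static Map<String, String> load(String filename) throws IOException {
        Map<String, String> map = new HashMap<>();
        // The reader is closed automatically, even if readLine() throws.
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(
            new FileInputStream(filename), StandardCharsets.UTF_8))) {
          String line;
          while ((line = reader.readLine()) != null) {
            line = line.trim();
            if (line.isEmpty() || line.startsWith("#")) {
              continue;                      // skip blanks and comments
            }
            String[] fields = line.split("\\s+");
            if (fields.length == 2) {
              map.put(fields[0], fields[1]); // e.g. host -> rack
            }
          }
        }
        return map;
      }
    }
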
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
index 4fb9e45..43d1b66 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.security;
 
+import com.google.common.base.Charsets;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.conf.Configuration;
@@ -24,8 +25,10 @@ import org.apache.hadoop.http.FilterContainer;
 import org.apache.hadoop.http.FilterInitializer;
 import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
 
+import java.io.FileInputStream;
 import java.io.FileReader;
 import java.io.IOException;
+import java.io.InputStreamReader;
 import java.io.Reader;
 import java.util.HashMap;
 import java.util.Map;
@@ -78,10 +81,10 @@ public class AuthenticationFilterInitializer extends FilterInitializer {
     if (signatureSecretFile == null) {
       throw new RuntimeException("Undefined property: " + SIGNATURE_SECRET_FILE);      
     }
-    
-    try {
-      StringBuilder secret = new StringBuilder();
-      Reader reader = new FileReader(signatureSecretFile);
+
+    StringBuilder secret = new StringBuilder();
+    try (Reader reader = new InputStreamReader(
+        new FileInputStream(signatureSecretFile), Charsets.UTF_8)) {
       int c = reader.read();
       while (c > -1) {
         secret.append((char)c);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
index b81e810..e6b8722 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
@@ -32,6 +32,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.commons.io.Charsets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -217,7 +218,8 @@ public class Credentials implements Writable {
     readFields(in);
   }
   
-  private static final byte[] TOKEN_STORAGE_MAGIC = "HDTS".getBytes();
+  private static final byte[] TOKEN_STORAGE_MAGIC =
+      "HDTS".getBytes(Charsets.UTF_8);
   private static final byte TOKEN_STORAGE_VERSION = 0;
   
   public void writeTokenStorageToStream(DataOutputStream os)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
index e72d988..c0c8d2b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
@@ -17,8 +17,10 @@
  */
 package org.apache.hadoop.security;
 
+import java.io.FileInputStream;
 import java.io.FileReader;
 import java.io.IOException;
+import java.io.InputStreamReader;
 import java.io.Reader;
 import java.util.ArrayList;
 import java.util.Hashtable;
@@ -34,6 +36,7 @@ import javax.naming.directory.InitialDirContext;
 import javax.naming.directory.SearchControls;
 import javax.naming.directory.SearchResult;
 
+import org.apache.commons.io.Charsets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -366,9 +369,10 @@ public class LdapGroupsMapping
       // an anonymous bind
       return "";
     }
-    
-    try (Reader reader = new FileReader(pwFile)) {
-      StringBuilder password = new StringBuilder();
+
+    StringBuilder password = new StringBuilder();
+    try (Reader reader = new InputStreamReader(
+        new FileInputStream(pwFile), Charsets.UTF_8)) {
       int c = reader.read();
       while (c > -1) {
         password.append((char)c);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java
index 83f46ef..f2b21e8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java
@@ -44,6 +44,7 @@ import javax.security.sasl.SaslServer;
 import javax.security.sasl.SaslServerFactory;
 
 import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.io.Charsets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -184,11 +185,11 @@ public class SaslRpcServer {
   }
   
   static String encodeIdentifier(byte[] identifier) {
-    return new String(Base64.encodeBase64(identifier));
+    return new String(Base64.encodeBase64(identifier), Charsets.UTF_8);
   }
 
   static byte[] decodeIdentifier(String identifier) {
-    return Base64.decodeBase64(identifier.getBytes());
+    return Base64.decodeBase64(identifier.getBytes(Charsets.UTF_8));
   }
 
   public static <T extends TokenIdentifier> T getIdentifier(String id,
@@ -206,7 +207,8 @@ public class SaslRpcServer {
   }
 
   static char[] encodePassword(byte[] password) {
-    return new String(Base64.encodeBase64(password)).toCharArray();
+    return new String(Base64.encodeBase64(password),
+                      Charsets.UTF_8).toCharArray();
   }
 
   /** Splitting fully qualified Kerberos name into parts */

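For the SaslRpcServer hunk, the point is that Base64 output is pure ASCII, so decoding it through the platform default charset usually works by accident; naming UTF-8 makes the round trip deterministic regardless of file.encoding. A tiny sketch of the encode/decode pair with commons-codec, using made-up token bytes:

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;
    import org.apache.commons.codec.binary.Base64;

    public class Base64RoundTrip {
      public static void main(String[] args) {
        byte[] identifier = new byte[] {0x01, 0x02, (byte) 0xFE};   // made-up token bytes

        // encodeBase64 returns ASCII bytes; interpret them with a named charset.
        String encoded = new String(Base64.encodeBase64(identifier), StandardCharsets.UTF_8);

        // Same on the way back: turn the string into bytes explicitly before decoding.
        byte[] decoded = Base64.decodeBase64(encoded.getBytes(StandardCharsets.UTF_8));

        System.out.println(encoded + " -> " + Arrays.toString(decoded));
      }
    }
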
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java
index e152d46..e995cb6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java
@@ -22,11 +22,13 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
+import java.nio.charset.Charset;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.commons.io.Charsets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -217,7 +219,9 @@ public class ShellBasedIdMapping implements IdMappingServiceProvider {
     try {
       Process process = Runtime.getRuntime().exec(
           new String[] { "bash", "-c", command });
-      br = new BufferedReader(new InputStreamReader(process.getInputStream()));
+      br = new BufferedReader(
+          new InputStreamReader(process.getInputStream(),
+                                Charset.defaultCharset()));
       String line = null;
       while ((line = br.readLine()) != null) {
         String[] nameId = line.split(regex);
@@ -552,7 +556,7 @@ public class ShellBasedIdMapping implements IdMappingServiceProvider {
     Map<Integer, Integer> gidMapping = new HashMap<Integer, Integer>();
     
     BufferedReader in = new BufferedReader(new InputStreamReader(
-        new FileInputStream(staticMapFile)));
+        new FileInputStream(staticMapFile), Charsets.UTF_8));
     
     try {
       String line = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java
index 5dc2abf..05958a0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.security.alias;
 
+import org.apache.commons.io.Charsets;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -165,7 +166,7 @@ public class JavaKeyStoreProvider extends CredentialProvider {
   }
   
   public static char[] bytesToChars(byte[] bytes) {
-    String pass = new String(bytes);
+    String pass = new String(bytes, Charsets.UTF_8);
     return pass.toCharArray();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/UserProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/UserProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/UserProvider.java
index 262cbad..127ccf0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/UserProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/UserProvider.java
@@ -23,6 +23,7 @@ import java.net.URI;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.commons.io.Charsets;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
@@ -56,7 +57,8 @@ public class UserProvider extends CredentialProvider {
     if (bytes == null) {
       return null;
     }
-    return new CredentialEntry(alias, new String(bytes).toCharArray());
+    return new CredentialEntry(
+        alias, new String(bytes, Charsets.UTF_8).toCharArray());
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanReceiverHost.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanReceiverHost.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanReceiverHost.java
index 81993e9..f461dac 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanReceiverHost.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanReceiverHost.java
@@ -31,6 +31,7 @@ import java.util.Map;
 import java.util.TreeMap;
 import java.util.UUID;
 
+import org.apache.commons.io.Charsets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -100,7 +101,8 @@ public class SpanReceiverHost implements TraceAdminProtocol {
       // out of /proc/self/stat.  (There isn't any portable way to get the
       // process ID from Java.)
       reader = new BufferedReader(
-          new InputStreamReader(new FileInputStream("/proc/self/stat")));
+          new InputStreamReader(new FileInputStream("/proc/self/stat"),
+                                Charsets.UTF_8));
       String line = reader.readLine();
       if (line == null) {
         throw new EOFException();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java
index 4ae5aed..5fdfbfa 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java
@@ -25,6 +25,7 @@ import java.util.Arrays;
 import java.util.LinkedList;
 import java.util.List;
 
+import org.apache.commons.io.Charsets;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
@@ -91,7 +92,7 @@ public class TraceAdmin extends Configured implements Tool {
       return 1;
     }
     ByteArrayOutputStream configStream = new ByteArrayOutputStream();
-    PrintStream configsOut = new PrintStream(configStream);
+    PrintStream configsOut = new PrintStream(configStream, false, "UTF-8");
     SpanReceiverInfoBuilder factory = new SpanReceiverInfoBuilder(className);
     String prefix = "";
     for (int i = 0; i < args.size(); ++i) {
@@ -113,13 +114,15 @@ public class TraceAdmin extends Configured implements Tool {
       configsOut.print(prefix + key + " = " + value);
       prefix = ", ";
     }
+
+    String configStreamStr = configStream.toString("UTF-8");
     try {
       long id = remote.addSpanReceiver(factory.build());
       System.out.println("Added trace span receiver " + id +
-          " with configuration " + configStream.toString());
+          " with configuration " + configStreamStr);
     } catch (IOException e) {
       System.out.println("addSpanReceiver error with configuration " +
-          configStream.toString());
+                             configStreamStr);
       throw e;
     }
     return 0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FileBasedIPList.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FileBasedIPList.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FileBasedIPList.java
index 8bfb5d9..8020b7a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FileBasedIPList.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/FileBasedIPList.java
@@ -19,13 +19,18 @@ package org.apache.hadoop.util;
 
 import java.io.BufferedReader;
 import java.io.File;
+import java.io.FileInputStream;
 import java.io.FileReader;
 import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.Reader;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashSet;
 import java.util.List;
 
+import org.apache.commons.io.Charsets;
+import org.apache.commons.io.IOUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
@@ -78,7 +83,8 @@ public class FileBasedIPList implements IPList {
       if (fileName != null) {
         File file = new File (fileName);
         if (file.exists()) {
-          FileReader fileReader = new FileReader(file);
+          Reader fileReader = new InputStreamReader(
+              new FileInputStream(file), Charsets.UTF_8);
           BufferedReader bufferedReader = new BufferedReader(fileReader);
           List<String> lines = new ArrayList<String>();
           String line = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
index b012add..ae77e6c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
@@ -22,6 +22,7 @@ import java.io.*;
 import java.util.Set;
 import java.util.HashSet;
 
+import org.apache.commons.io.Charsets;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -72,7 +73,8 @@ public class HostsFileReader {
       throws IOException {
     BufferedReader reader = null;
     try {
-      reader = new BufferedReader(new InputStreamReader(fileInputStream));
+      reader = new BufferedReader(
+          new InputStreamReader(fileInputStream, Charsets.UTF_8));
       String line;
       while ((line = reader.readLine()) != null) {
         String[] nodes = line.split("[ \t\n\f\r]+");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
index 3977e60..d9a7326 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
@@ -20,13 +20,16 @@ package org.apache.hadoop.util;
 
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
+import java.io.PrintStream;
 import java.io.PrintWriter;
+import java.io.UnsupportedEncodingException;
 import java.lang.management.ManagementFactory;
 import java.lang.management.ThreadInfo;
 import java.lang.management.ThreadMXBean;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.Field;
 import java.lang.reflect.Method;
+import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
@@ -154,7 +157,7 @@ public class ReflectionUtils {
    * @param stream the stream to print to
    * @param title a string title for the stack trace
    */
-  public synchronized static void printThreadInfo(PrintWriter stream,
+  public synchronized static void printThreadInfo(PrintStream stream,
                                      String title) {
     final int STACK_DEPTH = 20;
     boolean contention = threadBean.isThreadContentionMonitoringEnabled();
@@ -215,9 +218,12 @@ public class ReflectionUtils {
         }
       }
       if (dumpStack) {
-        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
-        printThreadInfo(new PrintWriter(buffer), title);
-        log.info(buffer.toString());
+        try {
+          ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+          printThreadInfo(new PrintStream(buffer, false, "UTF-8"), title);
+          log.info(buffer.toString(Charset.defaultCharset().name()));
+        } catch (UnsupportedEncodingException ignored) {
+        }
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
index a44e992..f0100d4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
@@ -22,6 +22,7 @@ import java.io.File;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.InputStream;
+import java.nio.charset.Charset;
 import java.util.Arrays;
 import java.util.Map;
 import java.util.Timer;
@@ -493,11 +494,11 @@ abstract public class Shell {
       timeOutTimer.schedule(timeoutTimerTask, timeOutInterval);
     }
     final BufferedReader errReader = 
-            new BufferedReader(new InputStreamReader(process
-                                                     .getErrorStream()));
+            new BufferedReader(new InputStreamReader(
+                process.getErrorStream(), Charset.defaultCharset()));
     BufferedReader inReader = 
-            new BufferedReader(new InputStreamReader(process
-                                                     .getInputStream()));
+            new BufferedReader(new InputStreamReader(
+                process.getInputStream(), Charset.defaultCharset()));
     final StringBuffer errMsg = new StringBuffer();
     
     // read error and input streams as this would free up the buffers

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9fcedb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
index eae8ea7..08aa2c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
@@ -245,9 +245,7 @@ public class TestDataTransferKeepalive {
   private void assertXceiverCount(int expected) {
     int count = getXceiverCountWithoutServer();
     if (count != expected) {
-      ReflectionUtils.printThreadInfo(
-          new PrintWriter(System.err),
-          "Thread dumps");
+      ReflectionUtils.printThreadInfo(System.err, "Thread dumps");
       fail("Expected " + expected + " xceivers, found " +
           count);
     }
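
All of the hunks above answer the same findbugs complaint about relying on the platform default encoding: wherever bytes become characters or characters become bytes, the charset is now named explicitly. A minimal, self-contained sketch of the pattern using only the JDK (an illustration, not code taken from the patch):

    import java.io.ByteArrayOutputStream;
    import java.io.PrintStream;
    import java.io.UnsupportedEncodingException;
    import java.nio.charset.StandardCharsets;

    public class ExplicitCharsetSketch {
      public static void main(String[] args) throws UnsupportedEncodingException {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        // Name the encoding when writing characters out as bytes...
        PrintStream out = new PrintStream(buffer, false, "UTF-8");
        out.println("key = value");
        out.flush();
        // ...and again when turning the bytes back into a String, so the
        // round trip no longer depends on the JVM's file.encoding setting.
        String text = new String(buffer.toByteArray(), StandardCharsets.UTF_8);
        System.out.print(text);
      }
    }

The reader-side changes follow the same rule, passing a Charset to InputStreamReader rather than using its single-argument constructor.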


[19/50] [abbrv] hadoop git commit: HADOOP-10950. rework heap management vars (John Smith via aw)

Posted by ka...@apache.org.
HADOOP-10950. rework heap management vars (John Smith via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7c6c710
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7c6c710
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7c6c710

Branch: refs/heads/YARN-2139
Commit: a7c6c710b2366cea1b7c24e3a2cd46be1eb0f05b
Parents: 44870dc
Author: Allen Wittenauer <aw...@apache.org>
Authored: Wed Dec 10 13:37:32 2014 -0800
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Wed Dec 10 13:37:32 2014 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 +
 .../hadoop-common/src/main/bin/hadoop           |  2 -
 .../src/main/bin/hadoop-functions.sh            | 36 +++++++++++-----
 .../hadoop-common/src/main/bin/rcc              |  2 -
 .../hadoop-common/src/main/conf/hadoop-env.sh   | 15 ++++++-
 .../main/conf/hadoop-user-functions.sh.example  |  7 ----
 .../hadoop-hdfs/src/main/bin/hdfs               |  1 -
 hadoop-mapreduce-project/bin/mapred             |  3 +-
 hadoop-mapreduce-project/conf/mapred-env.sh     | 17 ++++----
 hadoop-yarn-project/hadoop-yarn/bin/yarn        | 14 ++++---
 .../hadoop-yarn/conf/yarn-env.sh                | 44 +++++++-------------
 11 files changed, 73 insertions(+), 70 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7c6c710/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 6242cee..7310dd4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -11,6 +11,8 @@ Trunk (Unreleased)
 
     HADOOP-9902. Shell script rewrite (aw)
 
+    HADOOP-10950. rework heap management vars (John Smith via aw)
+
   NEW FEATURES
 
     HADOOP-9629. Support Windows Azure Storage - Blob as a file system in Hadoop.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7c6c710/hadoop-common-project/hadoop-common/src/main/bin/hadoop
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
index 9df2c7d..be38382 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
@@ -183,8 +183,6 @@ esac
 hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
 HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
 
-hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}"
-
 hadoop_finalize
 hadoop_java_exec "${COMMAND}" "${CLASS}" "$@"
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7c6c710/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 2b56634..3e353d9 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -162,7 +162,6 @@ function hadoop_basic_init
   HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-${HADOOP_LOGLEVEL},console}
   HADOOP_DAEMON_ROOT_LOGGER=${HADOOP_DAEMON_ROOT_LOGGER:-${HADOOP_LOGLEVEL},RFA}
   HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}
-  HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-1024}
   HADOOP_SSH_OPTS=${HADOOP_SSH_OPTS:-"-o BatchMode=yes -o StrictHostKeyChecking=no -o ConnectTimeout=10s"}
   HADOOP_SECURE_LOG_DIR=${HADOOP_SECURE_LOG_DIR:-${HADOOP_LOG_DIR}}
   HADOOP_SECURE_PID_DIR=${HADOOP_SECURE_PID_DIR:-${HADOOP_PID_DIR}}
@@ -597,15 +596,6 @@ function hadoop_java_setup
     hadoop_error "ERROR: $JAVA is not executable."
     exit 1
   fi
-  # shellcheck disable=SC2034
-  JAVA_HEAP_MAX=-Xmx1g
-  HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-1024}
-  
-  # check envvars which might override default args
-  if [[ -n "$HADOOP_HEAPSIZE" ]]; then
-    # shellcheck disable=SC2034
-    JAVA_HEAP_MAX="-Xmx${HADOOP_HEAPSIZE}m"
-  fi
 }
 
 function hadoop_finalize_libpaths
@@ -617,6 +607,31 @@ function hadoop_finalize_libpaths
   fi
 }
 
+function hadoop_finalize_hadoop_heap
+{
+  if [[ -n "${HADOOP_HEAPSIZE_MAX}" ]]; then
+    if [[ "${HADOOP_HEAPSIZE_MAX}" =~ ^[0-9]+$ ]]; then
+      HADOOP_HEAPSIZE_MAX="${HADOOP_HEAPSIZE_MAX}m"
+    fi
+    hadoop_add_param HADOOP_OPTS Xmx "-Xmx${HADOOP_HEAPSIZE_MAX}"
+  fi
+
+  # backwards compatibility
+  if [[ -n "${HADOOP_HEAPSIZE}" ]]; then
+    if [[ "${HADOOP_HEAPSIZE}" =~ ^[0-9]+$ ]]; then
+      HADOOP_HEAPSIZE="${HADOOP_HEAPSIZE}m"
+    fi
+    hadoop_add_param HADOOP_OPTS Xmx "-Xmx${HADOOP_HEAPSIZE}"
+  fi
+
+  if [[ -n "${HADOOP_HEAPSIZE_MIN}" ]]; then
+    if [[ "${HADOOP_HEAPSIZE_MIN}" =~ ^[0-9]+$ ]]; then
+      HADOOP_HEAPSIZE_MIN="${HADOOP_HEAPSIZE_MIN}m"
+    fi
+    hadoop_add_param HADOOP_OPTS Xms "-Xms${HADOOP_HEAPSIZE_MIN}"
+  fi
+}
+
 #
 # fill in any last minute options that might not have been defined yet
 #
@@ -646,6 +661,7 @@ function hadoop_finalize
   # override of CONF dirs and more
   hadoop_finalize_classpath
   hadoop_finalize_libpaths
+  hadoop_finalize_hadoop_heap
   hadoop_finalize_hadoop_opts
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7c6c710/hadoop-common-project/hadoop-common/src/main/bin/rcc
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/rcc b/hadoop-common-project/hadoop-common/src/main/bin/rcc
index 7425353..512fc2c 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/rcc
+++ b/hadoop-common-project/hadoop-common/src/main/bin/rcc
@@ -37,7 +37,5 @@ CLASS='org.apache.hadoop.record.compiler.generated.Rcc'
 hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
 HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
 
-hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}"
-
 hadoop_finalize
 hadoop_java_exec rcc "${CLASS}" "$@"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7c6c710/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
index ed9382b..588b02a 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
@@ -64,8 +64,19 @@
 # path.
 # export HADOOP_CONF_DIR=$HADOOP_PREFIX/etc/hadoop
 
-# The maximum amount of heap to use, in MB. Default is 1024.
-# export HADOOP_HEAPSIZE=1024
+# The maximum amount of heap to use (Java -Xmx).  If no unit 
+# is provided, it will be converted to MB.  Daemons will 
+# prefer any Xmx setting in their respective _OPT variable.
+# There is no default; the JVM will autoscale based upon machine
+# memory size.
+# export HADOOP_HEAPSIZE_MAX=
+
+# The minimum amount of heap to use (Java -Xms).  If no unit 
+# is provided, it will be converted to MB.  Daemons will 
+# prefer any Xms setting in their respective _OPT variable.
+# There is no default; the JVM will autoscale based upon machine
+# memory size.
+# export HADOOP_HEAPSIZE_MIN=
 
 # Extra Java runtime options for all Hadoop commands. We don't support
 # IPv6 yet/still, so by default the preference is set to IPv4.
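
The "autoscale" wording refers to JVM ergonomics: when no -Xmx is supplied, the JVM picks a maximum heap from the machine's physical memory, typically a fraction of it. A quick way to see what a given JVM settled on, independent of these scripts (plain JDK, not part of the patch):

    public class ShowDefaultHeap {
      public static void main(String[] args) {
        // Reports the effective -Xmx; run without -Xmx to see the ergonomic default.
        long maxBytes = Runtime.getRuntime().maxMemory();
        System.out.println("Max heap: " + (maxBytes >> 20) + " MB");
      }
    }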

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7c6c710/hadoop-common-project/hadoop-common/src/main/conf/hadoop-user-functions.sh.example
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-user-functions.sh.example b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-user-functions.sh.example
index 7699c40..b2f78f8 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-user-functions.sh.example
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-user-functions.sh.example
@@ -84,11 +84,4 @@
 #    echo "ERROR: ${JAVA} is not executable." 1>&2
 #    exit 1
 #  fi
-#  JAVA_HEAP_MAX=-Xmx1g
-#  HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-128}
-#
-#  # check envvars which might override default args
-#  if [[ -n "$HADOOP_HEAPSIZE" ]]; then
-#    JAVA_HEAP_MAX="-Xmx${HADOOP_HEAPSIZE}m"
-#  fi
 #}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7c6c710/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index ee666f3..8140f18 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -273,7 +273,6 @@ if [[ "${HADOOP_DAEMON_MODE}" != "default" ]]; then
   fi
 fi
 
-hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}"
 hadoop_finalize
 
 if [[ -n "${supportdaemonization}" ]]; then

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7c6c710/hadoop-mapreduce-project/bin/mapred
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/bin/mapred b/hadoop-mapreduce-project/bin/mapred
index 667777a..9f28471 100755
--- a/hadoop-mapreduce-project/bin/mapred
+++ b/hadoop-mapreduce-project/bin/mapred
@@ -95,7 +95,7 @@ case ${COMMAND} in
     hadoop_debug "Appending HADOOP_JOB_HISTORYSERVER_OPTS onto HADOOP_OPTS"
     HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_JOB_HISTORYSERVER_OPTS}"
     if [ -n "${HADOOP_JOB_HISTORYSERVER_HEAPSIZE}" ]; then
-      JAVA_HEAP_MAX="-Xmx${HADOOP_JOB_HISTORYSERVER_HEAPSIZE}m"
+      HADOOP_HEAPSIZE_MAX="${HADOOP_JOB_HISTORYSERVER_HEAPSIZE}"
     fi
     HADOOP_DAEMON_ROOT_LOGGER=${HADOOP_JHS_LOGGER:-$HADOOP_DAEMON_ROOT_LOGGER}
   ;;
@@ -147,7 +147,6 @@ if [[  "${HADOOP_DAEMON_MODE}" != "default" ]]; then
   HADOOP_LOGFILE="hadoop-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.log"
 fi
 
-hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}"
 hadoop_finalize
 
 if [[ -n "${supportdaemonization}" ]]; then

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7c6c710/hadoop-mapreduce-project/conf/mapred-env.sh
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/conf/mapred-env.sh b/hadoop-mapreduce-project/conf/mapred-env.sh
index 8a4b372..4088ceb 100644
--- a/hadoop-mapreduce-project/conf/mapred-env.sh
+++ b/hadoop-mapreduce-project/conf/mapred-env.sh
@@ -52,17 +52,14 @@
 # Job History Server specific parameters
 ###
 
-# Specify the max heapsize for the Job History Server using a numerical value
-# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
-# the value to 1000.
-# This value will be overridden by an Xmx setting specified in either
-# MAPRED_OPTS, HADOOP_OPTS, and/or HADOOP_JOB_HISTORYSERVER_OPTS.
-# If not specified, the default value will be picked from either HADOOP_HEAPSIZE
-# or the built-in default.
-#
-#export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000
+# Specify the max heapsize for the JobHistoryServer.  If no units are
+# given, it will be assumed to be in MB.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS,
+# HADOOP_OPTS, and/or HADOOP_JOB_HISTORYSERVER_OPTS.
+# Default is the same as HADOOP_HEAPSIZE_MAX.
+#export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=
 
-# Specify the JVM options to be used when starting the ResourceManager.
+# Specify the JVM options to be used when starting the HistoryServer.
 # These options will be appended to the options specified as YARN_OPTS
 # and therefore may override any similar flags set in YARN_OPTS
 #export HADOOP_JOB_HISTORYSERVER_OPTS=

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7c6c710/hadoop-yarn-project/hadoop-yarn/bin/yarn
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index dfa27e4..760d8e6 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -117,8 +117,9 @@ case "${COMMAND}" in
     CLASS='org.apache.hadoop.yarn.server.nodemanager.NodeManager'
     hadoop_debug "Append YARN_NODEMANAGER_OPTS onto YARN_OPTS"
     YARN_OPTS="${YARN_OPTS} ${YARN_NODEMANAGER_OPTS}"
+    # Backwards compatibility
     if [[ -n "${YARN_NODEMANAGER_HEAPSIZE}" ]]; then
-      JAVA_HEAP_MAX="-Xmx${YARN_NODEMANAGER_HEAPSIZE}m"
+      HADOOP_HEAPSIZE_MAX="${YARN_NODEMANAGER_HEAPSIZE}"
     fi
   ;;
   proxyserver)
@@ -126,8 +127,9 @@ case "${COMMAND}" in
     CLASS='org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer'
     hadoop_debug "Append YARN_PROXYSERVER_OPTS onto YARN_OPTS"
     YARN_OPTS="${YARN_OPTS} ${YARN_PROXYSERVER_OPTS}"
+    # Backwards compatibility
     if [[ -n "${YARN_PROXYSERVER_HEAPSIZE}" ]]; then
-      JAVA_HEAP_MAX="-Xmx${YARN_PROXYSERVER_HEAPSIZE}m"
+      HADOOP_HEAPSIZE_MAX="${YARN_PROXYSERVER_HEAPSIZE}"
     fi
   ;;
   queue)
@@ -140,8 +142,9 @@ case "${COMMAND}" in
     CLASS='org.apache.hadoop.yarn.server.resourcemanager.ResourceManager'
     YARN_OPTS="${YARN_OPTS} ${YARN_RESOURCEMANAGER_OPTS}"
     hadoop_debug "Append YARN_RESOURCEMANAGER_OPTS onto YARN_OPTS"
+    # Backwards compatibility
     if [[ -n "${YARN_RESOURCEMANAGER_HEAPSIZE}" ]]; then
-      JAVA_HEAP_MAX="-Xmx${YARN_RESOURCEMANAGER_HEAPSIZE}m"
+      HADOOP_HEAPSIZE_MAX="${YARN_RESOURCEMANAGER_HEAPSIZE}"
     fi
   ;;
   rmadmin)
@@ -154,8 +157,9 @@ case "${COMMAND}" in
     CLASS='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer'
     hadoop_debug "Append YARN_TIMELINESERVER_OPTS onto YARN_OPTS"
     YARN_OPTS="${YARN_OPTS} ${YARN_TIMELINESERVER_OPTS}"
+    # Backwards compatibility
     if [[ -n "${YARN_TIMELINESERVER_HEAPSIZE}" ]]; then
-      JAVA_HEAP_MAX="-Xmx${YARN_TIMELINESERVER_HEAPSIZE}m"
+      HADOOP_HEAPSIZE_MAX="${YARN_TIMELINESERVER_HEAPSIZE}"
     fi
   ;;
   sharedcachemanager)
@@ -196,8 +200,6 @@ if [[  "${HADOOP_DAEMON_MODE}" != "default" ]]; then
   HADOOP_LOGFILE="hadoop-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.log"
 fi
 
-hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}"
-
 # Add YARN custom options to command line in case someone actually
 # used these.
 #

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7c6c710/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh b/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh
index 3d3a036..fdf601f 100644
--- a/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh
+++ b/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh
@@ -49,15 +49,12 @@
 # Resource Manager specific parameters
 ###
 
-# Specify the max heapsize for the ResourceManager using a numerical value
-# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
-# the value to 1000.
+# Specify the max heapsize for the ResourceManager.  If no units are
+# given, it will be assumed to be in MB.
 # This value will be overridden by an Xmx setting specified in either YARN_OPTS,
 # HADOOP_OPTS, and/or YARN_RESOURCEMANAGER_OPTS.
-# If not specified, the default value will be picked from either HADOOP_HEAPSIZE
-# or the built-in default.
-#
-#export YARN_RESOURCEMANAGER_HEAPSIZE=1000
+# Default is the same as HADOOP_HEAPSIZE_MAX
+#export YARN_RESOURCEMANAGER_HEAPSIZE=
 
 # Specify the JVM options to be used when starting the ResourceManager.
 # These options will be appended to the options specified as YARN_OPTS
@@ -83,15 +80,12 @@
 # Node Manager specific parameters
 ###
 
-# Specify the max Heapsize for the NodeManager using a numerical value
-# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
-# the value to 1000.
+# Specify the max heapsize for the NodeManager.  If no units are
+# given, it will be assumed to be in MB.
 # This value will be overridden by an Xmx setting specified in either YARN_OPTS,
 # HADOOP_OPTS, and/or YARN_NODEMANAGER_OPTS.
-# If not specified, the default value will be picked from either HADOOP_HEAPSIZE
-# or the built-in default.
-#
-#export YARN_NODEMANAGER_HEAPSIZE=1000
+# Default is the same as HADOOP_HEAPSIZE_MAX.
+#export YARN_NODEMANAGER_HEAPSIZE=
 
 # Specify the JVM options to be used when starting the NodeManager.
 # These options will be appended to the options specified as YARN_OPTS
@@ -105,15 +99,12 @@
 # TimeLineServer specific parameters
 ###
 
-# Specify the max Heapsize for the timeline server using a numerical value
-# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
-# the value to 1000.
+# Specify the max heapsize for the timelineserver.  If no units are
+# given, it will be assumed to be in MB.
 # This value will be overridden by an Xmx setting specified in either YARN_OPTS,
 # HADOOP_OPTS, and/or YARN_TIMELINESERVER_OPTS.
-# If not specified, the default value will be picked from either HADOOP_HEAPSIZE
-# or the built-in default.
-#
-#export YARN_TIMELINESERVER_HEAPSIZE=1000
+# Default is the same as HADOOP_HEAPSIZE_MAX.
+#export YARN_TIMELINESERVER_HEAPSIZE=
 
 # Specify the JVM options to be used when starting the TimeLineServer.
 # These options will be appended to the options specified as YARN_OPTS
@@ -127,15 +118,12 @@
 # Web App Proxy Server specific parameters
 ###
 
-# Specify the max Heapsize for the proxy server using a numerical value
-# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set
-# the value to 1000.
+# Specify the max heapsize for the web app proxy server.  If no units are
+# given, it will be assumed to be in MB.
 # This value will be overridden by an Xmx setting specified in either YARN_OPTS,
 # HADOOP_OPTS, and/or YARN_PROXYSERVER_OPTS.
-# If not specified, the default value will be picked from either HADOOP_HEAPSIZE
-# or the built-in default.
-#
-#export YARN_PROXYSERVER_HEAPSIZE=1000
+# Default is the same as HADOOP_HEAPSIZE_MAX.
+#export YARN_PROXYSERVER_HEAPSIZE=
 
 # Specify the JVM options to be used when starting the proxy server.
 # These options will be appended to the options specified as YARN_OPTS


[08/50] [abbrv] hadoop git commit: HADOOP-11379. Fix new findbugs warnings in hadoop-auth*. Contributed by Li Lu.

Posted by ka...@apache.org.
HADOOP-11379. Fix new findbugs warnings in hadoop-auth*. Contributed by Li Lu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6df457a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6df457a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6df457a3

Branch: refs/heads/YARN-2139
Commit: 6df457a3d7661a890e84fc89567f29d0fe23c970
Parents: 5776a41
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Dec 9 13:08:51 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Tue Dec 9 13:08:51 2014 -0800

----------------------------------------------------------------------
 .../hadoop/security/authentication/examples/WhoClient.java      | 5 ++++-
 .../authentication/util/RandomSignerSecretProvider.java         | 4 +++-
 .../org/apache/hadoop/security/authentication/util/Signer.java  | 3 ++-
 .../authentication/util/StringSignerSecretProvider.java         | 3 ++-
 .../security/authentication/util/ZKSignerSecretProvider.java    | 3 ++-
 hadoop-common-project/hadoop-common/CHANGES.txt                 | 2 ++
 6 files changed, 15 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6df457a3/hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoClient.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoClient.java b/hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoClient.java
index 2299ae1..f5cff2b 100644
--- a/hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoClient.java
+++ b/hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoClient.java
@@ -19,6 +19,7 @@ import java.io.BufferedReader;
 import java.io.InputStreamReader;
 import java.net.HttpURLConnection;
 import java.net.URL;
+import java.nio.charset.Charset;
 
 /**
  * Example that uses <code>AuthenticatedURL</code>.
@@ -39,7 +40,9 @@ public class WhoClient {
       System.out.println("Status code: " + conn.getResponseCode() + " " + conn.getResponseMessage());
       System.out.println();
       if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
-        BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
+        BufferedReader reader = new BufferedReader(
+            new InputStreamReader(
+                conn.getInputStream(), Charset.forName("UTF-8")));
         String line = reader.readLine();
         while (line != null) {
           System.out.println(line);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6df457a3/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java
index 29e5661..41059a7 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java
@@ -14,6 +14,8 @@
 package org.apache.hadoop.security.authentication.util;
 
 import com.google.common.annotations.VisibleForTesting;
+
+import java.nio.charset.Charset;
 import java.util.Random;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -46,6 +48,6 @@ public class RandomSignerSecretProvider extends RolloverSignerSecretProvider {
 
   @Override
   protected byte[] generateNewSecret() {
-    return Long.toString(rand.nextLong()).getBytes();
+    return Long.toString(rand.nextLong()).getBytes(Charset.forName("UTF-8"));
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6df457a3/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
index f639503..aa63e40 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
@@ -15,6 +15,7 @@ package org.apache.hadoop.security.authentication.util;
 
 import org.apache.commons.codec.binary.Base64;
 
+import java.nio.charset.Charset;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
 
@@ -86,7 +87,7 @@ public class Signer {
   protected String computeSignature(byte[] secret, String str) {
     try {
       MessageDigest md = MessageDigest.getInstance("SHA");
-      md.update(str.getBytes());
+      md.update(str.getBytes(Charset.forName("UTF-8")));
       md.update(secret);
       byte[] digest = md.digest();
       return new Base64(0).encodeToString(digest);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6df457a3/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java
index 7aaccd2..57ddd37 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java
@@ -13,6 +13,7 @@
  */
 package org.apache.hadoop.security.authentication.util;
 
+import java.nio.charset.Charset;
 import java.util.Properties;
 import javax.servlet.ServletContext;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -36,7 +37,7 @@ public class StringSignerSecretProvider extends SignerSecretProvider {
           long tokenValidity) throws Exception {
     String signatureSecret = config.getProperty(
             AuthenticationFilter.SIGNATURE_SECRET, null);
-    secret = signatureSecret.getBytes();
+    secret = signatureSecret.getBytes(Charset.forName("UTF-8"));
     secrets = new byte[][]{secret};
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6df457a3/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
index f8db2ee..11bfccd 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
@@ -15,6 +15,7 @@ package org.apache.hadoop.security.authentication.util;
 
 import com.google.common.annotations.VisibleForTesting;
 import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
@@ -369,7 +370,7 @@ public class ZKSignerSecretProvider extends RolloverSignerSecretProvider {
   }
 
   private byte[] generateRandomSecret() {
-    return Long.toString(rand.nextLong()).getBytes();
+    return Long.toString(rand.nextLong()).getBytes(Charset.forName("UTF-8"));
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6df457a3/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index b030bf7..e6b44e9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -548,6 +548,8 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11273. TestMiniKdc failure: login options not compatible with IBM
     JDK. (Gao Zhong Liang via wheat9)
 
+    HADOOP-11379. Fix new findbugs warnings in hadoop-auth*. (Li Lu via wheat9)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES
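
The hadoop-auth fixes apply the same rule as the earlier charset hunks in this series, and here it matters for correctness as well as for findbugs: secrets and cookie signatures are compared as raw bytes, so String.getBytes() with the platform default encoding can yield different bytes on differently configured JVMs. A small self-contained illustration of the pitfall (not from the patch):

    import java.nio.charset.Charset;
    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    public class DefaultCharsetPitfall {
      public static void main(String[] args) {
        String token = "caf\u00e9";                                 // non-ASCII content
        byte[] platformBytes = token.getBytes();                    // depends on file.encoding
        byte[] utf8Bytes = token.getBytes(StandardCharsets.UTF_8);  // stable everywhere
        System.out.println("default charset: " + Charset.defaultCharset());
        System.out.println("same bytes? " + Arrays.equals(platformBytes, utf8Bytes));
      }
    }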


[02/50] [abbrv] hadoop git commit: HADOOP-11369. Fix new findbugs warnings in hadoop-mapreduce-client, non-core directories. Contributed by Li Lu.

Posted by ka...@apache.org.
HADOOP-11369. Fix new findbugs warnings in hadoop-mapreduce-client, non-core directories. Contributed by Li Lu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d777a1e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d777a1e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d777a1e4

Branch: refs/heads/YARN-2139
Commit: d777a1e4ca8e7cf0ce8967f79dd475468906c733
Parents: 7bceb13
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Dec 9 10:46:13 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Tue Dec 9 10:46:13 2014 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                   | 3 +++
 .../hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java       | 3 +--
 .../org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java     | 1 -
 .../org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java     | 2 +-
 4 files changed, 5 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d777a1e4/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 425cab7..5e2ff8d 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -536,6 +536,9 @@ Release 2.7.0 - UNRELEASED
 
     HADOOP-11367. Fix warnings from findbugs 3.0 in hadoop-streaming. (Li Lu via wheat9)
 
+    HADOOP-11369. Fix new findbugs warnings in hadoop-mapreduce-client,
+    non-core directories. (Li Lu via wheat9)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d777a1e4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index 45ddb9e..97de8fa 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -870,8 +870,7 @@ public class JobHistoryEventHandler extends AbstractService
         TaskAttemptStartedEvent tase = (TaskAttemptStartedEvent) event;
         tEvent.addEventInfo("TASK_TYPE", tase.getTaskType().toString());
         tEvent.addEventInfo("TASK_ATTEMPT_ID",
-            tase.getTaskAttemptId().toString() == null ?
-            "" : tase.getTaskAttemptId().toString());
+            tase.getTaskAttemptId().toString());
         tEvent.addEventInfo("START_TIME", tase.getStartTime());
         tEvent.addEventInfo("HTTP_PORT", tase.getHttpPort());
         tEvent.addEventInfo("TRACKER_NAME", tase.getTrackerName());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d777a1e4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
index 6c58a68..cd4e272 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
@@ -76,7 +76,6 @@ public abstract class RMCommunicator extends AbstractService
   protected EventHandler eventHandler;
   protected ApplicationMasterProtocol scheduler;
   private final ClientService clientService;
-  protected int lastResponseID;
   private Resource maxContainerCapability;
   protected Map<ApplicationAccessType, String> applicationACLs;
   private volatile long lastHeartbeatTime;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d777a1e4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
index f53f188..40844df 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
@@ -848,7 +848,7 @@ public class HistoryFileManager extends AbstractService {
             }
           });
         }
-      } else if (old != null && !old.isMovePending()) {
+      } else if (!old.isMovePending()) {
         //This is a duplicate so just delete it
         if (LOG.isDebugEnabled()) {
           LOG.debug("Duplicate: deleting");
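
Two of the changes above drop null tests that findbugs flags as pointless, either because the value's null case was already handled on an earlier branch or because the expression had already been dereferenced. A hypothetical shape of that pattern (illustration only, not the Hadoop code):

    public class RedundantNullCheckSketch {
      static String describe(Object old) {
        if (old == null) {
          return "absent";
        } else if (old != null) {   // flagged: 'old' cannot be null on this branch
          return old.toString();
        }
        return "";                  // unreachable in practice, keeps the compiler happy
      }
    }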


[38/50] [abbrv] hadoop git commit: HDFS-7517. Remove redundant non-null checks in FSNamesystem#getBlockLocations. Contributed by Haohui Mai.

Posted by ka...@apache.org.
HDFS-7517. Remove redundant non-null checks in FSNamesystem#getBlockLocations. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46612c7a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46612c7a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46612c7a

Branch: refs/heads/YARN-2139
Commit: 46612c7a5135d20b20403780b47dd00654aab057
Parents: 3681de2
Author: Haohui Mai <wh...@apache.org>
Authored: Fri Dec 12 11:51:17 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Fri Dec 12 11:51:17 2014 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                      | 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java     | 4 ----
 2 files changed, 3 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46612c7a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5977ed7..9cd5b05 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -579,6 +579,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7497. Inconsistent report of decommissioning DataNodes between
     dfsadmin and NameNode webui. (Yongjun Zhang via wang)
 
+    HDFS-7517. Remove redundant non-null checks in FSNamesystem#
+    getBlockLocations. (wheat9)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46612c7a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index c17c4f5..5dd5920 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1782,10 +1782,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
     logAuditEvent(true, "open", src);
 
-    if (res == null) {
-      return null;
-    }
-
     if (res.updateAccessTime()) {
       writeLock();
       final long now = now();


[32/50] [abbrv] hadoop git commit: HDFS-7449. Add metrics to NFS gateway. Contributed by Brandon Li

Posted by ka...@apache.org.
HDFS-7449. Add metrics to NFS gateway. Contributed by Brandon Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6f2a3f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6f2a3f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6f2a3f1

Branch: refs/heads/YARN-2139
Commit: f6f2a3f1c73266bfedd802eacde60d8b19b81015
Parents: 0bcea11
Author: Brandon Li <br...@apache.org>
Authored: Thu Dec 11 15:40:45 2014 -0800
Committer: Brandon Li <br...@apache.org>
Committed: Thu Dec 11 15:40:45 2014 -0800

----------------------------------------------------------------------
 .../hadoop/hdfs/nfs/conf/NfsConfigKeys.java     |   3 +
 .../org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java   |   3 +-
 .../hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java       | 220 +++++++++++++++++++
 .../apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java  |   4 +
 .../hadoop/hdfs/nfs/nfs3/OpenFileCtx.java       |  17 +-
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java    |  49 ++++-
 .../apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java   |   4 +-
 .../hadoop/hdfs/nfs/nfs3/WriteManager.java      |   6 +-
 .../hdfs/nfs/nfs3/TestNfs3HttpServer.java       |   4 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 +
 10 files changed, 297 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6f2a3f1/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
index 7566791..9e4aaf5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
@@ -70,4 +70,7 @@ public class NfsConfigKeys {
   public static final int NFS_HTTPS_PORT_DEFAULT = 50579;
   public static final String NFS_HTTPS_ADDRESS_KEY = "nfs.https.address";
   public static final String NFS_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + NFS_HTTPS_PORT_DEFAULT;
+  
+  public static final String  NFS_METRICS_PERCENTILES_INTERVALS_KEY = "nfs.metrics.percentiles.intervals";
+  public static final String  NFS_METRICS_PERCENTILES_INTERVALS_DEFAULT = "";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6f2a3f1/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
index 3daf7bb..ac9abf8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
@@ -42,7 +42,8 @@ public class Nfs3 extends Nfs3Base {
   
   public Nfs3(NfsConfiguration conf, DatagramSocket registrationSocket,
       boolean allowInsecurePorts) throws IOException {
-    super(new RpcProgramNfs3(conf, registrationSocket, allowInsecurePorts), conf);
+    super(RpcProgramNfs3.createRpcProgramNfs3(conf, registrationSocket,
+        allowInsecurePorts), conf);
     mountd = new Mountd(conf, registrationSocket, allowInsecurePorts);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6f2a3f1/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
new file mode 100644
index 0000000..d36ea73
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
@@ -0,0 +1,220 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.nfs.nfs3;
+
+import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableQuantiles;
+import org.apache.hadoop.metrics2.lib.MutableRate;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
+
+/**
+ * This class is for maintaining the various NFS gateway activity statistics and
+ * publishing them through the metrics interfaces.
+ */
+@InterfaceAudience.Private
+@Metrics(about = "Nfs3 metrics", context = "dfs")
+public class Nfs3Metrics {
+  // All mutable rates are in nanoseconds
+  // No metric for nullProcedure;
+  @Metric MutableRate getattr;
+  @Metric MutableRate setattr;
+  @Metric MutableRate lookup;
+  @Metric MutableRate access;
+  @Metric MutableRate readlink;
+  @Metric MutableRate read;
+  final MutableQuantiles[] readNanosQuantiles;
+  @Metric MutableRate write;
+  final MutableQuantiles[] writeNanosQuantiles;
+  @Metric MutableRate create;
+  @Metric MutableRate mkdir;
+  @Metric MutableRate symlink;
+  @Metric MutableRate mknod;
+  @Metric MutableRate remove;
+  @Metric MutableRate rmdir;
+  @Metric MutableRate rename;
+  @Metric MutableRate link;
+  @Metric MutableRate readdir;
+  @Metric MutableRate readdirplus;
+  @Metric MutableRate fsstat;
+  @Metric MutableRate fsinfo;
+  @Metric MutableRate pathconf;
+  @Metric MutableRate commit;
+  final MutableQuantiles[] commitNanosQuantiles;
+
+  @Metric MutableCounterLong bytesWritten;
+  @Metric MutableCounterLong bytesRead;
+
+  final MetricsRegistry registry = new MetricsRegistry("nfs3");
+  final String name;
+  JvmMetrics jvmMetrics = null;
+
+  public Nfs3Metrics(String name, String sessionId, int[] intervals,
+      final JvmMetrics jvmMetrics) {
+    this.name = name;
+    this.jvmMetrics = jvmMetrics;
+    registry.tag(SessionId, sessionId);
+
+    final int len = intervals.length;
+    readNanosQuantiles = new MutableQuantiles[len];
+    writeNanosQuantiles = new MutableQuantiles[len];
+    commitNanosQuantiles = new MutableQuantiles[len];
+
+    for (int i = 0; i < len; i++) {
+      int interval = intervals[i];
+      readNanosQuantiles[i] = registry.newQuantiles("readProcessNanos"
+          + interval + "s", "Read process in ns", "ops", "latency", interval);
+      writeNanosQuantiles[i] = registry.newQuantiles("writeProcessNanos"
+          + interval + "s", "Write process in ns", "ops", "latency", interval);
+      commitNanosQuantiles[i] = registry.newQuantiles("commitProcessNanos"
+          + interval + "s", "Commit process in ns", "ops", "latency", interval);
+    }
+  }
+
+  public static Nfs3Metrics create(Configuration conf, String gatewayName) {
+    String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    JvmMetrics jm = JvmMetrics.create(gatewayName, sessionId, ms);
+
+    // Percentile measurement is off by default (no intervals configured)
+    int[] intervals = conf.getInts(conf.get(
+        NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY,
+        NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_DEFAULT));
+    return ms.register(new Nfs3Metrics(gatewayName, sessionId, intervals, jm));
+  }
+  
+  public String name() {
+    return name;
+  }
+
+  public JvmMetrics getJvmMetrics() {
+    return jvmMetrics;
+  }
+
+  public void incrBytesWritten(long bytes) {
+    bytesWritten.incr(bytes);
+  }
+
+  public void incrBytesRead(long bytes) {
+    bytesRead.incr(bytes);
+  }
+
+  public void addGetattr(long latencyNanos) {
+    getattr.add(latencyNanos);
+  }
+
+  public void addSetattr(long latencyNanos) {
+    setattr.add(latencyNanos);
+  }
+
+  public void addLookup(long latencyNanos) {
+    lookup.add(latencyNanos);
+  }
+
+  public void addAccess(long latencyNanos) {
+    access.add(latencyNanos);
+  }
+
+  public void addReadlink(long latencyNanos) {
+    readlink.add(latencyNanos);
+  }
+
+  public void addRead(long latencyNanos) {
+    read.add(latencyNanos);
+    for (MutableQuantiles q : readNanosQuantiles) {
+      q.add(latencyNanos);
+    }
+  }
+
+  public void addWrite(long latencyNanos) {
+    write.add(latencyNanos);
+    for (MutableQuantiles q : writeNanosQuantiles) {
+      q.add(latencyNanos);
+    }
+  }
+
+  public void addCreate(long latencyNanos) {
+    create.add(latencyNanos);
+  }
+
+  public void addMkdir(long latencyNanos) {
+    mkdir.add(latencyNanos);
+  }
+
+  public void addSymlink(long latencyNanos) {
+    symlink.add(latencyNanos);
+  }
+
+  public void addMknod(long latencyNanos) {
+    mknod.add(latencyNanos);
+  }
+
+  public void addRemove(long latencyNanos) {
+    remove.add(latencyNanos);
+  }
+
+  public void addRmdir(long latencyNanos) {
+    rmdir.add(latencyNanos);
+  }
+
+  public void addRename(long latencyNanos) {
+    rename.add(latencyNanos);
+  }
+
+  public void addLink(long latencyNanos) {
+    link.add(latencyNanos);
+  }
+
+  public void addReaddir(long latencyNanos) {
+    readdir.add(latencyNanos);
+  }
+
+  public void addReaddirplus(long latencyNanos) {
+    readdirplus.add(latencyNanos);
+  }
+
+  public void addFsstat(long latencyNanos) {
+    fsstat.add(latencyNanos);
+  }
+
+  public void addFsinfo(long latencyNanos) {
+    fsinfo.add(latencyNanos);
+  }
+
+  public void addPathconf(long latencyNanos) {
+    pathconf.add(latencyNanos);
+  }
+
+  public void addCommit(long latencyNanos) {
+    commit.add(latencyNanos);
+    for (MutableQuantiles q : commitNanosQuantiles) {
+      q.add(latencyNanos);
+    }
+  }
+
+}
\ No newline at end of file
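
For readers skimming the diff, here is a minimal sketch of the metrics2 pattern Nfs3Metrics follows: an annotated source class whose @Metric fields are wired up through a MetricsRegistry and registered with the default metrics system. The class name ExampleMetrics, the "example"/"Example" record names, and the single 60-second quantile window are illustrative assumptions, not part of this patch.

    import org.apache.hadoop.metrics2.MetricsSystem;
    import org.apache.hadoop.metrics2.annotation.Metric;
    import org.apache.hadoop.metrics2.annotation.Metrics;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
    import org.apache.hadoop.metrics2.lib.MetricsRegistry;
    import org.apache.hadoop.metrics2.lib.MutableQuantiles;
    import org.apache.hadoop.metrics2.lib.MutableRate;

    @Metrics(about = "Example metrics", context = "dfs")
    public class ExampleMetrics {
      final MetricsRegistry registry = new MetricsRegistry("example");
      // Instantiated by the metrics framework when the source is registered.
      @Metric MutableRate read;
      final MutableQuantiles readQuantiles;

      ExampleMetrics(int intervalSecs) {
        readQuantiles = registry.newQuantiles("readProcessNanos" + intervalSecs + "s",
            "Read process in ns", "ops", "latency", intervalSecs);
      }

      void addRead(long latencyNanos) {
        read.add(latencyNanos);          // running average and stddev
        readQuantiles.add(latencyNanos); // rolling percentile window
      }

      static ExampleMetrics create() {
        DefaultMetricsSystem.initialize("Example");
        MetricsSystem ms = DefaultMetricsSystem.instance();
        return ms.register(new ExampleMetrics(60)); // one 60s percentile window
      }

      public static void main(String[] args) {
        ExampleMetrics m = create();
        m.addRead(1_000_000L); // record a single 1 ms read sample
      }
    }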

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6f2a3f1/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
index 50e83ed..cc17394 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
@@ -213,4 +213,8 @@ public class Nfs3Utils {
     data[7] = (byte) (v >>> 0);
     return data;
   }
+  
+  public static long getElapsedTime(long startTimeNano) {
+    return System.nanoTime() - startTimeNano;
+  }
 }
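
The helper above is plain System.nanoTime() subtraction. Below is a tiny, self-contained sketch of how the callers in this patch use it (the class name and the Thread.sleep stand-in are illustrative): capture one timestamp before the work, report the elapsed nanoseconds afterwards.

    public class LatencyTimingSketch {
      // Same arithmetic as Nfs3Utils.getElapsedTime(): nanoseconds since start.
      static long getElapsedTime(long startTimeNano) {
        return System.nanoTime() - startTimeNano;
      }

      public static void main(String[] args) throws InterruptedException {
        long start = System.nanoTime(); // captured once, before the handler runs
        Thread.sleep(5);                // stands in for an NFS procedure handler
        System.out.println("elapsed ns: " + getElapsedTime(start));
      }
    }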

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6f2a3f1/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
index b31baf5..a06d1c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
@@ -129,9 +129,8 @@ class OpenFileCtx {
     private final Channel channel;
     private final int xid;
     private final Nfs3FileAttributes preOpAttr;
-
-    // Remember time for debug purpose
-    private final long startTime;
+    
+    public final long startTime;
 
     long getOffset() {
       return offset;
@@ -159,7 +158,7 @@ class OpenFileCtx {
       this.channel = channel;
       this.xid = xid;
       this.preOpAttr = preOpAttr;
-      this.startTime = Time.monotonicNow();
+      this.startTime = System.nanoTime();
     }
 
     @Override
@@ -687,6 +686,8 @@ class OpenFileCtx {
         WccData fileWcc = new WccData(preOpAttr, latestAttr);
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
             fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
+        RpcProgramNfs3.metrics.addWrite(Nfs3Utils
+            .getElapsedTime(writeCtx.startTime));
         Nfs3Utils
             .writeChannel(channel, response.serialize(new XDR(),
                 xid, new VerifierNone()), xid);
@@ -1131,14 +1132,16 @@ class OpenFileCtx {
 
       COMMIT3Response response = new COMMIT3Response(status, wccData,
           Nfs3Constant.WRITE_COMMIT_VERF);
+      RpcProgramNfs3.metrics.addCommit(Nfs3Utils
+          .getElapsedTime(commit.startTime));
       Nfs3Utils.writeChannelCommit(commit.getChannel(), response
           .serialize(new XDR(), commit.getXid(),
               new VerifierNone()), commit.getXid());
       
       if (LOG.isDebugEnabled()) {
         LOG.debug("FileId: " + latestAttr.getFileId() + " Service time:"
-            + (Time.monotonicNow() - commit.getStartTime())
-            + "ms. Sent response for commit:" + commit);
+            + Nfs3Utils.getElapsedTime(commit.startTime)
+            + "ns. Sent response for commit:" + commit);
       }
       entry = pendingCommits.firstEntry();
     }
@@ -1162,6 +1165,7 @@ class OpenFileCtx {
       // The write is not protected by lock. asyncState is used to make sure
       // there is one thread doing write back at any time    
       writeCtx.writeData(fos);
+      RpcProgramNfs3.metrics.incrBytesWritten(writeCtx.getCount());
       
       long flushedOffset = getFlushedOffset();
       if (flushedOffset != (offset + count)) {
@@ -1213,6 +1217,7 @@ class OpenFileCtx {
         }
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
             fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
+        RpcProgramNfs3.metrics.addWrite(Nfs3Utils.getElapsedTime(writeCtx.startTime));
         Nfs3Utils.writeChannel(channel, response.serialize(
             new XDR(), xid, new VerifierNone()), xid);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6f2a3f1/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index aaac797..148d4f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -48,6 +48,8 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.nfs.AccessPrivilege;
 import org.apache.hadoop.nfs.NfsExports;
 import org.apache.hadoop.nfs.NfsFileType;
@@ -164,6 +166,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
   private final RpcCallCache rpcCallCache;
   private JvmPauseMonitor pauseMonitor;
   private Nfs3HttpServer infoServer = null;
+  static Nfs3Metrics metrics;
 
   public RpcProgramNfs3(NfsConfiguration config, DatagramSocket registrationSocket,
       boolean allowInsecurePorts) throws IOException {
@@ -209,6 +212,17 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     infoServer = new Nfs3HttpServer(config);
   }
 
+  public static RpcProgramNfs3 createRpcProgramNfs3(NfsConfiguration config,
+      DatagramSocket registrationSocket, boolean allowInsecurePorts)
+      throws IOException {
+    DefaultMetricsSystem.initialize("Nfs3");
+    String displayName = DNS.getDefaultHost("default", "default")
+        + config.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
+            NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT);
+    metrics = Nfs3Metrics.create(config, displayName);
+    return new RpcProgramNfs3(config, registrationSocket, allowInsecurePorts);
+  }
+  
   private void clearDirectory(String writeDumpDir) throws IOException {
     File dumpDir = new File(writeDumpDir);
     if (dumpDir.exists()) {
@@ -225,10 +239,11 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
   }
   
   @Override
-  public void startDaemons() {    
+  public void startDaemons() {
     if (pauseMonitor == null) {
       pauseMonitor = new JvmPauseMonitor(config);
       pauseMonitor.start();
+      metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
     }
     writeManager.startAsyncDataSerivce();
     try {
@@ -770,6 +785,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
         try {
           readCount = fis.read(offset, readbuffer, 0, count);
+          metrics.incrBytesRead(readCount);
         } catch (IOException e) {
           // TODO: A cleaner way is to throw a new type of exception
           // which requires incompatible changes.
@@ -2049,8 +2065,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           : (request.getOffset() + request.getCount());
 
       // Insert commit as an async request
-      writeManager.handleCommit(dfsClient, handle, commitOffset,
-          channel, xid, preOpAttr);
+      writeManager.handleCommit(dfsClient, handle, commitOffset, channel, xid,
+          preOpAttr);
       return null;
     } catch (IOException e) {
       LOG.warn("Exception ", e);
@@ -2132,20 +2148,29 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         }
       }
     }
-
+    
+    // Since write and commit could be async, they use their own startTime and
+    // only record successful requests.
+    final long startTime = System.nanoTime();
+    
     NFS3Response response = null;
     if (nfsproc3 == NFSPROC3.NULL) {
       response = nullProcedure();
     } else if (nfsproc3 == NFSPROC3.GETATTR) {
       response = getattr(xdr, info);
+      metrics.addGetattr(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.SETATTR) {
       response = setattr(xdr, info);
+      metrics.addSetattr(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.LOOKUP) {
       response = lookup(xdr, info);
+      metrics.addLookup(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.ACCESS) {
       response = access(xdr, info);
+      metrics.addAccess(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.READLINK) {
       response = readlink(xdr, info);
+      metrics.addReadlink(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.READ) {
       if (LOG.isDebugEnabled()) {
           LOG.debug(Nfs3Utils.READ_RPC_START + xid);
@@ -2154,6 +2179,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       if (LOG.isDebugEnabled() && (nfsproc3 == NFSPROC3.READ)) {
         LOG.debug(Nfs3Utils.READ_RPC_END + xid);
       }
+      metrics.addRead(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.WRITE) {
       if (LOG.isDebugEnabled()) {
           LOG.debug(Nfs3Utils.WRITE_RPC_START + xid);
@@ -2162,30 +2188,43 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       // Write end debug trace is in Nfs3Utils.writeChannel
     } else if (nfsproc3 == NFSPROC3.CREATE) {
       response = create(xdr, info);
+      metrics.addCreate(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.MKDIR) {
       response = mkdir(xdr, info);
+      metrics.addMkdir(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.SYMLINK) {
       response = symlink(xdr, info);
+      metrics.addSymlink(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.MKNOD) {
       response = mknod(xdr, info);
+      metrics.addMknod(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.REMOVE) {
       response = remove(xdr, info);
+      metrics.addRemove(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.RMDIR) {
       response = rmdir(xdr, info);
+      metrics.addRmdir(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.RENAME) {
       response = rename(xdr, info);
+      metrics.addRename(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.LINK) {
       response = link(xdr, info);
+      metrics.addLink(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.READDIR) {
       response = readdir(xdr, info);
+      metrics.addReaddir(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.READDIRPLUS) {
       response = readdirplus(xdr, info);
+      metrics.addReaddirplus(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.FSSTAT) {
       response = fsstat(xdr, info);
+      metrics.addFsstat(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.FSINFO) {
       response = fsinfo(xdr, info);
+      metrics.addFsinfo(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.PATHCONF) {
-      response = pathconf(xdr,info);
+      response = pathconf(xdr, info);
+      metrics.addPathconf(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.COMMIT) {
       response = commit(xdr, info);
     } else {
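
The createRpcProgramNfs3() factory above ties the gateway into the metrics system before the RPC program is constructed. A minimal sketch of that bootstrap order follows, with "localhost", port 2049, and the session id used as placeholder values rather than anything read from NfsConfiguration:

    import org.apache.hadoop.metrics2.MetricsSystem;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
    import org.apache.hadoop.metrics2.source.JvmMetrics;

    public class MetricsBootstrapSketch {
      public static void main(String[] args) {
        // Initialize the metrics system once per process, then register a JVM
        // metrics source under a display name built from host and port.
        DefaultMetricsSystem.initialize("Nfs3");
        MetricsSystem ms = DefaultMetricsSystem.instance();
        String displayName = "localhost" + 2049; // placeholder host and NFS port
        JvmMetrics jm = JvmMetrics.create(displayName, "session-1", ms);
        System.out.println("JVM metrics registered: " + (jm != null));
      }
    }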

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6f2a3f1/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
index 758fd39..82c826f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
@@ -84,7 +84,8 @@ class WriteCtx {
   private long dumpFileOffset;
   
   private volatile DataState dataState;
-
+  public final long startTime;
+  
   public DataState getDataState() {
     return dataState;
   }
@@ -235,6 +236,7 @@ class WriteCtx {
     this.replied = replied;
     this.dataState = dataState;
     raf = null;
+    this.startTime = System.nanoTime();
   }
   
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6f2a3f1/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
index e71eaa5..df02e04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
@@ -224,6 +224,7 @@ public class WriteManager {
       status = Nfs3Status.NFS3_OK;
 
     } else {
+      // commit request triggered by read won't create pending commit obj
       COMMIT_STATUS ret = openFileCtx.checkCommit(dfsClient, commitOffset,
           null, 0, null, true);
       switch (ret) {
@@ -260,6 +261,7 @@ public class WriteManager {
   
   void handleCommit(DFSClient dfsClient, FileHandle fileHandle,
       long commitOffset, Channel channel, int xid, Nfs3FileAttributes preOpAttr) {
+    long startTime = System.nanoTime();
     int status;
     OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
 
@@ -306,9 +308,9 @@ public class WriteManager {
     WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr);
     COMMIT3Response response = new COMMIT3Response(status, fileWcc,
         Nfs3Constant.WRITE_COMMIT_VERF);
+    RpcProgramNfs3.metrics.addCommit(Nfs3Utils.getElapsedTime(startTime));
     Nfs3Utils.writeChannelCommit(channel,
-        response.serialize(new XDR(), xid, new VerifierNone()),
-        xid);
+        response.serialize(new XDR(), xid, new VerifierNone()), xid);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6f2a3f1/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3HttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3HttpServer.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3HttpServer.java
index d44e9ab..46dbd42 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3HttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3HttpServer.java
@@ -48,6 +48,10 @@ public class TestNfs3HttpServer {
         HttpConfig.Policy.HTTP_AND_HTTPS.name());
     conf.set(NfsConfigKeys.NFS_HTTP_ADDRESS_KEY, "localhost:0");
     conf.set(NfsConfigKeys.NFS_HTTPS_ADDRESS_KEY, "localhost:0");
+    // Use ephemeral ports in case tests are running in parallel
+    conf.setInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, 0);
+    conf.setInt(NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY, 0);
+    
     File base = new File(BASEDIR);
     FileUtil.fullyDelete(base);
     base.mkdirs();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6f2a3f1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e2db1f6..5e75424 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -273,6 +273,8 @@ Release 2.7.0 - UNRELEASED
     (Maysam Yabandeh via wang)
 
     HDFS-7424. Add web UI for NFS gateway (brandonli)
+    
+    HDFS-7449. Add metrics to NFS gateway (brandonli)
 
   IMPROVEMENTS
 


[30/50] [abbrv] hadoop git commit: HDFS-7515. Fix new findbugs warnings in hadoop-hdfs. Contributed by Haohui Mai.

Posted by ka...@apache.org.
HDFS-7515. Fix new findbugs warnings in hadoop-hdfs. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9f6d0c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9f6d0c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9f6d0c9

Branch: refs/heads/YARN-2139
Commit: b9f6d0c956f0278c8b9b83e05b523a442a730ebb
Parents: 614b6af
Author: Haohui Mai <wh...@apache.org>
Authored: Thu Dec 11 12:36:13 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Thu Dec 11 12:36:13 2014 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 +
 .../apache/hadoop/hdfs/BlockReaderFactory.java  |  2 -
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 52 +++++++-------
 .../hdfs/qjournal/server/JournalNode.java       |  1 +
 .../hadoop/hdfs/server/common/Storage.java      |  2 +-
 .../hdfs/server/datanode/BlockReceiver.java     |  7 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |  8 ++-
 .../hdfs/server/datanode/DataStorage.java       |  2 +-
 .../datanode/fsdataset/impl/BlockPoolSlice.java | 16 ++---
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 33 ++++-----
 .../datanode/web/webhdfs/ExceptionHandler.java  |  7 +-
 .../datanode/web/webhdfs/WebHdfsHandler.java    |  8 ++-
 .../apache/hadoop/hdfs/server/mover/Mover.java  |  5 +-
 .../hdfs/server/namenode/FSDirRenameOp.java     |  1 -
 .../server/namenode/FSDirStatAndListingOp.java  |  6 +-
 .../hdfs/server/namenode/FSImageUtil.java       |  4 +-
 .../server/namenode/FileJournalManager.java     |  2 +-
 .../hadoop/hdfs/server/namenode/INode.java      |  4 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  8 ---
 .../hdfs/server/namenode/NamenodeFsck.java      |  4 --
 .../server/namenode/XAttrPermissionFilter.java  |  2 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |  2 +-
 .../DelimitedImageVisitor.java                  |  2 +-
 .../offlineImageViewer/FSImageHandler.java      | 71 ++++++++++++--------
 .../tools/offlineImageViewer/FSImageLoader.java |  8 +--
 .../FileDistributionCalculator.java             | 13 ++--
 .../FileDistributionVisitor.java                |  4 +-
 .../offlineImageViewer/LsImageVisitor.java      |  6 +-
 .../OfflineImageViewerPB.java                   | 47 ++++++-------
 .../offlineImageViewer/PBImageXmlWriter.java    | 13 ++--
 .../apache/hadoop/hdfs/TestEncryptionZones.java |  6 +-
 .../server/namenode/snapshot/TestSnapshot.java  |  8 +--
 .../TestOfflineImageViewer.java                 | 30 +++++----
 33 files changed, 187 insertions(+), 199 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9049083..e2db1f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -572,6 +572,8 @@ Release 2.7.0 - UNRELEASED
     HDFS-7475. Make TestLazyPersistFiles#testLazyPersistBlocksAreSaved
     deterministic. (Xiaoyu Yao via Arpit Agarwal)
 
+    HDFS-7515. Fix new findbugs warnings in hadoop-hdfs. (wheat9)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
index 13e0a52..7e40917 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
@@ -668,7 +668,6 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
       Peer peer = null;
       try {
         curPeer = nextTcpPeer();
-        if (curPeer == null) break;
         if (curPeer.fromCache) remainingCacheTries--;
         peer = curPeer.peer;
         blockReader = getRemoteBlockReader(peer);
@@ -699,7 +698,6 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
         }
       }
     }
-    return null;
   }
 
   public static class BlockReaderPeer {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index e574d1d..67d3143 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -39,6 +39,7 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -241,8 +242,6 @@ public class DFSOutputStream extends FSOutputSummer
     /**
      * Create a new packet.
      * 
-     * @param pktSize maximum size of the packet, 
-     *                including checksum data and actual data.
      * @param chunksPerPkt maximum number of chunks per packet.
      * @param offsetInBlock offset in bytes into the HDFS block.
      */
@@ -405,7 +404,8 @@ public class DFSOutputStream extends FSOutputSummer
     private String[] favoredNodes;
     volatile boolean hasError = false;
     volatile int errorIndex = -1;
-    volatile int restartingNodeIndex = -1; // Restarting node index
+    // Restarting node index
+    AtomicInteger restartingNodeIndex = new AtomicInteger(-1);
     private long restartDeadline = 0; // Deadline of DN restart
     private BlockConstructionStage stage;  // block construction stage
     private long bytesSent = 0; // number of bytes that've been sent
@@ -556,7 +556,7 @@ public class DFSOutputStream extends FSOutputSummer
         try {
           // process datanode IO errors if any
           boolean doSleep = false;
-          if (hasError && (errorIndex >= 0 || restartingNodeIndex >= 0)) {
+          if (hasError && (errorIndex >= 0 || restartingNodeIndex.get() >= 0)) {
             doSleep = processDatanodeError();
           }
 
@@ -699,7 +699,7 @@ public class DFSOutputStream extends FSOutputSummer
           }
         } catch (Throwable e) {
           // Log warning if there was a real error.
-          if (restartingNodeIndex == -1) {
+          if (restartingNodeIndex.get() == -1) {
             DFSClient.LOG.warn("DataStreamer Exception", e);
           }
           if (e instanceof IOException) {
@@ -708,7 +708,7 @@ public class DFSOutputStream extends FSOutputSummer
             setLastException(new IOException("DataStreamer Exception: ",e));
           }
           hasError = true;
-          if (errorIndex == -1 && restartingNodeIndex == -1) {
+          if (errorIndex == -1 && restartingNodeIndex.get() == -1) {
             // Not a datanode issue
             streamerClosed = true;
           }
@@ -806,7 +806,7 @@ public class DFSOutputStream extends FSOutputSummer
 
     /** Set the restarting node index. Called by responder */
     synchronized void setRestartingNodeIndex(int idx) {
-      restartingNodeIndex = idx;
+      restartingNodeIndex.set(idx);
       // If the data streamer has already set the primary node
       // bad, clear it. It is likely that the write failed due to
       // the DN shutdown. Even if it was a real failure, the pipeline
@@ -821,7 +821,7 @@ public class DFSOutputStream extends FSOutputSummer
      */
     synchronized void tryMarkPrimaryDatanodeFailed() {
       // There should be no existing error and no ongoing restart.
-      if ((errorIndex == -1) && (restartingNodeIndex == -1)) {
+      if ((errorIndex == -1) && (restartingNodeIndex.get() == -1)) {
         errorIndex = 0;
       }
     }
@@ -962,7 +962,7 @@ public class DFSOutputStream extends FSOutputSummer
               synchronized (dataQueue) {
                 dataQueue.notifyAll();
               }
-              if (restartingNodeIndex == -1) {
+              if (restartingNodeIndex.get() == -1) {
                 DFSClient.LOG.warn("DFSOutputStream ResponseProcessor exception "
                      + " for block " + block, e);
               }
@@ -1186,7 +1186,7 @@ public class DFSOutputStream extends FSOutputSummer
         // Sleep before reconnect if a dn is restarting.
         // This process will be repeated until the deadline or the datanode
         // starts back up.
-        if (restartingNodeIndex >= 0) {
+        if (restartingNodeIndex.get() >= 0) {
           // 4 seconds or the configured deadline period, whichever is shorter.
           // This is the retry interval and recovery will be retried in this
           // interval until timeout or success.
@@ -1196,7 +1196,7 @@ public class DFSOutputStream extends FSOutputSummer
             Thread.sleep(delay);
           } catch (InterruptedException ie) {
             lastException.set(new IOException("Interrupted while waiting for " +
-                "datanode to restart. " + nodes[restartingNodeIndex]));
+                "datanode to restart. " + nodes[restartingNodeIndex.get()]));
             streamerClosed = true;
             return false;
           }
@@ -1237,21 +1237,21 @@ public class DFSOutputStream extends FSOutputSummer
           setPipeline(newnodes, newStorageTypes, newStorageIDs);
 
           // Just took care of a node error while waiting for a node restart
-          if (restartingNodeIndex >= 0) {
+          if (restartingNodeIndex.get() >= 0) {
             // If the error came from a node further away than the restarting
             // node, the restart must have been complete.
-            if (errorIndex > restartingNodeIndex) {
-              restartingNodeIndex = -1;
-            } else if (errorIndex < restartingNodeIndex) {
+            if (errorIndex > restartingNodeIndex.get()) {
+              restartingNodeIndex.set(-1);
+            } else if (errorIndex < restartingNodeIndex.get()) {
               // the node index has shifted.
-              restartingNodeIndex--;
+              restartingNodeIndex.decrementAndGet();
             } else {
               // this shouldn't happen...
               assert false;
             }
           }
 
-          if (restartingNodeIndex == -1) {
+          if (restartingNodeIndex.get() == -1) {
             hasError = false;
           }
           lastException.set(null);
@@ -1293,10 +1293,10 @@ public class DFSOutputStream extends FSOutputSummer
           success = createBlockOutputStream(nodes, storageTypes, newGS, isRecovery);
         }
 
-        if (restartingNodeIndex >= 0) {
+        if (restartingNodeIndex.get() >= 0) {
           assert hasError == true;
           // check errorIndex set above
-          if (errorIndex == restartingNodeIndex) {
+          if (errorIndex == restartingNodeIndex.get()) {
             // ignore, if came from the restarting node
             errorIndex = -1;
           }
@@ -1306,8 +1306,8 @@ public class DFSOutputStream extends FSOutputSummer
           }
           // expired. declare the restarting node dead
           restartDeadline = 0;
-          int expiredNodeIndex = restartingNodeIndex;
-          restartingNodeIndex = -1;
+          int expiredNodeIndex = restartingNodeIndex.get();
+          restartingNodeIndex.set(-1);
           DFSClient.LOG.warn("Datanode did not restart in time: " +
               nodes[expiredNodeIndex]);
           // Mark the restarting node as failed. If there is any other failed
@@ -1459,7 +1459,7 @@ public class DFSOutputStream extends FSOutputSummer
           // from the local datanode. Thus it is safe to treat this as a
           // regular node error.
           if (PipelineAck.isRestartOOBStatus(pipelineStatus) &&
-            restartingNodeIndex == -1) {
+            restartingNodeIndex.get() == -1) {
             checkRestart = true;
             throw new IOException("A datanode is restarting.");
           }
@@ -1476,10 +1476,10 @@ public class DFSOutputStream extends FSOutputSummer
           assert null == blockStream : "Previous blockStream unclosed";
           blockStream = out;
           result =  true; // success
-          restartingNodeIndex = -1;
+          restartingNodeIndex.set(-1);
           hasError = false;
         } catch (IOException ie) {
-          if (restartingNodeIndex == -1) {
+          if (restartingNodeIndex.get() == -1) {
             DFSClient.LOG.info("Exception in createBlockOutputStream", ie);
           }
           if (ie instanceof InvalidEncryptionKeyException && refetchEncryptionKey > 0) {
@@ -1511,10 +1511,10 @@ public class DFSOutputStream extends FSOutputSummer
           if (checkRestart && shouldWaitForRestart(errorIndex)) {
             restartDeadline = dfsClient.getConf().datanodeRestartTimeout +
                 Time.now();
-            restartingNodeIndex = errorIndex;
+            restartingNodeIndex.set(errorIndex);
             errorIndex = -1;
             DFSClient.LOG.info("Waiting for the datanode to be restarted: " +
-                nodes[restartingNodeIndex]);
+                nodes[restartingNodeIndex.get()]);
           }
           hasError = true;
           setLastException(ie);
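
The change above swaps a volatile int for an AtomicInteger because the streamer and responder threads both read and shift the index. A minimal standalone sketch of why that matters (class and helper method names here are illustrative, not taken from DFSOutputStream):

    import java.util.concurrent.atomic.AtomicInteger;

    public class RestartIndexSketch {
      // volatile gives visibility, but a compound update such as "index--" is
      // still a separate read and write and can race between two threads;
      // AtomicInteger makes the read-modify-write a single atomic operation.
      private final AtomicInteger restartingNodeIndex = new AtomicInteger(-1);

      void setRestartingNodeIndex(int idx) {
        restartingNodeIndex.set(idx);
      }

      void onEarlierNodeDropped() {
        restartingNodeIndex.decrementAndGet(); // index shifts down by one
      }

      boolean waitingForRestart() {
        return restartingNodeIndex.get() >= 0;
      }

      public static void main(String[] args) {
        RestartIndexSketch s = new RestartIndexSketch();
        s.setRestartingNodeIndex(2);
        s.onEarlierNodeDropped();
        System.out.println(s.waitingForRestart()); // true, index is now 1
      }
    }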

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
index 588bc58..a5a40f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
@@ -233,6 +233,7 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
     Preconditions.checkArgument(jid != null &&
         !jid.isEmpty(),
         "bad journal identifier: %s", jid);
+    assert jid != null;
     return new File(new File(dir), jid);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 31fdb84..e6bd5b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -727,7 +727,7 @@ public abstract class Storage extends StorageInfo {
         file.close();
         throw e;
       }
-      if (res != null && !deletionHookAdded) {
+      if (!deletionHookAdded) {
         // If the file existed prior to our startup, we didn't
         // call deleteOnExit above. But since we successfully locked
         // the dir, we can take care of cleaning it up.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 2e388f9..08c96be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -29,6 +29,8 @@ import java.io.FileOutputStream;
 import java.io.FileWriter;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.LinkedList;
@@ -836,9 +838,8 @@ class BlockReceiver implements Closeable {
               LOG.warn("Failed to delete restart meta file: " +
                   restartMeta.getPath());
             }
-            FileWriter out = null;
-            try {
-              out = new FileWriter(restartMeta);
+            try (Writer out = new OutputStreamWriter(
+                new FileOutputStream(restartMeta), "UTF-8")) {
               // write out the current time.
               out.write(Long.toString(Time.now() + restartBudget));
               out.flush();
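
The hunk above replaces a bare FileWriter with an OutputStreamWriter inside try-with-resources. A standalone sketch of the same pattern, using a temporary file as a stand-in for the restart-meta path:

    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.io.OutputStreamWriter;
    import java.io.Writer;
    import java.nio.charset.StandardCharsets;

    public class ExplicitCharsetWriteSketch {
      public static void main(String[] args) throws IOException {
        File restartMeta = File.createTempFile("restart", ".meta"); // stand-in path
        // FileWriter picks up the platform default encoding (the findbugs
        // complaint); wrapping a FileOutputStream in an OutputStreamWriter pins
        // UTF-8, and try-with-resources closes the stream even if write() fails.
        try (Writer out = new OutputStreamWriter(
            new FileOutputStream(restartMeta), StandardCharsets.UTF_8)) {
          out.write(Long.toString(System.currentTimeMillis() + 50_000L));
        }
        System.out.println("wrote " + restartMeta.getPath());
      }
    }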

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 13c32d5..899a729 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -580,7 +580,8 @@ public class DataNode extends ReconfigurableBase
           try {
             IOException ioe = ioExceptionFuture.get();
             if (ioe != null) {
-              errorMessageBuilder.append(String.format("FAILED TO ADD: %s: %s\n",
+              errorMessageBuilder.append(
+                  String.format("FAILED TO ADD: %s: %s%n",
                   volume, ioe.getMessage()));
               LOG.error("Failed to add volume: " + volume, ioe);
             } else {
@@ -588,8 +589,9 @@ public class DataNode extends ReconfigurableBase
               LOG.info("Successfully added volume: " + volume);
             }
           } catch (Exception e) {
-            errorMessageBuilder.append(String.format("FAILED to ADD: %s: %s\n",
-                volume, e.getMessage()));
+            errorMessageBuilder.append(
+                String.format("FAILED to ADD: %s: %s%n", volume,
+                              e.toString()));
           }
         }
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index c90ef95..15e7f55 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -425,7 +425,7 @@ public class DataStorage extends Storage {
           LOG.warn(String.format(
             "I/O error attempting to unlock storage directory %s.",
             sd.getRoot()), e);
-          errorMsgBuilder.append(String.format("Failed to remove %s: %s\n",
+          errorMsgBuilder.append(String.format("Failed to remove %s: %s%n",
               sd.getRoot(), e.getMessage()));
         }
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index 77cdb91..5a69e1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -22,10 +22,13 @@ import java.io.DataInputStream;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
 import java.io.FileWriter;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.OutputStreamWriter;
 import java.io.RandomAccessFile;
+import java.io.Writer;
 import java.util.Scanner;
 
 import org.apache.commons.io.FileUtils;
@@ -186,7 +189,7 @@ class BlockPoolSlice {
     Scanner sc;
 
     try {
-      sc = new Scanner(new File(currentDir, DU_CACHE_FILE));
+      sc = new Scanner(new File(currentDir, DU_CACHE_FILE), "UTF-8");
     } catch (FileNotFoundException fnfe) {
       return -1;
     }
@@ -227,23 +230,18 @@ class BlockPoolSlice {
         outFile.getParent());
     }
 
-    FileWriter out = null;
     try {
       long used = getDfsUsed();
-      if (used > 0) {
-        out = new FileWriter(outFile);
+      try (Writer out = new OutputStreamWriter(
+          new FileOutputStream(outFile), "UTF-8")) {
         // mtime is written last, so that truncated writes won't be valid.
         out.write(Long.toString(used) + " " + Long.toString(Time.now()));
         out.flush();
-        out.close();
-        out = null;
       }
     } catch (IOException ioe) {
       // If write failed, the volume might be bad. Since the cache file is
       // not critical, log the error and continue.
       FsDatasetImpl.LOG.warn("Failed to write dfsUsed to " + outFile, ioe);
-    } finally {
-      IOUtils.cleanup(null, out);
     }
   }
 
@@ -447,7 +445,7 @@ class BlockPoolSlice {
             File.pathSeparator + "." + file.getName() + ".restart");
         Scanner sc = null;
         try {
-          sc = new Scanner(restartMeta);
+          sc = new Scanner(restartMeta, "UTF-8");
           // The restart meta file exists
           if (sc.hasNextLong() && (sc.nextLong() > Time.now())) {
             // It didn't expire. Load the replica as a RBW.
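
The read side of the same class of findbugs warnings: Scanner also falls back to the platform charset unless one is named. A small sketch of reading back the "<usedBytes> <mtime>" cache file written above, with the file name "dfsUsed" used only as an illustration:

    import java.io.File;
    import java.io.FileNotFoundException;
    import java.util.Scanner;

    public class DuCacheReadSketch {
      // Returns the cached used-bytes value, or -1 if the file is missing or empty.
      static long readUsed(File cacheFile) {
        Scanner sc;
        try {
          sc = new Scanner(cacheFile, "UTF-8"); // charset named explicitly
        } catch (FileNotFoundException e) {
          return -1;
        }
        try {
          return sc.hasNextLong() ? sc.nextLong() : -1;
        } finally {
          sc.close();
        }
      }

      public static void main(String[] args) {
        System.out.println(readUsed(new File("dfsUsed")));
      }
    }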

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 2c6f409..538c796 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -769,7 +769,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     final byte[] crcs = new byte[checksum.getChecksumSize(data.length)];
 
     DataOutputStream metaOut = null;
-    InputStream dataIn = null;
     try {
       File parentFile = dstMeta.getParentFile();
       if (parentFile != null) {
@@ -782,22 +781,23 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
           new FileOutputStream(dstMeta), HdfsConstants.SMALL_BUFFER_SIZE));
       BlockMetadataHeader.writeHeader(metaOut, checksum);
 
-      dataIn = isNativeIOAvailable ?
+      int offset = 0;
+      try (InputStream dataIn = isNativeIOAvailable ?
           NativeIO.getShareDeleteFileInputStream(blockFile) :
-          new FileInputStream(blockFile);
+          new FileInputStream(blockFile)) {
 
-      int offset = 0;
-      for(int n; (n = dataIn.read(data, offset, data.length - offset)) != -1; ) {
-        if (n > 0) {
-          n += offset;
-          offset = n % checksum.getBytesPerChecksum();
-          final int length = n - offset;
+        for (int n; (n = dataIn.read(data, offset, data.length - offset)) != -1; ) {
+          if (n > 0) {
+            n += offset;
+            offset = n % checksum.getBytesPerChecksum();
+            final int length = n - offset;
 
-          if (length > 0) {
-            checksum.calculateChunkedSums(data, 0, length, crcs, 0);
-            metaOut.write(crcs, 0, checksum.getChecksumSize(length));
+            if (length > 0) {
+              checksum.calculateChunkedSums(data, 0, length, crcs, 0);
+              metaOut.write(crcs, 0, checksum.getChecksumSize(length));
 
-            System.arraycopy(data, length, data, 0, offset);
+              System.arraycopy(data, length, data, 0, offset);
+            }
           }
         }
       }
@@ -806,7 +806,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       checksum.calculateChunkedSums(data, 0, offset, crcs, 0);
       metaOut.write(crcs, 0, 4);
     } finally {
-      IOUtils.cleanup(LOG, dataIn, metaOut);
+      IOUtils.cleanup(LOG, metaOut);
     }
   }
 
@@ -1599,11 +1599,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
         }
         f = info.getBlockFile();
         v = (FsVolumeImpl)info.getVolume();
-        if (f == null) {
-          errors.add("Failed to delete replica " + invalidBlks[i]
-              +  ": File not found, volume=" + v);
-          continue;
-        }
         if (v == null) {
           errors.add("Failed to delete replica " + invalidBlks[i]
               +  ". No volume for this replica, file=" + f);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ExceptionHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ExceptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ExceptionHandler.java
index fea40d7..a7bb490 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ExceptionHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ExceptionHandler.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.web.webhdfs;
 
+import com.google.common.base.Charsets;
 import com.sun.jersey.api.ParamException;
 import com.sun.jersey.api.container.ContainerException;
 import io.netty.buffer.Unpooled;
@@ -39,7 +40,7 @@ import static io.netty.handler.codec.http.HttpResponseStatus.FORBIDDEN;
 import static io.netty.handler.codec.http.HttpResponseStatus.INTERNAL_SERVER_ERROR;
 import static io.netty.handler.codec.http.HttpResponseStatus.NOT_FOUND;
 import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
-import static org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler.APPLICATION_JSON;
+import static org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler.APPLICATION_JSON_UTF8;
 
 class ExceptionHandler {
   static Log LOG = WebHdfsHandler.LOG;
@@ -82,11 +83,11 @@ class ExceptionHandler {
       s = INTERNAL_SERVER_ERROR;
     }
 
-    final byte[] js = JsonUtil.toJsonString(e).getBytes();
+    final byte[] js = JsonUtil.toJsonString(e).getBytes(Charsets.UTF_8);
     DefaultFullHttpResponse resp =
       new DefaultFullHttpResponse(HTTP_1_1, s, Unpooled.wrappedBuffer(js));
 
-    resp.headers().set(CONTENT_TYPE, APPLICATION_JSON);
+    resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
     resp.headers().set(CONTENT_LENGTH, js.length);
     return resp;
   }
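
A note on why the byte conversion and the content type change together here: the response length is computed from the encoded bytes, so the charset used for getBytes() and the charset advertised in the header need to agree. A minimal JDK-only sketch (the sample JSON string is made up):

    import java.nio.charset.StandardCharsets;

    public class JsonResponseSketch {
      public static void main(String[] args) {
        // getBytes() with no argument uses the platform default charset; naming
        // UTF-8 keeps the byte length consistent with the declared
        // "application/json; charset=utf-8" content type on every platform.
        String json = "{\"RemoteException\":{\"message\":\"example\"}}";
        byte[] body = json.getBytes(StandardCharsets.UTF_8);
        System.out.println("Content-Type: application/json; charset=utf-8");
        System.out.println("Content-Length: " + body.length);
      }
    }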

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
index cf70218..f02780a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
@@ -29,6 +29,7 @@ import io.netty.handler.codec.http.HttpMethod;
 import io.netty.handler.codec.http.HttpRequest;
 import io.netty.handler.codec.http.QueryStringDecoder;
 import io.netty.handler.stream.ChunkedStream;
+import org.apache.commons.io.Charsets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -77,7 +78,8 @@ public class WebHdfsHandler extends SimpleChannelInboundHandler<HttpRequest> {
   public static final int WEBHDFS_PREFIX_LENGTH = WEBHDFS_PREFIX.length();
   public static final String APPLICATION_OCTET_STREAM =
     "application/octet-stream";
-  public static final String APPLICATION_JSON = "application/json";
+  public static final String APPLICATION_JSON_UTF8 =
+      "application/json; charset=utf-8";
 
   private final Configuration conf;
   private final Configuration confForCreate;
@@ -224,11 +226,11 @@ public class WebHdfsHandler extends SimpleChannelInboundHandler<HttpRequest> {
     } finally {
       IOUtils.cleanup(LOG, dfsclient);
     }
-    final byte[] js = JsonUtil.toJsonString(checksum).getBytes();
+    final byte[] js = JsonUtil.toJsonString(checksum).getBytes(Charsets.UTF_8);
     DefaultFullHttpResponse resp =
       new DefaultFullHttpResponse(HTTP_1_1, OK, Unpooled.wrappedBuffer(js));
 
-    resp.headers().set(CONTENT_TYPE, APPLICATION_JSON);
+    resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
     resp.headers().set(CONTENT_LENGTH, js.length);
     resp.headers().set(CONNECTION, CLOSE);
     ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index 108eb38..a22f920 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -48,8 +48,10 @@ import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
 import java.io.BufferedReader;
+import java.io.FileInputStream;
 import java.io.FileReader;
 import java.io.IOException;
+import java.io.InputStreamReader;
 import java.net.URI;
 import java.text.DateFormat;
 import java.util.*;
@@ -579,7 +581,8 @@ public class Mover {
 
     private static String[] readPathFile(String file) throws IOException {
       List<String> list = Lists.newArrayList();
-      BufferedReader reader = new BufferedReader(new FileReader(file));
+      BufferedReader reader = new BufferedReader(
+          new InputStreamReader(new FileInputStream(file), "UTF-8"));
       try {
         String line;
         while ((line = reader.readLine()) != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 511de7a..c62c88e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -433,7 +433,6 @@ class FSDirRenameOp {
         } else {
           fsd.addLastINodeNoQuotaCheck(dstIIP, removedDst);
         }
-        assert removedDst != null;
         if (removedDst.isReference()) {
           final INodeReference removedDstRef = removedDst.asReference();
           final INodeReference.WithCount wc = (INodeReference.WithCount)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 2e7ed6b..0f94171 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
+import org.apache.commons.io.Charsets;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;
 import org.apache.hadoop.fs.FileEncryptionInfo;
@@ -50,7 +51,7 @@ class FSDirStatAndListingOp {
     FSPermissionChecker pc = fsd.getPermissionChecker();
     byte[][] pathComponents = FSDirectory
         .getPathComponentsForReservedPath(srcArg);
-    final String startAfterString = new String(startAfter);
+    final String startAfterString = new String(startAfter, Charsets.UTF_8);
     final String src = fsd.resolvePath(pc, srcArg, pathComponents);
     final INodesInPath iip = fsd.getINodesInPath(src, true);
 
@@ -195,8 +196,7 @@ class FSDirStatAndListingOp {
             cur.getLocalStoragePolicyID():
             BlockStoragePolicySuite.ID_UNSPECIFIED;
         listing[i] = createFileStatus(fsd, cur.getLocalNameBytes(), cur,
-            needLocation, fsd.getStoragePolicyID(curPolicy,
-                parentStoragePolicy), snapshot, isRawPath, inodesInPath);
+            needLocation, fsd.getStoragePolicyID(curPolicy, parentStoragePolicy), snapshot, isRawPath, inodesInPath);
         listingCnt++;
         if (needLocation) {
             // Once we  hit lsLimit locations, stop.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageUtil.java
index 931386c..388a1bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageUtil.java
@@ -23,6 +23,7 @@ import java.io.InputStream;
 import java.io.RandomAccessFile;
 import java.util.Arrays;
 
+import org.apache.commons.io.Charsets;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
@@ -32,7 +33,8 @@ import org.apache.hadoop.io.compress.CompressionCodec;
 
 @InterfaceAudience.Private
 public final class FSImageUtil {
-  public static final byte[] MAGIC_HEADER = "HDFSIMG1".getBytes();
+  public static final byte[] MAGIC_HEADER =
+      "HDFSIMG1".getBytes(Charsets.UTF_8);
   public static final int FILE_VERSION = 1;
 
   public static boolean checkFileFormat(RandomAccessFile file)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
index 6001db5..921803c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
@@ -300,7 +300,7 @@ public class FileJournalManager implements JournalManager {
             .matcher(name);
         if (staleInprogressEditsMatch.matches()) {
           try {
-            long startTxId = Long.valueOf(staleInprogressEditsMatch.group(1));
+            long startTxId = Long.parseLong(staleInprogressEditsMatch.group(1));
             ret.add(new EditLogFile(f, startTxId, HdfsConstants.INVALID_TXID,
                 true));
             continue;
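
The Long.valueOf to Long.parseLong change above avoids creating a boxed Long that is immediately auto-unboxed into a primitive long. A tiny illustrative comparison, not taken from the patch:

    public class ParseIdiom {
      public static void main(String[] args) {
        String txt = "42";
        long viaParse = Long.parseLong(txt);        // primitive, no intermediate object
        long viaValueOf = Long.valueOf(txt);        // boxes to Long, then auto-unboxes
        System.out.println(viaParse == viaValueOf); // same value, different cost
      }
    }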

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 4454930..55430b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.PrintStream;
 import java.io.PrintWriter;
 import java.io.StringWriter;
+import java.nio.charset.Charset;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -769,8 +770,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
 
   @VisibleForTesting
   public final void dumpTreeRecursively(PrintStream out) {
-    dumpTreeRecursively(new PrintWriter(out, true), new StringBuilder(),
-        Snapshot.CURRENT_STATE_ID);
+    out.println(dumpTreeRecursively().toString());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 876afba..db22c4b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -974,10 +974,6 @@ class NameNodeRpcServer implements NamenodeProtocols {
   public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
   throws IOException {
     DatanodeInfo results[] = namesystem.datanodeReport(type);
-    if (results == null ) {
-      throw new IOException("Failed to get datanode report for " + type
-          + " datanodes.");
-    }
     return results;
   }
     
@@ -985,10 +981,6 @@ class NameNodeRpcServer implements NamenodeProtocols {
   public DatanodeStorageReport[] getDatanodeStorageReport(
       DatanodeReportType type) throws IOException {
     final DatanodeStorageReport[] reports = namesystem.getDatanodeStorageReport(type);
-    if (reports == null ) {
-      throw new IOException("Failed to get datanode storage report for " + type
-          + " datanodes.");
-    }
     return reports;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index bab8f5e..5eddeea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -643,10 +643,6 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
         }
         if (fos == null) {
           fos = dfs.create(target + "/" + chain, true);
-          if (fos == null) {
-            throw new IOException("Failed to copy " + fullName +
-                " to /lost+found: could not store chain " + chain);
-          }
           chain++;
         }
         

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
index 79dabb3..95f943d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
@@ -100,7 +100,7 @@ public class XAttrPermissionFilter {
   static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc,
       List<XAttr> xAttrs, boolean isRawPath) {
     assert xAttrs != null : "xAttrs can not be null";
-    if (xAttrs == null || xAttrs.isEmpty()) {
+    if (xAttrs.isEmpty()) {
       return xAttrs;
     }
     

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 484ac12..4073d5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -1476,7 +1476,7 @@ public class DFSAdmin extends FsShell {
           } else {
             out.print("FAILED: ");
           }
-          out.printf("Change property %s\n\tFrom: \"%s\"\n\tTo: \"%s\"\n",
+          out.printf("Change property %s%n\tFrom: \"%s\"%n\tTo: \"%s\"%n",
               result.getKey().prop, result.getKey().oldVal,
               result.getKey().newVal);
           if (result.getValue().isPresent()) {
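
The printf change above swaps a literal \n for %n, which java.util.Formatter expands to the platform line separator. A standalone sketch with made-up property name and values:

    public class PrintfNewline {
      public static void main(String[] args) {
        // %n -> System.lineSeparator() ("\r\n" on Windows, "\n" elsewhere)
        System.out.printf("Change property %s%n\tFrom: \"%s\"%n\tTo: \"%s\"%n",
            "dfs.heartbeat.interval", "3", "5");
      }
    }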

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/DelimitedImageVisitor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/DelimitedImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/DelimitedImageVisitor.java
index eb6cae3..bc5ff56 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/DelimitedImageVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/DelimitedImageVisitor.java
@@ -144,7 +144,7 @@ class DelimitedImageVisitor extends TextWriterImageVisitor {
     
     // Special case of file size, which is sum of the num bytes in each block
     if(element == ImageElement.NUM_BYTES)
-      fileSize += Long.valueOf(value);
+      fileSize += Long.parseLong(value);
     
     if(elements.containsKey(element) && element != ImageElement.NUM_BYTES)
       elements.put(element, value);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java
index eb93c87..43fcd69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java
@@ -17,11 +17,7 @@
  */
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-
+import com.google.common.base.Charsets;
 import io.netty.buffer.ByteBuf;
 import io.netty.buffer.Unpooled;
 import io.netty.channel.ChannelFutureListener;
@@ -30,19 +26,31 @@ import io.netty.channel.SimpleChannelInboundHandler;
 import io.netty.channel.group.ChannelGroup;
 import io.netty.handler.codec.http.DefaultFullHttpResponse;
 import io.netty.handler.codec.http.DefaultHttpResponse;
-import static io.netty.handler.codec.http.HttpResponseStatus.*;
-
 import io.netty.handler.codec.http.HttpMethod;
 import io.netty.handler.codec.http.HttpRequest;
 import io.netty.handler.codec.http.HttpResponseStatus;
-import static io.netty.handler.codec.http.HttpVersion.*;
 import io.netty.handler.codec.http.QueryStringDecoder;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.web.JsonUtil;
-import org.apache.hadoop.hdfs.web.resources.ExceptionHandler;
 
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
 
+import static io.netty.handler.codec.http.HttpHeaders.Names.CONNECTION;
+import static io.netty.handler.codec.http.HttpHeaders.Names.CONTENT_LENGTH;
+import static io.netty.handler.codec.http.HttpHeaders.Names.CONTENT_TYPE;
+import static io.netty.handler.codec.http.HttpHeaders.Values.CLOSE;
+import static io.netty.handler.codec.http.HttpResponseStatus.BAD_REQUEST;
+import static io.netty.handler.codec.http.HttpResponseStatus.INTERNAL_SERVER_ERROR;
+import static io.netty.handler.codec.http.HttpResponseStatus.METHOD_NOT_ALLOWED;
+import static io.netty.handler.codec.http.HttpResponseStatus.NOT_FOUND;
+import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
+import static org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler.APPLICATION_JSON_UTF8;
+import static org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler.WEBHDFS_PREFIX;
+import static org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler.WEBHDFS_PREFIX_LENGTH;
 /**
  * Implement the read-only WebHDFS API for fsimage.
  */
@@ -67,7 +75,7 @@ class FSImageHandler extends SimpleChannelInboundHandler<HttpRequest> {
     if (request.getMethod() != HttpMethod.GET) {
       DefaultHttpResponse resp = new DefaultHttpResponse(HTTP_1_1,
         METHOD_NOT_ALLOWED);
-      resp.headers().set("Connection", "close");
+      resp.headers().set(CONNECTION, CLOSE);
       ctx.write(resp).addListener(ChannelFutureListener.CLOSE);
       return;
     }
@@ -77,24 +85,29 @@ class FSImageHandler extends SimpleChannelInboundHandler<HttpRequest> {
 
     final String content;
     String path = getPath(decoder);
-    if ("GETFILESTATUS".equals(op)) {
-      content = image.getFileStatus(path);
-    } else if ("LISTSTATUS".equals(op)) {
-      content = image.listStatus(path);
-    } else if ("GETACLSTATUS".equals(op)) {
-      content = image.getAclStatus(path);
-    } else {
-      throw new IllegalArgumentException("Invalid value for webhdfs parameter" + " \"op\"");
+    switch (op) {
+      case "GETFILESTATUS":
+        content = image.getFileStatus(path);
+        break;
+      case "LISTSTATUS":
+        content = image.listStatus(path);
+        break;
+      case "GETACLSTATUS":
+        content = image.getAclStatus(path);
+        break;
+      default:
+        throw new IllegalArgumentException(
+            "Invalid value for webhdfs parameter" + " \"op\"");
     }
 
     LOG.info("op=" + op + " target=" + path);
 
     DefaultFullHttpResponse resp = new DefaultFullHttpResponse(
             HTTP_1_1, HttpResponseStatus.OK,
-            Unpooled.wrappedBuffer(content.getBytes()));
-    resp.headers().set("Content-Type", "application/json");
-    resp.headers().set("Content-Length", resp.content().readableBytes());
-    resp.headers().set("Connection", "close");
+            Unpooled.wrappedBuffer(content.getBytes(Charsets.UTF_8)));
+    resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
+    resp.headers().set(CONTENT_LENGTH, resp.content().readableBytes());
+    resp.headers().set(CONNECTION, CLOSE);
     ctx.write(resp).addListener(ChannelFutureListener.CLOSE);
   }
 
@@ -109,19 +122,19 @@ class FSImageHandler extends SimpleChannelInboundHandler<HttpRequest> {
     Exception e = cause instanceof Exception ? (Exception) cause : new
       Exception(cause);
     final String output = JsonUtil.toJsonString(e);
-    ByteBuf content = Unpooled.wrappedBuffer(output.getBytes());
+    ByteBuf content = Unpooled.wrappedBuffer(output.getBytes(Charsets.UTF_8));
     final DefaultFullHttpResponse resp = new DefaultFullHttpResponse(
             HTTP_1_1, INTERNAL_SERVER_ERROR, content);
 
-    resp.headers().set("Content-Type", "application/json");
+    resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
     if (e instanceof IllegalArgumentException) {
       resp.setStatus(BAD_REQUEST);
     } else if (e instanceof FileNotFoundException) {
       resp.setStatus(NOT_FOUND);
     }
 
-    resp.headers().set("Content-Length", resp.content().readableBytes());
-    resp.headers().set("Connection", "close");
+    resp.headers().set(CONTENT_LENGTH, resp.content().readableBytes());
+    resp.headers().set(CONNECTION, CLOSE);
     ctx.write(resp).addListener(ChannelFutureListener.CLOSE);
   }
 
@@ -134,11 +147,11 @@ class FSImageHandler extends SimpleChannelInboundHandler<HttpRequest> {
   private static String getPath(QueryStringDecoder decoder)
           throws FileNotFoundException {
     String path = decoder.path();
-    if (path.startsWith("/webhdfs/v1/")) {
-      return path.substring(11);
+    if (path.startsWith(WEBHDFS_PREFIX)) {
+      return path.substring(WEBHDFS_PREFIX_LENGTH);
     } else {
       throw new FileNotFoundException("Path: " + path + " should " +
-              "start with \"/webhdfs/v1/\"");
+              "start with " + WEBHDFS_PREFIX);
     }
   }
 }
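
The FSImageHandler rewrite above dispatches on the webhdfs "op" parameter with a Java 7 switch on String instead of an if/else chain, and replaces header and content-type string literals with constants. A reduced sketch of just the dispatch step, with an illustrative class and return values:

    public class OpDispatch {
      // A switch on String compares via hashCode() then equals(), so for
      // non-null input it behaves like the old if/else chain.
      // (A null op would throw NullPointerException here.)
      static String dispatch(String op) {
        switch (op) {
          case "GETFILESTATUS":
            return "file status";
          case "LISTSTATUS":
            return "directory listing";
          case "GETACLSTATUS":
            return "acl status";
          default:
            throw new IllegalArgumentException(
                "Invalid value for webhdfs parameter \"op\"");
        }
      }

      public static void main(String[] args) {
        System.out.println(dispatch("LISTSTATUS"));
      }
    }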

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
index a26f1bf..2f2fa5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
@@ -111,17 +111,15 @@ class FSImageLoader {
     }
 
     FsImageProto.FileSummary summary = FSImageUtil.loadSummary(file);
-    FileInputStream fin = null;
 
-    try {
+
+    try (FileInputStream fin = new FileInputStream(file.getFD())) {
       // Map to record INodeReference to the referred id
       ImmutableList<Long> refIdList = null;
       String[] stringTable = null;
       byte[][] inodes = null;
       Map<Long, long[]> dirmap = null;
 
-      fin = new FileInputStream(file.getFD());
-
       ArrayList<FsImageProto.FileSummary.Section> sections =
           Lists.newArrayList(summary.getSectionsList());
       Collections.sort(sections,
@@ -169,8 +167,6 @@ class FSImageLoader {
         }
       }
       return new FSImageLoader(stringTable, inodes, dirmap);
-    } finally {
-      IOUtils.cleanup(null, fin);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
index 61c3650..056ad96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
@@ -21,7 +21,7 @@ import java.io.BufferedInputStream;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
-import java.io.PrintWriter;
+import java.io.PrintStream;
 import java.io.RandomAccessFile;
 
 import org.apache.hadoop.conf.Configuration;
@@ -30,7 +30,6 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SectionName;
 import org.apache.hadoop.hdfs.server.namenode.FSImageUtil;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.LimitInputStream;
 
 import com.google.common.base.Preconditions;
@@ -67,7 +66,7 @@ final class FileDistributionCalculator {
   private final Configuration conf;
   private final long maxSize;
   private final int steps;
-  private final PrintWriter out;
+  private final PrintStream out;
 
   private final int[] distribution;
   private int totalFiles;
@@ -77,7 +76,7 @@ final class FileDistributionCalculator {
   private long maxFileSize;
 
   FileDistributionCalculator(Configuration conf, long maxSize, int steps,
-      PrintWriter out) {
+      PrintStream out) {
     this.conf = conf;
     this.maxSize = maxSize == 0 ? MAX_SIZE_DEFAULT : maxSize;
     this.steps = steps == 0 ? INTERVAL_DEFAULT : steps;
@@ -96,9 +95,7 @@ final class FileDistributionCalculator {
     }
 
     FileSummary summary = FSImageUtil.loadSummary(file);
-    FileInputStream in = null;
-    try {
-      in = new FileInputStream(file.getFD());
+    try (FileInputStream in = new FileInputStream(file.getFD())) {
       for (FileSummary.Section s : summary.getSectionsList()) {
         if (SectionName.fromString(s.getName()) != SectionName.INODE) {
           continue;
@@ -111,8 +108,6 @@ final class FileDistributionCalculator {
         run(is);
         output();
       }
-    } finally {
-      IOUtils.cleanup(null, in);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java
index f293db4..146d00a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java
@@ -159,10 +159,10 @@ class FileDistributionVisitor extends TextWriterImageVisitor {
         current.path = (value.equals("") ? "/" : value);
         break;
       case REPLICATION:
-        current.replication = Integer.valueOf(value);
+        current.replication = Integer.parseInt(value);
         break;
       case NUM_BYTES:
-        current.fileSize += Long.valueOf(value);
+        current.fileSize += Long.parseLong(value);
         break;
       default:
         break;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsImageVisitor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsImageVisitor.java
index 6e303a9..7d229db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsImageVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsImageVisitor.java
@@ -135,7 +135,7 @@ class LsImageVisitor extends TextWriterImageVisitor {
         perms = value;
         break;
       case REPLICATION:
-        replication = Integer.valueOf(value);
+        replication = Integer.parseInt(value);
         break;
       case USER_NAME:
         username = value;
@@ -144,7 +144,7 @@ class LsImageVisitor extends TextWriterImageVisitor {
         group = value;
         break;
       case NUM_BYTES:
-        filesize += Long.valueOf(value);
+        filesize += Long.parseLong(value);
         break;
       case MODIFICATION_TIME:
         modTime = value;
@@ -173,6 +173,6 @@ class LsImageVisitor extends TextWriterImageVisitor {
     if(element == ImageElement.INODE)
       newLine();
     else if (element == ImageElement.BLOCKS)
-      numBlocks = Integer.valueOf(value);
+      numBlocks = Integer.parseInt(value);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
index f02acae..4fce6a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
@@ -18,9 +18,8 @@
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
 import java.io.EOFException;
-import java.io.File;
 import java.io.IOException;
-import java.io.PrintWriter;
+import java.io.PrintStream;
 import java.io.RandomAccessFile;
 
 import org.apache.commons.cli.CommandLine;
@@ -33,7 +32,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
 
 /**
@@ -144,36 +142,33 @@ public class OfflineImageViewerPB {
     String processor = cmd.getOptionValue("p", "Web");
     String outputFile = cmd.getOptionValue("o", "-");
 
-    PrintWriter out = outputFile.equals("-") ?
-        new PrintWriter(System.out) : new PrintWriter(new File(outputFile));
-
     Configuration conf = new Configuration();
-    try {
-      if (processor.equals("FileDistribution")) {
-        long maxSize = Long.parseLong(cmd.getOptionValue("maxSize", "0"));
-        int step = Integer.parseInt(cmd.getOptionValue("step", "0"));
-        new FileDistributionCalculator(conf, maxSize, step, out)
-            .visit(new RandomAccessFile(inputFile, "r"));
-      } else if (processor.equals("XML")) {
-        new PBImageXmlWriter(conf, out).visit(new RandomAccessFile(inputFile,
-            "r"));
-      } else if (processor.equals("Web")) {
-        String addr = cmd.getOptionValue("addr", "localhost:5978");
-        WebImageViewer viewer = new WebImageViewer(NetUtils.createSocketAddr
-                (addr));
-        try {
-          viewer.start(inputFile);
-        } finally {
-          viewer.close();
-        }
+    try (PrintStream out = outputFile.equals("-") ?
+        System.out : new PrintStream(outputFile, "UTF-8")) {
+      switch (processor) {
+        case "FileDistribution":
+          long maxSize = Long.parseLong(cmd.getOptionValue("maxSize", "0"));
+          int step = Integer.parseInt(cmd.getOptionValue("step", "0"));
+          new FileDistributionCalculator(conf, maxSize, step, out).visit(
+              new RandomAccessFile(inputFile, "r"));
+          break;
+        case "XML":
+          new PBImageXmlWriter(conf, out).visit(
+              new RandomAccessFile(inputFile, "r"));
+          break;
+        case "Web":
+          String addr = cmd.getOptionValue("addr", "localhost:5978");
+          try (WebImageViewer viewer = new WebImageViewer(
+              NetUtils.createSocketAddr(addr))) {
+            viewer.start(inputFile);
+          }
+          break;
       }
       return 0;
     } catch (EOFException e) {
       System.err.println("Input file ended unexpectedly. Exiting");
     } catch (IOException e) {
       System.err.println("Encountered exception.  Exiting: " + e.getMessage());
-    } finally {
-      IOUtils.cleanup(null, out);
     }
     return -1;
   }
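
OfflineImageViewerPB now picks either System.out or a file-backed PrintStream with an explicit "UTF-8" encoding and lets try-with-resources close it. A hedged sketch of the same selection, with the argument handling simplified:

    import java.io.FileNotFoundException;
    import java.io.PrintStream;
    import java.io.UnsupportedEncodingException;

    public class OutputSelection {
      public static void main(String[] args)
          throws FileNotFoundException, UnsupportedEncodingException {
        String outputFile = args.length > 0 ? args[0] : "-";
        // "-" means stdout; anything else is treated as a file path.
        // The two-argument constructor fixes the encoding instead of
        // relying on the platform default.
        try (PrintStream out = outputFile.equals("-")
            ? System.out : new PrintStream(outputFile, "UTF-8")) {
          out.println("report goes here");
        }
      }
    }

When "-" is chosen, leaving the try block also closes System.out; that is tolerable here because the tool exits right afterwards, mirroring the patched code.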

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
index 3e3f021..f3fe886 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
@@ -21,7 +21,7 @@ import java.io.BufferedInputStream;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
-import java.io.PrintWriter;
+import java.io.PrintStream;
 import java.io.RandomAccessFile;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -50,7 +50,6 @@ import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection;
 import org.apache.hadoop.hdfs.util.XMLUtils;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.LimitInputStream;
 
 import com.google.common.collect.Lists;
@@ -62,10 +61,10 @@ import com.google.common.collect.Lists;
 @InterfaceAudience.Private
 public final class PBImageXmlWriter {
   private final Configuration conf;
-  private final PrintWriter out;
+  private final PrintStream out;
   private String[] stringTable;
 
-  public PBImageXmlWriter(Configuration conf, PrintWriter out) {
+  public PBImageXmlWriter(Configuration conf, PrintStream out) {
     this.conf = conf;
     this.out = out;
   }
@@ -76,9 +75,7 @@ public final class PBImageXmlWriter {
     }
 
     FileSummary summary = FSImageUtil.loadSummary(file);
-    FileInputStream fin = null;
-    try {
-      fin = new FileInputStream(file.getFD());
+    try (FileInputStream fin = new FileInputStream(file.getFD())) {
       out.print("<?xml version=\"1.0\"?>\n<fsimage>");
 
       ArrayList<FileSummary.Section> sections = Lists.newArrayList(summary
@@ -140,8 +137,6 @@ public final class PBImageXmlWriter {
         }
       }
       out.print("</fsimage>\n");
-    } finally {
-      IOUtils.cleanup(null, fin);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index 603bf6e..cc00055 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -1265,11 +1265,11 @@ public class TestEncryptionZones {
     }
 
     // Run the XML OIV processor
-    StringWriter output = new StringWriter();
-    PrintWriter pw = new PrintWriter(output);
+    ByteArrayOutputStream output = new ByteArrayOutputStream();
+    PrintStream pw = new PrintStream(output);
     PBImageXmlWriter v = new PBImageXmlWriter(new Configuration(), pw);
     v.visit(new RandomAccessFile(originalFsimage, "r"));
-    final String xml = output.getBuffer().toString();
+    final String xml = output.toString();
     SAXParser parser = SAXParserFactory.newInstance().newSAXParser();
     parser.parse(new InputSource(new StringReader(xml)), new DefaultHandler());
   }
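
The test now captures the writer's output in a ByteArrayOutputStream behind a PrintStream, since PBImageXmlWriter takes a PrintStream rather than a PrintWriter. A self-contained capture-and-decode sketch; names and content are illustrative:

    import java.io.ByteArrayOutputStream;
    import java.io.PrintStream;
    import java.io.UnsupportedEncodingException;

    public class CaptureOutput {
      public static void main(String[] args) throws UnsupportedEncodingException {
        ByteArrayOutputStream output = new ByteArrayOutputStream();
        // autoFlush=true and an explicit encoding keep the buffer and the
        // later decode consistent.
        PrintStream ps = new PrintStream(output, true, "UTF-8");
        ps.println("<?xml version=\"1.0\"?>");
        String captured = output.toString("UTF-8"); // decode with the same charset
        System.out.print(captured);
      }
    }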

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
index 12fba73..b20e2ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
@@ -25,15 +25,15 @@ import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.IOException;
-import java.io.PrintWriter;
+import java.io.PrintStream;
 import java.io.RandomAccessFile;
-import java.io.StringWriter;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Random;
 
+import org.apache.commons.io.output.NullOutputStream;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDirectoryTree;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDirectoryTree.Node;
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -256,8 +257,7 @@ public class TestSnapshot {
         FSImageTestUtil.getFSImage(
         cluster.getNameNode()).getStorage().getStorageDir(0));
     assertNotNull("Didn't generate or can't find fsimage", originalFsimage);
-    StringWriter output = new StringWriter();
-    PrintWriter o = new PrintWriter(output);
+    PrintStream o = new PrintStream(NullOutputStream.NULL_OUTPUT_STREAM);
     PBImageXmlWriter v = new PBImageXmlWriter(new Configuration(), o);
     v.visit(new RandomAccessFile(originalFsimage, "r"));
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f6d0c9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index 36b5201..4bb2b79 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -20,18 +20,18 @@ package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
-import java.io.FileWriter;
 import java.io.IOException;
+import java.io.PrintStream;
 import java.io.PrintWriter;
 import java.io.RandomAccessFile;
 import java.io.StringReader;
 import java.io.StringWriter;
 import java.net.HttpURLConnection;
 import java.net.URI;
-import java.net.URISyntaxException;
 import java.net.URL;
 import java.util.Collections;
 import java.util.Comparator;
@@ -43,6 +43,7 @@ import javax.xml.parsers.ParserConfigurationException;
 import javax.xml.parsers.SAXParser;
 import javax.xml.parsers.SAXParserFactory;
 
+import org.apache.commons.io.output.NullOutputStream;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -186,10 +187,10 @@ public class TestOfflineImageViewer {
   @Test(expected = IOException.class)
   public void testTruncatedFSImage() throws IOException {
     File truncatedFile = folder.newFile();
-    StringWriter output = new StringWriter();
+    PrintStream output = new PrintStream(NullOutputStream.NULL_OUTPUT_STREAM);
     copyPartOfFile(originalFsimage, truncatedFile);
-    new FileDistributionCalculator(new Configuration(), 0, 0, new PrintWriter(
-        output)).visit(new RandomAccessFile(truncatedFile, "r"));
+    new FileDistributionCalculator(new Configuration(), 0, 0, output)
+        .visit(new RandomAccessFile(truncatedFile, "r"));
   }
 
   private void copyPartOfFile(File src, File dest) throws IOException {
@@ -208,20 +209,21 @@ public class TestOfflineImageViewer {
 
   @Test
   public void testFileDistributionCalculator() throws IOException {
-    StringWriter output = new StringWriter();
-    PrintWriter o = new PrintWriter(output);
+    ByteArrayOutputStream output = new ByteArrayOutputStream();
+    PrintStream o = new PrintStream(output);
     new FileDistributionCalculator(new Configuration(), 0, 0, o)
         .visit(new RandomAccessFile(originalFsimage, "r"));
     o.close();
 
+    String outputString = output.toString();
     Pattern p = Pattern.compile("totalFiles = (\\d+)\n");
-    Matcher matcher = p.matcher(output.getBuffer());
+    Matcher matcher = p.matcher(outputString);
     assertTrue(matcher.find() && matcher.groupCount() == 1);
     int totalFiles = Integer.parseInt(matcher.group(1));
     assertEquals(NUM_DIRS * FILES_PER_DIR, totalFiles);
 
     p = Pattern.compile("totalDirectories = (\\d+)\n");
-    matcher = p.matcher(output.getBuffer());
+    matcher = p.matcher(outputString);
     assertTrue(matcher.find() && matcher.groupCount() == 1);
     int totalDirs = Integer.parseInt(matcher.group(1));
     // totalDirs includes root directory, empty directory, and xattr directory
@@ -236,7 +238,7 @@ public class TestOfflineImageViewer {
       }
     });
     p = Pattern.compile("maxFileSize = (\\d+)\n");
-    matcher = p.matcher(output.getBuffer());
+    matcher = p.matcher(output.toString("UTF-8"));
     assertTrue(matcher.find() && matcher.groupCount() == 1);
     assertEquals(maxFile.getLen(), Long.parseLong(matcher.group(1)));
   }
@@ -252,13 +254,13 @@ public class TestOfflineImageViewer {
   @Test
   public void testPBImageXmlWriter() throws IOException, SAXException,
       ParserConfigurationException {
-    StringWriter output = new StringWriter();
-    PrintWriter o = new PrintWriter(output);
+    ByteArrayOutputStream output = new ByteArrayOutputStream();
+    PrintStream o = new PrintStream(output);
     PBImageXmlWriter v = new PBImageXmlWriter(new Configuration(), o);
     v.visit(new RandomAccessFile(originalFsimage, "r"));
     SAXParserFactory spf = SAXParserFactory.newInstance();
     SAXParser parser = spf.newSAXParser();
-    final String xml = output.getBuffer().toString();
+    final String xml = output.toString();
     parser.parse(new InputSource(new StringReader(xml)), new DefaultHandler());
   }
 
@@ -298,7 +300,7 @@ public class TestOfflineImageViewer {
       verifyHttpResponseCode(HttpURLConnection.HTTP_NOT_FOUND, url);
 
       // LISTSTATUS operation to a invalid prefix
-      url = new URL("http://localhost:" + port + "/webhdfs/v1?op=LISTSTATUS");
+      url = new URL("http://localhost:" + port + "/foo");
       verifyHttpResponseCode(HttpURLConnection.HTTP_NOT_FOUND, url);
 
       // GETFILESTATUS operation
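
The assertions above scrape numbers out of the captured report with regular expressions. A compact illustration of that match-then-parse step, using made-up sample text:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class OutputScrape {
      public static void main(String[] args) {
        // The captured report is scanned with a regex; group(1) holds the value.
        String outputString = "totalFiles = 12\ntotalDirectories = 5\n";
        Pattern p = Pattern.compile("totalFiles = (\\d+)\n");
        Matcher matcher = p.matcher(outputString);
        if (matcher.find()) {
          int totalFiles = Integer.parseInt(matcher.group(1));
          System.out.println("totalFiles = " + totalFiles);
        }
      }
    }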


[13/50] [abbrv] hadoop git commit: YARN-2924. Fixed RMAdminCLI to not convert node labels to lower case. Contributed by Wangda Tan

Posted by ka...@apache.org.
YARN-2924. Fixed RMAdminCLI to not convert node labels to lower case. Contributed by Wangda Tan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/437322af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/437322af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/437322af

Branch: refs/heads/YARN-2139
Commit: 437322afcaa4b1b260501af160283c97eb589419
Parents: 2ed90a5
Author: Jian He <ji...@apache.org>
Authored: Tue Dec 9 17:56:04 2014 -0800
Committer: Jian He <ji...@apache.org>
Committed: Tue Dec 9 17:56:04 2014 -0800

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                                  | 3 +++
 .../main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java  | 2 +-
 .../java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java   | 4 ++--
 3 files changed, 6 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/437322af/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0173782..81d5707 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -209,6 +209,9 @@ Release 2.7.0 - UNRELEASED
     YARN-2930. Fixed TestRMRestart#testRMRestartRecoveringNodeLabelManager
     intermittent failure. (Wangda Tan via jianhe)
 
+    YARN-2924. Fixed RMAdminCLI to not convert node labels to lower case.
+    (Wangda Tan via jianhe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/437322af/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
index 89d87cf..c7cc4d2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
@@ -399,7 +399,7 @@ public class RMAdminCLI extends HAAdmin {
 
       for (int i = 1; i < splits.length; i++) {
         if (!splits[i].trim().isEmpty()) {
-          map.get(nodeId).add(splits[i].trim().toLowerCase());
+          map.get(nodeId).add(splits[i].trim());
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/437322af/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
index 6176a3e..bee114b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
@@ -468,9 +468,9 @@ public class TestRMAdminCLI {
   @Test
   public void testReplaceLabelsOnNode() throws Exception {
     // Successfully replace labels
-    dummyNodeLabelsManager.addToCluserNodeLabels(ImmutableSet.of("x", "y"));
+    dummyNodeLabelsManager.addToCluserNodeLabels(ImmutableSet.of("x", "Y"));
     String[] args =
-        { "-replaceLabelsOnNode", "node1,x,y node2,y",
+        { "-replaceLabelsOnNode", "node1,x,Y node2,Y",
             "-directlyAccessNodeLabelStore" };
     assertEquals(0, rmAdminCLI.run(args));
     assertTrue(dummyNodeLabelsManager.getNodeLabels().containsKey(
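
The fix simply drops the toLowerCase() call so labels keep the case the admin typed ("Y" stays "Y"). Below is a reduced, illustrative sketch of the surrounding parse that turns "node1,x,Y node2,Y" into a per-node label map; it is not the real RMAdminCLI code, just the same shape:

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class LabelParse {
      // Parses "node1,x,Y node2,Y" into {node1=[x, Y], node2=[Y]},
      // preserving label case.
      static Map<String, Set<String>> parse(String arg) {
        Map<String, Set<String>> map = new HashMap<>();
        for (String nodeToLabels : arg.split("\\s+")) {
          if (nodeToLabels.isEmpty()) {
            continue;
          }
          String[] splits = nodeToLabels.split(",");
          String nodeId = splits[0];
          map.put(nodeId, new HashSet<String>());
          for (int i = 1; i < splits.length; i++) {
            if (!splits[i].trim().isEmpty()) {
              map.get(nodeId).add(splits[i].trim()); // no toLowerCase() any more
            }
          }
        }
        return map;
      }

      public static void main(String[] args) {
        System.out.println(parse("node1,x,Y node2,Y"));
      }
    }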


[35/50] [abbrv] hadoop git commit: MAPREDUCE-6046. Change the class name for logs in RMCommunicator. Contributed by Sahil Takiar.

Posted by ka...@apache.org.
MAPREDUCE-6046. Change the class name for logs in RMCommunicator.
Contributed by Sahil Takiar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0bd02291
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0bd02291
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0bd02291

Branch: refs/heads/YARN-2139
Commit: 0bd022911013629a8c9e7357fae8cf4399d7a1e3
Parents: b437f5e
Author: Devaraj K <de...@apache.org>
Authored: Fri Dec 12 11:42:03 2014 +0530
Committer: Devaraj K <de...@apache.org>
Committed: Fri Dec 12 11:42:03 2014 +0530

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt                              | 3 +++
 .../org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java     | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bd02291/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index bbab097..ee24857 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -241,6 +241,9 @@ Release 2.7.0 - UNRELEASED
     MAPREDUCE-5932. Provide an option to use a dedicated reduce-side shuffle
     log (Gera Shegalov via jlowe)
 
+    MAPREDUCE-6046. Change the class name for logs in RMCommunicator
+    (Sahil Takiar via devaraj)
+
   OPTIMIZATIONS
 
     MAPREDUCE-6169. MergeQueue should release reference to the current item 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bd02291/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
index cd4e272..5d4fa12 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
@@ -67,7 +67,7 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;
  */
 public abstract class RMCommunicator extends AbstractService
     implements RMHeartbeatHandler {
-  private static final Log LOG = LogFactory.getLog(RMContainerAllocator.class);
+  private static final Log LOG = LogFactory.getLog(RMCommunicator.class);
   private int rmPollInterval;//millis
   protected ApplicationId applicationId;
   private final AtomicBoolean stopped;
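
The one-line fix points the logger at the enclosing class, so messages from RMCommunicator are no longer attributed to RMContainerAllocator in the logs. The usual commons-logging idiom, shown on an illustrative class:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class HeartbeatLoop {
      // Use the enclosing class, not a neighbouring one, so the logger name
      // in the output matches the code that actually emitted the message.
      private static final Log LOG = LogFactory.getLog(HeartbeatLoop.class);

      public static void main(String[] args) {
        LOG.info("heartbeat loop started");
      }
    }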


[50/50] [abbrv] hadoop git commit: HDFS-7513. HDFS inotify: add defaultBlockSize to CreateEvent (cmccabe)

Posted by ka...@apache.org.
HDFS-7513. HDFS inotify: add defaultBlockSize to CreateEvent (cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e13fc62
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e13fc62
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e13fc62

Branch: refs/heads/YARN-2139
Commit: 6e13fc62e1f284f22fd0089f06ce281198bc7c2a
Parents: fae3e86
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Mon Dec 15 10:00:25 2014 -0800
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Mon Dec 15 10:30:30 2014 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 +
 .../org/apache/hadoop/hdfs/inotify/Event.java   | 87 ++++++++++++++++++--
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 19 +++--
 .../namenode/InotifyFSEditLogOpTranslator.java  | 34 ++++++--
 .../hadoop-hdfs/src/main/proto/inotify.proto    |  1 +
 .../hdfs/TestDFSInotifyEventInputStream.java    |  4 +-
 6 files changed, 124 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e13fc62/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9dfecc1..99b06be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -457,6 +457,8 @@ Release 2.7.0 - UNRELEASED
 
     HDFS-7426. Change nntop JMX format to be a JSON blob. (wang)
 
+    HDFS-7513. HDFS inotify: add defaultBlockSize to CreateEvent (cmccabe)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e13fc62/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java
index e8a34e7..5ceff1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java
@@ -101,6 +101,7 @@ public abstract class Event {
     private FsPermission perms;
     private String symlinkTarget;
     private boolean overwrite;
+    private long defaultBlockSize;
 
     public static class Builder {
       private INodeType iNodeType;
@@ -112,6 +113,7 @@ public abstract class Event {
       private FsPermission perms;
       private String symlinkTarget;
       private boolean overwrite;
+      private long defaultBlockSize = 0;
 
       public Builder iNodeType(INodeType type) {
         this.iNodeType = type;
@@ -158,6 +160,11 @@ public abstract class Event {
         return this;
       }
 
+      public Builder defaultBlockSize(long defaultBlockSize) {
+        this.defaultBlockSize = defaultBlockSize;
+        return this;
+      }
+
       public CreateEvent build() {
         return new CreateEvent(this);
       }
@@ -174,6 +181,7 @@ public abstract class Event {
       this.perms = b.perms;
       this.symlinkTarget = b.symlinkTarget;
       this.overwrite = b.overwrite;
+      this.defaultBlockSize = b.defaultBlockSize;
     }
 
     public INodeType getiNodeType() {
@@ -220,6 +228,10 @@ public abstract class Event {
     public boolean getOverwrite() {
       return overwrite;
     }
+
+    public long getDefaultBlockSize() {
+      return defaultBlockSize;
+    }
   }
 
   /**
@@ -398,11 +410,36 @@ public abstract class Event {
     private String dstPath;
     private long timestamp;
 
-    public RenameEvent(String srcPath, String dstPath, long timestamp) {
+    public static class Builder {
+      private String srcPath;
+      private String dstPath;
+      private long timestamp;
+
+      public Builder srcPath(String srcPath) {
+        this.srcPath = srcPath;
+        return this;
+      }
+
+      public Builder dstPath(String dstPath) {
+        this.dstPath = dstPath;
+        return this;
+      }
+
+      public Builder timestamp(long timestamp) {
+        this.timestamp = timestamp;
+        return this;
+      }
+
+      public RenameEvent build() {
+        return new RenameEvent(this);
+      }
+    }
+
+    private RenameEvent(Builder builder) {
       super(EventType.RENAME);
-      this.srcPath = srcPath;
-      this.dstPath = dstPath;
-      this.timestamp = timestamp;
+      this.srcPath = builder.srcPath;
+      this.dstPath = builder.dstPath;
+      this.timestamp = builder.timestamp;
     }
 
     public String getSrcPath() {
@@ -427,9 +464,22 @@ public abstract class Event {
   public static class AppendEvent extends Event {
     private String path;
 
-    public AppendEvent(String path) {
+    public static class Builder {
+      private String path;
+
+      public Builder path(String path) {
+        this.path = path;
+        return this;
+      }
+
+      public AppendEvent build() {
+        return new AppendEvent(this);
+      }
+    }
+
+    private AppendEvent(Builder b) {
       super(EventType.APPEND);
-      this.path = path;
+      this.path = b.path;
     }
 
     public String getPath() {
@@ -444,10 +494,29 @@ public abstract class Event {
     private String path;
     private long timestamp;
 
-    public UnlinkEvent(String path, long timestamp) {
+    public static class Builder {
+      private String path;
+      private long timestamp;
+
+      public Builder path(String path) {
+        this.path = path;
+        return this;
+      }
+
+      public Builder timestamp(long timestamp) {
+        this.timestamp = timestamp;
+        return this;
+      }
+
+      public UnlinkEvent build() {
+        return new UnlinkEvent(this);
+      }
+    }
+
+    private UnlinkEvent(Builder builder) {
       super(EventType.UNLINK);
-      this.path = path;
-      this.timestamp = timestamp;
+      this.path = builder.path;
+      this.timestamp = builder.timestamp;
     }
 
     public String getPath() {
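
Besides the new defaultBlockSize field on CreateEvent, the patch gives RenameEvent, AppendEvent and UnlinkEvent nested Builders like CreateEvent already had, so call sites name each field and new optional fields can be added without breaking constructor signatures. A standalone sketch of that builder shape; this is an illustrative class, not the real Event hierarchy:

    public class RenameRecord {
      private final String srcPath;
      private final String dstPath;
      private final long timestamp;

      public static class Builder {
        private String srcPath;
        private String dstPath;
        private long timestamp;

        public Builder srcPath(String srcPath) { this.srcPath = srcPath; return this; }
        public Builder dstPath(String dstPath) { this.dstPath = dstPath; return this; }
        public Builder timestamp(long timestamp) { this.timestamp = timestamp; return this; }

        public RenameRecord build() { return new RenameRecord(this); }
      }

      private RenameRecord(Builder b) {
        this.srcPath = b.srcPath;
        this.dstPath = b.dstPath;
        this.timestamp = b.timestamp;
      }

      @Override
      public String toString() {
        return srcPath + " -> " + dstPath + " @ " + timestamp;
      }

      public static void main(String[] args) {
        // Call sites name each field, so adding another optional field later
        // does not reorder or break existing constructor arguments.
        RenameRecord r = new RenameRecord.Builder()
            .srcPath("/file4")
            .dstPath("/file4-renamed")
            .timestamp(System.currentTimeMillis())
            .build();
        System.out.println(r);
      }
    }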

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e13fc62/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 5a36585..ee6d58c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -2566,6 +2566,7 @@ public class PBHelper {
                 .replication(create.getReplication())
                 .symlinkTarget(create.getSymlinkTarget().isEmpty() ? null :
                     create.getSymlinkTarget())
+                .defaultBlockSize(create.getDefaultBlockSize())
                 .overwrite(create.getOverwrite()).build());
             break;
           case EVENT_METADATA:
@@ -2592,19 +2593,26 @@ public class PBHelper {
           case EVENT_RENAME:
             InotifyProtos.RenameEventProto rename =
                 InotifyProtos.RenameEventProto.parseFrom(p.getContents());
-            events.add(new Event.RenameEvent(rename.getSrcPath(),
-                rename.getDestPath(), rename.getTimestamp()));
+            events.add(new Event.RenameEvent.Builder()
+                  .srcPath(rename.getSrcPath())
+                  .dstPath(rename.getDestPath())
+                  .timestamp(rename.getTimestamp())
+                  .build());
             break;
           case EVENT_APPEND:
             InotifyProtos.AppendEventProto reopen =
                 InotifyProtos.AppendEventProto.parseFrom(p.getContents());
-            events.add(new Event.AppendEvent(reopen.getPath()));
+            events.add(new Event.AppendEvent.Builder()
+                  .path(reopen.getPath())
+                  .build());
             break;
           case EVENT_UNLINK:
             InotifyProtos.UnlinkEventProto unlink =
                 InotifyProtos.UnlinkEventProto.parseFrom(p.getContents());
-            events.add(new Event.UnlinkEvent(unlink.getPath(),
-                unlink.getTimestamp()));
+            events.add(new Event.UnlinkEvent.Builder()
+                  .path(unlink.getPath())
+                  .timestamp(unlink.getTimestamp())
+                  .build());
             break;
           default:
             throw new RuntimeException("Unexpected inotify event type: " +
@@ -2650,6 +2658,7 @@ public class PBHelper {
                         .setReplication(ce2.getReplication())
                         .setSymlinkTarget(ce2.getSymlinkTarget() == null ?
                             "" : ce2.getSymlinkTarget())
+                        .setDefaultBlockSize(ce2.getDefaultBlockSize())
                         .setOverwrite(ce2.getOverwrite()).build().toByteString()
                 ).build());
             break;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e13fc62/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/InotifyFSEditLogOpTranslator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/InotifyFSEditLogOpTranslator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/InotifyFSEditLogOpTranslator.java
index cd3fc23..f265340 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/InotifyFSEditLogOpTranslator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/InotifyFSEditLogOpTranslator.java
@@ -53,10 +53,13 @@ public class InotifyFSEditLogOpTranslator {
             .groupName(addOp.permissions.getGroupName())
             .perms(addOp.permissions.getPermission())
             .overwrite(addOp.overwrite)
+            .defaultBlockSize(addOp.blockSize)
             .iNodeType(Event.CreateEvent.INodeType.FILE).build() });
-      } else {
+      } else { // append
         return new EventBatch(op.txid,
-            new Event[] { new Event.AppendEvent(addOp.path) });
+            new Event[]{new Event.AppendEvent.Builder()
+                .path(addOp.path)
+                .build()});
       }
     case OP_CLOSE:
       FSEditLogOp.CloseOp cOp = (FSEditLogOp.CloseOp) op;
@@ -72,25 +75,40 @@ public class InotifyFSEditLogOpTranslator {
     case OP_CONCAT_DELETE:
       FSEditLogOp.ConcatDeleteOp cdOp = (FSEditLogOp.ConcatDeleteOp) op;
       List<Event> events = Lists.newArrayList();
-      events.add(new Event.AppendEvent(cdOp.trg));
+      events.add(new Event.AppendEvent.Builder()
+          .path(cdOp.trg)
+          .build());
       for (String src : cdOp.srcs) {
-        events.add(new Event.UnlinkEvent(src, cdOp.timestamp));
+        events.add(new Event.UnlinkEvent.Builder()
+          .path(src)
+          .timestamp(cdOp.timestamp)
+          .build());
       }
       events.add(new Event.CloseEvent(cdOp.trg, -1, cdOp.timestamp));
       return new EventBatch(op.txid, events.toArray(new Event[0]));
     case OP_RENAME_OLD:
       FSEditLogOp.RenameOldOp rnOpOld = (FSEditLogOp.RenameOldOp) op;
       return new EventBatch(op.txid, new Event[] {
-          new Event.RenameEvent(rnOpOld.src,
-              rnOpOld.dst, rnOpOld.timestamp) });
+          new Event.RenameEvent.Builder()
+              .srcPath(rnOpOld.src)
+              .dstPath(rnOpOld.dst)
+              .timestamp(rnOpOld.timestamp)
+              .build() });
     case OP_RENAME:
       FSEditLogOp.RenameOp rnOp = (FSEditLogOp.RenameOp) op;
       return new EventBatch(op.txid, new Event[] {
-          new Event.RenameEvent(rnOp.src, rnOp.dst, rnOp.timestamp) });
+          new Event.RenameEvent.Builder()
+            .srcPath(rnOp.src)
+            .dstPath(rnOp.dst)
+            .timestamp(rnOp.timestamp)
+            .build() });
     case OP_DELETE:
       FSEditLogOp.DeleteOp delOp = (FSEditLogOp.DeleteOp) op;
       return new EventBatch(op.txid, new Event[] {
-          new Event.UnlinkEvent(delOp.path, delOp.timestamp) });
+          new Event.UnlinkEvent.Builder()
+            .path(delOp.path)
+            .timestamp(delOp.timestamp)
+            .build() });
     case OP_MKDIR:
       FSEditLogOp.MkdirOp mkOp = (FSEditLogOp.MkdirOp) op;
       return new EventBatch(op.txid,
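
The translator above now builds every event through a nested Builder rather than a positional constructor, which lets fields such as defaultBlockSize be added later without breaking callers. A minimal sketch of the same style, using only the builder calls visible in this diff; the paths and timestamps are made up, and the usual org.apache.hadoop.hdfs.inotify package for Event is assumed:

import org.apache.hadoop.hdfs.inotify.Event;

class EventBuilderSketch {
  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    // Same builder methods the translator uses; values are illustrative only.
    Event rename = new Event.RenameEvent.Builder()
        .srcPath("/user/alice/old.txt")
        .dstPath("/user/alice/new.txt")
        .timestamp(now)
        .build();
    Event unlink = new Event.UnlinkEvent.Builder()
        .path("/user/alice/tmp")
        .timestamp(now)
        .build();
    System.out.println(rename.getEventType() + ", " + unlink.getEventType());
  }
}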

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e13fc62/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/inotify.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/inotify.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/inotify.proto
index e51c02c..e50f14b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/inotify.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/inotify.proto
@@ -78,6 +78,7 @@ message CreateEventProto {
   optional int32 replication = 7;
   optional string symlinkTarget = 8;
   optional bool overwrite = 9;
+  optional int64 defaultBlockSize = 10 [default=0];
 }
 
 message CloseEventProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e13fc62/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
index 82db110..77a17ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
@@ -170,6 +170,7 @@ public class TestDFSInotifyEventInputStream {
       Assert.assertTrue(ce.getReplication() > 0);
       Assert.assertTrue(ce.getSymlinkTarget() == null);
       Assert.assertTrue(ce.getOverwrite());
+      Assert.assertEquals(BLOCK_SIZE, ce.getDefaultBlockSize());
 
       // CloseOp
       batch = waitForNextEvents(eis);
@@ -186,7 +187,8 @@ public class TestDFSInotifyEventInputStream {
       Assert.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
       Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.APPEND);
-      Assert.assertTrue(((Event.AppendEvent) batch.getEvents()[0]).getPath().equals("/file2"));
+      Event.AppendEvent append2 = (Event.AppendEvent)batch.getEvents()[0];
+      Assert.assertEquals("/file2", append2.getPath());
 
       // CloseOp
       batch = waitForNextEvents(eis);
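
On the consumer side only the new accessor is needed; here is a minimal sketch of reading one EventBatch with the methods exercised in the test above (how the batch is obtained, e.g. from DFSInotifyEventInputStream, is left out, and EventType.CREATE is assumed alongside the APPEND constant used in the test):

import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;

class EventBatchSketch {
  static void inspect(EventBatch batch) {
    for (Event e : batch.getEvents()) {
      switch (e.getEventType()) {
        case CREATE:
          Event.CreateEvent ce = (Event.CreateEvent) e;
          // getDefaultBlockSize() is the field added by this change.
          System.out.println("create: blockSize=" + ce.getDefaultBlockSize()
              + " overwrite=" + ce.getOverwrite());
          break;
        case APPEND:
          Event.AppendEvent ae = (Event.AppendEvent) e;
          System.out.println("append: " + ae.getPath());
          break;
        default:
          break;
      }
    }
  }
}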


[17/50] [abbrv] hadoop git commit: HADOOP-10482. Fix various findbugs warnings in hadoop-common. Contributed by Haohui Mai.

Posted by ka...@apache.org.
HADOOP-10482. Fix various findbugs warnings in hadoop-common. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bbd6a327
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bbd6a327
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bbd6a327

Branch: refs/heads/YARN-2139
Commit: bbd6a3277678a60d472e76a207f25a916220946c
Parents: 2e98ad3
Author: Haohui Mai <wh...@apache.org>
Authored: Wed Dec 10 12:44:25 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Wed Dec 10 12:44:25 2014 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 ++
 .../dev-support/findbugsExcludeFile.xml         | 10 +++++++
 .../org/apache/hadoop/conf/Configuration.java   |  7 ++---
 .../hadoop/crypto/key/JavaKeyStoreProvider.java |  9 ++----
 .../java/org/apache/hadoop/fs/FileSystem.java   |  3 --
 .../org/apache/hadoop/fs/HarFileSystem.java     | 23 +++------------
 .../org/apache/hadoop/fs/LocalDirAllocator.java |  2 +-
 .../hadoop/fs/MD5MD5CRC32FileChecksum.java      |  8 +++---
 .../org/apache/hadoop/fs/ftp/FTPFileSystem.java | 21 ++++++--------
 .../java/org/apache/hadoop/fs/shell/Ls.java     |  2 +-
 .../java/org/apache/hadoop/fs/shell/Stat.java   |  4 +--
 .../java/org/apache/hadoop/fs/shell/Test.java   |  2 ++
 .../main/java/org/apache/hadoop/ha/HAAdmin.java |  6 ----
 .../org/apache/hadoop/ha/SshFenceByTcpPort.java |  2 ++
 .../java/org/apache/hadoop/io/LongWritable.java |  4 +--
 .../main/java/org/apache/hadoop/io/Text.java    |  2 ++
 .../hadoop/io/compress/DecompressorStream.java  |  2 +-
 .../main/java/org/apache/hadoop/ipc/Server.java | 15 +++++-----
 .../metrics/ganglia/GangliaContext31.java       |  5 ----
 .../hadoop/metrics/spi/CompositeContext.java    |  2 +-
 .../hadoop/metrics2/lib/MutableCounterInt.java  | 17 +++++------
 .../hadoop/metrics2/lib/MutableCounterLong.java | 19 +++++++------
 .../hadoop/metrics2/lib/MutableGaugeInt.java    | 30 ++++++++++----------
 .../hadoop/metrics2/lib/MutableGaugeLong.java   | 30 ++++++++++----------
 .../java/org/apache/hadoop/net/NetUtils.java    |  2 +-
 .../net/ScriptBasedMappingWithDependency.java   |  3 +-
 .../hadoop/security/LdapGroupsMapping.java      |  6 +---
 .../apache/hadoop/util/ComparableVersion.java   |  2 ++
 .../apache/hadoop/util/PrintJarMainClass.java   | 17 +++++------
 .../org/apache/hadoop/util/ServletUtil.java     |  6 ++--
 30 files changed, 118 insertions(+), 145 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 9065ff5..53004ce 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -558,6 +558,8 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11381. Fix findbugs warnings in hadoop-distcp, hadoop-aws,
     hadoop-azure, and hadoop-openstack. (Li Lu via wheat9)
 
+    HADOOP-10482. Fix various findbugs warnings in hadoop-common. (wheat9)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
index 8de3c37..1a05896 100644
--- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
@@ -241,6 +241,16 @@
        <Method name="writeVLong" />
        <Bug pattern="SF_SWITCH_FALLTHROUGH" />
      </Match>
+     <Match>
+       <Class name="org.apache.hadoop.io.Text" />
+       <Method name="bytesToCodePoint" />
+       <Bug pattern="SF_SWITCH_NO_DEFAULT" />
+     </Match>
+     <Match>
+       <Class name="org.apache.hadoop.util.PureJavaCrc32C" />
+       <Method name="update" />
+       <Bug pattern="SF_SWITCH_NO_DEFAULT" />
+     </Match>
     <!--
 	  The switch condition fall through is intentional and for performance
 	  purposes.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index a3fae19..d2c8052 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -1487,11 +1487,8 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * @param pattern new value
    */
   public void setPattern(String name, Pattern pattern) {
-    if (null == pattern) {
-      set(name, null);
-    } else {
-      set(name, pattern.pattern());
-    }
+    assert pattern != null : "Pattern cannot be null";
+    set(name, pattern.pattern());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
index 75981c4..a9738ba 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
@@ -144,13 +144,8 @@ public class JavaKeyStoreProvider extends KeyProvider {
           // Provided Password file does not exist
           throw new IOException("Password file does not exists");
         }
-        if (pwdFile != null) {
-          InputStream is = pwdFile.openStream();
-          try {
-            password = IOUtils.toString(is).trim().toCharArray();
-          } finally {
-            is.close();
-          }
+        try (InputStream is = pwdFile.openStream()) {
+          password = IOUtils.toString(is).trim().toCharArray();
         }
       }
     }
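
The same try-with-resources conversion recurs in LdapGroupsMapping further down. As a generic sketch of the idiom (the file name here is made up): the resource is closed automatically on both the normal and the exceptional exit path, which is exactly what the removed try/finally did by hand.

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;

class TryWithResourcesSketch {
  static String firstLine(String file) throws IOException {
    // reader.close() runs automatically, even if readLine() throws.
    try (BufferedReader reader = new BufferedReader(new FileReader(file))) {
      return reader.readLine();
    }
  }

  public static void main(String[] args) throws IOException {
    System.out.println(firstLine("password.txt"));  // hypothetical file
  }
}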

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 9edc54b..619f433 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -2618,9 +2618,6 @@ public abstract class FileSystem extends Configured implements Closeable {
   private static FileSystem createFileSystem(URI uri, Configuration conf
       ) throws IOException {
     Class<?> clazz = getFileSystemClass(uri.getScheme(), conf);
-    if (clazz == null) {
-      throw new IOException("No FileSystem for scheme: " + uri.getScheme());
-    }
     FileSystem fs = (FileSystem)ReflectionUtils.newInstance(clazz, conf);
     fs.initialize(uri, conf);
     return fs;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
index 3ba6de1..0fba268 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
@@ -220,12 +220,7 @@ public class HarFileSystem extends FileSystem {
       return FileSystem.getDefaultUri(conf);
     }
     String authority = rawURI.getAuthority();
-    if (authority == null) {
-      throw new IOException("URI: " + rawURI
-          + " is an invalid Har URI since authority==null."
-          + "  Expecting har://<scheme>-<host>/<path>.");
-    }
- 
+
     int i = authority.indexOf('-');
     if (i < 0) {
       throw new IOException("URI: " + rawURI
@@ -489,19 +484,12 @@ public class HarFileSystem extends FileSystem {
   }
   
   static class Store {
-    public Store() {
-      begin = end = startHash = endHash = 0;
-    }
-    public Store(long begin, long end, int startHash, int endHash) {
+    public Store(long begin, long end) {
       this.begin = begin;
       this.end = end;
-      this.startHash = startHash;
-      this.endHash = endHash;
     }
     public long begin;
     public long end;
-    public int startHash;
-    public int endHash;
   }
   
   /**
@@ -594,7 +582,7 @@ public class HarFileSystem extends FileSystem {
     public HarStatus(String harString) throws UnsupportedEncodingException {
       String[] splits = harString.split(" ");
       this.name = decodeFileName(splits[0]);
-      this.isDir = "dir".equals(splits[1]) ? true: false;
+      this.isDir = "dir".equals(splits[1]);
       // this is equal to "none" if its a directory
       this.partName = splits[2];
       this.startIndex = Long.parseLong(splits[3]);
@@ -1167,11 +1155,8 @@ public class HarFileSystem extends FileSystem {
           int b = lin.readLine(line);
           read += b;
           readStr = line.toString().split(" ");
-          int startHash = Integer.parseInt(readStr[0]);
-          int endHash  = Integer.parseInt(readStr[1]);
           stores.add(new Store(Long.parseLong(readStr[2]), 
-              Long.parseLong(readStr[3]), startHash,
-              endHash));
+              Long.parseLong(readStr[3])));
           line.clear();
         }
       } catch (IOException ioe) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
index 88b4d4e..8f011ce 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
@@ -372,7 +372,7 @@ public class LocalDirAllocator {
         // Keep rolling the wheel till we get a valid path
         Random r = new java.util.Random();
         while (numDirsSearched < numDirs && returnPath == null) {
-          long randomPosition = Math.abs(r.nextLong()) % totalAvailable;
+          long randomPosition = (r.nextLong() >>> 1) % totalAvailable;
           int dir = 0;
           while (randomPosition > availableOnDisk[dir]) {
             randomPosition -= availableOnDisk[dir];
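
The replaced line fixes the classic absolute-value-of-random pitfall that findbugs flags: Math.abs(Long.MIN_VALUE) is still Long.MIN_VALUE, so the old expression could leave randomPosition negative, while the unsigned shift is always non-negative. A quick check of the two forms:

class AbsOfRandomSketch {
  public static void main(String[] args) {
    long worstCase = Long.MIN_VALUE;  // a legal Random.nextLong() result
    long totalAvailable = 100L;

    // Old form: Math.abs overflows for Long.MIN_VALUE and stays negative.
    System.out.println(Math.abs(worstCase) % totalAvailable);  // -8

    // New form: unsigned right shift never produces a negative value.
    System.out.println((worstCase >>> 1) % totalAvailable);    // 4
  }
}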

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java
index 5918995..21f56ed 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java
@@ -143,13 +143,13 @@ public class MD5MD5CRC32FileChecksum extends FileChecksum {
       switch (finalCrcType) {
         case CRC32:
           return new MD5MD5CRC32GzipFileChecksum(
-              Integer.valueOf(bytesPerCRC),
-              Integer.valueOf(crcPerBlock),
+              Integer.parseInt(bytesPerCRC),
+              Integer.parseInt(crcPerBlock),
               new MD5Hash(md5));
         case CRC32C:
           return new MD5MD5CRC32CastagnoliFileChecksum(
-              Integer.valueOf(bytesPerCRC),
-              Integer.valueOf(crcPerBlock),
+              Integer.parseInt(bytesPerCRC),
+              Integer.parseInt(crcPerBlock),
               new MD5Hash(md5));
         default:
           // we should never get here since finalCrcType will
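
Integer.parseInt returns a primitive int, whereas Integer.valueOf returns a boxed Integer that was being unboxed straight away, which is the needless allocation findbugs complains about. A tiny illustration with a made-up value:

class ParseVsValueOfSketch {
  public static void main(String[] args) {
    String bytesPerCRC = "512";
    int a = Integer.parseInt(bytesPerCRC);  // primitive, no object created
    int b = Integer.valueOf(bytesPerCRC);   // Integer allocated (512 is outside the
                                            // -128..127 cache), then auto-unboxed
    System.out.println(a + b);
  }
}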

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
index 9d36bcf..47fb25c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
@@ -23,6 +23,7 @@ import java.io.InputStream;
 import java.net.ConnectException;
 import java.net.URI;
 
+import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.net.ftp.FTP;
@@ -101,17 +102,12 @@ public class FTPFileSystem extends FileSystem {
     if (userAndPassword == null) {
       userAndPassword = (conf.get("fs.ftp.user." + host, null) + ":" + conf
           .get("fs.ftp.password." + host, null));
-      if (userAndPassword == null) {
-        throw new IOException("Invalid user/passsword specified");
-      }
     }
     String[] userPasswdInfo = userAndPassword.split(":");
+    Preconditions.checkState(userPasswdInfo.length > 1,
+                             "Invalid username / password");
     conf.set(FS_FTP_USER_PREFIX + host, userPasswdInfo[0]);
-    if (userPasswdInfo.length > 1) {
-      conf.set(FS_FTP_PASSWORD_PREFIX + host, userPasswdInfo[1]);
-    } else {
-      conf.set(FS_FTP_PASSWORD_PREFIX + host, null);
-    }
+    conf.set(FS_FTP_PASSWORD_PREFIX + host, userPasswdInfo[1]);
     setConf(conf);
     this.uri = uri;
   }
@@ -293,7 +289,8 @@ public class FTPFileSystem extends FileSystem {
    */
   private boolean exists(FTPClient client, Path file) throws IOException {
     try {
-      return getFileStatus(client, file) != null;
+      getFileStatus(client, file);
+      return true;
     } catch (FileNotFoundException fnfe) {
       return false;
     }
@@ -333,10 +330,8 @@ public class FTPFileSystem extends FileSystem {
     if (dirEntries != null && dirEntries.length > 0 && !(recursive)) {
       throw new IOException("Directory: " + file + " is not empty.");
     }
-    if (dirEntries != null) {
-      for (int i = 0; i < dirEntries.length; i++) {
-        delete(client, new Path(absolute, dirEntries[i].getPath()), recursive);
-      }
+    for (FileStatus dirEntry : dirEntries) {
+      delete(client, new Path(absolute, dirEntry.getPath()), recursive);
     }
     return client.removeDirectory(pathName);
   }
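
Guava's Preconditions.checkState(condition, message) throws IllegalStateException with the given message when the condition is false, replacing the dead null check and the hand-rolled length branch above. A short sketch with made-up credentials:

import com.google.common.base.Preconditions;

class PreconditionsSketch {
  public static void main(String[] args) {
    String userAndPassword = "alice:secret";  // illustrative value only
    String[] userPasswdInfo = userAndPassword.split(":");
    Preconditions.checkState(userPasswdInfo.length > 1,
                             "Invalid username / password");
    System.out.println("user=" + userPasswdInfo[0]);
  }
}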

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
index 6024d88..c7e80b6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
@@ -57,7 +57,7 @@ class Ls extends FsCommand {
 		  
   
 
-  protected static final SimpleDateFormat dateFormat = 
+  protected final SimpleDateFormat dateFormat =
     new SimpleDateFormat("yyyy-MM-dd HH:mm");
 
   protected int maxRepl = 3, maxLen = 10, maxOwner = 0, maxGroup = 0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
index ee56fe6..6365294 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
@@ -55,8 +55,8 @@ class Stat extends FsCommand {
     "in the specified format. Format accepts filesize in blocks (%b), group name of owner(%g), " +
     "filename (%n), block size (%o), replication (%r), user name of owner(%u), modification date (%y, %Y)\n";
 
-  protected static final SimpleDateFormat timeFmt;
-  static {
+  protected final SimpleDateFormat timeFmt;
+  {
     timeFmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
     timeFmt.setTimeZone(TimeZone.getTimeZone("UTC"));
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Test.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Test.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Test.java
index 4cfdb08..9984cf2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Test.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Test.java
@@ -83,6 +83,8 @@ class Test extends FsCommand {
       case 'z':
         test = (item.stat.getLen() == 0);
         break;
+      default:
+        break;
     }
     if (!test) exitCode = 1;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
index bd6366c..a6c4a42 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
@@ -168,12 +168,6 @@ public abstract class HAAdmin extends Configured implements Tool {
   private boolean isOtherTargetNodeActive(String targetNodeToActivate, boolean forceActive)
       throws IOException  {
     Collection<String> targetIds = getTargetIds(targetNodeToActivate);
-    if(targetIds == null) {
-      errOut.println("transitionToActive: No target node in the "
-          + "current configuration");
-      printUsage(errOut, "-transitionToActive");
-      return true;
-    }
     targetIds.remove(targetNodeToActivate);
     for(String targetId : targetIds) {
       HAServiceTarget target = resolveTarget(targetId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
index 0f54651..90eb915 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
@@ -310,6 +310,8 @@ public class SshFenceByTcpPort extends Configured
       case com.jcraft.jsch.Logger.FATAL:
         LOG.fatal(message);
         break;
+      default:
+        break;
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/LongWritable.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/LongWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/LongWritable.java
index 6dec4aa..b77ca67 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/LongWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/LongWritable.java
@@ -99,11 +99,11 @@ public class LongWritable implements WritableComparable<LongWritable> {
     
     @Override
     public int compare(WritableComparable a, WritableComparable b) {
-      return -super.compare(a, b);
+      return super.compare(b, a);
     }
     @Override
     public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
-      return -super.compare(b1, s1, l1, b2, s2, l2);
+      return super.compare(b2, s2, l2, b1, s1, l1);
     }
   }
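
Negating a comparator's result is what findbugs warns about here: compare() may in general return Integer.MIN_VALUE, and -Integer.MIN_VALUE overflows back to Integer.MIN_VALUE, so the sign never flips. Reversing the argument order, as done in this patch, avoids that. A small demonstration of the overflow:

class NegatedCompareSketch {
  public static void main(String[] args) {
    int cmp = Integer.MIN_VALUE;   // a value a compare() is allowed to return
    System.out.println(-cmp);      // -2147483648: the sign did not flip
    System.out.println(-cmp > 0);  // false, so a "descending" order silently breaks

    // Swapping the operands never overflows:
    System.out.println(Integer.compare(2, 1));  // 1
    System.out.println(Integer.compare(1, 2));  // -1
  }
}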
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
index 3dc5076..0bcaee3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
@@ -584,6 +584,8 @@ public class Text extends BinaryComparable
           state = TRAIL_BYTE;
         }
         break;
+      default:
+        break;
       } // switch (state)
       count++;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java
index 16e0ad7..6bee6b84 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DecompressorStream.java
@@ -40,7 +40,7 @@ public class DecompressorStream extends CompressionInputStream {
   throws IOException {
     super(in);
 
-    if (in == null || decompressor == null) {
+    if (decompressor == null) {
       throw new NullPointerException();
     } else if (bufferSize <= 0) {
       throw new IllegalArgumentException("Illegal bufferSize");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 021e035..a4d669a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -665,7 +665,7 @@ public abstract class Server {
         assert !running;
         readSelector.wakeup();
         try {
-          join();
+          super.join();
         } catch (InterruptedException ie) {
           Thread.currentThread().interrupt();
         }
@@ -1119,7 +1119,8 @@ public abstract class Server {
     private ByteBuffer data;
     private ByteBuffer dataLengthBuffer;
     private LinkedList<Call> responseQueue;
-    private volatile int rpcCount = 0; // number of outstanding rpcs
+    // number of outstanding rpcs
+    private AtomicInteger rpcCount = new AtomicInteger();
     private long lastContact;
     private int dataLength;
     private Socket socket;
@@ -1207,17 +1208,17 @@ public abstract class Server {
 
     /* Return true if the connection has no outstanding rpc */
     private boolean isIdle() {
-      return rpcCount == 0;
+      return rpcCount.get() == 0;
     }
     
     /* Decrement the outstanding RPC count */
     private void decRpcCount() {
-      rpcCount--;
+      rpcCount.decrementAndGet();
     }
     
     /* Increment the outstanding RPC count */
     private void incRpcCount() {
-      rpcCount++;
+      rpcCount.incrementAndGet();
     }
     
     private UserGroupInformation getAuthorizedUgi(String authorizedId)
@@ -2068,9 +2069,9 @@ public abstract class Server {
         LOG.debug("Ignoring socket shutdown exception", e);
       }
       if (channel.isOpen()) {
-        try {channel.close();} catch(Exception e) {}
+        IOUtils.cleanup(null, channel);
       }
-      try {socket.close();} catch(Exception e) {}
+      IOUtils.cleanup(null, socket);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext31.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext31.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext31.java
index 39509f0..f35ad18 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext31.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext31.java
@@ -86,11 +86,6 @@ public class GangliaContext31 extends GangliaContext {
       value + " from hostname" + hostName);
 
     String units = getUnits(name);
-    if (units == null) {
-      LOG.warn("Metric name " + name + ", value " + value
-        + " had 'null' units");
-      units = "";
-    }
     int slope = getSlope(name);
     int tmax = getTmax(name);
     int dmax = getDmax(name);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/CompositeContext.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/CompositeContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/CompositeContext.java
index 60f5fec..ad49404 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/CompositeContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/CompositeContext.java
@@ -55,7 +55,7 @@ public class CompositeContext extends AbstractMetricsContext {
     int nKids;
     try {
       String sKids = getAttribute(ARITY_LABEL);
-      nKids = Integer.valueOf(sKids);
+      nKids = Integer.parseInt(sKids);
     } catch (Exception e) {
       LOG.error("Unable to initialize composite metric " + contextName +
                 ": could not init arity", e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounterInt.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounterInt.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounterInt.java
index b0fb0d4..77139e2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounterInt.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounterInt.java
@@ -23,23 +23,24 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.metrics2.MetricsInfo;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 
+import java.util.concurrent.atomic.AtomicInteger;
+
 /**
  * A mutable int counter for implementing metrics sources
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class MutableCounterInt extends MutableCounter {
-  private volatile int value;
+  private AtomicInteger value = new AtomicInteger();
 
   MutableCounterInt(MetricsInfo info, int initValue) {
     super(info);
-    this.value = initValue;
+    this.value.set(initValue);
   }
 
   @Override
-  public synchronized void incr() {
-    ++value;
-    setChanged();
+  public void incr() {
+    incr(1);
   }
 
   /**
@@ -47,18 +48,18 @@ public class MutableCounterInt extends MutableCounter {
    * @param delta of the increment
    */
   public synchronized void incr(int delta) {
-    value += delta;
+    value.addAndGet(delta);
     setChanged();
   }
 
   public int value() {
-    return value;
+    return value.get();
   }
 
   @Override
   public void snapshot(MetricsRecordBuilder builder, boolean all) {
     if (all || changed()) {
-      builder.addCounter(info(), value);
+      builder.addCounter(info(), value());
       clearChanged();
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounterLong.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounterLong.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounterLong.java
index 43ea490..03a6043 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounterLong.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounterLong.java
@@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.metrics2.MetricsInfo;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 
+import java.util.concurrent.atomic.AtomicLong;
+
 /**
  * A mutable long counter
  */
@@ -30,36 +32,35 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 @InterfaceStability.Evolving
 public class MutableCounterLong extends MutableCounter {
 
-  private volatile long value;
+  private AtomicLong value = new AtomicLong();
 
   MutableCounterLong(MetricsInfo info, long initValue) {
     super(info);
-    this.value = initValue;
+    this.value.set(initValue);
   }
 
   @Override
-  public synchronized void incr() {
-    ++value;
-    setChanged();
+  public void incr() {
+    incr(1);
   }
 
   /**
    * Increment the value by a delta
    * @param delta of the increment
    */
-  public synchronized void incr(long delta) {
-    value += delta;
+  public void incr(long delta) {
+    value.addAndGet(delta);
     setChanged();
   }
 
   public long value() {
-    return value;
+    return value.get();
   }
 
   @Override
   public void snapshot(MetricsRecordBuilder builder, boolean all) {
     if (all || changed()) {
-      builder.addCounter(info(), value);
+      builder.addCounter(info(), value());
       clearChanged();
     }
   }
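
Switching from a volatile field guarded by synchronized to AtomicLong keeps incr() lock-free while staying thread-safe. A minimal stand-alone sketch of the same pattern (not the Hadoop class itself):

import java.util.concurrent.atomic.AtomicLong;

class AtomicCounterSketch {
  private final AtomicLong value = new AtomicLong();

  void incr()           { incr(1); }
  void incr(long delta) { value.addAndGet(delta); }  // single atomic read-modify-write
  long value()          { return value.get(); }

  public static void main(String[] args) throws InterruptedException {
    final AtomicCounterSketch c = new AtomicCounterSketch();
    Runnable bump = new Runnable() {
      public void run() { for (int i = 0; i < 100000; i++) c.incr(); }
    };
    Thread t1 = new Thread(bump), t2 = new Thread(bump);
    t1.start(); t2.start(); t1.join(); t2.join();
    // Always 200000; a bare "value += delta" on a volatile long would lose updates.
    System.out.println(c.value());
  }
}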

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeInt.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeInt.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeInt.java
index 1a3a15c..cce4528 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeInt.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeInt.java
@@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.metrics2.MetricsInfo;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 
+import java.util.concurrent.atomic.AtomicInteger;
+
 /**
  * A mutable int gauge
  */
@@ -30,44 +32,42 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 @InterfaceStability.Evolving
 public class MutableGaugeInt extends MutableGauge {
 
-  private volatile int value;
+  private AtomicInteger value = new AtomicInteger();
 
   MutableGaugeInt(MetricsInfo info, int initValue) {
     super(info);
-    this.value = initValue;
+    this.value.set(initValue);
   }
 
   public int value() {
-    return value;
+    return value.get();
   }
 
   @Override
-  public synchronized void incr() {
-    ++value;
-    setChanged();
+  public void incr() {
+    incr(1);
   }
 
   /**
    * Increment by delta
    * @param delta of the increment
    */
-  public synchronized void incr(int delta) {
-    value += delta;
+  public void incr(int delta) {
+    value.addAndGet(delta);
     setChanged();
   }
 
   @Override
-  public synchronized void decr() {
-    --value;
-    setChanged();
+  public void decr() {
+    decr(1);
   }
 
   /**
    * decrement by delta
    * @param delta of the decrement
    */
-  public synchronized void decr(int delta) {
-    value -= delta;
+  public void decr(int delta) {
+    value.addAndGet(-delta);
     setChanged();
   }
 
@@ -76,14 +76,14 @@ public class MutableGaugeInt extends MutableGauge {
    * @param value to set
    */
   public void set(int value) {
-    this.value = value;
+    this.value.set(value);
     setChanged();
   }
 
   @Override
   public void snapshot(MetricsRecordBuilder builder, boolean all) {
     if (all || changed()) {
-      builder.addGauge(info(), value);
+      builder.addGauge(info(), value());
       clearChanged();
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeLong.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeLong.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeLong.java
index 69df430..a2a8632 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeLong.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGaugeLong.java
@@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.metrics2.MetricsInfo;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 
+import java.util.concurrent.atomic.AtomicLong;
+
 /**
  * A mutable long gauge
  */
@@ -30,44 +32,42 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 @InterfaceStability.Evolving
 public class MutableGaugeLong extends MutableGauge {
 
-  private volatile long value;
+  private AtomicLong value = new AtomicLong();
 
   MutableGaugeLong(MetricsInfo info, long initValue) {
     super(info);
-    this.value = initValue;
+    this.value.set(initValue);
   }
 
   public long value() {
-    return value;
+    return value.get();
   }
 
   @Override
-  public synchronized void incr() {
-    ++value;
-    setChanged();
+  public void incr() {
+    incr(1);
   }
 
   /**
    * Increment by delta
    * @param delta of the increment
    */
-  public synchronized void incr(long delta) {
-    value += delta;
+  public void incr(long delta) {
+    value.addAndGet(delta);
     setChanged();
   }
 
   @Override
-  public synchronized void decr() {
-    --value;
-    setChanged();
+  public void decr() {
+    decr(1);
   }
 
   /**
    * decrement by delta
    * @param delta of the decrement
    */
-  public synchronized void decr(long delta) {
-    value -= delta;
+  public void decr(long delta) {
+    value.addAndGet(-delta);
     setChanged();
   }
 
@@ -76,14 +76,14 @@ public class MutableGaugeLong extends MutableGauge {
    * @param value to set
    */
   public void set(long value) {
-    this.value = value;
+    this.value.set(value);
     setChanged();
   }
 
   @Override
   public void snapshot(MetricsRecordBuilder builder, boolean all) {
     if (all || changed()) {
-      builder.addGauge(info(), value);
+      builder.addGauge(info(), value());
       clearChanged();
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
index b535dda..ffee402 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
@@ -287,7 +287,7 @@ public class NetUtils {
     if (fqHost == null) {
       try {
         fqHost = SecurityUtil.getByName(host).getHostName();
-        // slight race condition, but won't hurt 
+        // slight race condition, but won't hurt
         canonicalizedHostCache.put(host, fqHost);
       } catch (UnknownHostException e) {
         fqHost = host;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMappingWithDependency.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMappingWithDependency.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMappingWithDependency.java
index 8a0a003..086650b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMappingWithDependency.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMappingWithDependency.java
@@ -171,8 +171,7 @@ public class ScriptBasedMappingWithDependency  extends ScriptBasedMapping
 
     @Override
     public String toString() {
-      return super.toString() + ", " + dependencyScriptName != null ?
-          ("dependency script " + dependencyScriptName) : NO_SCRIPT;
+      return "dependency script " + dependencyScriptName;
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
index 76f5380..e72d988 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
@@ -367,10 +367,8 @@ public class LdapGroupsMapping
       return "";
     }
     
-    Reader reader = null;
-    try {
+    try (Reader reader = new FileReader(pwFile)) {
       StringBuilder password = new StringBuilder();
-      reader = new FileReader(pwFile);
       int c = reader.read();
       while (c > -1) {
         password.append((char)c);
@@ -379,8 +377,6 @@ public class LdapGroupsMapping
       return password.toString().trim();
     } catch (IOException ioe) {
       throw new RuntimeException("Could not read password file: " + pwFile, ioe);
-    } finally {
-      IOUtils.cleanup(LOG, reader);
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ComparableVersion.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ComparableVersion.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ComparableVersion.java
index a57342f..ebe46d8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ComparableVersion.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ComparableVersion.java
@@ -195,6 +195,8 @@ public class ComparableVersion
                     case 'm':
                         value = "milestone";
                         break;
+                    default:
+                        break;
                 }
             }
             this.value = ALIASES.getProperty( value , value );

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PrintJarMainClass.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PrintJarMainClass.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PrintJarMainClass.java
index efa4de3..df571f3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PrintJarMainClass.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PrintJarMainClass.java
@@ -34,16 +34,13 @@ public class PrintJarMainClass {
    * @param args
    */
   public static void main(String[] args) {
-    try {
-      JarFile jar_file = new JarFile(args[0]);
-      if (jar_file != null) {
-        Manifest manifest = jar_file.getManifest();
-        if (manifest != null) {
-          String value = manifest.getMainAttributes().getValue("Main-Class");
-          if (value != null) {
-            System.out.println(value.replaceAll("/", "."));
-            return;
-          }
+    try (JarFile jar_file = new JarFile(args[0])) {
+      Manifest manifest = jar_file.getManifest();
+      if (manifest != null) {
+        String value = manifest.getMainAttributes().getValue("Main-Class");
+        if (value != null) {
+          System.out.println(value.replaceAll("/", "."));
+          return;
         }
       }
     } catch (Throwable e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbd6a327/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ServletUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ServletUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ServletUtil.java
index 6a8ca0f..2fd9b55 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ServletUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ServletUtil.java
@@ -70,14 +70,14 @@ public class ServletUtil {
       throw new IOException("Invalid request has no " + param + " parameter");
     }
     
-    return Long.valueOf(paramStr);
+    return Long.parseLong(paramStr);
   }
 
   public static final String HTML_TAIL = "<hr />\n"
-    + "<a href='http://hadoop.apache.org/core'>Hadoop</a>, " 
+    + "<a href='http://hadoop.apache.org/core'>Hadoop</a>, "
     + Calendar.getInstance().get(Calendar.YEAR) + ".\n"
     + "</body></html>";
-  
+
   /**
    * HTML footer to be added in the jsps.
    * @return the HTML footer.


[40/50] [abbrv] hadoop git commit: HDFS-7059. Avoid resolving path multiple times. Contributed by Jing Zhao.

Posted by ka...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index 1501fce..cfc7a24 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -89,18 +89,11 @@ public class INodesInPath {
     return buf.toString();
   }
 
-  static INodesInPath resolve(final INodeDirectory startingDir,
-      final byte[][] components) throws UnresolvedLinkException {
-    return resolve(startingDir, components, components.length, false);
-  }
-
   /**
-   * Retrieve existing INodes from a path. If existing is big enough to store
-   * all path components (existing and non-existing), then existing INodes
-   * will be stored starting from the root INode into existing[0]; if
-   * existing is not big enough to store all path components, then only the
-   * last existing and non existing INodes will be stored so that
-   * existing[existing.length-1] refers to the INode of the final component.
+   * Retrieve existing INodes from a path. For non-snapshot path,
+   * the number of INodes is equal to the number of path components. For
+   * snapshot path (e.g., /foo/.snapshot/s1/bar), the number of INodes is
+   * (number_of_path_components - 1).
    * 
    * An UnresolvedPathException is always thrown when an intermediate path 
    * component refers to a symbolic link. If the final path component refers 
@@ -110,56 +103,38 @@ public class INodesInPath {
    * <p>
    * Example: <br>
    * Given the path /c1/c2/c3 where only /c1/c2 exists, resulting in the
-   * following path components: ["","c1","c2","c3"],
-   * 
-   * <p>
-   * <code>getExistingPathINodes(["","c1","c2"], [?])</code> should fill the
-   * array with [c2] <br>
-   * <code>getExistingPathINodes(["","c1","c2","c3"], [?])</code> should fill the
-   * array with [null]
-   * 
-   * <p>
-   * <code>getExistingPathINodes(["","c1","c2"], [?,?])</code> should fill the
-   * array with [c1,c2] <br>
-   * <code>getExistingPathINodes(["","c1","c2","c3"], [?,?])</code> should fill
-   * the array with [c2,null]
+   * following path components: ["","c1","c2","c3"]
    * 
    * <p>
-   * <code>getExistingPathINodes(["","c1","c2"], [?,?,?,?])</code> should fill
-   * the array with [rootINode,c1,c2,null], <br>
-   * <code>getExistingPathINodes(["","c1","c2","c3"], [?,?,?,?])</code> should
+   * <code>getExistingPathINodes(["","c1","c2"])</code> should fill
+   * the array with [rootINode,c1,c2], <br>
+   * <code>getExistingPathINodes(["","c1","c2","c3"])</code> should
    * fill the array with [rootINode,c1,c2,null]
    * 
    * @param startingDir the starting directory
    * @param components array of path component name
-   * @param numOfINodes number of INodes to return
    * @param resolveLink indicates whether UnresolvedLinkException should
    *        be thrown when the path refers to a symbolic link.
    * @return the specified number of existing INodes in the path
    */
   static INodesInPath resolve(final INodeDirectory startingDir,
-      final byte[][] components, final int numOfINodes,
-      final boolean resolveLink) throws UnresolvedLinkException {
+      final byte[][] components, final boolean resolveLink)
+      throws UnresolvedLinkException {
     Preconditions.checkArgument(startingDir.compareTo(components[0]) == 0);
 
     INode curNode = startingDir;
     int count = 0;
-    int index = numOfINodes <= components.length ?
-        numOfINodes - components.length : 0;
     int inodeNum = 0;
-    int capacity = numOfINodes;
-    INode[] inodes = new INode[numOfINodes];
+    INode[] inodes = new INode[components.length];
     boolean isSnapshot = false;
     int snapshotId = CURRENT_STATE_ID;
 
     while (count < components.length && curNode != null) {
-      final boolean lastComp = (count == components.length - 1);      
-      if (index >= 0) {
-        inodes[inodeNum++] = curNode;
-      }
+      final boolean lastComp = (count == components.length - 1);
+      inodes[inodeNum++] = curNode;
       final boolean isRef = curNode.isReference();
       final boolean isDir = curNode.isDirectory();
-      final INodeDirectory dir = isDir? curNode.asDirectory(): null;  
+      final INodeDirectory dir = isDir? curNode.asDirectory(): null;
       if (!isRef && isDir && dir.isWithSnapshot()) {
         //if the path is a non-snapshot path, update the latest snapshot.
         if (!isSnapshot && shouldUpdateLatestId(
@@ -217,11 +192,7 @@ public class INodesInPath {
       if (isDotSnapshotDir(childName) && dir.isSnapshottable()) {
         // skip the ".snapshot" in components
         count++;
-        index++;
         isSnapshot = true;
-        if (index >= 0) { // decrease the capacity by 1 to account for .snapshot
-          capacity--;
-        }
         // check if ".snapshot" is the last element of components
         if (count == components.length - 1) {
           break;
@@ -240,14 +211,12 @@ public class INodesInPath {
             isSnapshot ? snapshotId : CURRENT_STATE_ID);
       }
       count++;
-      index++;
     }
-    if (isSnapshot && capacity < numOfINodes &&
-        !isDotSnapshotDir(components[components.length - 1])) {
+    if (isSnapshot && !isDotSnapshotDir(components[components.length - 1])) {
       // for snapshot path shrink the inode array. however, for path ending with
       // .snapshot, still keep last the null inode in the array
-      INode[] newNodes = new INode[capacity];
-      System.arraycopy(inodes, 0, newNodes, 0, capacity);
+      INode[] newNodes = new INode[components.length - 1];
+      System.arraycopy(inodes, 0, newNodes, 0, newNodes.length);
       inodes = newNodes;
     }
     return new INodesInPath(inodes, components, isSnapshot, snapshotId);
@@ -277,6 +246,24 @@ public class INodesInPath {
     return new INodesInPath(inodes, iip.path, iip.isSnapshot, iip.snapshotId);
   }
 
+  /**
+   * Extend a given INodesInPath with a child INode. The child INode will be
+   * appended to the end of the new INodesInPath.
+   */
+  public static INodesInPath append(INodesInPath iip, INode child,
+      byte[] childName) {
+    Preconditions.checkArgument(!iip.isSnapshot && iip.length() > 0);
+    Preconditions.checkArgument(iip.getLastINode() != null && iip
+        .getLastINode().isDirectory());
+    INode[] inodes = new INode[iip.length() + 1];
+    System.arraycopy(iip.inodes, 0, inodes, 0, inodes.length - 1);
+    inodes[inodes.length - 1] = child;
+    byte[][] path = new byte[iip.path.length + 1][];
+    System.arraycopy(iip.path, 0, path, 0, path.length - 1);
+    path[path.length - 1] = childName;
+    return new INodesInPath(inodes, path, false, iip.snapshotId);
+  }
+
   private final byte[][] path;
   /**
    * Array with the specified number of INodes resolved for a given path.
@@ -348,6 +335,10 @@ public class INodesInPath {
     return path[path.length - 1];
   }
 
+  public byte[][] getPathComponents() {
+    return path;
+  }
+
   /** @return the full path in string form */
   public String getPath() {
     return DFSUtil.byteArray2PathString(path);
@@ -370,6 +361,56 @@ public class INodesInPath {
   }
 
   /**
+   * @param length number of ancestral INodes in the returned INodesInPath
+   *               instance
+   * @return the INodesInPath instance containing ancestral INodes
+   */
+  private INodesInPath getAncestorINodesInPath(int length) {
+    Preconditions.checkArgument(length >= 0 && length < inodes.length);
+    final INode[] anodes = new INode[length];
+    final byte[][] apath;
+    final boolean isSnapshot;
+    final int snapshotId;
+    int dotSnapshotIndex = getDotSnapshotIndex();
+    if (this.isSnapshot && length >= dotSnapshotIndex + 1) {
+      apath = new byte[length + 1][];
+      isSnapshot = true;
+      snapshotId = this.snapshotId;
+    } else {
+      apath = new byte[length][];
+      isSnapshot = false;
+      snapshotId = this.isSnapshot ? CURRENT_STATE_ID : this.snapshotId;
+    }
+    System.arraycopy(this.inodes, 0, anodes, 0, length);
+    System.arraycopy(this.path, 0, apath, 0, apath.length);
+    return new INodesInPath(anodes, apath, isSnapshot, snapshotId);
+  }
+
+  /**
+   * @return an INodesInPath instance containing all the INodes in the parent
+   *         path. We do a deep copy here.
+   */
+  public INodesInPath getParentINodesInPath() {
+    return inodes.length > 1 ? getAncestorINodesInPath(inodes.length - 1) :
+        null;
+  }
+
+  private int getDotSnapshotIndex() {
+    if (isSnapshot) {
+      for (int i = 0; i < path.length; i++) {
+        if (isDotSnapshotDir(path[i])) {
+          return i;
+        }
+      }
+      throw new IllegalStateException("The path " + getPath()
+          + " is a snapshot path but does not contain "
+          + HdfsConstants.DOT_SNAPSHOT_DIR);
+    } else {
+      return -1;
+    }
+  }
+
+  /**
    * @return isSnapshot true for a snapshot path
    */
   boolean isSnapshot() {

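The new append() grows both the inode array and the path-component array by one slot before wrapping them in a fresh INodesInPath, and getAncestorINodesInPath() is the mirror image that truncates them. For illustration, a minimal JDK-only sketch of that grow-by-one copy; the class and method names below are placeholders, not HDFS API:

import java.util.Arrays;

public class AppendSketch {
  // Grow-by-one copy, the same idea append() applies to iip.inodes and iip.path.
  static <T> T[] appendOne(T[] original, T extra) {
    T[] extended = Arrays.copyOf(original, original.length + 1);
    extended[extended.length - 1] = extra;
    return extended;
  }

  public static void main(String[] args) {
    String[] components = {"", "TestSnapshot", "sub1"};
    System.out.println(Arrays.toString(appendOne(components, "file1")));
    // prints: [, TestSnapshot, sub1, file1]
  }
}

The patch uses System.arraycopy into a freshly sized array rather than Arrays.copyOf, but the effect on the copied contents is the same.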
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index e13a5c6..f076215 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -116,7 +116,7 @@ public class LeaseManager {
         final INodeFile cons;
         try {
           cons = this.fsnamesystem.getFSDirectory().getINode(path).asFile();
-            Preconditions.checkState(cons.isUnderConstruction());
+          Preconditions.checkState(cons.isUnderConstruction());
         } catch (UnresolvedLinkException e) {
           throw new AssertionError("Lease files should reside on this FS");
         }
@@ -481,8 +481,10 @@ public class LeaseManager {
       leaseToCheck.getPaths().toArray(leasePaths);
       for(String p : leasePaths) {
         try {
+          INodesInPath iip = fsnamesystem.getFSDirectory().getINodesInPath(p,
+              true);
           boolean completed = fsnamesystem.internalReleaseLease(leaseToCheck, p,
-              HdfsServerConstants.NAMENODE_LEASE_HOLDER);
+              iip, HdfsServerConstants.NAMENODE_LEASE_HOLDER);
           if (LOG.isDebugEnabled()) {
             if (completed) {
               LOG.debug("Lease recovery for " + p + " is complete. File closed.");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
index eda0a28..b0275e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
@@ -827,8 +827,8 @@ public abstract class FSAclBaseTest {
     fs.setPermission(path,
       new FsPermissionExtension(FsPermission.
           createImmutable((short)0755), true, true));
-    INode inode = cluster.getNamesystem().getFSDirectory().getNode(
-      path.toUri().getPath(), false);
+    INode inode = cluster.getNamesystem().getFSDirectory().getINode(
+        path.toUri().getPath(), false);
     assertNotNull(inode);
     FsPermission perm = inode.getFsPermission();
     assertNotNull(perm);
@@ -1433,7 +1433,7 @@ public abstract class FSAclBaseTest {
   private static void assertAclFeature(Path pathToCheck,
       boolean expectAclFeature) throws IOException {
     INode inode = cluster.getNamesystem().getFSDirectory()
-      .getNode(pathToCheck.toUri().getPath(), false);
+      .getINode(pathToCheck.toUri().getPath(), false);
     assertNotNull(inode);
     AclFeature aclFeature = inode.getAclFeature();
     if (expectAclFeature) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index aecf55e..5450cf7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -711,8 +711,8 @@ public class TestFsck {
       DFSTestUtil.waitReplication(fs, filePath, (short)1);
       
       // intentionally corrupt NN data structure
-      INodeFile node = (INodeFile)cluster.getNamesystem().dir.getNode(
-          fileName, true);
+      INodeFile node = (INodeFile) cluster.getNamesystem().dir.getINode
+          (fileName, true);
       final BlockInfo[] blocks = node.getBlocks(); 
       assertEquals(blocks.length, 1);
       blocks[0].setNumBytes(-1L);  // set the block length to be negative

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
index 9b454ea..2f114a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
@@ -62,9 +62,11 @@ public class TestLeaseManager {
    */
   @Test (timeout=1000)
   public void testCheckLeaseNotInfiniteLoop() {
+    FSDirectory dir = Mockito.mock(FSDirectory.class);
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.isRunning()).thenReturn(true);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
+    Mockito.when(fsn.getFSDirectory()).thenReturn(dir);
     LeaseManager lm = new LeaseManager(fsn);
 
     //Make sure the leases we are going to add exceed the hard limit

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
index 354bff1..e416e00 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
@@ -141,7 +140,8 @@ public class TestSnapshotPathINodes {
     // Get the inodes by resolving the path of a normal file
     String[] names = INode.getPathNames(file1.toString());
     byte[][] components = INode.getPathComponents(names);
-    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
+        components, false);
     // The number of inodes should be equal to components.length
     assertEquals(nodesInPath.length(), components.length);
     // The returned nodesInPath should be non-snapshot
@@ -157,20 +157,10 @@ public class TestSnapshotPathINodes {
     assertEquals(nodesInPath.getINode(components.length - 3).getFullPathName(),
         dir.toString());
     
-    // Call getExistingPathINodes and request only one INode. This is used
-    // when identifying the INode for a given path.
-    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 1, false);
-    assertEquals(nodesInPath.length(), 1);
-    assertSnapshot(nodesInPath, false, null, -1);
-    assertEquals(nodesInPath.getINode(0).getFullPathName(), file1.toString());
-    
-    // Call getExistingPathINodes and request 2 INodes. This is usually used
-    // when identifying the parent INode of a given path.
-    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 2, false);
-    assertEquals(nodesInPath.length(), 2);
+    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
+    assertEquals(nodesInPath.length(), components.length);
     assertSnapshot(nodesInPath, false, null, -1);
-    assertEquals(nodesInPath.getINode(1).getFullPathName(), file1.toString());
-    assertEquals(nodesInPath.getINode(0).getFullPathName(), sub1.toString());
+    assertEquals(nodesInPath.getLastINode().getFullPathName(), file1.toString());
   }
   
   /** 
@@ -187,7 +177,8 @@ public class TestSnapshotPathINodes {
     String snapshotPath = sub1.toString() + "/.snapshot/s1/file1";
     String[] names = INode.getPathNames(snapshotPath);
     byte[][] components = INode.getPathComponents(names);
-    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
+        components, false);
     // Length of inodes should be (components.length - 1), since we will ignore
     // ".snapshot" 
     assertEquals(nodesInPath.length(), components.length - 1);
@@ -200,27 +191,17 @@ public class TestSnapshotPathINodes {
     assertTrue(snapshotFileNode.getParent().isWithSnapshot());
     
     // Call getExistingPathINodes and request only one INode.
-    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 1, false);
-    assertEquals(nodesInPath.length(), 1);
-    // The snapshotroot (s1) is not included in inodes. Thus the
-    // snapshotRootIndex should be -1.
-    assertSnapshot(nodesInPath, true, snapshot, -1);
+    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
+    assertEquals(nodesInPath.length(), components.length - 1);
+    assertSnapshot(nodesInPath, true, snapshot, 3);
     // Check the INode for file1 (snapshot file)
     assertINodeFile(nodesInPath.getLastINode(), file1);
-    
-    // Call getExistingPathINodes and request 2 INodes.
-    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 2, false);
-    assertEquals(nodesInPath.length(), 2);
-    // There should be two INodes in inodes: s1 and snapshot of file1. Thus the
-    // SnapshotRootIndex should be 0.
-    assertSnapshot(nodesInPath, true, snapshot, 0);
-    assertINodeFile(nodesInPath.getLastINode(), file1);
-    
+
     // Resolve the path "/TestSnapshot/sub1/.snapshot"  
     String dotSnapshotPath = sub1.toString() + "/.snapshot";
     names = INode.getPathNames(dotSnapshotPath);
     components = INode.getPathComponents(names);
-    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
     // The number of INodes returned should still be components.length
     // since we put a null in the inode array for ".snapshot"
     assertEquals(nodesInPath.length(), components.length);
@@ -267,7 +248,8 @@ public class TestSnapshotPathINodes {
       String snapshotPath = sub1.toString() + "/.snapshot/s2/file1";
       String[] names = INode.getPathNames(snapshotPath);
       byte[][] components = INode.getPathComponents(names);
-      INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+      INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
+          components, false);
       // Length of inodes should be (components.length - 1), since we will ignore
       // ".snapshot" 
       assertEquals(nodesInPath.length(), components.length - 1);
@@ -284,7 +266,8 @@ public class TestSnapshotPathINodes {
     // Check the INodes for path /TestSnapshot/sub1/file1
     String[] names = INode.getPathNames(file1.toString());
     byte[][] components = INode.getPathComponents(names);
-    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
+        components, false);
     // The length of inodes should be equal to components.length
     assertEquals(nodesInPath.length(), components.length);
     // The number of non-null elements should be components.length - 1 since
@@ -333,7 +316,8 @@ public class TestSnapshotPathINodes {
       String snapshotPath = sub1.toString() + "/.snapshot/s4/file3";
       String[] names = INode.getPathNames(snapshotPath);
       byte[][] components = INode.getPathComponents(names);
-      INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+      INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
+          components, false);
       // Length of inodes should be (components.length - 1), since we will ignore
       // ".snapshot" 
       assertEquals(nodesInPath.length(), components.length - 1);
@@ -352,7 +336,8 @@ public class TestSnapshotPathINodes {
     // Check the inodes for /TestSnapshot/sub1/file3
     String[] names = INode.getPathNames(file3.toString());
     byte[][] components = INode.getPathComponents(names);
-    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
+        components, false);
     // The number of inodes should be equal to components.length
     assertEquals(nodesInPath.length(), components.length);
 
@@ -378,7 +363,8 @@ public class TestSnapshotPathINodes {
     // First check the INode for /TestSnapshot/sub1/file1
     String[] names = INode.getPathNames(file1.toString());
     byte[][] components = INode.getPathComponents(names);
-    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+    INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
+        components, false);
     // The number of inodes should be equal to components.length
     assertEquals(nodesInPath.length(), components.length);
 
@@ -401,7 +387,8 @@ public class TestSnapshotPathINodes {
     String snapshotPath = sub1.toString() + "/.snapshot/s3/file1";
     names = INode.getPathNames(snapshotPath);
     components = INode.getPathComponents(names);
-    INodesInPath ssNodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+    INodesInPath ssNodesInPath = INodesInPath.resolve(fsdir.rootDir,
+        components, false);
     // Length of ssInodes should be (components.length - 1), since we will
     // ignore ".snapshot" 
     assertEquals(ssNodesInPath.length(), components.length - 1);
@@ -419,7 +406,8 @@ public class TestSnapshotPathINodes {
     // Check the INode for /TestSnapshot/sub1/file1 again
     names = INode.getPathNames(file1.toString());
     components = INode.getPathComponents(names);
-    INodesInPath newNodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
+    INodesInPath newNodesInPath = INodesInPath.resolve(fsdir.rootDir,
+        components, false);
     assertSnapshot(newNodesInPath, false, s3, -1);
     // The number of inodes should be equal to components.length
     assertEquals(newNodesInPath.length(), components.length);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
index 62041e8..ba318de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
-import org.apache.hadoop.security.AccessControlException;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -169,7 +168,7 @@ public class TestOpenFilesWithSnapshot {
   }
 
   private void doTestMultipleSnapshots(boolean saveNamespace)
-      throws IOException, AccessControlException {
+      throws IOException {
     Path path = new Path("/test");
     doWriteAndAbort(fs, path);
     fs.createSnapshot(path, "s2");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
index e1ca263..5264cb7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
@@ -40,7 +40,7 @@ import org.junit.Test;
 /**
  * This class tests the replication handling/calculation of snapshots. In
  * particular, {@link INodeFile#getFileReplication()} and
- * {@link INodeFileWithSnapshot#getBlockReplication()} are tested to make sure
+ * {@link INodeFile#getBlockReplication()} are tested to make sure
  * the number of replication is calculated correctly with/without snapshots.
  */
 public class TestSnapshotReplication {
@@ -82,7 +82,7 @@ public class TestSnapshotReplication {
    * Check the replication of a given file. We test both
    * {@link INodeFile#getFileReplication()} and
    * {@link INodeFile#getBlockReplication()}.
-   * 
+   *
    * @param file The given file
    * @param replication The expected replication number
    * @param blockReplication The expected replication number for the block
@@ -132,8 +132,7 @@ public class TestSnapshotReplication {
    *          as their expected replication number stored in their corresponding
    *          INodes
    * @param expectedBlockRep
-   *          The expected replication number that should be returned by
-   *          {@link INodeFileWithSnapshot#getBlockReplication()} of all the INodes
+   *          The expected replication number
    * @throws Exception
    */
   private void checkSnapshotFileReplication(Path currentFile,
@@ -143,8 +142,8 @@ public class TestSnapshotReplication {
     assertEquals(expectedBlockRep, inodeOfCurrentFile.getBlockReplication());
     // Then check replication for every snapshot
     for (Path ss : snapshotRepMap.keySet()) {
-      final INodesInPath iip = fsdir.getLastINodeInPath(ss.toString());
-      final INodeFile ssInode = (INodeFile)iip.getLastINode();
+      final INodesInPath iip = fsdir.getINodesInPath(ss.toString(), true);
+      final INodeFile ssInode = iip.getLastINode().asFile();
       // The replication number derived from the
       // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
       assertEquals(expectedBlockRep, ssInode.getBlockReplication());


[11/50] [abbrv] hadoop git commit: HADOOP-11349. RawLocalFileSystem leaks file descriptor while creating a file if creat succeeds but chmod fails. (Varun Saxena via Colin P. McCabe)

Posted by ka...@apache.org.
HADOOP-11349. RawLocalFileSystem leaks file descriptor while creating a file if creat succeeds but chmod fails. (Varun Saxena via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03867eb1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03867eb1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03867eb1

Branch: refs/heads/YARN-2139
Commit: 03867eb1bb173c66b5eb3bebf2fe03a1188635b5
Parents: a2e07a5
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Tue Dec 9 14:31:44 2014 -0800
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Tue Dec 9 14:31:44 2014 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  4 ++++
 .../apache/hadoop/fs/RawLocalFileSystem.java    | 25 ++++++++++++++++----
 2 files changed, 25 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03867eb1/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 40aab85..0019b3a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -552,6 +552,10 @@ Release 2.7.0 - UNRELEASED
 
     HADOOP-11378. Fix new findbugs warnings in hadoop-kms. (Li Lu via wheat9)
 
+    HADOOP-11349. RawLocalFileSystem leaks file descriptor while creating a
+    file if creat succeeds but chmod fails. (Varun Saxena via Colin P. McCabe)
+
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03867eb1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index b6b6f59..858789e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Shell;
@@ -295,8 +296,16 @@ public class RawLocalFileSystem extends FileSystem {
 
     FSDataOutputStream out = create(f,
         overwrite, bufferSize, replication, blockSize, progress);
-    setPermission(f, permission);
-    return out;
+    boolean success = false;
+    try {
+      setPermission(f, permission);
+      success = true;
+      return out;
+    } finally {
+      if (!success) {
+        IOUtils.cleanup(LOG, out);
+      }
+    }
   }
 
   @Override
@@ -306,8 +315,16 @@ public class RawLocalFileSystem extends FileSystem {
       Progressable progress) throws IOException {
     FSDataOutputStream out = create(f,
         overwrite, false, bufferSize, replication, blockSize, progress);
-    setPermission(f, permission);
-    return out;
+    boolean success = false;
+    try {
+      setPermission(f, permission);
+      success = true;
+      return out;
+    } finally {
+      if (!success) {
+        IOUtils.cleanup(LOG, out);
+      } 
+    }
   }
 
   @Override

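Both create() overloads now follow the close-on-failure idiom: the stream is only handed back if setPermission() succeeds, and otherwise the finally block routes it through IOUtils.cleanup() so the freshly opened descriptor is released. A stand-alone, JDK-only sketch of the same idiom, with the permission step reduced to a placeholder:

import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;

public class CloseOnFailure {
  static OutputStream createWithPermissions(String path) throws IOException {
    OutputStream out = new FileOutputStream(path);   // descriptor acquired here
    boolean success = false;
    try {
      applyPermissions(path);                        // may throw, like the chmod in the patch
      success = true;
      return out;
    } finally {
      if (!success) {
        try {
          out.close();                               // roughly what IOUtils.cleanup(LOG, out) does
        } catch (IOException ignored) {
          // a secondary failure while closing is deliberately swallowed
        }
      }
    }
  }

  private static void applyPermissions(String path) throws IOException {
    // placeholder for the permission change that can fail
  }
}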

[43/50] [abbrv] hadoop git commit: HDFS-7426. Change nntop JMX format to be a JSON blob.

Posted by ka...@apache.org.
HDFS-7426. Change nntop JMX format to be a JSON blob.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa7b9248
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa7b9248
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa7b9248

Branch: refs/heads/YARN-2139
Commit: fa7b9248e415c04bb555772f44fadaf8d9f34974
Parents: e5a6925
Author: Andrew Wang <wa...@apache.org>
Authored: Fri Dec 12 17:04:33 2014 -0800
Committer: Andrew Wang <wa...@apache.org>
Committed: Fri Dec 12 17:04:33 2014 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 +
 .../hdfs/server/namenode/FSNamesystem.java      |  38 +++-
 .../namenode/metrics/FSNamesystemMBean.java     |   7 +
 .../server/namenode/top/TopAuditLogger.java     |  20 +-
 .../hdfs/server/namenode/top/TopConf.java       |  29 +--
 .../server/namenode/top/metrics/TopMetrics.java | 216 ++++--------------
 .../top/window/RollingWindowManager.java        | 223 +++++++++++++------
 .../server/namenode/TestFSNamesystemMBean.java  |   2 +
 .../server/namenode/TestNameNodeMXBean.java     | 116 ++++++++++
 .../namenode/metrics/TestNameNodeMetrics.java   |  59 -----
 .../top/window/TestRollingWindowManager.java    |  63 +++---
 11 files changed, 417 insertions(+), 358 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7b9248/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index eeedb0d..9dfecc1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -455,6 +455,8 @@ Release 2.7.0 - UNRELEASED
 
     HDFS-7509. Avoid resolving path multiple times. (jing9)
 
+    HDFS-7426. Change nntop JMX format to be a JSON blob. (wang)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7b9248/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index b4b897a..1ac19fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -120,6 +120,7 @@ import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.TreeMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantLock;
@@ -241,6 +242,7 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
 import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
 import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
 import org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics;
+import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -281,6 +283,7 @@ import org.apache.hadoop.util.VersionInfo;
 import org.apache.log4j.Appender;
 import org.apache.log4j.AsyncAppender;
 import org.apache.log4j.Logger;
+import org.codehaus.jackson.map.ObjectMapper;
 import org.mortbay.util.ajax.JSON;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -539,6 +542,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
   private final FSImage fsImage;
 
+  private final TopConf topConf;
+  private TopMetrics topMetrics;
+
   /**
    * Notify that loading of this FSDirectory is complete, and
    * it is imageLoaded for use
@@ -842,6 +848,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       this.snapshotManager = new SnapshotManager(dir);
       this.cacheManager = new CacheManager(this, conf, blockManager);
       this.safeMode = new SafeModeInfo(conf);
+      this.topConf = new TopConf(conf);
       this.auditLoggers = initAuditLoggers(conf);
       this.isDefaultAuditLogger = auditLoggers.size() == 1 &&
         auditLoggers.get(0) instanceof DefaultAuditLogger;
@@ -952,13 +959,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
 
     // Add audit logger to calculate top users
-    if (conf.getBoolean(DFSConfigKeys.NNTOP_ENABLED_KEY,
-        DFSConfigKeys.NNTOP_ENABLED_DEFAULT)) {
-      String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
-      TopConf nntopConf = new TopConf(conf);
-      TopMetrics.initSingleton(conf, NamenodeRole.NAMENODE.name(), sessionId,
-          nntopConf.nntopReportingPeriodsMs);
-      auditLoggers.add(new TopAuditLogger());
+    if (topConf.isEnabled) {
+      topMetrics = new TopMetrics(conf, topConf.nntopReportingPeriodsMs);
+      auditLoggers.add(new TopAuditLogger(topMetrics));
     }
 
     return Collections.unmodifiableList(auditLoggers);
@@ -6013,6 +6016,27 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return getBlockManager().getDatanodeManager().getNumStaleStorages();
   }
 
+  @Override // FSNamesystemMBean
+  public String getTopUserOpCounts() {
+    if (!topConf.isEnabled) {
+      return null;
+    }
+
+    Date now = new Date();
+    final List<RollingWindowManager.TopWindow> topWindows =
+        topMetrics.getTopWindows();
+    Map<String, Object> topMap = new TreeMap<String, Object>();
+    topMap.put("windows", topWindows);
+    topMap.put("timestamp", DFSUtil.dateToIso8601String(now));
+    ObjectMapper mapper = new ObjectMapper();
+    try {
+      return mapper.writeValueAsString(topMap);
+    } catch (IOException e) {
+      LOG.warn("Failed to fetch TopUser metrics", e);
+    }
+    return null;
+  }
+
   /**
    * Increments, logs and then returns the stamp
    */

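getTopUserOpCounts() packs the per-window snapshots and an ISO-8601 timestamp into a map and lets Jackson render the whole thing as one JSON string. A stand-alone illustration of that assembly using the same Jackson 1.x ObjectMapper the patch imports; the nested maps below stand in for the real TopWindow/Op/User beans and the timestamp is a fixed sample value:

import java.util.Arrays;
import java.util.Map;
import java.util.TreeMap;

import org.codehaus.jackson.map.ObjectMapper;

public class TopUserOpCountsSketch {
  public static void main(String[] args) throws Exception {
    Map<String, Object> user = new TreeMap<String, Object>();
    user.put("user", "alice");
    user.put("count", 3L);

    Map<String, Object> op = new TreeMap<String, Object>();
    op.put("opType", "*");                      // TopConf.ALL_CMDS, the all-commands meta op
    op.put("totalCount", 3L);
    op.put("topUsers", Arrays.asList(user));

    Map<String, Object> window = new TreeMap<String, Object>();
    window.put("windowLenMs", 60000);
    window.put("ops", Arrays.asList(op));

    Map<String, Object> topMap = new TreeMap<String, Object>();
    topMap.put("timestamp", "2014-12-12T17:04:33-0800"); // stands in for DFSUtil.dateToIso8601String(now)
    topMap.put("windows", Arrays.asList(window));

    // Prints a blob shaped like the new TopUserOpCounts attribute.
    System.out.println(new ObjectMapper().writeValueAsString(topMap));
  }
}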
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7b9248/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
index 708591b..86f4bd6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
@@ -164,4 +164,11 @@ public interface FSNamesystemMBean {
    */
   public int getNumStaleStorages();
 
+  /**
+   * Returns a nested JSON object listing the top users for different RPC 
+   * operations over tracked time windows.
+   * 
+   * @return JSON string
+   */
+  public String getTopUserOpCounts();
 }

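Because the getter is exposed through the FSNamesystem MBean, the blob can be pulled off a running NameNode with nothing but the JDK's JMX client classes. A rough sketch: the service URL and port are placeholders and assume remote JMX has been enabled on the NameNode, and the ObjectName and attribute name are inferred from the getter above and the usual FSNamesystemState registration:

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class ReadTopUserOpCounts {
  public static void main(String[] args) throws Exception {
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://localhost:8004/jmxrmi");  // placeholder host/port
    JMXConnector connector = JMXConnectorFactory.connect(url);
    try {
      MBeanServerConnection mbs = connector.getMBeanServerConnection();
      ObjectName fsns = new ObjectName(
          "Hadoop:service=NameNode,name=FSNamesystemState");
      // Attribute backed by getTopUserOpCounts(); it is null while nntop is disabled.
      String json = (String) mbs.getAttribute(fsns, "TopUserOpCounts");
      System.out.println(json);
    } finally {
      connector.close();
    }
  }
}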
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7b9248/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopAuditLogger.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopAuditLogger.java
index 4f26b17..49c9153 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopAuditLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopAuditLogger.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode.top;
 
 import java.net.InetAddress;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -36,6 +37,14 @@ import org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics;
 public class TopAuditLogger implements AuditLogger {
   public static final Logger LOG = LoggerFactory.getLogger(TopAuditLogger.class);
 
+  private final TopMetrics topMetrics;
+
+  public TopAuditLogger(TopMetrics topMetrics) {
+    Preconditions.checkNotNull(topMetrics, "Cannot init with a null " +
+        "TopMetrics");
+    this.topMetrics = topMetrics;
+  }
+
   @Override
   public void initialize(Configuration conf) {
   }
@@ -43,12 +52,11 @@ public class TopAuditLogger implements AuditLogger {
   @Override
   public void logAuditEvent(boolean succeeded, String userName,
       InetAddress addr, String cmd, String src, String dst, FileStatus status) {
-
-    TopMetrics instance = TopMetrics.getInstance();
-    if (instance != null) {
-      instance.report(succeeded, userName, addr, cmd, src, dst, status);
-    } else {
-      LOG.error("TopMetrics is not initialized yet!");
+    try {
+      topMetrics.report(succeeded, userName, addr, cmd, src, dst, status);
+    } catch (Throwable t) {
+      LOG.error("An error occurred while reflecting the event in top service, "
+          + "event: (cmd={},userName={})", cmd, userName);
     }
 
     if (LOG.isDebugEnabled()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7b9248/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopConf.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopConf.java
index 0f4ebac..ba82032 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopConf.java
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.top;
 
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.primitives.Ints;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -27,34 +30,34 @@ import com.google.common.base.Preconditions;
  */
 @InterfaceAudience.Private
 public final class TopConf {
-
-  public static final String TOP_METRICS_REGISTRATION_NAME = "topusers";
-  public static final String TOP_METRICS_RECORD_NAME = "topparam";
   /**
-   * A meta command representing the total number of commands
+   * Whether TopMetrics are enabled
    */
-  public static final String CMD_TOTAL = "total";
+  public final boolean isEnabled;
+
   /**
-   * A meta user representing all users
+   * A meta command representing the total number of calls to all commands
    */
-  public static String ALL_USERS = "ALL";
+  public static final String ALL_CMDS = "*";
 
   /**
    * nntop reporting periods in milliseconds
    */
-  public final long[] nntopReportingPeriodsMs;
+  public final int[] nntopReportingPeriodsMs;
 
   public TopConf(Configuration conf) {
+    isEnabled = conf.getBoolean(DFSConfigKeys.NNTOP_ENABLED_KEY,
+        DFSConfigKeys.NNTOP_ENABLED_DEFAULT);
     String[] periodsStr = conf.getTrimmedStrings(
         DFSConfigKeys.NNTOP_WINDOWS_MINUTES_KEY,
         DFSConfigKeys.NNTOP_WINDOWS_MINUTES_DEFAULT);
-    nntopReportingPeriodsMs = new long[periodsStr.length];
+    nntopReportingPeriodsMs = new int[periodsStr.length];
     for (int i = 0; i < periodsStr.length; i++) {
-      nntopReportingPeriodsMs[i] = Integer.parseInt(periodsStr[i]) *
-          60L * 1000L; //min to ms
+      nntopReportingPeriodsMs[i] = Ints.checkedCast(
+          TimeUnit.MINUTES.toMillis(Integer.parseInt(periodsStr[i])));
     }
-    for (long aPeriodMs: nntopReportingPeriodsMs) {
-      Preconditions.checkArgument(aPeriodMs >= 60L * 1000L,
+    for (int aPeriodMs: nntopReportingPeriodsMs) {
+      Preconditions.checkArgument(aPeriodMs >= TimeUnit.MINUTES.toMillis(1),
           "minimum reporting period is 1 min!");
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7b9248/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
index e8a4e23..ab55392 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
@@ -17,67 +17,50 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.top.metrics;
 
-import static org.apache.hadoop.metrics2.impl.MsInfo.ProcessName;
-import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
-import static org.apache.hadoop.metrics2.lib.Interns.info;
-
 import java.net.InetAddress;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
 import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
-import org.apache.hadoop.metrics2.MetricsCollector;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.metrics2.MetricsSource;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager;
-import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.MetricValueMap;
+import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.TopWindow;
 
-/***
- * The interface to the top metrics
+/**
+ * The interface to the top metrics.
+ * <p/>
+ * Metrics are collected by a custom audit logger, {@link org.apache.hadoop
+ * .hdfs.server.namenode.top.TopAuditLogger}, which calls TopMetrics to
+ * increment per-operation, per-user counts on every audit log call. These
+ * counts are used to show the top users by NameNode operation as well as
+ * across all operations.
+ * <p/>
+ * TopMetrics maintains these counts for a configurable number of time
+ * intervals, e.g. 1min, 5min, 25min. Each interval is tracked by a
+ * RollingWindowManager.
  * <p/>
- * The producers use the {@link #report} method to report events and the
- * consumers use {@link #getMetrics(MetricsCollector, boolean)} to retrieve the
- * current top metrics. The default consumer is JMX but it could be any other
- * user interface.
+ * These metrics are published as a JSON string via {@link org.apache.hadoop
+ * .hdfs.server .namenode.metrics.FSNamesystemMBean#getTopWindows}. This is
+ * done by calling {@link org.apache.hadoop.hdfs.server.namenode.top.window
+ * .RollingWindowManager#snapshot} on each RollingWindowManager.
  * <p/>
  * Thread-safe: relies on thread-safety of RollingWindowManager
  */
 @InterfaceAudience.Private
-public class TopMetrics implements MetricsSource {
+public class TopMetrics {
   public static final Logger LOG = LoggerFactory.getLogger(TopMetrics.class);
 
-  enum Singleton {
-    INSTANCE;
-
-    volatile TopMetrics impl = null;
-
-    synchronized TopMetrics init(Configuration conf, String processName,
-        String sessionId, long[] reportingPeriods) {
-      if (impl == null) {
-        impl =
-            create(conf, processName, sessionId, reportingPeriods,
-                DefaultMetricsSystem.instance());
-      }
-      logConf(conf);
-      return impl;
-    }
-  }
-
   private static void logConf(Configuration conf) {
     LOG.info("NNTop conf: " + DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_KEY +
         " = " +  conf.get(DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_KEY));
@@ -88,127 +71,34 @@ public class TopMetrics implements MetricsSource {
   }
 
   /**
-   * Return only the shortest periods for default
-   * TODO: make it configurable
-   */
-  final boolean smallestOnlyDefault = true;
-
-  /**
-   * The smallest of reporting periods
-   */
-  long smallestPeriod = Long.MAX_VALUE;
-
-  /**
-   * processName and sessionId might later be leveraged later when we aggregate
-   * report from multiple federated name nodes
-   */
-  final String processName, sessionId;
-
-  /**
    * A map from reporting periods to WindowManager. Thread-safety is provided by
    * the fact that the mapping is not changed after construction.
    */
-  final Map<Long, RollingWindowManager> rollingWindowManagers =
-      new HashMap<Long, RollingWindowManager>();
+  final Map<Integer, RollingWindowManager> rollingWindowManagers =
+      new HashMap<Integer, RollingWindowManager>();
 
-  TopMetrics(Configuration conf, String processName, String sessionId,
-      long[] reportingPeriods) {
-    this.processName = processName;
-    this.sessionId = sessionId;
+  public TopMetrics(Configuration conf, int[] reportingPeriods) {
+    logConf(conf);
     for (int i = 0; i < reportingPeriods.length; i++) {
-      smallestPeriod = Math.min(smallestPeriod, reportingPeriods[i]);
       rollingWindowManagers.put(reportingPeriods[i], new RollingWindowManager(
           conf, reportingPeriods[i]));
     }
   }
 
-  public static TopMetrics create(Configuration conf, String processName,
-      String sessionId, long[] reportingPeriods, MetricsSystem ms) {
-    return ms.register(TopConf.TOP_METRICS_REGISTRATION_NAME,
-        "top metrics of the namenode in a last period of time", new TopMetrics(
-            conf, processName, sessionId, reportingPeriods));
-  }
-
-  public static TopMetrics initSingleton(Configuration conf,
-      String processName, String sessionId, long[] reportingPeriods) {
-    return Singleton.INSTANCE.init(conf, processName, sessionId,
-        reportingPeriods);
-  }
-
-  public static TopMetrics getInstance() {
-    TopMetrics topMetrics = Singleton.INSTANCE.impl;
-    Preconditions.checkArgument(topMetrics != null,
-          "The TopMetric singleton instance is not initialized."
-              + " Have you called initSingleton first?");
-    return topMetrics;
-  }
-
   /**
-   * In testing, the previous initialization should be reset if the entire
-   * metric system is reinitialized
+   * Get a list of the current TopWindow statistics, one TopWindow per tracked
+   * time interval.
    */
-  @VisibleForTesting
-  public static void reset() {
-    Singleton.INSTANCE.impl = null;
-  }
-
-  @Override
-  public void getMetrics(MetricsCollector collector, boolean all) {
-    long realTime = Time.monotonicNow();
-    getMetrics(smallestOnlyDefault, realTime, collector, all);
-  }
-
-  public void getMetrics(boolean smallestOnly, long currTime,
-      MetricsCollector collector, boolean all) {
-    for (Entry<Long, RollingWindowManager> entry : rollingWindowManagers
+  public List<TopWindow> getTopWindows() {
+    long monoTime = Time.monotonicNow();
+    List<TopWindow> windows = Lists.newArrayListWithCapacity
+        (rollingWindowManagers.size());
+    for (Entry<Integer, RollingWindowManager> entry : rollingWindowManagers
         .entrySet()) {
-      if (!smallestOnly || smallestPeriod == entry.getKey()) {
-        getMetrics(currTime, collector, entry.getKey(), entry.getValue(), all);
-      }
-    }
-  }
-
-  /**
-   * Get metrics for a particular recording period and its corresponding
-   * {@link RollingWindowManager}
-   * <p/>
-   *
-   * @param collector the metric collector
-   * @param period the reporting period
-   * @param rollingWindowManager the window manager corresponding to the
-   *          reporting period
-   * @param all currently ignored
-   */
-  void getMetrics(long currTime, MetricsCollector collector, Long period,
-      RollingWindowManager rollingWindowManager, boolean all) {
-    MetricsRecordBuilder rb =
-        collector.addRecord(createTopMetricsRecordName(period))
-            .setContext("namenode").tag(ProcessName, processName)
-            .tag(SessionId, sessionId);
-
-    MetricValueMap snapshotMetrics = rollingWindowManager.snapshot(currTime);
-    LOG.debug("calling snapshot, result size is: " + snapshotMetrics.size());
-    for (Map.Entry<String, Number> entry : snapshotMetrics.entrySet()) {
-      String key = entry.getKey();
-      Number value = entry.getValue();
-      LOG.debug("checking an entry: key: {} value: {}", key, value);
-      long min = period / 1000L / 60L; //ms -> min
-      String desc = "top user of name node in the past " + min + " minutes";
-
-      if (value instanceof Integer) {
-        rb.addGauge(info(key, desc), (Integer) value);
-      } else if (value instanceof Long) {
-        rb.addGauge(info(key, desc), (Long) value);
-      } else if (value instanceof Float) {
-        rb.addGauge(info(key, desc), (Float) value);
-      } else if (value instanceof Double) {
-        rb.addGauge(info(key, desc), (Double) value);
-      } else {
-        LOG.warn("Unsupported metric type: " + value.getClass());
-      }
+      TopWindow window = entry.getValue().snapshot(monoTime);
+      windows.add(window);
     }
-    LOG.debug("END iterating over metrics, result size is: {}",
-        snapshotMetrics.size());
+    return windows;
   }
 
   /**
@@ -216,18 +106,10 @@ public class TopMetrics implements MetricsSource {
    * log file. This is to be consistent when {@link TopMetrics} is charged with
    * data read back from log files instead of being invoked directly by the
    * FsNamesystem
-   *
-   * @param succeeded
-   * @param userName
-   * @param addr
-   * @param cmd
-   * @param src
-   * @param dst
-   * @param status
    */
   public void report(boolean succeeded, String userName, InetAddress addr,
       String cmd, String src, String dst, FileStatus status) {
-    //currently we nntop makes use of only the username and the command
+    // currently nntop only makes use of the username and the command
     report(userName, cmd);
   }
 
@@ -239,27 +121,11 @@ public class TopMetrics implements MetricsSource {
   public void report(long currTime, String userName, String cmd) {
     LOG.debug("a metric is reported: cmd: {} user: {}", cmd, userName);
     userName = UserGroupInformation.trimLoginMethod(userName);
-    try {
-      for (RollingWindowManager rollingWindowManager : rollingWindowManagers
-          .values()) {
-        rollingWindowManager.recordMetric(currTime, cmd, userName, 1);
-        rollingWindowManager.recordMetric(currTime,
-            TopConf.CMD_TOTAL, userName, 1);
-      }
-    } catch (Throwable t) {
-      LOG.error("An error occurred while reflecting the event in top service, "
-          + "event: (time,cmd,userName)=(" + currTime + "," + cmd + ","
-          + userName);
+    for (RollingWindowManager rollingWindowManager : rollingWindowManagers
+        .values()) {
+      rollingWindowManager.recordMetric(currTime, cmd, userName, 1);
+      rollingWindowManager.recordMetric(currTime,
+          TopConf.ALL_CMDS, userName, 1);
     }
   }
-
-  /***
-   *
-   * @param period the reporting period length in ms
-   * @return
-   */
-  public static String createTopMetricsRecordName(Long period) {
-    return TopConf.TOP_METRICS_RECORD_NAME + "-" + period;
-  }
-
 }

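With the singleton and MetricsSource plumbing gone, a TopMetrics instance is simply constructed with the tracked window lengths, fed audit events through report(), and drained through getTopWindows(), which is how FSNamesystem and TopAuditLogger now use it. A small sketch of that lifecycle against the constructor and methods above; the user names, paths and window lengths are arbitrary sample values, and the snippet assumes the hadoop-hdfs classes are on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics;
import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.TopWindow;

public class TopMetricsSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    int[] reportingPeriodsMs = {60000, 300000};   // 1 min and 5 min windows
    TopMetrics metrics = new TopMetrics(conf, reportingPeriodsMs);

    // The audit logger funnels every audit event into report(); only the user
    // name and the command are used, so the remaining fields may be null.
    metrics.report(true, "alice", null, "create", "/tmp/a", null, null);
    metrics.report(true, "alice", null, "create", "/tmp/b", null, null);
    metrics.report(true, "bob", null, "delete", "/tmp/a", null, null);

    // One TopWindow per configured reporting period.
    for (TopWindow window : metrics.getTopWindows()) {
      System.out.println(window.getWindowLenMs() + " ms window, "
          + window.getOps().size() + " op types");
    }
  }
}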
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7b9248/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
index d818cce..00e7087 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
@@ -17,21 +17,22 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.top.window;
 
-import java.util.HashMap;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Map;
 import java.util.PriorityQueue;
 import java.util.Set;
+import java.util.Stack;
 import java.util.concurrent.ConcurrentHashMap;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.primitives.Ints;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.conf.Configuration;
 
 /**
  * A class to manage the set of {@link RollingWindow}s. This class is the
@@ -46,25 +47,93 @@ public class RollingWindowManager {
   public static final Logger LOG = LoggerFactory.getLogger(
       RollingWindowManager.class);
 
-  private int windowLenMs;
-  private int bucketsPerWindow; // e.g., 10 buckets per minute
-  private int topUsersCnt; // e.g., report top 10 metrics
+  private final int windowLenMs;
+  private final int bucketsPerWindow; // e.g., 10 buckets per minute
+  private final int topUsersCnt; // e.g., report top 10 metrics
+
+  static private class RollingWindowMap extends
+      ConcurrentHashMap<String, RollingWindow> {
+    private static final long serialVersionUID = -6785807073237052051L;
+  }
 
   /**
-   * Create a metric name composed of the command and user
-   *
-   * @param command the command executed
-   * @param user    the user
-   * @return a composed metric name
+   * Represents a snapshot of the rolling window. It contains one Op per 
+   * operation in the window, with ranked users for each Op.
    */
-  @VisibleForTesting
-  public static String createMetricName(String command, String user) {
-    return command + "." + user;
+  public static class TopWindow {
+    private final int windowMillis;
+    private final List<Op> top;
+
+    public TopWindow(int windowMillis) {
+      this.windowMillis = windowMillis;
+      this.top = Lists.newArrayList();
+    }
+
+    public void addOp(Op op) {
+      top.add(op);
+    }
+
+    public int getWindowLenMs() {
+      return windowMillis;
+    }
+
+    public List<Op> getOps() {
+      return top;
+    }
   }
 
-  static private class RollingWindowMap extends
-      ConcurrentHashMap<String, RollingWindow> {
-    private static final long serialVersionUID = -6785807073237052051L;
+  /**
+   * Represents an operation within a TopWindow. It contains a ranked 
+   * set of the top users for the operation.
+   */
+  public static class Op {
+    private final String opType;
+    private final List<User> topUsers;
+    private final long totalCount;
+
+    public Op(String opType, long totalCount) {
+      this.opType = opType;
+      this.topUsers = Lists.newArrayList();
+      this.totalCount = totalCount;
+    }
+
+    public void addUser(User u) {
+      topUsers.add(u);
+    }
+
+    public String getOpType() {
+      return opType;
+    }
+
+    public List<User> getTopUsers() {
+      return topUsers;
+    }
+
+    public long getTotalCount() {
+      return totalCount;
+    }
+  }
+
+  /**
+   * Represents a user who called an Op within a TopWindow. Specifies the 
+   * user and the number of times the user called the operation.
+   */
+  public static class User {
+    private final String user;
+    private final long count;
+
+    public User(String user, long count) {
+      this.user = user;
+      this.count = count;
+    }
+
+    public String getUser() {
+      return user;
+    }
+
+    public long getCount() {
+      return count;
+    }
   }
 
   /**
@@ -75,8 +144,9 @@ public class RollingWindowManager {
   public ConcurrentHashMap<String, RollingWindowMap> metricMap =
       new ConcurrentHashMap<String, RollingWindowMap>();
 
-  public RollingWindowManager(Configuration conf, long reportingPeriodMs) {
-    windowLenMs = (int) reportingPeriodMs;
+  public RollingWindowManager(Configuration conf, int reportingPeriodMs) {
+    
+    windowLenMs = reportingPeriodMs;
     bucketsPerWindow =
         conf.getInt(DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_KEY,
             DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_DEFAULT);
@@ -112,53 +182,71 @@ public class RollingWindowManager {
    * Take a snapshot of current top users in the past period.
    *
    * @param time the current time
-   * @return a map between the top metrics and their values. The user is encoded
-   * in the metric name. Refer to {@link RollingWindowManager#createMetricName} for
-   * the actual format.
+   * @return a TopWindow describing the top users for each metric in the 
+   * window.
    */
-  public MetricValueMap snapshot(long time) {
-    MetricValueMap map = new MetricValueMap();
-    Set<String> metricNames = metricMap.keySet();
-    LOG.debug("iterating in reported metrics, size={} values={}",
-        metricNames.size(), metricNames);
-    for (Map.Entry<String,RollingWindowMap> rwEntry: metricMap.entrySet()) {
-      String metricName = rwEntry.getKey();
-      RollingWindowMap rollingWindows = rwEntry.getValue();
-      TopN topN = new TopN(topUsersCnt);
-      Iterator<Map.Entry<String, RollingWindow>> iterator =
-          rollingWindows.entrySet().iterator();
-      while (iterator.hasNext()) {
-        Map.Entry<String, RollingWindow> entry = iterator.next();
-        String userName = entry.getKey();
-        RollingWindow aWindow = entry.getValue();
-        long windowSum = aWindow.getSum(time);
-        // do the gc here
-        if (windowSum == 0) {
-          LOG.debug("gc window of metric: {} userName: {}",
-              metricName, userName);
-          iterator.remove();
-          continue;
-        }
-        LOG.debug("offer window of metric: {} userName: {} sum: {}",
-            metricName, userName, windowSum);
-        topN.offer(new NameValuePair(userName, windowSum));
-      }
-      int n = topN.size();
-      LOG.info("topN size for command " + metricName + " is: " + n);
-      if (n == 0) {
+  public TopWindow snapshot(long time) {
+    TopWindow window = new TopWindow(windowLenMs);
+    if (LOG.isDebugEnabled()) {
+      Set<String> metricNames = metricMap.keySet();
+      LOG.debug("iterating in reported metrics, size={} values={}",
+          metricNames.size(), metricNames);
+    }
+    for (Map.Entry<String, RollingWindowMap> entry : metricMap.entrySet()) {
+      String metricName = entry.getKey();
+      RollingWindowMap rollingWindows = entry.getValue();
+      TopN topN = getTopUsersForMetric(time, metricName, rollingWindows);
+      final int size = topN.size();
+      if (size == 0) {
         continue;
       }
-      String allMetricName =
-          createMetricName(metricName, TopConf.ALL_USERS);
-      map.put(allMetricName, Long.valueOf(topN.total));
-      for (int i = 0; i < n; i++) {
-        NameValuePair userEntry = topN.poll();
-        String userMetricName =
-            createMetricName(metricName, userEntry.name);
-        map.put(userMetricName, Long.valueOf(userEntry.value));
+      Op op = new Op(metricName, topN.getTotal());
+      window.addOp(op);
+      // Reverse the users from the TopUsers using a stack, 
+      // since we'd like them sorted in descending rather than ascending order
+      Stack<NameValuePair> reverse = new Stack<NameValuePair>();
+      for (int i = 0; i < size; i++) {
+        reverse.push(topN.poll());
       }
+      for (int i = 0; i < size; i++) {
+        NameValuePair userEntry = reverse.pop();
+        User user = new User(userEntry.name, Long.valueOf(userEntry.value));
+        op.addUser(user);
+      }
+    }
+    return window;
+  }
+
+  /**
+   * Calculates the top N users over a time interval.
+   * 
+   * @param time the current time
+   * @param metricName Name of metric
+   * @return a TopN of the users with the highest counts for this metric
+   */
+  private TopN getTopUsersForMetric(long time, String metricName, 
+      RollingWindowMap rollingWindows) {
+    TopN topN = new TopN(topUsersCnt);
+    Iterator<Map.Entry<String, RollingWindow>> iterator =
+        rollingWindows.entrySet().iterator();
+    while (iterator.hasNext()) {
+      Map.Entry<String, RollingWindow> entry = iterator.next();
+      String userName = entry.getKey();
+      RollingWindow aWindow = entry.getValue();
+      long windowSum = aWindow.getSum(time);
+      // do the gc here
+      if (windowSum == 0) {
+        LOG.debug("gc window of metric: {} userName: {}",
+            metricName, userName);
+        iterator.remove();
+        continue;
+      }
+      LOG.debug("offer window of metric: {} userName: {} sum: {}",
+          metricName, userName, windowSum);
+      topN.offer(new NameValuePair(userName, windowSum));
     }
-    return map;
+    LOG.info("topN size for command {} is: {}", metricName, topN.size());
+    return topN;
   }
 
   /**
@@ -190,7 +278,8 @@ public class RollingWindowManager {
   }
 
   /**
-   * A pair of a name and its corresponding value
+   * A pair of a name and its corresponding value. Defines a custom 
+   * comparator so the TopN PriorityQueue sorts based on the count.
    */
   static private class NameValuePair implements Comparable<NameValuePair> {
     String name;
@@ -254,12 +343,4 @@ public class RollingWindowManager {
       return total;
     }
   }
-
-  /**
-   * A mapping from metric names to their absolute values and their percentage
-   */
-  @InterfaceAudience.Private
-  public static class MetricValueMap extends HashMap<String, Number> {
-    private static final long serialVersionUID = 8936732010242400171L;
-  }
 }
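
The snapshot() method above keeps only the largest entries in a bounded min-heap (TopN) and then reverses them with a stack before reporting, since polling a min-heap yields ascending order. A minimal, self-contained sketch of the same pattern using plain JDK classes (not the TopN/NameValuePair types from this patch; the sample counts are made up):

  import java.util.ArrayDeque;
  import java.util.Deque;
  import java.util.PriorityQueue;

  public class TopNReversalSketch {
    public static void main(String[] args) {
      final int n = 3;
      // Min-heap ordered by count: the smallest retained entry sits at the head,
      // so once the heap holds n entries each additional offer evicts the current
      // smallest and the n largest counts survive.
      PriorityQueue<long[]> topN =
          new PriorityQueue<long[]>((a, b) -> Long.compare(a[1], b[1]));
      long[][] samples = {{1, 40}, {2, 10}, {3, 25}, {4, 5}, {5, 30}};
      for (long[] s : samples) {
        topN.offer(s);
        if (topN.size() > n) {
          topN.poll();                     // drop the current smallest
        }
      }
      // poll() returns the survivors in ascending order of count; pushing them
      // through a stack reverses that into the descending order a top-users
      // report wants.
      Deque<long[]> reverse = new ArrayDeque<long[]>();
      while (!topN.isEmpty()) {
        reverse.push(topN.poll());
      }
      while (!reverse.isEmpty()) {
        long[] e = reverse.pop();
        System.out.println("user-" + e[0] + " count=" + e[1]);   // 40, 30, 25
      }
    }
  }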

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7b9248/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
index 39e1165..3703c2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
@@ -96,6 +96,8 @@ public class TestFSNamesystemMBean {
             "MaxObjects"));
         Integer numStaleStorages = (Integer) (mbs.getAttribute(
             mxbeanNameFsns, "NumStaleStorages"));
+        String topUsers =
+            (String) (mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts"));
 
         // Metrics that belong to "NameNodeInfo".
         // These are metrics that FSNamesystem registers directly with MBeanServer.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7b9248/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index 03ade90..c649621 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -26,9 +26,12 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
+import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
 import org.apache.hadoop.util.VersionInfo;
+import org.codehaus.jackson.map.ObjectMapper;
 import org.junit.Test;
 import org.mortbay.util.ajax.JSON;
 
@@ -38,10 +41,15 @@ import java.io.File;
 import java.lang.management.ManagementFactory;
 import java.net.URI;
 import java.util.Collection;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
+import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.Op;
+import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.TopWindow;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 /**
@@ -257,4 +265,112 @@ public class TestNameNodeMXBean {
       }
     }
   }
+
+  @Test(timeout=120000)
+  @SuppressWarnings("unchecked")
+  public void testTopUsers() throws Exception {
+    final Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      cluster.waitActive();
+      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+      ObjectName mxbeanNameFsns = new ObjectName(
+          "Hadoop:service=NameNode,name=FSNamesystemState");
+      FileSystem fs = cluster.getFileSystem();
+      final Path path = new Path("/");
+      final int NUM_OPS = 10;
+      for (int i=0; i< NUM_OPS; i++) {
+        fs.listStatus(path);
+        fs.setTimes(path, 0, 1);
+      }
+      String topUsers =
+          (String) (mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts"));
+      ObjectMapper mapper = new ObjectMapper();
+      Map<String, Object> map = mapper.readValue(topUsers, Map.class);
+      assertTrue("Could not find map key timestamp", 
+          map.containsKey("timestamp"));
+      assertTrue("Could not find map key windows", map.containsKey("windows"));
+      List<Map<String, List<Map<String, Object>>>> windows =
+          (List<Map<String, List<Map<String, Object>>>>) map.get("windows");
+      assertEquals("Unexpected num windows", 3, windows.size());
+      for (Map<String, List<Map<String, Object>>> window : windows) {
+        final List<Map<String, Object>> ops = window.get("ops");
+        assertEquals("Unexpected num ops", 3, ops.size());
+        for (Map<String, Object> op: ops) {
+          final long count = Long.parseLong(op.get("totalCount").toString());
+          final String opType = op.get("opType").toString();
+          final int expected;
+          if (opType.equals(TopConf.ALL_CMDS)) {
+            expected = 2*NUM_OPS;
+          } else {
+            expected = NUM_OPS;
+          }
+          assertEquals("Unexpected total count", expected, count);
+        }
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  @Test(timeout=120000)
+  public void testTopUsersDisabled() throws Exception {
+    final Configuration conf = new Configuration();
+    // Disable nntop
+    conf.setBoolean(DFSConfigKeys.NNTOP_ENABLED_KEY, false);
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      cluster.waitActive();
+      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+      ObjectName mxbeanNameFsns = new ObjectName(
+          "Hadoop:service=NameNode,name=FSNamesystemState");
+      FileSystem fs = cluster.getFileSystem();
+      final Path path = new Path("/");
+      final int NUM_OPS = 10;
+      for (int i=0; i< NUM_OPS; i++) {
+        fs.listStatus(path);
+        fs.setTimes(path, 0, 1);
+      }
+      String topUsers =
+          (String) (mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts"));
+      assertNull("Did not expect to find TopUserOpCounts bean!", topUsers);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  @Test(timeout=120000)
+  public void testTopUsersNoPeriods() throws Exception {
+    final Configuration conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.NNTOP_ENABLED_KEY, true);
+    conf.set(DFSConfigKeys.NNTOP_WINDOWS_MINUTES_KEY, "");
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      cluster.waitActive();
+      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+      ObjectName mxbeanNameFsns = new ObjectName(
+          "Hadoop:service=NameNode,name=FSNamesystemState");
+      FileSystem fs = cluster.getFileSystem();
+      final Path path = new Path("/");
+      final int NUM_OPS = 10;
+      for (int i=0; i< NUM_OPS; i++) {
+        fs.listStatus(path);
+        fs.setTimes(path, 0, 1);
+      }
+      String topUsers =
+          (String) (mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts"));
+      assertNotNull("Expected TopUserOpCounts bean!", topUsers);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
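
For readers decoding the generic-Map assertions above: TopUserOpCounts is a JSON string whose shape, as far as this test exercises it, is roughly the following. Only the keys the test asserts (timestamp, windows, ops, opType, totalCount) are shown; the timestamp value and the op names (taken here from the audit-log command names) are illustrative, the aggregate label is whatever TopConf.ALL_CMDS resolves to (its literal value is not in this diff), and per-user details plus window metadata are elided with "...":

  {
    "timestamp" : 1418166000000,
    "windows"   : [
      {
        "ops" : [
          { "opType" : "listStatus",         "totalCount" : 10, ... },
          { "opType" : "setTimes",           "totalCount" : 10, ... },
          { "opType" : "<TopConf.ALL_CMDS>", "totalCount" : 20, ... }
        ],
        ...
      },
      ...
    ]
  }

The test expects three windows in total, and within each window the aggregate entry counts both operations (2 * NUM_OPS = 20).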

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7b9248/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index c028a4a..6c37822 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -47,10 +47,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
-import org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics;
 import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
-import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.MetricsSource;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -58,7 +55,6 @@ import org.apache.hadoop.test.MetricsAsserts;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -93,11 +89,6 @@ public class TestNameNodeMetrics {
     CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
     ((Log4JLogger)LogFactory.getLog(MetricsAsserts.class))
       .getLogger().setLevel(Level.DEBUG);
-    /**
-     * need it to test {@link #testTopAuditLogger}
-     */
-    CONF.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
-        TopAuditLogger.class.getName());
   }
   
   private MiniDFSCluster cluster;
@@ -112,7 +103,6 @@ public class TestNameNodeMetrics {
   
   @Before
   public void setUp() throws Exception {
-    TopMetrics.reset();//reset the static init done by prev test
     cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(DATANODE_COUNT).build();
     cluster.waitActive();
     namesystem = cluster.getNamesystem();
@@ -465,53 +455,4 @@ public class TestNameNodeMetrics {
     assertQuantileGauges("Syncs1s", rb);
     assertQuantileGauges("BlockReport1s", rb);
   }
-
-  /**
-   * Test whether {@link TopMetrics} is registered with metrics system
-   * @throws Exception
-   */
-  @Test
-  public void testTopMetrics() throws Exception {
-    final String testUser = "NNTopTestUser";
-    final String testOp = "NNTopTestOp";
-    final String metricName =
-        RollingWindowManager.createMetricName(testOp, testUser);
-    TopMetrics.getInstance().report(testUser, testOp);
-    final String regName = TopConf.TOP_METRICS_REGISTRATION_NAME;
-    MetricsRecordBuilder rb = getMetrics(regName);
-    assertGauge(metricName, 1L, rb);
-  }
-
-  /**
-   * Test whether {@link TopAuditLogger} is registered as an audit logger
-   * @throws Exception
-   */
-  @Test
-  public void testTopAuditLogger() throws Exception {
-    //note: the top audit logger should already be set in conf
-    //issue one command, any command is fine
-    FileSystem fs = cluster.getFileSystem();
-    long time = System.currentTimeMillis();
-    fs.setTimes(new Path("/"), time, time);
-    //the command should be reflected in the total count of all users
-    final String testUser = TopConf.ALL_USERS;
-    final String testOp = TopConf.CMD_TOTAL;
-    final String metricName =
-        RollingWindowManager.createMetricName(testOp, testUser);
-    final String regName = TopConf.TOP_METRICS_REGISTRATION_NAME;
-    MetricsRecordBuilder rb = getMetrics(regName);
-    assertGaugeGreaterThan(metricName, 1L, rb);
-  }
-
-  /**
-   * Assert a long gauge metric greater than
-   * @param name  of the metric
-   * @param expected  minimum expected value of the metric
-   * @param rb  the record builder mock used to getMetrics
-   */
-  public static void assertGaugeGreaterThan(String name, long expected,
-                                 MetricsRecordBuilder rb) {
-    Assert.assertTrue("Bad value for metric " + name,
-        expected <= MetricsAsserts.getLongGauge(name, rb));
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa7b9248/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/top/window/TestRollingWindowManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/top/window/TestRollingWindowManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/top/window/TestRollingWindowManager.java
index de21714..494ed08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/top/window/TestRollingWindowManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/top/window/TestRollingWindowManager.java
@@ -17,16 +17,19 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.top.window;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
 import org.junit.Before;
 import org.junit.Test;
 
-import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.MetricValueMap;
+import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.Op;
+import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.TopWindow;
+import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.User;
+import static org.junit.Assert.assertEquals;
 
 public class TestRollingWindowManager {
 
@@ -61,33 +64,39 @@ public class TestRollingWindowManager {
     for (int i = 0; i < users.length; i++)
       manager.recordMetric(time, "close", users[i], i + 1);
     time++;
-    MetricValueMap tops = manager.snapshot(time);
+    TopWindow tops = manager.snapshot(time);
 
-    assertEquals("The number of returned top metrics is invalid",
-        2 * (N_TOP_USERS + 1), tops.size());
-    int userIndex = users.length - 2;
-    String metricName = RollingWindowManager.createMetricName("open",
-        users[userIndex]);
-    boolean includes = tops.containsKey(metricName);
-    assertTrue("The order of entries in top metrics is wrong", includes);
-    assertEquals("The reported value by top is different from recorded one",
-        (userIndex + 1) * 2, ((Long) tops.get(metricName)).longValue());
+    assertEquals("Unexpected number of ops", 2, tops.getOps().size());
+    for (Op op : tops.getOps()) {
+      final List<User> topUsers = op.getTopUsers();
+      assertEquals("Unexpected number of users", N_TOP_USERS, topUsers.size());
+      if ("open".equals(op.getOpType())) {
+        for (int i = 0; i < topUsers.size(); i++) {
+          User user = topUsers.get(i);
+          assertEquals("Unexpected count for user " + user.getUser(),
+              (users.length-i)*2, user.getCount());
+        }
+        // Closed form of sum(range(2,42,2))
+        assertEquals("Unexpected total count for op", 
+            (2+(users.length*2))*(users.length/2),
+            op.getTotalCount());
+      }
+    }
 
     // move the window forward not to see the "open" results
     time += WINDOW_LEN_MS - 2;
-    // top should not include only "close" results
     tops = manager.snapshot(time);
-    assertEquals("The number of returned top metrics is invalid",
-        N_TOP_USERS + 1, tops.size());
-    includes = tops.containsKey(metricName);
-    assertFalse("After rolling, the top list still includes the stale metrics",
-        includes);
-
-    metricName = RollingWindowManager.createMetricName("close",
-        users[userIndex]);
-    includes = tops.containsKey(metricName);
-    assertTrue("The order of entries in top metrics is wrong", includes);
-    assertEquals("The reported value by top is different from recorded one",
-        (userIndex + 1), ((Long) tops.get(metricName)).longValue());
+    assertEquals("Unexpected number of ops", 1, tops.getOps().size());
+    final Op op = tops.getOps().get(0);
+    assertEquals("Should only see close ops", "close", op.getOpType());
+    final List<User> topUsers = op.getTopUsers();
+    for (int i = 0; i < topUsers.size(); i++) {
+      User user = topUsers.get(i);
+      assertEquals("Unexpected count for user " + user.getUser(),
+          (users.length-i), user.getCount());
+    }
+    // Closed form of sum(range(1,21))
+    assertEquals("Unexpected total count for op",
+        (1 + users.length) * (users.length / 2), op.getTotalCount());
   }
 }
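
The two "Closed form" comments in this test are the arithmetic-series identity, written with L = users.length (the number of users recorded in setUp(), which is outside this hunk):

  sum(2, 4, ..., 2L) = (2 + 2L) * L / 2  ->  (2 + users.length*2) * (users.length/2)
  sum(1, 2, ..., L)  = (1 + L) * L / 2   ->  (1 + users.length) * (users.length/2)

The comment's range(2,42,2) corresponds to L = 20, i.e. counts 2 through 40. Both spellings rely on users.length being even, since users.length/2 is integer division.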


[12/50] [abbrv] hadoop git commit: YARN-2930. Fixed TestRMRestart#testRMRestartRecoveringNodeLabelManager intermittent failure. Contributed by Wangda Tan

Posted by ka...@apache.org.
YARN-2930. Fixed TestRMRestart#testRMRestartRecoveringNodeLabelManager intermittent failure. Contributed by Wangda Tan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ed90a57
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ed90a57
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ed90a57

Branch: refs/heads/YARN-2139
Commit: 2ed90a57fdd31d194b4a690df68b158ed9743dba
Parents: 03867eb
Author: Jian He <ji...@apache.org>
Authored: Tue Dec 9 16:47:24 2014 -0800
Committer: Jian He <ji...@apache.org>
Committed: Tue Dec 9 16:48:04 2014 -0800

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                       |  3 +++
 .../yarn/server/resourcemanager/TestRMRestart.java    | 14 ++++++++++++++
 2 files changed, 17 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ed90a57/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d87322f..0173782 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -206,6 +206,9 @@ Release 2.7.0 - UNRELEASED
     YARN-2910. FSLeafQueue can throw ConcurrentModificationException. 
     (Wilfred Spiegelenburg via kasha)
 
+    YARN-2930. Fixed TestRMRestart#testRMRestartRecoveringNodeLabelManager
+    intermittent failure. (Wangda Tan via jianhe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ed90a57/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
index 29f0208..fcb2be7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
@@ -39,6 +39,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.DataOutputBuffer;
@@ -2048,6 +2049,19 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
   // 4. Get cluster and node lobel, it should be present by recovering it
   @Test(timeout = 20000)
   public void testRMRestartRecoveringNodeLabelManager() throws Exception {
+    // Initial FS node label store root dir to a random tmp dir
+    File nodeLabelFsStoreDir =
+        new File("target", this.getClass().getSimpleName()
+            + "-testRMRestartRecoveringNodeLabelManager");
+    if (nodeLabelFsStoreDir.exists()) {
+      FileUtils.deleteDirectory(nodeLabelFsStoreDir);
+    }
+    nodeLabelFsStoreDir.deleteOnExit();
+    
+    String nodeLabelFsStoreDirURI = nodeLabelFsStoreDir.toURI().toString(); 
+    conf.set(YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR,
+        nodeLabelFsStoreDirURI);
+    
     MemoryRMStateStore memStore = new MemoryRMStateStore();
     memStore.init(conf);
     MockRM rm1 = new MockRM(conf, memStore) {


[18/50] [abbrv] hadoop git commit: HADOOP-11388. Remove deprecated o.a.h.metrics.file.FileContext. Contributed by Li Lu.

Posted by ka...@apache.org.
HADOOP-11388. Remove deprecated o.a.h.metrics.file.FileContext. Contributed by Li Lu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44870dcf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44870dcf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44870dcf

Branch: refs/heads/YARN-2139
Commit: 44870dcf41697de75bfb0389282c3087c875e420
Parents: bbd6a32
Author: Haohui Mai <wh...@apache.org>
Authored: Wed Dec 10 12:46:42 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Wed Dec 10 12:46:42 2014 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../apache/hadoop/metrics/file/FileContext.java | 159 -------------------
 2 files changed, 3 insertions(+), 159 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/44870dcf/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 53004ce..6242cee 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -560,6 +560,9 @@ Release 2.7.0 - UNRELEASED
 
     HADOOP-10482. Fix various findbugs warnings in hadoop-common. (wheat9)
 
+    HADOOP-11388. Remove deprecated o.a.h.metrics.file.FileContext.
+    (Li Lu via wheat9)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44870dcf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/file/FileContext.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/file/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/file/FileContext.java
deleted file mode 100644
index fcbe7c4..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/file/FileContext.java
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * FileContext.java
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.metrics.file;
-
-import java.io.BufferedOutputStream;
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.PrintWriter;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.metrics.ContextFactory;
-import org.apache.hadoop.metrics.spi.AbstractMetricsContext;
-import org.apache.hadoop.metrics.spi.OutputRecord;
-
-/**
- * Metrics context for writing metrics to a file.<p/>
- *
- * This class is configured by setting ContextFactory attributes which in turn
- * are usually configured through a properties file.  All the attributes are
- * prefixed by the contextName. For example, the properties file might contain:
- * <pre>
- * myContextName.fileName=/tmp/metrics.log
- * myContextName.period=5
- * </pre>
- * @see org.apache.hadoop.metrics2.sink.FileSink for metrics 2.0.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-@Deprecated
-public class FileContext extends AbstractMetricsContext {
-    
-  /* Configuration attribute names */
-  @InterfaceAudience.Private
-  protected static final String FILE_NAME_PROPERTY = "fileName";
-  @InterfaceAudience.Private
-  protected static final String PERIOD_PROPERTY = "period";
-    
-  private File file = null;              // file for metrics to be written to
-  private PrintWriter writer = null;
-    
-  /** Creates a new instance of FileContext */
-  @InterfaceAudience.Private
-  public FileContext() {}
-    
-  @Override
-  @InterfaceAudience.Private
-  public void init(String contextName, ContextFactory factory) {
-    super.init(contextName, factory);
-        
-    String fileName = getAttribute(FILE_NAME_PROPERTY);
-    if (fileName != null) {
-      file = new File(fileName);
-    }
-        
-    parseAndSetPeriod(PERIOD_PROPERTY);
-  }
-
-  /**
-   * Returns the configured file name, or null.
-   */
-  @InterfaceAudience.Private
-  public String getFileName() {
-    if (file == null) {
-      return null;
-    } else {
-      return file.getName();
-    }
-  }
-    
-  /**
-   * Starts or restarts monitoring, by opening in append-mode, the
-   * file specified by the <code>fileName</code> attribute,
-   * if specified. Otherwise the data will be written to standard
-   * output.
-   */
-  @Override
-  @InterfaceAudience.Private
-  public void startMonitoring()
-    throws IOException 
-  {
-    if (file == null) {
-      writer = new PrintWriter(new BufferedOutputStream(System.out));
-    } else {
-      writer = new PrintWriter(new FileWriter(file, true));
-    }
-    super.startMonitoring();
-  }
-    
-  /**
-   * Stops monitoring, closing the file.
-   * @see #close()
-   */
-  @Override
-  @InterfaceAudience.Private
-  public void stopMonitoring() {
-    super.stopMonitoring();
-        
-    if (writer != null) {
-      writer.close();
-      writer = null;
-    }
-  }
-    
-  /**
-   * Emits a metrics record to a file.
-   */
-  @Override
-  @InterfaceAudience.Private
-  public void emitRecord(String contextName, String recordName, OutputRecord outRec) {
-    writer.print(contextName);
-    writer.print(".");
-    writer.print(recordName);
-    String separator = ": ";
-    for (String tagName : outRec.getTagNames()) {
-      writer.print(separator);
-      separator = ", ";
-      writer.print(tagName);
-      writer.print("=");
-      writer.print(outRec.getTag(tagName));
-    }
-    for (String metricName : outRec.getMetricNames()) {
-      writer.print(separator);
-      separator = ", ";
-      writer.print(metricName);
-      writer.print("=");
-      writer.print(outRec.getMetric(metricName));
-    }
-    writer.println();
-  }
-    
-  /**
-   * Flushes the output writer, forcing updates to disk.
-   */
-  @Override
-  @InterfaceAudience.Private
-  public void flush() {
-    writer.flush();
-  }
-}
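
The Javadoc of the class deleted above already pointed at org.apache.hadoop.metrics2.sink.FileSink as the metrics2 replacement. A hedged sketch of the equivalent wiring in hadoop-metrics2.properties (the daemon prefix and file name below are illustrative; check the hadoop-metrics2.properties shipped with the release for the prefixes your daemons actually use):

  *.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
  *.period=10
  namenode.sink.file.filename=namenode-metrics.out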


[47/50] [abbrv] hadoop git commit: MAPREDUCE-4879. TeraOutputFormat may overwrite an existing output directory. (gera)

Posted by ka...@apache.org.
MAPREDUCE-4879. TeraOutputFormat may overwrite an existing output directory. (gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25a04402
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25a04402
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25a04402

Branch: refs/heads/YARN-2139
Commit: 25a04402389dce0222938df4375d4545f8c1f34f
Parents: cbfb996
Author: Gera Shegalov <ge...@apache.org>
Authored: Sat Dec 13 17:48:42 2014 -0800
Committer: Gera Shegalov <ge...@apache.org>
Committed: Sat Dec 13 17:52:50 2014 -0800

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt            |  3 ++
 .../hadoop/examples/terasort/TeraGen.java       |  4 --
 .../examples/terasort/TeraOutputFormat.java     | 27 ++++++++++++-
 .../hadoop/examples/terasort/TestTeraSort.java  | 42 ++++++++++++++------
 4 files changed, 59 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25a04402/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index ee24857..a6475b1 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -273,6 +273,9 @@ Release 2.7.0 - UNRELEASED
     MAPREDUCE-6160. Potential NullPointerException in MRClientProtocol
     interface implementation. (Rohith via jlowe)
 
+    MAPREDUCE-4879. TeraOutputFormat may overwrite an existing output
+    directory. (gera)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/25a04402/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
index 7e67934..e8b6503 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
@@ -289,10 +289,6 @@ public class TeraGen extends Configured implements Tool {
     }
     setNumberOfRows(job, parseHumanLong(args[0]));
     Path outputDir = new Path(args[1]);
-    if (outputDir.getFileSystem(getConf()).exists(outputDir)) {
-      throw new IOException("Output directory " + outputDir + 
-                            " already exists.");
-    }
     FileOutputFormat.setOutputPath(job, outputDir);
     job.setJobName("TeraGen");
     job.setJarByClass(TeraGen.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/25a04402/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
index 872e719..867f33e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java
@@ -20,10 +20,13 @@ package org.apache.hadoop.examples.terasort;
 
 import java.io.IOException;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.FileAlreadyExistsException;
 import org.apache.hadoop.mapred.InvalidJobConfException;
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.OutputCommitter;
@@ -87,9 +90,31 @@ public class TeraOutputFormat extends FileOutputFormat<Text,Text> {
       throw new InvalidJobConfException("Output directory not set in JobConf.");
     }
 
+    final Configuration jobConf = job.getConfiguration();
+
     // get delegation token for outDir's file system
     TokenCache.obtainTokensForNamenodes(job.getCredentials(),
-        new Path[] { outDir }, job.getConfiguration());
+        new Path[] { outDir }, jobConf);
+
+    final FileSystem fs = outDir.getFileSystem(jobConf);
+
+    if (fs.exists(outDir)) {
+      // existing output dir is considered empty iff its only content is the
+      // partition file.
+      //
+      final FileStatus[] outDirKids = fs.listStatus(outDir);
+      boolean empty = false;
+      if (outDirKids != null && outDirKids.length == 1) {
+        final FileStatus st = outDirKids[0];
+        final String fname = st.getPath().getName();
+        empty =
+          !st.isDirectory() && TeraInputFormat.PARTITION_FILENAME.equals(fname);
+      }
+      if (TeraSort.getUseSimplePartitioner(job) || !empty) {
+        throw new FileAlreadyExistsException("Output directory " + outDir
+            + " already exists");
+      }
+    }
   }
 
   public RecordWriter<Text,Text> getRecordWriter(TaskAttemptContext job

http://git-wip-us.apache.org/repos/asf/hadoop/blob/25a04402/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/terasort/TestTeraSort.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/terasort/TestTeraSort.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/terasort/TestTeraSort.java
index 4a11c9a..1956872 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/terasort/TestTeraSort.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/terasort/TestTeraSort.java
@@ -20,17 +20,19 @@ package org.apache.hadoop.examples.terasort;
 import java.io.File;
 import java.io.IOException;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.FileAlreadyExistsException;
 import org.apache.hadoop.mapred.HadoopTestCase;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.Ignore;
-@Ignore
 public class TestTeraSort extends HadoopTestCase {
+  private static Log LOG = LogFactory.getLog(TestTeraSort.class);
   
   public TestTeraSort()
       throws IOException {
-    super(CLUSTER_MR, DFS_FS, 1, 1);
+    super(LOCAL_MR, LOCAL_FS, 1, 1);
   }
 
   protected void tearDown() throws Exception {
@@ -45,42 +47,58 @@ public class TestTeraSort extends HadoopTestCase {
   private static final Path SORT_INPUT_PATH = new Path(TEST_DIR, "sortin");
   private static final Path SORT_OUTPUT_PATH = new Path(TEST_DIR, "sortout");
   private static final Path TERA_OUTPUT_PATH = new Path(TEST_DIR, "validate");
-  private static final String NUM_ROWS = "100"; 
+  private static final String NUM_ROWS = "100";
 
-  private void runTeraGen(Configuration conf, Path sortInput) 
+  private void runTeraGen(Configuration conf, Path sortInput)
       throws Exception {
     String[] genArgs = {NUM_ROWS, sortInput.toString()};
-    
+
     // Run TeraGen
     assertEquals(ToolRunner.run(conf, new TeraGen(), genArgs), 0);
   }
-  
+
   private void runTeraSort(Configuration conf,
       Path sortInput, Path sortOutput) throws Exception {
 
     // Setup command-line arguments to 'sort'
     String[] sortArgs = {sortInput.toString(), sortOutput.toString()};
-    
+
     // Run Sort
     assertEquals(ToolRunner.run(conf, new TeraSort(), sortArgs), 0);
   }
-  
-  private void runTeraValidator(Configuration job, 
-                                       Path sortOutput, Path valOutput) 
+
+  private void runTeraValidator(Configuration job,
+                                       Path sortOutput, Path valOutput)
   throws Exception {
     String[] svArgs = {sortOutput.toString(), valOutput.toString()};
 
     // Run Tera-Validator
     assertEquals(ToolRunner.run(job, new TeraValidate(), svArgs), 0);
   }
-  
+
   public void testTeraSort() throws Exception {
     // Run TeraGen to generate input for 'terasort'
     runTeraGen(createJobConf(), SORT_INPUT_PATH);
 
+    // Run teragen again to check for FAE
+    try {
+      runTeraGen(createJobConf(), SORT_INPUT_PATH);
+      fail("Teragen output overwritten!");
+    } catch (FileAlreadyExistsException fae) {
+      LOG.info("Expected exception: ", fae);
+    }
+
     // Run terasort
     runTeraSort(createJobConf(), SORT_INPUT_PATH, SORT_OUTPUT_PATH);
 
+    // Run terasort again to check for FAE
+    try {
+      runTeraSort(createJobConf(), SORT_INPUT_PATH, SORT_OUTPUT_PATH);
+      fail("Terasort output overwritten!");
+    } catch (FileAlreadyExistsException fae) {
+      LOG.info("Expected exception: ", fae);
+    }
+
     // Run tera-validator to check if sort worked correctly
     runTeraValidator(createJobConf(), SORT_OUTPUT_PATH,
       TERA_OUTPUT_PATH);


[39/50] [abbrv] hadoop git commit: HDFS-7514. TestTextCommand fails on Windows. (Arpit Agarwal)

Posted by ka...@apache.org.
HDFS-7514. TestTextCommand fails on Windows. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7784b108
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7784b108
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7784b108

Branch: refs/heads/YARN-2139
Commit: 7784b10808c2146cde8025d56e80f042ec3581c6
Parents: 46612c7
Author: arp <ar...@apache.org>
Authored: Fri Dec 12 14:27:50 2014 -0800
Committer: arp <ar...@apache.org>
Committed: Fri Dec 12 14:27:50 2014 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                       | 2 ++
 .../test/java/org/apache/hadoop/fs/shell/TestHdfsTextCommand.java | 3 +--
 2 files changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7784b108/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9cd5b05..d635400 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -582,6 +582,8 @@ Release 2.7.0 - UNRELEASED
     HDFS-7517. Remove redundant non-null checks in FSNamesystem#
     getBlockLocations. (wheat9)
 
+    HDFS-7514. TestTextCommand fails on Windows. (Arpit Agarwal)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7784b108/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/TestHdfsTextCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/TestHdfsTextCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/TestHdfsTextCommand.java
index f589d7e..76c32bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/TestHdfsTextCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/TestHdfsTextCommand.java
@@ -43,8 +43,7 @@ import org.junit.Test;
  * by the Text command.
  */
 public class TestHdfsTextCommand {
-  private static final String TEST_ROOT_DIR =
-    System.getProperty("test.build.data", "build/test/data/") + "/testText";
+  private static final String TEST_ROOT_DIR = "/test/data/testText";
   private static final Path AVRO_FILENAME = new Path(TEST_ROOT_DIR, "weather.avro");
   private static MiniDFSCluster cluster;
   private static FileSystem fs;


[44/50] [abbrv] hadoop git commit: YARN-2950. Change message to mandate, not suggest JS requirement on UI. Contributed by Dustin Cote.

Posted by ka...@apache.org.
YARN-2950. Change message to mandate, not suggest JS requirement on UI. Contributed by Dustin Cote.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e37bbc8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e37bbc8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e37bbc8

Branch: refs/heads/YARN-2139
Commit: 0e37bbc8e3f8e96acd96522face2f4bb01584cb4
Parents: fa7b924
Author: Harsh J <ha...@cloudera.com>
Authored: Sat Dec 13 07:10:11 2014 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Sat Dec 13 07:10:40 2014 +0530

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                                   | 3 +++
 .../main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java    | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e37bbc8/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index cd0bf7c..af29b70 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -67,6 +67,9 @@ Release 2.7.0 - UNRELEASED
 
   IMPROVEMENTS
 
+    YARN-2950. Change message to mandate, not suggest JS requirement on UI.
+    (Dustin Cote via harsh)
+
     YARN-2891. Failed Container Executor does not provide a clear error
     message. (Dustin Cote via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e37bbc8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
index 7c311bc..6a64d1c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
@@ -91,7 +91,8 @@ public class JQueryUI extends HtmlBlock {
   public static void jsnotice(HTML html) {
     html.
       div("#jsnotice.ui-state-error").
-          _("This page works best with javascript enabled.")._();
+          _("This page will not function without javascript enabled."
+            + " Please enable javascript on your browser.")._();
     html.
       script().$type("text/javascript").
         _("$('#jsnotice').hide();")._();


[24/50] [abbrv] hadoop git commit: HDFS-7475. Make TestLazyPersistFiles#testLazyPersistBlocksAreSaved deterministic. (Contributed by Xiaoyu Yao)

Posted by ka...@apache.org.
HDFS-7475. Make TestLazyPersistFiles#testLazyPersistBlocksAreSaved deterministic. (Contributed by Xiaoyu Yao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a44db48
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a44db48
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a44db48

Branch: refs/heads/YARN-2139
Commit: 9a44db48b4bfc097284f68a0576c058a0fd167bf
Parents: 92916ae
Author: arp <ar...@apache.org>
Authored: Wed Dec 10 18:24:22 2014 -0800
Committer: arp <ar...@apache.org>
Committed: Wed Dec 10 18:24:22 2014 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../fsdataset/impl/LazyPersistTestCase.java     | 44 ++++++++++++++++++++
 .../fsdataset/impl/TestLazyPersistFiles.java    | 32 +-------------
 3 files changed, 48 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a44db48/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1db358f..7b4e0c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -567,6 +567,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-5578. [JDK8] Fix Javadoc errors caused by incorrect or illegal tags
     in doc comments. (Andrew Purtell via wheat9)
 
+    HDFS-7475. Make TestLazyPersistFiles#testLazyPersistBlocksAreSaved
+    deterministic. (Xiaoyu Yao via Arpit Agarwal)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a44db48/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
index c762849..2de5bb7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
@@ -50,6 +50,8 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
+import java.util.HashSet;
+import java.util.Set;
 import java.util.UUID;
 
 import static org.apache.hadoop.fs.CreateFlag.CREATE;
@@ -131,6 +133,48 @@ public abstract class LazyPersistTestCase {
     return locatedBlocks;
   }
 
+  /**
+   * Make sure at least one non-transient volume has a saved copy of the replica.
+   * The check polls in a loop until every located block has a persisted copy;
+   * the loop itself is unbounded, so callers of ensureLazyPersistBlocksAreSaved
+   * rely on the test timeout and see either a successful pass or a timeout failure.
+   */
+  protected final void ensureLazyPersistBlocksAreSaved(
+      LocatedBlocks locatedBlocks) throws IOException, InterruptedException {
+    final String bpid = cluster.getNamesystem().getBlockPoolId();
+    List<? extends FsVolumeSpi> volumes =
+      cluster.getDataNodes().get(0).getFSDataset().getVolumes();
+    final Set<Long> persistedBlockIds = new HashSet<Long>();
+
+    while (persistedBlockIds.size() < locatedBlocks.getLocatedBlocks().size()) {
+      // Take 1 second sleep before each verification iteration
+      Thread.sleep(1000);
+
+      for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
+        for (FsVolumeSpi v : volumes) {
+          if (v.isTransientStorage()) {
+            continue;
+          }
+
+          FsVolumeImpl volume = (FsVolumeImpl) v;
+          File lazyPersistDir = volume.getBlockPoolSlice(bpid).getLazypersistDir();
+
+          long blockId = lb.getBlock().getBlockId();
+          File targetDir =
+            DatanodeUtil.idToBlockDir(lazyPersistDir, blockId);
+          File blockFile = new File(targetDir, lb.getBlock().getBlockName());
+          if (blockFile.exists()) {
+            // Found a persisted copy for this block and added to the Set
+            persistedBlockIds.add(blockId);
+          }
+        }
+      }
+    }
+
+    // We should have found a persisted copy for each located block.
+    assertThat(persistedBlockIds.size(), is(locatedBlocks.getLocatedBlocks().size()));
+  }
+
   protected final void makeRandomTestFile(Path path, long length,
       boolean isLazyPersist, long seed) throws IOException {
     DFSTestUtil.createFile(fs, path, isLazyPersist, BUFFER_LENGTH, length,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a44db48/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
index 771609c..49d3c6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
@@ -304,37 +304,7 @@ public class TestLazyPersistFiles extends LazyPersistTestCase {
 
     // Make sure that there is a saved copy of the replica on persistent
     // storage.
-    final String bpid = cluster.getNamesystem().getBlockPoolId();
-    List<? extends FsVolumeSpi> volumes =
-        cluster.getDataNodes().get(0).getFSDataset().getVolumes();
-
-    final Set<Long> persistedBlockIds = new HashSet<Long>();
-
-    // Make sure at least one non-transient volume has a saved copy of
-    // the replica.
-    for (FsVolumeSpi v : volumes) {
-      if (v.isTransientStorage()) {
-        continue;
-      }
-
-      FsVolumeImpl volume = (FsVolumeImpl) v;
-      File lazyPersistDir = volume.getBlockPoolSlice(bpid).getLazypersistDir();
-
-      for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
-        File targetDir = DatanodeUtil.idToBlockDir(lazyPersistDir, lb.getBlock().getBlockId());
-        File blockFile = new File(targetDir, lb.getBlock().getBlockName());
-        if (blockFile.exists()) {
-          // Found a persisted copy for this block!
-          boolean added = persistedBlockIds.add(lb.getBlock().getBlockId());
-          assertThat(added, is(true));
-        } else {
-          LOG.error(blockFile + " not found");
-        }
-      }
-    }
-
-    // We should have found a persisted copy for each located block.
-    assertThat(persistedBlockIds.size(), is(locatedBlocks.getLocatedBlocks().size()));
+    ensureLazyPersistBlocksAreSaved(locatedBlocks);
   }
 
   /**


[09/50] [abbrv] hadoop git commit: HADOOP-11378. Fix new findbugs warnings in hadoop-kms. Contributed by Li Lu.

Posted by ka...@apache.org.
HADOOP-11378. Fix new findbugs warnings in hadoop-kms. Contributed by Li Lu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13406175
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13406175
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13406175

Branch: refs/heads/YARN-2139
Commit: 1340617535cd3300929aa29fa2a8b12afc464e89
Parents: 6df457a
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Dec 9 13:10:03 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Tue Dec 9 13:10:03 2014 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                  | 2 ++
 .../org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java   | 4 +++-
 2 files changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13406175/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index e6b44e9..40aab85 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -550,6 +550,8 @@ Release 2.7.0 - UNRELEASED
 
     HADOOP-11379. Fix new findbugs warnings in hadoop-auth*. (Li Lu via wheat9)
 
+    HADOOP-11378. Fix new findbugs warnings in hadoop-kms. (Li Lu via wheat9)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13406175/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java
index 3674e7a..31fac9f 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java
@@ -32,6 +32,7 @@ import java.io.OutputStreamWriter;
 import java.io.Writer;
 import java.lang.annotation.Annotation;
 import java.lang.reflect.Type;
+import java.nio.charset.Charset;
 import java.util.List;
 import java.util.Map;
 
@@ -62,7 +63,8 @@ public class KMSJSONWriter implements MessageBodyWriter<Object> {
       Annotation[] annotations, MediaType mediaType,
       MultivaluedMap<String, Object> stringObjectMultivaluedMap,
       OutputStream outputStream) throws IOException, WebApplicationException {
-    Writer writer = new OutputStreamWriter(outputStream);
+    Writer writer = new OutputStreamWriter(outputStream, Charset
+        .forName("UTF-8"));
     ObjectMapper jsonMapper = new ObjectMapper();
     jsonMapper.writerWithDefaultPrettyPrinter().writeValue(writer, obj);
   }
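
The findbugs warning fixed here is about relying on the platform default charset. A minimal standalone sketch of the same pattern using only JDK classes (not the KMS code itself); StandardCharsets.UTF_8 is equivalent to Charset.forName("UTF-8") without the runtime lookup:

    import java.io.IOException;
    import java.io.OutputStream;
    import java.io.OutputStreamWriter;
    import java.io.Writer;
    import java.nio.charset.StandardCharsets;

    public class ExplicitCharsetWrite {
      // new OutputStreamWriter(out) would pick up the JVM default charset,
      // which is what findbugs flags; pinning UTF-8 makes the output
      // identical on every platform.
      static void writeUtf8(OutputStream out, String payload) throws IOException {
        Writer writer = new OutputStreamWriter(out, StandardCharsets.UTF_8);
        try {
          writer.write(payload);
        } finally {
          writer.close();
        }
      }
    }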


[25/50] [abbrv] hadoop git commit: MAPREDUCE-5420. Remove mapreduce.task.tmp.dir from mapred-default.xml. Contributed by James Carman. (harsh)

Posted by ka...@apache.org.
MAPREDUCE-5420. Remove mapreduce.task.tmp.dir from mapred-default.xml. Contributed by James Carman. (harsh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb99f433
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb99f433
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb99f433

Branch: refs/heads/YARN-2139
Commit: cb99f43305bd1577d4ba9527d237ac6cdb9ae730
Parents: 9a44db4
Author: Harsh J <ha...@cloudera.com>
Authored: Mon Dec 8 17:34:39 2014 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Thu Dec 11 09:45:49 2014 +0530

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt            |  3 ++
 .../apache/hadoop/mapreduce/MRJobConfig.java    |  2 --
 .../hadoop/mapreduce/util/ConfigUtil.java       |  2 --
 .../src/main/resources/mapred-default.xml       | 12 --------
 .../resources/job_1329348432655_0001_conf.xml   |  1 -
 .../hadoop/mapred/TestMiniMRChildTask.java      | 31 +-------------------
 6 files changed, 4 insertions(+), 47 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb99f433/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index c757d40..bbab097 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -235,6 +235,9 @@ Release 2.7.0 - UNRELEASED
 
   IMPROVEMENTS
 
+    MAPREDUCE-5420. Remove mapreduce.task.tmp.dir from mapred-default.xml
+    (James Carman via harsh)
+
     MAPREDUCE-5932. Provide an option to use a dedicated reduce-side shuffle
     log (Gera Shegalov via jlowe)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb99f433/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index 230361c..915353b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -166,8 +166,6 @@ public interface MRJobConfig {
 
   public static final String PRESERVE_FILES_PATTERN = "mapreduce.task.files.preserve.filepattern";
 
-  public static final String TASK_TEMP_DIR = "mapreduce.task.tmp.dir";
-
   public static final String TASK_DEBUGOUT_LINES = "mapreduce.task.debugout.lines";
 
   public static final String RECORDS_BEFORE_PROGRESS = "mapreduce.task.merge.progress.records";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb99f433/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
index 8c7952b..b1756ce 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
@@ -274,8 +274,6 @@ public class ConfigUtil {
         MRJobConfig.PRESERVE_FAILED_TASK_FILES),
       new DeprecationDelta("keep.task.files.pattern",
         MRJobConfig.PRESERVE_FILES_PATTERN),
-      new DeprecationDelta("mapred.child.tmp",
-        MRJobConfig.TASK_TEMP_DIR),
       new DeprecationDelta("mapred.debug.out.lines",
         MRJobConfig.TASK_DEBUGOUT_LINES),
       new DeprecationDelta("mapred.merge.recordsBeforeProgress",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb99f433/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 43ddb13..00a89c9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -320,18 +320,6 @@
 </property>
 
 <property>
-  <name>mapreduce.task.tmp.dir</name>
-  <value>./tmp</value>
-  <description> To set the value of tmp directory for map and reduce tasks.
-  If the value is an absolute path, it is directly assigned. Otherwise, it is
-  prepended with task's working directory. The java tasks are executed with
-  option -Djava.io.tmpdir='the absolute path of the tmp dir'. Pipes and
-  streaming are set with environment variable,
-   TMPDIR='the absolute path of the tmp dir'
-  </description>
-</property>
-
-<property>
   <name>mapreduce.map.log.level</name>
   <value>INFO</value>
   <description>The logging level for the map task. The allowed levels are:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb99f433/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
index bd9c9c5..d886e89 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
@@ -166,7 +166,6 @@
 <property><!--Loaded from mapred-default.xml--><name>mapreduce.job.end-notification.max.attempts</name><value>5</value></property>
 <property><!--Loaded from job.xml--><name>mapreduce.jobhistory.max-age-ms</name><value>10000000000</value></property>
 <property><!--Loaded from job.xml--><name>yarn.resourcemanager.zookeeper-store.session.timeout-ms</name><value>60000</value></property>
-<property><!--Loaded from job.xml--><name>mapreduce.task.tmp.dir</name><value>./tmp</value></property>
 <property><!--Loaded from job.xml--><name>dfs.default.chunk.view.size</name><value>32768</value></property>
 <property><!--Loaded from job.xml--><name>kfs.bytes-per-checksum</name><value>512</value></property>
 <property><!--Loaded from job.xml--><name>mapreduce.reduce.memory.mb</name><value>512</value></property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb99f433/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java
index 17b5fd2..6dc1e29 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMiniMRChildTask.java
@@ -391,36 +391,7 @@ public class TestMiniMRChildTask {
       ioe.printStackTrace();           
     }
   }
-  
-  /**
-   * Tests task's temp directory.
-   * 
-   * In this test, we give different values to mapreduce.task.tmp.dir
-   * both relative and absolute. And check whether the temp directory 
-   * is created. We also check whether java.io.tmpdir value is same as 
-   * the directory specified. We create a temp file and check if is is 
-   * created in the directory specified.
-   */
-  @Test
-  public void testTaskTempDir(){
-    try {
-      JobConf conf = new JobConf(mr.getConfig());
-      
-      // intialize input, output directories
-      Path inDir = new Path("testing/wc/input");
-      Path outDir = new Path("testing/wc/output");
-      String input = "The input";
-      configure(conf, inDir, outDir, input, 
-          MapClass.class, IdentityReducer.class);
-      launchTest(conf, inDir, outDir, input);
-      
-    } catch(Exception e) {
-      e.printStackTrace();
-      fail("Exception in testing temp dir");
-      tearDown();
-    }
-  }
-
+ 
   /**
    * To test OS dependent setting of default execution path for a MapRed task.
    * Mainly that we can use MRJobConfig.DEFAULT_MAPRED_ADMIN_USER_ENV to set -
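
The property description removed above spells out how mapreduce.task.tmp.dir used to be resolved: absolute values are taken as-is, relative values are prepended with the task's working directory. A small sketch of that resolution rule in plain java.nio.file (hypothetical method and paths, not MapReduce code):

    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class TaskTmpDirResolution {
      // Absolute paths win; relative ones are resolved against the task's
      // working directory, matching the removed mapred-default.xml text.
      static Path resolveTmpDir(String configured, Path taskWorkingDir) {
        Path candidate = Paths.get(configured);
        return candidate.isAbsolute()
            ? candidate
            : taskWorkingDir.resolve(candidate).normalize();
      }

      public static void main(String[] args) {
        Path workDir = Paths.get("/var/run/task_0001");             // hypothetical task dir
        System.out.println(resolveTmpDir("./tmp", workDir));        // /var/run/task_0001/tmp
        System.out.println(resolveTmpDir("/scratch/tmp", workDir)); // /scratch/tmp
      }
    }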


[49/50] [abbrv] hadoop git commit: YARN-2356. yarn status command for non-existent application/application attempt/container is too verbose. Contributed by Sunil G.

Posted by ka...@apache.org.
YARN-2356. yarn status command for non-existent application/application
attempt/container is too verbose. Contributed by Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fae3e861
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fae3e861
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fae3e861

Branch: refs/heads/YARN-2139
Commit: fae3e8614f4f9a42904e39c51ca68b0d1e67469f
Parents: 298d09c
Author: Devaraj K <de...@apache.org>
Authored: Mon Dec 15 14:43:21 2014 +0530
Committer: Devaraj K <de...@apache.org>
Committed: Mon Dec 15 14:43:21 2014 +0530

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +
 .../hadoop/yarn/client/cli/ApplicationCLI.java  | 81 ++++++++++++++----
 .../hadoop/yarn/client/cli/TestYarnCLI.java     | 90 ++++++++++++++++++--
 3 files changed, 151 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fae3e861/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index af29b70..6e74d14 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -225,6 +225,9 @@ Release 2.7.0 - UNRELEASED
 
     YARN-2912 Jersey Tests failing with port in use. (varun saxena via stevel)
 
+    YARN-2356. yarn status command for non-existent application/application 
+    attempt/container is too verbose. (Sunil G via devaraj)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fae3e861/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
index 83d212d..b8ce94a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
@@ -41,7 +41,9 @@ import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.ContainerReport;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
 import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
+import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.Times;
@@ -152,12 +154,14 @@ public class ApplicationCLI extends YarnCLI {
         return exitCode;
       }
       if (args[0].equalsIgnoreCase(APPLICATION)) {
-        printApplicationReport(cliParser.getOptionValue(STATUS_CMD));
+        exitCode = printApplicationReport(cliParser.getOptionValue(STATUS_CMD));
       } else if (args[0].equalsIgnoreCase(APPLICATION_ATTEMPT)) {
-        printApplicationAttemptReport(cliParser.getOptionValue(STATUS_CMD));
+        exitCode = printApplicationAttemptReport(cliParser
+            .getOptionValue(STATUS_CMD));
       } else if (args[0].equalsIgnoreCase(CONTAINER)) {
-        printContainerReport(cliParser.getOptionValue(STATUS_CMD));
+        exitCode = printContainerReport(cliParser.getOptionValue(STATUS_CMD));
       }
+      return exitCode;
     } else if (cliParser.hasOption(LIST_CMD)) {
       if (args[0].equalsIgnoreCase(APPLICATION)) {
         allAppStates = false;
@@ -252,13 +256,24 @@ public class ApplicationCLI extends YarnCLI {
    * Prints the application attempt report for an application attempt id.
    * 
    * @param applicationAttemptId
+   * @return exitCode
    * @throws YarnException
    */
-  private void printApplicationAttemptReport(String applicationAttemptId)
+  private int printApplicationAttemptReport(String applicationAttemptId)
       throws YarnException, IOException {
-    ApplicationAttemptReport appAttemptReport = client
-        .getApplicationAttemptReport(ConverterUtils
-            .toApplicationAttemptId(applicationAttemptId));
+    ApplicationAttemptReport appAttemptReport = null;
+    try {
+      appAttemptReport = client.getApplicationAttemptReport(ConverterUtils
+          .toApplicationAttemptId(applicationAttemptId));
+    } catch (ApplicationNotFoundException e) {
+      sysout.println("Application for AppAttempt with id '"
+          + applicationAttemptId + "' doesn't exist in RM or Timeline Server.");
+      return -1;
+    } catch (ApplicationAttemptNotFoundException e) {
+      sysout.println("Application Attempt with id '" + applicationAttemptId
+          + "' doesn't exist in RM or Timeline Server.");
+      return -1;
+    }
     // Use PrintWriter.println, which uses correct platform line ending.
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
     PrintWriter appAttemptReportStr = new PrintWriter(baos);
@@ -282,22 +297,42 @@ public class ApplicationCLI extends YarnCLI {
       appAttemptReportStr.print(appAttemptReport.getDiagnostics());
     } else {
       appAttemptReportStr.print("Application Attempt with id '"
-          + applicationAttemptId + "' doesn't exist in History Server.");
+          + applicationAttemptId + "' doesn't exist in Timeline Server.");
+      appAttemptReportStr.close();
+      sysout.println(baos.toString("UTF-8"));
+      return -1;
     }
     appAttemptReportStr.close();
     sysout.println(baos.toString("UTF-8"));
+    return 0;
   }
 
   /**
    * Prints the container report for an container id.
    * 
    * @param containerId
+   * @return exitCode
    * @throws YarnException
    */
-  private void printContainerReport(String containerId) throws YarnException,
+  private int printContainerReport(String containerId) throws YarnException,
       IOException {
-    ContainerReport containerReport = client.getContainerReport((ConverterUtils
-        .toContainerId(containerId)));
+    ContainerReport containerReport = null;
+    try {
+      containerReport = client.getContainerReport((ConverterUtils
+          .toContainerId(containerId)));
+    } catch (ApplicationNotFoundException e) {
+      sysout.println("Application for Container with id '" + containerId
+          + "' doesn't exist in RM or Timeline Server.");
+      return -1;
+    } catch (ApplicationAttemptNotFoundException e) {
+      sysout.println("Application Attempt for Container with id '"
+          + containerId + "' doesn't exist in RM or Timeline Server.");
+      return -1;
+    } catch (ContainerNotFoundException e) {
+      sysout.println("Container with id '" + containerId
+          + "' doesn't exist in RM or Timeline Server.");
+      return -1;
+    }
     // Use PrintWriter.println, which uses correct platform line ending.
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
     PrintWriter containerReportStr = new PrintWriter(baos);
@@ -319,10 +354,14 @@ public class ApplicationCLI extends YarnCLI {
       containerReportStr.print(containerReport.getDiagnosticsInfo());
     } else {
       containerReportStr.print("Container with id '" + containerId
-          + "' doesn't exist in Hostory Server.");
+          + "' doesn't exist in Timeline Server.");
+      containerReportStr.close();
+      sysout.println(baos.toString("UTF-8"));
+      return -1;
     }
     containerReportStr.close();
     sysout.println(baos.toString("UTF-8"));
+    return 0;
   }
 
   /**
@@ -423,12 +462,20 @@ public class ApplicationCLI extends YarnCLI {
    * Prints the application report for an application id.
    * 
    * @param applicationId
+   * @return exitCode
    * @throws YarnException
    */
-  private void printApplicationReport(String applicationId)
+  private int printApplicationReport(String applicationId)
       throws YarnException, IOException {
-    ApplicationReport appReport = client.getApplicationReport(ConverterUtils
-        .toApplicationId(applicationId));
+    ApplicationReport appReport = null;
+    try {
+      appReport = client.getApplicationReport(ConverterUtils
+          .toApplicationId(applicationId));
+    } catch (ApplicationNotFoundException e) {
+      sysout.println("Application with id '" + applicationId
+          + "' doesn't exist in RM or Timeline Server.");
+      return -1;
+    }
     // Use PrintWriter.println, which uses correct platform line ending.
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
     PrintWriter appReportStr = new PrintWriter(baos);
@@ -478,9 +525,13 @@ public class ApplicationCLI extends YarnCLI {
     } else {
       appReportStr.print("Application with id '" + applicationId
           + "' doesn't exist in RM.");
+      appReportStr.close();
+      sysout.println(baos.toString("UTF-8"));
+      return -1;
     }
     appReportStr.close();
     sysout.println(baos.toString("UTF-8"));
+    return 0;
   }
 
   private String getAllValidApplicationStates() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fae3e861/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
index 194d7d1..fa81f14 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
@@ -62,7 +62,9 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.client.api.YarnClient;
+import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
 import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
+import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
 import org.apache.hadoop.yarn.util.Records;
 import org.junit.Assert;
 import org.junit.Before;
@@ -319,14 +321,12 @@ public class TestYarnCLI {
     when(client.getApplicationReport(any(ApplicationId.class))).thenThrow(
         new ApplicationNotFoundException("History file for application"
             + applicationId + " is not found"));
-    try {
-      cli.run(new String[] { "application", "-status", applicationId.toString() });
-      Assert.fail();
-    } catch (Exception ex) {
-      Assert.assertTrue(ex instanceof ApplicationNotFoundException);
-      Assert.assertEquals("History file for application"
-          + applicationId + " is not found", ex.getMessage());
-    }
+    int exitCode = cli.run(new String[] { "application", "-status",
+        applicationId.toString() });
+    verify(sysOut).println(
+        "Application with id '" + applicationId
+            + "' doesn't exist in RM or Timeline Server.");
+    Assert.assertNotSame("should return non-zero exit code.", 0, exitCode);
   }
 
   @Test
@@ -1318,6 +1318,80 @@ public class TestYarnCLI {
     Assert.assertEquals(queueInfoStr, sysOutStream.toString());
   }
 
+  @Test
+  public void testGetApplicationAttemptReportException() throws Exception {
+    ApplicationCLI cli = createAndGetAppCLI();
+    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
+    ApplicationAttemptId attemptId1 = ApplicationAttemptId.newInstance(
+        applicationId, 1);
+    when(client.getApplicationAttemptReport(attemptId1)).thenThrow(
+        new ApplicationNotFoundException("History file for application"
+            + applicationId + " is not found"));
+
+    int exitCode = cli.run(new String[] { "applicationattempt", "-status",
+        attemptId1.toString() });
+    verify(sysOut).println(
+        "Application for AppAttempt with id '" + attemptId1
+            + "' doesn't exist in RM or Timeline Server.");
+    Assert.assertNotSame("should return non-zero exit code.", 0, exitCode);
+
+    ApplicationAttemptId attemptId2 = ApplicationAttemptId.newInstance(
+        applicationId, 2);
+    when(client.getApplicationAttemptReport(attemptId2)).thenThrow(
+        new ApplicationAttemptNotFoundException(
+            "History file for application attempt" + attemptId2
+                + " is not found"));
+
+    exitCode = cli.run(new String[] { "applicationattempt", "-status",
+        attemptId2.toString() });
+    verify(sysOut).println(
+        "Application Attempt with id '" + attemptId2
+            + "' doesn't exist in RM or Timeline Server.");
+    Assert.assertNotSame("should return non-zero exit code.", 0, exitCode);
+  }
+
+  @Test
+  public void testGetContainerReportException() throws Exception {
+    ApplicationCLI cli = createAndGetAppCLI();
+    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
+    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
+        applicationId, 1);
+    long cntId = 1;
+    ContainerId containerId1 = ContainerId.newContainerId(attemptId, cntId++);
+    when(client.getContainerReport(containerId1)).thenThrow(
+        new ApplicationNotFoundException("History file for application"
+            + applicationId + " is not found"));
+
+    int exitCode = cli.run(new String[] { "container", "-status",
+        containerId1.toString() });
+    verify(sysOut).println(
+        "Application for Container with id '" + containerId1
+            + "' doesn't exist in RM or Timeline Server.");
+    Assert.assertNotSame("should return non-zero exit code.", 0, exitCode);
+    ContainerId containerId2 = ContainerId.newContainerId(attemptId, cntId++);
+    when(client.getContainerReport(containerId2)).thenThrow(
+        new ApplicationAttemptNotFoundException(
+            "History file for application attempt" + attemptId
+                + " is not found"));
+
+    exitCode = cli.run(new String[] { "container", "-status",
+        containerId2.toString() });
+    verify(sysOut).println(
+        "Application Attempt for Container with id '" + containerId2
+            + "' doesn't exist in RM or Timeline Server.");
+    Assert.assertNotSame("should return non-zero exit code.", 0, exitCode);
+
+    ContainerId containerId3 = ContainerId.newContainerId(attemptId, cntId++);
+    when(client.getContainerReport(containerId3)).thenThrow(
+        new ContainerNotFoundException("History file for container"
+            + containerId3 + " is not found"));
+    exitCode = cli.run(new String[] { "container", "-status",
+        containerId3.toString() });
+    verify(sysOut).println(
+        "Container with id '" + containerId3
+            + "' doesn't exist in RM or Timeline Server.");
+    Assert.assertNotSame("should return non-zero exit code.", 0, exitCode);
+  }
 
   private void verifyUsageInfo(YarnCLI cli) throws Exception {
     cli.setSysErrPrintStream(sysErr);
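
The patch applies the same pattern in all three report printers: catch the specific not-found exception, print a one-line message instead of letting the stack trace escape, and return a non-zero exit code. A generic sketch of that CLI pattern with hypothetical names and no YARN dependencies:

    import java.io.PrintStream;
    import java.util.Map;

    public class StatusCommand {
      private final Map<String, String> reports;  // hypothetical report lookup
      private final PrintStream sysout;

      StatusCommand(Map<String, String> reports, PrintStream sysout) {
        this.reports = reports;
        this.sysout = sysout;
      }

      // 0 on success, -1 when the id is unknown: a short message for the
      // user rather than an exception dump, mirroring the change above.
      int printReport(String id) {
        String report = reports.get(id);
        if (report == null) {
          sysout.println("Report with id '" + id + "' doesn't exist.");
          return -1;
        }
        sysout.println(report);
        return 0;
      }
    }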


[04/50] [abbrv] hadoop git commit: HADOOP-11372. Fix new findbugs warnings in mapreduce-examples. Contributed by Li Lu.

Posted by ka...@apache.org.
HADOOP-11372. Fix new findbugs warnings in mapreduce-examples. Contributed by Li Lu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be86237c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be86237c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be86237c

Branch: refs/heads/YARN-2139
Commit: be86237c09533a6691ed9eb4864643657331a11a
Parents: 74d4bfd
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Dec 9 10:48:35 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Tue Dec 9 10:49:55 2014 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt             | 3 +++
 .../src/main/java/org/apache/hadoop/examples/pi/Parser.java | 5 ++---
 .../java/org/apache/hadoop/examples/pi/math/Bellard.java    | 9 ++++++++-
 3 files changed, 13 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be86237c/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2051698..4b23471 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -542,6 +542,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11368. Fix SSLFactory truststore reloader thread leak in
     KMSClientProvider. (Arun Suresh via wang)
 
+    HADOOP-11372. Fix new findbugs warnings in mapreduce-examples.
+    (Li Lu via wheat9)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/be86237c/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Parser.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Parser.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Parser.java
index 187520a..a2db9d1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Parser.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Parser.java
@@ -151,11 +151,10 @@ public final class Parser {
   static <T extends Combinable<T>> Map<Parameter, T> combine(Map<Parameter, List<T>> m) {
     final Map<Parameter, T> combined = new TreeMap<Parameter, T>();
     for(Parameter p : Parameter.values()) {
+      //note: results would never be null due to the design of Util.combine
       final List<T> results = Util.combine(m.get(p));
       Util.out.format("%-6s => ", p); 
-      if (results == null)
-        Util.out.println("null");
-      else if (results.size() != 1) 
+      if (results.size() != 1)
         Util.out.println(results.toString().replace(", ", ",\n           "));
       else {
         final T r = results.get(0);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/be86237c/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Bellard.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Bellard.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Bellard.java
index 90b608f..d909d92 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Bellard.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Bellard.java
@@ -25,6 +25,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
+import java.util.NoSuchElementException;
 
 import org.apache.hadoop.examples.pi.Container;
 import org.apache.hadoop.examples.pi.Util;
@@ -255,7 +256,13 @@ public final class Bellard {
         public boolean hasNext() {return i < parts.length;}
         /** {@inheritDoc} */
         @Override
-        public Summation next() {return parts[i++];}
+        public Summation next() throws NoSuchElementException {
+          if (hasNext()) {
+            return parts[i++];
+          } else {
+            throw new NoSuchElementException("Sum's iterator does not have next!");
+          }
+        }
         /** Unsupported */
         @Override
         public void remove() {throw new UnsupportedOperationException();}
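
The findbugs complaint addressed in Bellard.java is the standard iterator-contract one: next() must signal exhaustion with NoSuchElementException rather than falling through to an array index error. A self-contained sketch of that contract on a plain array-backed iterator:

    import java.util.Iterator;
    import java.util.NoSuchElementException;

    public class ArrayIterator<T> implements Iterator<T> {
      private final T[] items;
      private int i = 0;

      public ArrayIterator(T[] items) {
        this.items = items;
      }

      @Override
      public boolean hasNext() {
        return i < items.length;
      }

      @Override
      public T next() {
        // Without this guard an exhausted iterator would throw
        // ArrayIndexOutOfBoundsException, violating the Iterator contract.
        if (!hasNext()) {
          throw new NoSuchElementException("iterator exhausted");
        }
        return items[i++];
      }

      @Override
      public void remove() {
        throw new UnsupportedOperationException();
      }
    }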


[48/50] [abbrv] hadoop git commit: MAPREDUCE-6194. Bubble up final exception in failures during creation of output collectors. Contributed by Varun Saxena.

Posted by ka...@apache.org.
MAPREDUCE-6194. Bubble up final exception in failures during creation of output collectors. Contributed by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/298d09c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/298d09c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/298d09c9

Branch: refs/heads/YARN-2139
Commit: 298d09c9b583088f364038adcb1edf1eb1c2c196
Parents: 25a0440
Author: Harsh J <ha...@cloudera.com>
Authored: Mon Dec 15 14:26:22 2014 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Mon Dec 15 14:26:43 2014 +0530

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt                            | 3 +++
 .../src/main/java/org/apache/hadoop/mapred/MapTask.java         | 5 ++++-
 2 files changed, 7 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/298d09c9/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index a6475b1..191526a 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -235,6 +235,9 @@ Release 2.7.0 - UNRELEASED
 
   IMPROVEMENTS
 
+    MAPREDUCE-6194. Bubble up final exception in failures during creation
+    of output collectors (Varun Saxena via harsh)
+
     MAPREDUCE-5420. Remove mapreduce.task.tmp.dir from mapred-default.xml
     (James Carman via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/298d09c9/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
index 75b4141..1a4901b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
@@ -387,6 +387,7 @@ public class MapTask extends Task {
     Class<?>[] collectorClasses = job.getClasses(
       JobContext.MAP_OUTPUT_COLLECTOR_CLASS_ATTR, MapOutputBuffer.class);
     int remainingCollectors = collectorClasses.length;
+    Exception lastException = null;
     for (Class clazz : collectorClasses) {
       try {
         if (!MapOutputCollector.class.isAssignableFrom(clazz)) {
@@ -406,10 +407,12 @@ public class MapTask extends Task {
         if (--remainingCollectors > 0) {
           msg += " (" + remainingCollectors + " more collector(s) to try)";
         }
+        lastException = e;
         LOG.warn(msg, e);
       }
     }
-    throw new IOException("Unable to initialize any output collector");
+    throw new IOException("Initialization of all the collectors failed. " +
+      "Error in last collector was :" + lastException.getMessage(), lastException);
   }
 
   @SuppressWarnings("unchecked")
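
The change keeps a reference to the last failure while each collector class is tried in turn, then attaches it as the cause of the final IOException so the underlying error is not swallowed. A generic sketch of that fallback-with-cause pattern (hypothetical names, not the MapReduce code):

    import java.io.IOException;
    import java.util.List;
    import java.util.concurrent.Callable;

    public class FallbackFactory {
      // Try each candidate in order; if all fail, surface the last exception
      // as the cause instead of a bare "nothing worked" message.
      static <T> T createFirstWorking(List<Callable<T>> candidates) throws IOException {
        Exception lastException = null;
        for (Callable<T> candidate : candidates) {
          try {
            return candidate.call();
          } catch (Exception e) {
            lastException = e;  // remember why this candidate failed
          }
        }
        throw new IOException("Initialization of all the candidates failed. "
            + "Error in last candidate was: "
            + (lastException == null ? "(none tried)" : lastException.getMessage()),
            lastException);
      }
    }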


[45/50] [abbrv] hadoop git commit: HADOOP-11394. hadoop-aws documentation missing. Contributed by Chris Nauroth.

Posted by ka...@apache.org.
HADOOP-11394. hadoop-aws documentation missing. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9458cd5b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9458cd5b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9458cd5b

Branch: refs/heads/YARN-2139
Commit: 9458cd5bce20358e31c0cfb594bc545c7824b10d
Parents: 0e37bbc
Author: cnauroth <cn...@apache.org>
Authored: Fri Dec 12 23:29:11 2014 -0800
Committer: cnauroth <cn...@apache.org>
Committed: Fri Dec 12 23:29:11 2014 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |   2 +
 .../site/markdown/tools/hadoop-aws/index.md     | 417 -------------------
 .../src/site/markdown/tools/hadoop-aws/index.md | 417 +++++++++++++++++++
 3 files changed, 419 insertions(+), 417 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9458cd5b/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 1e59395..729a456 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -580,6 +580,8 @@ Release 2.7.0 - UNRELEASED
 
     HADOOP-11389. Clean up byte to string encoding issues in hadoop-common.
     (wheat9)
+
+    HADOOP-11394. hadoop-aws documentation missing. (cnauroth)
     
 Release 2.6.0 - 2014-11-18
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9458cd5b/hadoop-tools/hadoop-aws/src/main/site/markdown/tools/hadoop-aws/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/main/site/markdown/tools/hadoop-aws/index.md
deleted file mode 100644
index 4a1956a..0000000
--- a/hadoop-tools/hadoop-aws/src/main/site/markdown/tools/hadoop-aws/index.md
+++ /dev/null
@@ -1,417 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-# Hadoop-AWS module: Integration with Amazon Web Services
-
-The `hadoop-aws` module provides support for AWS integration. The generated
-JAR file, `hadoop-aws.jar` also declares a transitive dependency on all
-external artifacts which are needed for this support —enabling downstream
-applications to easily use this support.
-
-Features
-
-1. The "classic" `s3:` filesystem for storing objects in Amazon S3 Storage
-1. The second-generation, `s3n:` filesystem, making it easy to share
-data between hadoop and other applications via the S3 object store
-1. The third generation, `s3a:` filesystem. Designed to be a switch-in
-replacement for `s3n:`, this filesystem binding supports larger files and promises
-higher performance.
-
-The specifics of using these filesystems are documented below.
-
-## Warning: Object Stores are not filesystems.
-
-Amazon S3 is an example of "an object store". In order to achieve scalability
-and especially high availability, S3 has —as many other cloud object stores have
-done— relaxed some of the constraints which classic "POSIX" filesystems promise.
-
-Specifically
-
-1. Files that are newly created from the Hadoop Filesystem APIs may not be
-immediately visible.
-2. File delete and update operations may not immediately propagate. Old
-copies of the file may exist for an indeterminate time period.
-3. Directory operations: `delete()` and `rename()` are implemented by
-recursive file-by-file operations. They take time at least proportional to
-the number of files, during which time partial updates may be visible. If
-the operations are interrupted, the filesystem is left in an intermediate state.
-
-For further discussion on these topics, please consult
-[/filesystem](The Hadoop FileSystem API Definition).
-
-## Warning #2: your AWS credentials are valuable
-
-Your AWS credentials not only pay for services, they offer read and write
-access to the data. Anyone with the credentials can not only read your datasets
-—they can delete them.
-
-Do not inadvertently share these credentials through means such as
-1. Checking in Hadoop configuration files containing the credentials.
-1. Logging them to a console, as they invariably end up being seen.
-
-If you do any of these: change your credentials immediately!
-
-
-## S3
-
-### Authentication properties
-
-    <property>
-      <name>fs.s3.awsAccessKeyId</name>
-      <description>AWS access key ID</description>
-    </property>
-
-    <property>
-      <name>fs.s3.awsSecretAccessKey</name>
-      <description>AWS secret key</description>
-    </property>
-
-
-## S3N
-
-### Authentication properties
-
-    <property>
-      <name>fs.s3n.awsAccessKeyId</name>
-      <description>AWS access key ID</description>
-    </property>
-
-    <property>
-      <name>fs.s3n.awsSecretAccessKey</name>
-      <description>AWS secret key</description>
-    </property>
-
-### Other properties
-
-
-    <property>
-      <name>fs.s3n.block.size</name>
-      <value>67108864</value>
-      <description>Block size to use when reading files using the native S3
-      filesystem (s3n: URIs).</description>
-    </property>
-
-    <property>
-      <name>fs.s3n.multipart.uploads.enabled</name>
-      <value>false</value>
-      <description>Setting this property to true enables multiple uploads to
-      native S3 filesystem. When uploading a file, it is split into blocks
-      if the size is larger than fs.s3n.multipart.uploads.block.size.
-      </description>
-    </property>
-
-    <property>
-      <name>fs.s3n.multipart.uploads.block.size</name>
-      <value>67108864</value>
-      <description>The block size for multipart uploads to native S3 filesystem.
-      Default size is 64MB.
-      </description>
-    </property>
-
-    <property>
-      <name>fs.s3n.multipart.copy.block.size</name>
-      <value>5368709120</value>
-      <description>The block size for multipart copy in native S3 filesystem.
-      Default size is 5GB.
-      </description>
-    </property>
-
-    <property>
-      <name>fs.s3n.server-side-encryption-algorithm</name>
-      <value></value>
-      <description>Specify a server-side encryption algorithm for S3.
-      The default is NULL, and the only other currently allowable value is AES256.
-      </description>
-    </property>
-
-## S3A
-
-
-### Authentication properties
-
-    <property>
-      <name>fs.s3a.awsAccessKeyId</name>
-      <description>AWS access key ID. Omit for Role-based authentication.</description>
-    </property>
-
-    <property>
-      <name>fs.s3a.awsSecretAccessKey</name>
-      <description>AWS secret key. Omit for Role-based authentication.</description>
-    </property>
-
-### Other properties
-
-    <property>
-      <name>fs.s3a.connection.maximum</name>
-      <value>15</value>
-      <description>Controls the maximum number of simultaneous connections to S3.</description>
-    </property>
-
-    <property>
-      <name>fs.s3a.connection.ssl.enabled</name>
-      <value>true</value>
-      <description>Enables or disables SSL connections to S3.</description>
-    </property>
-
-    <property>
-      <name>fs.s3a.attempts.maximum</name>
-      <value>10</value>
-      <description>How many times we should retry commands on transient errors.</description>
-    </property>
-
-    <property>
-      <name>fs.s3a.connection.timeout</name>
-      <value>5000</value>
-      <description>Socket connection timeout in seconds.</description>
-    </property>
-
-    <property>
-      <name>fs.s3a.paging.maximum</name>
-      <value>5000</value>
-      <description>How many keys to request from S3 when doing
-         directory listings at a time.</description>
-    </property>
-
-    <property>
-      <name>fs.s3a.multipart.size</name>
-      <value>104857600</value>
-      <description>How big (in bytes) to split upload or copy operations up into.</description>
-    </property>
-
-    <property>
-      <name>fs.s3a.multipart.threshold</name>
-      <value>2147483647</value>
-      <description>Threshold before uploads or copies use parallel multipart operations.</description>
-    </property>
-
-    <property>
-      <name>fs.s3a.acl.default</name>
-      <description>Set a canned ACL for newly created and copied objects. Value may be private,
-         public-read, public-read-write, authenticated-read, log-delivery-write,
-         bucket-owner-read, or bucket-owner-full-control.</description>
-    </property>
-
-    <property>
-      <name>fs.s3a.multipart.purge</name>
-      <value>false</value>
-      <description>True if you want to purge existing multipart uploads that may not have been
-         completed/aborted correctly</description>
-    </property>
-
-    <property>
-      <name>fs.s3a.multipart.purge.age</name>
-      <value>86400</value>
-      <description>Minimum age in seconds of multipart uploads to purge</description>
-    </property>
-
-    <property>
-      <name>fs.s3a.buffer.dir</name>
-      <value>${hadoop.tmp.dir}/s3a</value>
-      <description>Comma separated list of directories that will be used to buffer file
-        uploads to.</description>
-    </property>
-
-    <property>
-      <name>fs.s3a.impl</name>
-      <value>org.apache.hadoop.fs.s3a.S3AFileSystem</value>
-      <description>The implementation class of the S3A Filesystem</description>
-    </property>
-
-
-## Testing the S3 filesystem clients
-
-To test the S3* filesystem clients, you need to provide two files
-which pass in authentication details to the test runner
-
-1. `auth-keys.xml`
-1. `core-site.xml`
-
-These are both Hadoop XML configuration files, which must be placed into
-`hadoop-tools/hadoop-aws/src/test/resources`.
-
-
-### `auth-keys.xml`
-
-The presence of this file triggers the testing of the S3 classes.
-
-Without this file, *none of the tests in this module will be executed*
-
-The XML file must contain all the ID/key information needed to connect
-each of the filesystem clients to the object stores, and a URL for
-each filesystem for its testing.
-
-1. `test.fs.s3n.name` : the URL of the bucket for S3n tests
-1. `test.fs.s3a.name` : the URL of the bucket for S3a tests
-2. `test.fs.s3.name` : the URL of the bucket for "S3"  tests
-
-The contents of each bucket will be destroyed during the test process:
-do not use the bucket for any purpose other than testing.
-
-Example:
-
-    <configuration>
-      
-      <property>
-        <name>test.fs.s3n.name</name>
-        <value>s3n://test-aws-s3n/</value>
-      </property>
-    
-      <property>
-        <name>test.fs.s3a.name</name>
-        <value>s3a://test-aws-s3a/</value>
-      </property>
-    
-      <property>
-        <name>test.fs.s3.name</name>
-        <value>s3a://test-aws-s3/</value>
-      </property>
-  
-      <property>
-        <name>fs.s3.awsAccessKeyId</name>
-        <value>DONOTPCOMMITTHISKEYTOSCM</value>
-      </property>
-
-      <property>
-        <name>fs.s3.awsSecretAccessKey</name>
-        <value>DONOTEVERSHARETHISSECRETKEY!</value>
-      </property>
-
-      <property>
-        <name>fs.s3n.awsAccessKeyId</name>
-        <value>DONOTPCOMMITTHISKEYTOSCM</value>
-      </property>
-
-      <property>
-        <name>fs.s3n.awsSecretAccessKey</name>
-        <value>DONOTEVERSHARETHISSECRETKEY!</value>
-      </property>
-
-      <property>
-        <name>fs.s3a.awsAccessKeyId</name>
-        <description>AWS access key ID. Omit for Role-based authentication.</description>
-        <value>DONOTPCOMMITTHISKEYTOSCM</value>
-      </property>
-  
-      <property>
-        <name>fs.s3a.awsSecretAccessKey</name>
-        <description>AWS secret key. Omit for Role-based authentication.</description>
-        <value>DONOTEVERSHARETHISSECRETKEY!</value>
-      </property>
-    </configuration>
-
-## File `contract-test-options.xml`
-
-The file `hadoop-tools/hadoop-aws/src/test/resources/contract-test-options.xml`
-must be created and configured for the test filesystems.
-
-If a specific file `fs.contract.test.fs.*` test path is not defined for
-any of the filesystems, those tests will be skipped.
-
-The standard S3 authentication details must also be provided. This can be
-through copy-and-paste of the `auth-keys.xml` credentials, or it can be
-through direct XInclude inclusion.
-
-#### s3://
-
-The filesystem name must be defined in the property `fs.contract.test.fs.s3`. 
-
-
-Example:
-
-      <property>
-        <name>fs.contract.test.fs.s3</name>
-        <value>s3://test-aws-s3/</value>
-      </property>
-
-### s3n://
-
-
-In the file `src/test/resources/contract-test-options.xml`, the filesystem
-name must be defined in the property `fs.contract.test.fs.s3n`.
-The standard configuration options to define the S3N authentication details
-must also be provided.
-
-Example:
-
-      <property>
-        <name>fs.contract.test.fs.s3n</name>
-        <value>s3n://test-aws-s3n/</value>
-      </property>
-
-### s3a://
-
-
-In the file `src/test/resources/contract-test-options.xml`, the filesystem
-name must be defined in the property `fs.contract.test.fs.s3a`.
-The standard configuration options to define the S3N authentication details
-must also be provided.
-
-Example:
-
-    <property>
-      <name>fs.contract.test.fs.s3a</name>
-      <value>s3a://test-aws-s3a/</value>
-    </property>
-
-### Complete example of `contract-test-options.xml`
-
-
-
-    <?xml version="1.0"?>
-    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-    <!--
-      ~ Licensed to the Apache Software Foundation (ASF) under one
-      ~  or more contributor license agreements.  See the NOTICE file
-      ~  distributed with this work for additional information
-      ~  regarding copyright ownership.  The ASF licenses this file
-      ~  to you under the Apache License, Version 2.0 (the
-      ~  "License"); you may not use this file except in compliance
-      ~  with the License.  You may obtain a copy of the License at
-      ~
-      ~       http://www.apache.org/licenses/LICENSE-2.0
-      ~
-      ~  Unless required by applicable law or agreed to in writing, software
-      ~  distributed under the License is distributed on an "AS IS" BASIS,
-      ~  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-      ~  See the License for the specific language governing permissions and
-      ~  limitations under the License.
-      -->
-    
-    <configuration>
-    
-      <include xmlns="http://www.w3.org/2001/XInclude"
-        href="auth-keys.xml"/>
-    
-      <property>
-        <name>fs.contract.test.fs.s3</name>
-        <value>s3://test-aws-s3/</value>
-      </property>
-
-
-      <property>
-        <name>fs.contract.test.fs.s3a</name>
-        <value>s3a://test-aws-s3a/</value>
-      </property>
-
-      <property>
-        <name>fs.contract.test.fs.s3n</name>
-        <value>s3n://test-aws-s3n/</value>
-      </property>
-
-    </configuration>
-
-This example pulls in the `auth-keys.xml` file for the credentials. 
-This provides one single place to keep the keys up to date —and means
-that the file `contract-test-options.xml` does not contain any
-secret credentials itself.
\ No newline at end of file
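
The module documentation being moved here describes the s3a: client and its configuration but stops short of a usage snippet. A hedged example of reaching an S3A bucket through the standard Hadoop FileSystem API; the bucket name is a placeholder and the fs.s3a.* credentials are assumed to already be set in core-site.xml as described above:

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class S3AListing {
      public static void main(String[] args) throws Exception {
        // Credentials and fs.s3a.* tuning come from core-site.xml, as in the
        // property tables above; "example-bucket" is a placeholder.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
        for (FileStatus status : fs.listStatus(new Path("/"))) {
          System.out.println(status.getPath() + "\t" + status.getLen());
        }
        fs.close();
      }
    }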

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9458cd5b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
new file mode 100644
index 0000000..d443389
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -0,0 +1,417 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+# Hadoop-AWS module: Integration with Amazon Web Services
+
+The `hadoop-aws` module provides support for AWS integration. The generated
+JAR file, `hadoop-aws.jar` also declares a transitive dependency on all
+external artifacts which are needed for this support —enabling downstream
+applications to easily use this support.
+
+Features
+
+1. The "classic" `s3:` filesystem for storing objects in Amazon S3 Storage
+1. The second-generation, `s3n:` filesystem, making it easy to share
+data between hadoop and other applications via the S3 object store
+1. The third generation, `s3a:` filesystem. Designed to be a switch-in
+replacement for `s3n:`, this filesystem binding supports larger files and promises
+higher performance.
+
+The specifics of using these filesystems are documented below.
+
+## Warning: Object Stores are not filesystems.
+
+Amazon S3 is an example of "an object store". In order to achieve scalability
+and especially high availability, S3 has —as many other cloud object stores have
+done— relaxed some of the constraints which classic "POSIX" filesystems promise.
+
+Specifically
+
+1. Files that are newly created from the Hadoop Filesystem APIs may not be
+immediately visible.
+2. File delete and update operations may not immediately propagate. Old
+copies of the file may exist for an indeterminate time period.
+3. Directory operations: `delete()` and `rename()` are implemented by
+recursive file-by-file operations. They take time at least proportional to
+the number of files, during which time partial updates may be visible. If
+the operations are interrupted, the filesystem is left in an intermediate state.
+
+For further discussion on these topics, please consult
+[The Hadoop FileSystem API Definition](../../../hadoop-project-dist/hadoop-common/filesystem/index.html).
+
+## Warning #2: your AWS credentials are valuable
+
+Your AWS credentials not only pay for services, they offer read and write
+access to the data. Anyone with the credentials can not only read your datasets
+—they can delete them.
+
+Do not inadvertently share these credentials through means such as
+1. Checking in Hadoop configuration files containing the credentials.
+1. Logging them to a console, as they invariably end up being seen.
+
+If you do any of these: change your credentials immediately!
+
+
+## S3
+
+### Authentication properties
+
+    <property>
+      <name>fs.s3.awsAccessKeyId</name>
+      <description>AWS access key ID</description>
+    </property>
+
+    <property>
+      <name>fs.s3.awsSecretAccessKey</name>
+      <description>AWS secret key</description>
+    </property>
+
+
+## S3N
+
+### Authentication properties
+
+    <property>
+      <name>fs.s3n.awsAccessKeyId</name>
+      <description>AWS access key ID</description>
+    </property>
+
+    <property>
+      <name>fs.s3n.awsSecretAccessKey</name>
+      <description>AWS secret key</description>
+    </property>
+
+### Other properties
+
+
+    <property>
+      <name>fs.s3n.block.size</name>
+      <value>67108864</value>
+      <description>Block size to use when reading files using the native S3
+      filesystem (s3n: URIs).</description>
+    </property>
+
+    <property>
+      <name>fs.s3n.multipart.uploads.enabled</name>
+      <value>false</value>
+      <description>Setting this property to true enables multiple uploads to
+      native S3 filesystem. When uploading a file, it is split into blocks
+      if the size is larger than fs.s3n.multipart.uploads.block.size.
+      </description>
+    </property>
+
+    <property>
+      <name>fs.s3n.multipart.uploads.block.size</name>
+      <value>67108864</value>
+      <description>The block size for multipart uploads to native S3 filesystem.
+      Default size is 64MB.
+      </description>
+    </property>
+
+    <property>
+      <name>fs.s3n.multipart.copy.block.size</name>
+      <value>5368709120</value>
+      <description>The block size for multipart copy in native S3 filesystem.
+      Default size is 5GB.
+      </description>
+    </property>
+
+    <property>
+      <name>fs.s3n.server-side-encryption-algorithm</name>
+      <value></value>
+      <description>Specify a server-side encryption algorithm for S3.
+      The default is NULL, and the only other currently allowable value is AES256.
+      </description>
+    </property>
+
+## S3A
+
+
+### Authentication properties
+
+    <property>
+      <name>fs.s3a.awsAccessKeyId</name>
+      <description>AWS access key ID. Omit for Role-based authentication.</description>
+    </property>
+
+    <property>
+      <name>fs.s3a.awsSecretAccessKey</name>
+      <description>AWS secret key. Omit for Role-based authentication.</description>
+    </property>
+
+### Other properties
+
+    <property>
+      <name>fs.s3a.connection.maximum</name>
+      <value>15</value>
+      <description>Controls the maximum number of simultaneous connections to S3.</description>
+    </property>
+
+    <property>
+      <name>fs.s3a.connection.ssl.enabled</name>
+      <value>true</value>
+      <description>Enables or disables SSL connections to S3.</description>
+    </property>
+
+    <property>
+      <name>fs.s3a.attempts.maximum</name>
+      <value>10</value>
+      <description>How many times we should retry commands on transient errors.</description>
+    </property>
+
+    <property>
+      <name>fs.s3a.connection.timeout</name>
+      <value>5000</value>
+      <description>Socket connection timeout in milliseconds.</description>
+    </property>
+
+    <property>
+      <name>fs.s3a.paging.maximum</name>
+      <value>5000</value>
+      <description>How many keys to request from S3 at a time when doing
+         directory listings.</description>
+    </property>
+
+    <property>
+      <name>fs.s3a.multipart.size</name>
+      <value>104857600</value>
+      <description>The size (in bytes) of the parts that upload or copy operations are split into.</description>
+    </property>
+
+    <property>
+      <name>fs.s3a.multipart.threshold</name>
+      <value>2147483647</value>
+      <description>Threshold before uploads or copies use parallel multipart operations.</description>
+    </property>
+
+    <property>
+      <name>fs.s3a.acl.default</name>
+      <description>Set a canned ACL for newly created and copied objects. Value may be private,
+         public-read, public-read-write, authenticated-read, log-delivery-write,
+         bucket-owner-read, or bucket-owner-full-control.</description>
+    </property>
+
+    <property>
+      <name>fs.s3a.multipart.purge</name>
+      <value>false</value>
+      <description>True if you want to purge existing multipart uploads that may not have been
+         completed/aborted correctly</description>
+    </property>
+
+    <property>
+      <name>fs.s3a.multipart.purge.age</name>
+      <value>86400</value>
+      <description>Minimum age in seconds of multipart uploads to purge</description>
+    </property>
+
+    <property>
+      <name>fs.s3a.buffer.dir</name>
+      <value>${hadoop.tmp.dir}/s3a</value>
+      <description>Comma separated list of directories that will be used to buffer file
+        uploads to.</description>
+    </property>
+
+    <property>
+      <name>fs.s3a.impl</name>
+      <value>org.apache.hadoop.fs.s3a.S3AFileSystem</value>
+      <description>The implementation class of the S3A Filesystem</description>
+    </property>
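+
+Once these options are in the client's Hadoop configuration (for example in
+`core-site.xml`), an S3A bucket is accessed through the standard Hadoop
+`FileSystem` API. The following is only an illustrative sketch; the bucket
+name `example-bucket` is a placeholder, not a real endpoint:
+
+    import java.io.IOException;
+    import java.net.URI;
+
+    import org.apache.hadoop.conf.Configuration;
+    import org.apache.hadoop.fs.FileStatus;
+    import org.apache.hadoop.fs.FileSystem;
+    import org.apache.hadoop.fs.Path;
+
+    public class S3AListing {
+      public static void main(String[] args) throws IOException {
+        // fs.s3a.* settings, including the credentials, are read from the
+        // Hadoop configuration files on the classpath.
+        Configuration conf = new Configuration();
+        FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
+        for (FileStatus status : fs.listStatus(new Path("/"))) {
+          System.out.println(status.getPath());
+        }
+      }
+    }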
+
+
+## Testing the S3 filesystem clients
+
+To test the S3* filesystem clients, you need to provide two files
+which pass in authentication details to the test runner
+
+1. `auth-keys.xml`
+1. `core-site.xml`
+
+These are both Hadoop XML configuration files, which must be placed into
+`hadoop-tools/hadoop-aws/src/test/resources`.
+
+
+### `auth-keys.xml`
+
+The presence of this file triggers the testing of the S3 classes.
+
+Without this file, *none of the tests in this module will be executed*.
+
+The XML file must contain all the ID/key information needed to connect
+each of the filesystem clients to the object stores, and a URL for
+each filesystem for its testing.
+
+1. `test.fs.s3n.name` : the URL of the bucket for S3n tests
+1. `test.fs.s3a.name` : the URL of the bucket for S3a tests
+1. `test.fs.s3.name` : the URL of the bucket for "S3" tests
+
+The contents of each bucket will be destroyed during the test process:
+do not use the bucket for any purpose other than testing.
+
+Example:
+
+    <configuration>
+      
+      <property>
+        <name>test.fs.s3n.name</name>
+        <value>s3n://test-aws-s3n/</value>
+      </property>
+    
+      <property>
+        <name>test.fs.s3a.name</name>
+        <value>s3a://test-aws-s3a/</value>
+      </property>
+    
+      <property>
+        <name>test.fs.s3.name</name>
+        <value>s3://test-aws-s3/</value>
+      </property>
+  
+      <property>
+        <name>fs.s3.awsAccessKeyId</name>
+        <value>DONOTCOMMITTHISKEYTOSCM</value>
+      </property>
+
+      <property>
+        <name>fs.s3.awsSecretAccessKey</name>
+        <value>DONOTEVERSHARETHISSECRETKEY!</value>
+      </property>
+
+      <property>
+        <name>fs.s3n.awsAccessKeyId</name>
+        <value>DONOTCOMMITTHISKEYTOSCM</value>
+      </property>
+
+      <property>
+        <name>fs.s3n.awsSecretAccessKey</name>
+        <value>DONOTEVERSHARETHISSECRETKEY!</value>
+      </property>
+
+      <property>
+        <name>fs.s3a.awsAccessKeyId</name>
+        <description>AWS access key ID. Omit for Role-based authentication.</description>
+        <value>DONOTCOMMITTHISKEYTOSCM</value>
+      </property>
+  
+      <property>
+        <name>fs.s3a.awsSecretAccessKey</name>
+        <description>AWS secret key. Omit for Role-based authentication.</description>
+        <value>DONOTEVERSHARETHISSECRETKEY!</value>
+      </property>
+    </configuration>
+
+## File `contract-test-options.xml`
+
+The file `hadoop-tools/hadoop-aws/src/test/resources/contract-test-options.xml`
+must be created and configured for the test filesystems.
+
+If a specific file `fs.contract.test.fs.*` test path is not defined for
+any of the filesystems, those tests will be skipped.
+
+The standard S3 authentication details must also be provided. This can be
+through copy-and-paste of the `auth-keys.xml` credentials, or it can be
+through direct XInclude inclusion.
+
+### s3://
+
+The filesystem name must be defined in the property `fs.contract.test.fs.s3`. 
+
+
+Example:
+
+      <property>
+        <name>fs.contract.test.fs.s3</name>
+        <value>s3://test-aws-s3/</value>
+      </property>
+
+### s3n://
+
+
+In the file `src/test/resources/contract-test-options.xml`, the filesystem
+name must be defined in the property `fs.contract.test.fs.s3n`.
+The standard configuration options to define the S3N authentication details
+must also be provided.
+
+Example:
+
+      <property>
+        <name>fs.contract.test.fs.s3n</name>
+        <value>s3n://test-aws-s3n/</value>
+      </property>
+
+### s3a://
+
+
+In the file `src/test/resources/contract-test-options.xml`, the filesystem
+name must be defined in the property `fs.contract.test.fs.s3a`.
+The standard configuration options to define the S3A authentication details
+must also be provided.
+
+Example:
+
+    <property>
+      <name>fs.contract.test.fs.s3a</name>
+      <value>s3a://test-aws-s3a/</value>
+    </property>
+
+### Complete example of `contract-test-options.xml`
+
+
+
+    <?xml version="1.0"?>
+    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+    <!--
+      ~ Licensed to the Apache Software Foundation (ASF) under one
+      ~  or more contributor license agreements.  See the NOTICE file
+      ~  distributed with this work for additional information
+      ~  regarding copyright ownership.  The ASF licenses this file
+      ~  to you under the Apache License, Version 2.0 (the
+      ~  "License"); you may not use this file except in compliance
+      ~  with the License.  You may obtain a copy of the License at
+      ~
+      ~       http://www.apache.org/licenses/LICENSE-2.0
+      ~
+      ~  Unless required by applicable law or agreed to in writing, software
+      ~  distributed under the License is distributed on an "AS IS" BASIS,
+      ~  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+      ~  See the License for the specific language governing permissions and
+      ~  limitations under the License.
+      -->
+    
+    <configuration>
+    
+      <include xmlns="http://www.w3.org/2001/XInclude"
+        href="auth-keys.xml"/>
+    
+      <property>
+        <name>fs.contract.test.fs.s3</name>
+        <value>s3://test-aws-s3/</value>
+      </property>
+
+
+      <property>
+        <name>fs.contract.test.fs.s3a</name>
+        <value>s3a://test-aws-s3a/</value>
+      </property>
+
+      <property>
+        <name>fs.contract.test.fs.s3n</name>
+        <value>s3n://test-aws-s3n/</value>
+      </property>
+
+    </configuration>
+
+This example pulls in the `auth-keys.xml` file for the credentials.
+This provides a single place to keep the keys up to date, and means
+that the file `contract-test-options.xml` itself does not contain any
+secret credentials.
\ No newline at end of file


[14/50] [abbrv] hadoop git commit: HDFS-7481. Add ACL indicator to the 'Permission Denied' exception. (Contributed by Vinayakumar B )

Posted by ka...@apache.org.
HDFS-7481. Add ACL indicator to the 'Permission Denied' exception. (Contributed by Vinayakumar B )


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d93f3b98
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d93f3b98
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d93f3b98

Branch: refs/heads/YARN-2139
Commit: d93f3b9815f90d24c838574a56013e6dc60dc5ad
Parents: 437322a
Author: Vinayakumar B <vi...@apache.org>
Authored: Wed Dec 10 08:27:15 2014 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Wed Dec 10 08:27:15 2014 +0530

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt              |  3 +++
 .../hadoop/hdfs/server/namenode/FSPermissionChecker.java | 11 ++++++++++-
 2 files changed, 13 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d93f3b98/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9398429..d141439 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -558,6 +558,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7473. Document setting dfs.namenode.fs-limits.max-directory-items to 0
     is invalid. (Akira AJISAKA via cnauroth)
 
+    HDFS-7481. Add ACL indicator to the "Permission Denied" exception.
+    (vinayakumarb)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d93f3b98/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index 8de8c54..0508484 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -47,6 +47,12 @@ class FSPermissionChecker {
   /** @return a string for throwing {@link AccessControlException} */
   private String toAccessControlString(INode inode, int snapshotId,
       FsAction access, FsPermission mode) {
+    return toAccessControlString(inode, snapshotId, access, mode, false);
+  }
+
+  /** @return a string for throwing {@link AccessControlException} */
+  private String toAccessControlString(INode inode, int snapshotId, FsAction access,
+      FsPermission mode, boolean deniedFromAcl) {
     StringBuilder sb = new StringBuilder("Permission denied: ")
       .append("user=").append(user).append(", ")
       .append("access=").append(access).append(", ")
@@ -55,6 +61,9 @@ class FSPermissionChecker {
       .append(inode.getGroupName(snapshotId)).append(':')
       .append(inode.isDirectory() ? 'd' : '-')
       .append(mode);
+    if (deniedFromAcl) {
+      sb.append("+");
+    }
     return sb.toString();
   }
 
@@ -338,7 +347,7 @@ class FSPermissionChecker {
     }
 
     throw new AccessControlException(
-      toAccessControlString(inode, snapshotId, access, mode));
+      toAccessControlString(inode, snapshotId, access, mode, true));
   }
 
   /** Guarded by {@link FSNamesystem#readLock()} */
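
For readers of this change: the trailing "+" only appears when an ACL entry, rather than
the plain mode bits, caused the denial, so client code can tell the two cases apart from
the exception message. A minimal, hypothetical sketch (the path is made up, and it assumes
the message format produced by this patch):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.security.AccessControlException;

    public class AclDenialCheck {
      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        try {
          fs.open(new Path("/restricted/data.txt")).close();
        } catch (AccessControlException e) {
          // With this patch the permission string ends in '+' when an ACL
          // entry was involved in the denial.
          boolean deniedByAcl = e.getMessage() != null && e.getMessage().endsWith("+");
          System.err.println(deniedByAcl
              ? "Denied by an ACL entry; inspect it with 'hdfs dfs -getfacl'"
              : "Denied by the basic permission bits");
        }
      }
    }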


[20/50] [abbrv] hadoop git commit: HADOOP-6590. Add a username check for hadoop sub-commands (John Smith via aw)

Posted by ka...@apache.org.
HADOOP-6590. Add a username check for hadoop sub-commands (John Smith via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5361426
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5361426
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5361426

Branch: refs/heads/YARN-2139
Commit: c536142699099c0e6da3413f1bbb01784577e28e
Parents: a7c6c71
Author: Allen Wittenauer <aw...@apache.org>
Authored: Wed Dec 10 13:41:28 2014 -0800
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Wed Dec 10 13:41:28 2014 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt         |  2 ++
 hadoop-common-project/hadoop-common/src/main/bin/hadoop |  2 ++
 .../hadoop-common/src/main/bin/hadoop-functions.sh      | 12 ++++++++++++
 .../hadoop-common/src/main/conf/hadoop-env.sh           |  6 ++++++
 hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs       |  2 ++
 hadoop-mapreduce-project/bin/mapred                     |  2 ++
 hadoop-yarn-project/hadoop-yarn/bin/yarn                |  2 ++
 7 files changed, 28 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5361426/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 7310dd4..6e1cc11 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -24,6 +24,8 @@ Trunk (Unreleased)
     (Dexter Bradshaw, Mostafa Elhemali, Xi Fang, Johannes Klein, David Lao,
     Mike Liddell, Chuan Liu, Lengning Liu, Ivan Mitic, Michael Rys,
     Alexander Stojanovich, Brian Swan, and Min Wei via cnauroth)
+
+    HADOOP-6590. Add a username check for hadoop sub-commands (John Smith via aw)
     
   IMPROVEMENTS
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5361426/hadoop-common-project/hadoop-common/src/main/bin/hadoop
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
index be38382..b216b8f 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
@@ -179,6 +179,8 @@ case ${COMMAND} in
   ;;
 esac
 
+hadoop_verify_user "${COMMAND}"
+
 # Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
 hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
 HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5361426/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 3e353d9..dfd7315 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -1154,3 +1154,15 @@ function hadoop_secure_daemon_handler
   esac
 }
 
+function hadoop_verify_user
+{
+  local command=$1
+  local uservar="HADOOP_${command}_USER"
+
+  if [[ -n ${!uservar} ]]; then
+    if [[ ${!uservar} != ${USER} ]]; then
+      hadoop_error "ERROR: ${command} can only be executed by ${!uservar}."
+      exit 1
+    fi
+  fi
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5361426/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
index 588b02a..ae18542 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
@@ -398,3 +398,9 @@ esac
 # via this special env var:
 # export HADOOP_ENABLE_BUILD_PATHS="true"
 
+#
+# To prevent accidents, shell commands can be (superficially) locked
+# to only allow certain users to execute certain subcommands.
+#
+# For example, to limit who can execute the namenode command,
+# export HADOOP_namenode_USER=hdfs

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5361426/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 8140f18..98a89b7 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -247,6 +247,8 @@ case ${COMMAND} in
   ;;
 esac
 
+hadoop_verify_user "${COMMAND}"
+
 if [[ -n "${secure_service}" ]]; then
   HADOOP_SECURE_USER="${secure_user}"
   hadoop_verify_secure_prereq

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5361426/hadoop-mapreduce-project/bin/mapred
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/bin/mapred b/hadoop-mapreduce-project/bin/mapred
index 9f28471..066c438 100755
--- a/hadoop-mapreduce-project/bin/mapred
+++ b/hadoop-mapreduce-project/bin/mapred
@@ -135,6 +135,8 @@ case ${COMMAND} in
   ;;
 esac
 
+hadoop_verify_user "${COMMAND}"
+
 daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out"
 daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}.pid"
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5361426/hadoop-yarn-project/hadoop-yarn/bin/yarn
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index 760d8e6..ab9f7dd 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -184,6 +184,8 @@ case "${COMMAND}" in
   ;;
 esac
 
+hadoop_verify_user "${COMMAND}"
+
 # set HADOOP_OPTS to YARN_OPTS so that we can use
 # finalize, etc, without doing anything funky
 hadoop_debug "Resetting HADOOP_OPTS=YARN_OPTS"


[15/50] [abbrv] hadoop git commit: HDFS-7502. Fix findbugs warning in hdfs-nfs project. Contributed by Brandon Li.

Posted by ka...@apache.org.
HDFS-7502. Fix findbugs warning in hdfs-nfs project. Contributed by Brandon Li.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/195f31a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/195f31a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/195f31a8

Branch: refs/heads/YARN-2139
Commit: 195f31a8ef6b15e1962ab945b2f83af98e0058c6
Parents: d93f3b9
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Dec 9 20:42:42 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Tue Dec 9 20:42:42 2014 -0800

----------------------------------------------------------------------
 .../apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java  | 15 +++++++++------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt          |  3 +++
 2 files changed, 12 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/195f31a8/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index c860dd5..aaac797 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -25,6 +25,7 @@ import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.SocketAddress;
 import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
 import java.util.EnumSet;
 
 import org.apache.commons.logging.Log;
@@ -651,15 +652,16 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       }
       int rtmax = config.getInt(NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY,
           NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_DEFAULT);
-      if (rtmax < target.getBytes().length) {
-        LOG.error("Link size: " + target.getBytes().length
+      if (rtmax < target.getBytes(Charset.forName("UTF-8")).length) {
+        LOG.error("Link size: "
+            + target.getBytes(Charset.forName("UTF-8")).length
             + " is larger than max transfer size: " + rtmax);
         return new READLINK3Response(Nfs3Status.NFS3ERR_IO, postOpAttr,
             new byte[0]);
       }
 
       return new READLINK3Response(Nfs3Status.NFS3_OK, postOpAttr,
-          target.getBytes());
+          target.getBytes(Charset.forName("UTF-8")));
 
     } catch (IOException e) {
       LOG.warn("Readlink error: " + e.getClass(), e);
@@ -1462,7 +1464,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         throw io;
       }
       // This happens when startAfter was just deleted
-      LOG.info("Cookie couldn't be found: " + new String(startAfter)
+      LOG.info("Cookie couldn't be found: "
+          + new String(startAfter, Charset.forName("UTF-8"))
           + ", do listing from beginning");
       dlisting = dfsClient
           .listPaths(dirFileIdPath, HdfsFileStatus.EMPTY_NAME);
@@ -1571,7 +1574,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         startAfter = HdfsFileStatus.EMPTY_NAME;
       } else {
         String inodeIdPath = Nfs3Utils.getFileIdPath(cookie);
-        startAfter = inodeIdPath.getBytes();
+        startAfter = inodeIdPath.getBytes(Charset.forName("UTF-8"));
       }
 
       dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
@@ -1733,7 +1736,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         startAfter = HdfsFileStatus.EMPTY_NAME;
       } else {
         String inodeIdPath = Nfs3Utils.getFileIdPath(cookie);
-        startAfter = inodeIdPath.getBytes();
+        startAfter = inodeIdPath.getBytes(Charset.forName("UTF-8"));
       }
 
       dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/195f31a8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d141439..9f3f9ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -561,6 +561,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7481. Add ACL indicator to the "Permission Denied" exception.
     (vinayakumarb)
 
+    HDFS-7502. Fix findbugs warning in hdfs-nfs project.
+    (Brandon Li via wheat9)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
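
The pattern behind this findbugs fix, restated as a standalone, illustrative sketch
(not HDFS code): every byte/char conversion names its charset instead of relying on
the platform default.

    import java.io.BufferedReader;
    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.nio.charset.Charset;

    public class ExplicitCharset {
      private static final Charset UTF8 = Charset.forName("UTF-8");

      public static void main(String[] args) throws IOException {
        // Encoding: String.getBytes() without a charset uses the platform
        // default, which findbugs flags; pass the charset explicitly.
        byte[] bytes = "/dir/file".getBytes(UTF8);

        // Decoding: the same rule applies to new String(...) and to readers.
        String path = new String(bytes, UTF8);
        BufferedReader reader = new BufferedReader(
            new InputStreamReader(new ByteArrayInputStream(bytes), UTF8));
        System.out.println(path + " == " + reader.readLine());
        reader.close();
      }
    }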


[10/50] [abbrv] hadoop git commit: YARN-2910. FSLeafQueue can throw ConcurrentModificationException. (Wilfred Spiegelenburg via kasha)

Posted by ka...@apache.org.
YARN-2910. FSLeafQueue can throw ConcurrentModificationException. (Wilfred Spiegelenburg via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2e07a54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2e07a54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2e07a54

Branch: refs/heads/YARN-2139
Commit: a2e07a54561a57a83b943628ebbc53ed5ba52718
Parents: 1340617
Author: Karthik Kambatla <ka...@apache.org>
Authored: Tue Dec 9 14:00:31 2014 -0800
Committer: Karthik Kambatla <ka...@apache.org>
Committed: Tue Dec 9 14:00:31 2014 -0800

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../scheduler/fair/FSAppAttempt.java            |   2 +-
 .../scheduler/fair/FSLeafQueue.java             | 151 +++++++++++++------
 .../scheduler/fair/TestFSLeafQueue.java         |  93 +++++++++++-
 4 files changed, 199 insertions(+), 50 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2e07a54/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d06c831..d87322f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -203,6 +203,9 @@ Release 2.7.0 - UNRELEASED
     YARN-2931. PublicLocalizer may fail until directory is initialized by
     LocalizeRunner. (Anubhav Dhoot via kasha)
 
+    YARN-2910. FSLeafQueue can throw ConcurrentModificationException. 
+    (Wilfred Spiegelenburg via kasha)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2e07a54/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index b9966e7..b23ec3e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -172,7 +172,7 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
   }
 
   @Override
-  public synchronized Resource getHeadroom() {
+  public Resource getHeadroom() {
     final FSQueue queue = (FSQueue) this.queue;
     SchedulingPolicy policy = queue.getPolicy();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2e07a54/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 345ea8b..bbf1be7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -23,6 +23,9 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
@@ -50,6 +53,10 @@ public class FSLeafQueue extends FSQueue {
       new ArrayList<FSAppAttempt>();
   private final List<FSAppAttempt> nonRunnableApps =
       new ArrayList<FSAppAttempt>();
+  // get a lock with fair distribution for app list updates
+  private final ReadWriteLock rwl = new ReentrantReadWriteLock(true);
+  private final Lock readLock = rwl.readLock();
+  private final Lock writeLock = rwl.writeLock();
   
   private Resource demand = Resources.createResource(0);
   
@@ -72,16 +79,26 @@ public class FSLeafQueue extends FSQueue {
   }
   
   public void addApp(FSAppAttempt app, boolean runnable) {
-    if (runnable) {
-      runnableApps.add(app);
-    } else {
-      nonRunnableApps.add(app);
+    writeLock.lock();
+    try {
+      if (runnable) {
+        runnableApps.add(app);
+      } else {
+        nonRunnableApps.add(app);
+      }
+    } finally {
+      writeLock.unlock();
     }
   }
   
   // for testing
   void addAppSchedulable(FSAppAttempt appSched) {
-    runnableApps.add(appSched);
+    writeLock.lock();
+    try {
+      runnableApps.add(appSched);
+    } finally {
+      writeLock.unlock();
+    }
   }
   
   /**
@@ -89,18 +106,25 @@ public class FSLeafQueue extends FSQueue {
    * @return whether or not the app was runnable
    */
   public boolean removeApp(FSAppAttempt app) {
-    if (runnableApps.remove(app)) {
-      // Update AM resource usage
-      if (app.isAmRunning() && app.getAMResource() != null) {
-        Resources.subtractFrom(amResourceUsage, app.getAMResource());
+    boolean runnable = false;
+    writeLock.lock();
+    try {
+      if (runnableApps.remove(app)) {
+        runnable = true;
+      } else if (nonRunnableApps.remove(app)) {
+        runnable = false; //nop, runnable is initialised to false already
+      } else {
+        throw new IllegalStateException("Given app to remove " + app +
+            " does not exist in queue " + this);
       }
-      return true;
-    } else if (nonRunnableApps.remove(app)) {
-      return false;
-    } else {
-      throw new IllegalStateException("Given app to remove " + app +
-          " does not exist in queue " + this);
+    } finally {
+      writeLock.unlock();
+    }
+    // Update AM resource usage if needed
+    if (runnable && app.isAmRunning() && app.getAMResource() != null) {
+      Resources.subtractFrom(amResourceUsage, app.getAMResource());
     }
+    return runnable;
   }
   
   public Collection<FSAppAttempt> getRunnableAppSchedulables() {
@@ -114,11 +138,16 @@ public class FSLeafQueue extends FSQueue {
   @Override
   public void collectSchedulerApplications(
       Collection<ApplicationAttemptId> apps) {
-    for (FSAppAttempt appSched : runnableApps) {
-      apps.add(appSched.getApplicationAttemptId());
-    }
-    for (FSAppAttempt appSched : nonRunnableApps) {
-      apps.add(appSched.getApplicationAttemptId());
+    readLock.lock();
+    try {
+      for (FSAppAttempt appSched : runnableApps) {
+        apps.add(appSched.getApplicationAttemptId());
+      }
+      for (FSAppAttempt appSched : nonRunnableApps) {
+        apps.add(appSched.getApplicationAttemptId());
+      }
+    } finally {
+      readLock.unlock();
     }
   }
 
@@ -144,11 +173,16 @@ public class FSLeafQueue extends FSQueue {
   @Override
   public Resource getResourceUsage() {
     Resource usage = Resources.createResource(0);
-    for (FSAppAttempt app : runnableApps) {
-      Resources.addTo(usage, app.getResourceUsage());
-    }
-    for (FSAppAttempt app : nonRunnableApps) {
-      Resources.addTo(usage, app.getResourceUsage());
+    readLock.lock();
+    try {
+      for (FSAppAttempt app : runnableApps) {
+        Resources.addTo(usage, app.getResourceUsage());
+      }
+      for (FSAppAttempt app : nonRunnableApps) {
+        Resources.addTo(usage, app.getResourceUsage());
+      }
+    } finally {
+      readLock.unlock();
     }
     return usage;
   }
@@ -164,17 +198,22 @@ public class FSLeafQueue extends FSQueue {
     Resource maxRes = scheduler.getAllocationConfiguration()
         .getMaxResources(getName());
     demand = Resources.createResource(0);
-    for (FSAppAttempt sched : runnableApps) {
-      if (Resources.equals(demand, maxRes)) {
-        break;
+    readLock.lock();
+    try {
+      for (FSAppAttempt sched : runnableApps) {
+        if (Resources.equals(demand, maxRes)) {
+          break;
+        }
+        updateDemandForApp(sched, maxRes);
       }
-      updateDemandForApp(sched, maxRes);
-    }
-    for (FSAppAttempt sched : nonRunnableApps) {
-      if (Resources.equals(demand, maxRes)) {
-        break;
+      for (FSAppAttempt sched : nonRunnableApps) {
+        if (Resources.equals(demand, maxRes)) {
+          break;
+        }
+        updateDemandForApp(sched, maxRes);
       }
-      updateDemandForApp(sched, maxRes);
+    } finally {
+      readLock.unlock();
     }
     if (LOG.isDebugEnabled()) {
       LOG.debug("The updated demand for " + getName() + " is " + demand
@@ -198,7 +237,8 @@ public class FSLeafQueue extends FSQueue {
   public Resource assignContainer(FSSchedulerNode node) {
     Resource assigned = Resources.none();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Node " + node.getNodeName() + " offered to queue: " + getName());
+      LOG.debug("Node " + node.getNodeName() + " offered to queue: " +
+          getName());
     }
 
     if (!assignContainerPreCheck(node)) {
@@ -206,16 +246,26 @@ public class FSLeafQueue extends FSQueue {
     }
 
     Comparator<Schedulable> comparator = policy.getComparator();
-    Collections.sort(runnableApps, comparator);
-    for (FSAppAttempt sched : runnableApps) {
-      if (SchedulerAppUtils.isBlacklisted(sched, node, LOG)) {
-        continue;
-      }
-
-      assigned = sched.assignContainer(node);
-      if (!assigned.equals(Resources.none())) {
-        break;
+    writeLock.lock();
+    try {
+      Collections.sort(runnableApps, comparator);
+    } finally {
+      writeLock.unlock();
+    }
+    readLock.lock();
+    try {
+      for (FSAppAttempt sched : runnableApps) {
+        if (SchedulerAppUtils.isBlacklisted(sched, node, LOG)) {
+          continue;
+        }
+
+        assigned = sched.assignContainer(node);
+        if (!assigned.equals(Resources.none())) {
+          break;
+        }
       }
+    } finally {
+      readLock.unlock();
     }
     return assigned;
   }
@@ -237,11 +287,16 @@ public class FSLeafQueue extends FSQueue {
     // Choose the app that is most over fair share
     Comparator<Schedulable> comparator = policy.getComparator();
     FSAppAttempt candidateSched = null;
-    for (FSAppAttempt sched : runnableApps) {
-      if (candidateSched == null ||
-          comparator.compare(sched, candidateSched) > 0) {
-        candidateSched = sched;
+    readLock.lock();
+    try {
+      for (FSAppAttempt sched : runnableApps) {
+        if (candidateSched == null ||
+            comparator.compare(sched, candidateSched) > 0) {
+          candidateSched = sched;
+        }
       }
+    } finally {
+      readLock.unlock();
     }
 
     // Preempt from the selected app

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2e07a54/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
index 97736be..385ea0b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
@@ -28,12 +28,22 @@ import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
 import java.io.PrintWriter;
+import java.util.ArrayList;
 import java.util.Collection;
-
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
@@ -222,4 +232,85 @@ public class TestFSLeafQueue extends FairSchedulerTestBase {
     assertFalse(queueB1.isStarvedForFairShare());
     assertFalse(queueB2.isStarvedForFairShare());
   }
+
+  @Test
+  public void testConcurrentAccess() {
+    conf.set(FairSchedulerConfiguration.ASSIGN_MULTIPLE, "false");
+    resourceManager = new MockRM(conf);
+    resourceManager.start();
+    scheduler = (FairScheduler) resourceManager.getResourceScheduler();
+
+    String queueName = "root.queue1";
+    final FSLeafQueue schedulable = scheduler.getQueueManager().
+      getLeafQueue(queueName, true);
+    ApplicationAttemptId applicationAttemptId = createAppAttemptId(1, 1);
+    RMContext rmContext = resourceManager.getRMContext();
+    final FSAppAttempt app =
+        new FSAppAttempt(scheduler, applicationAttemptId, "user1",
+            schedulable, null, rmContext);
+
+    // this needs to be in sync with the number of runnables declared below
+    int testThreads = 2;
+    List<Runnable> runnables = new ArrayList<Runnable>();
+
+    // add applications to modify the list
+    runnables.add(new Runnable() {
+      @Override
+      public void run() {
+        for (int i=0; i < 500; i++) {
+          schedulable.addAppSchedulable(app);
+        }
+      }
+    });
+
+    // iterate over the list a couple of times in a different thread
+    runnables.add(new Runnable() {
+      @Override
+      public void run() {
+        for (int i=0; i < 500; i++) {
+          schedulable.getResourceUsage();
+        }
+      }
+    });
+
+    final List<Throwable> exceptions = Collections.synchronizedList(
+        new ArrayList<Throwable>());
+    final ExecutorService threadPool = Executors.newFixedThreadPool(
+        testThreads);
+
+    try {
+      final CountDownLatch allExecutorThreadsReady =
+          new CountDownLatch(testThreads);
+      final CountDownLatch startBlocker = new CountDownLatch(1);
+      final CountDownLatch allDone = new CountDownLatch(testThreads);
+      for (final Runnable submittedTestRunnable : runnables) {
+        threadPool.submit(new Runnable() {
+          public void run() {
+            allExecutorThreadsReady.countDown();
+            try {
+              startBlocker.await();
+              submittedTestRunnable.run();
+            } catch (final Throwable e) {
+              exceptions.add(e);
+            } finally {
+              allDone.countDown();
+            }
+          }
+        });
+      }
+      // wait until all threads are ready
+      allExecutorThreadsReady.await();
+      // start all test runners
+      startBlocker.countDown();
+      int testTimeout = 2;
+      assertTrue("Timeout waiting for more than " + testTimeout + " seconds",
+          allDone.await(testTimeout, TimeUnit.SECONDS));
+    } catch (InterruptedException ie) {
+      exceptions.add(ie);
+    } finally {
+      threadPool.shutdownNow();
+    }
+    assertTrue("Test failed with exception(s)" + exceptions,
+        exceptions.isEmpty());
+  }
 }


[21/50] [abbrv] hadoop git commit: YARN-2437. start-yarn.sh/stop-yarn should give info (Varun Saxena via aw)

Posted by ka...@apache.org.
YARN-2437. start-yarn.sh/stop-yarn should give info (Varun Saxena via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59cb8b91
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59cb8b91
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59cb8b91

Branch: refs/heads/YARN-2139
Commit: 59cb8b9123fac725660fc7cfbaaad3d1aa3e3bd7
Parents: c536142
Author: Allen Wittenauer <aw...@apache.org>
Authored: Wed Dec 10 13:48:07 2014 -0800
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Wed Dec 10 13:48:07 2014 -0800

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                   | 2 ++
 hadoop-yarn-project/hadoop-yarn/bin/start-yarn.sh | 2 ++
 hadoop-yarn-project/hadoop-yarn/bin/stop-yarn.sh  | 8 +++++---
 3 files changed, 9 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/59cb8b91/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 81d5707..832efee 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -13,6 +13,8 @@ Trunk - Unreleased
     YARN-2472. yarn-daemons.sh should jsut call yarn directly (Masatake Iwasaki
     via aw)
 
+    YARN-2437. start-yarn.sh/stop-yarn should give info (Varun Saxena via aw)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59cb8b91/hadoop-yarn-project/hadoop-yarn/bin/start-yarn.sh
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/start-yarn.sh b/hadoop-yarn-project/hadoop-yarn/bin/start-yarn.sh
index f44b199..8cd5b54 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/start-yarn.sh
+++ b/hadoop-yarn-project/hadoop-yarn/bin/start-yarn.sh
@@ -42,8 +42,10 @@ else
 fi
 
 # start resourceManager
+echo "Starting resourcemanager" 
 "${HADOOP_YARN_HOME}/bin/yarn" --config "${YARN_CONF_DIR}" --daemon start resourcemanager
 # start nodeManager
+echo "Starting nodemanagers" 
 "${bin}/yarn-daemons.sh" --config "${YARN_CONF_DIR}"  start nodemanager
 # start proxyserver
 #"${HADOOP_YARN_HOME}/bin/yarn" --config "${YARN_CONF_DIR}" --daemon start proxyserver

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59cb8b91/hadoop-yarn-project/hadoop-yarn/bin/stop-yarn.sh
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/stop-yarn.sh b/hadoop-yarn-project/hadoop-yarn/bin/stop-yarn.sh
index c807f52..e356e40 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/stop-yarn.sh
+++ b/hadoop-yarn-project/hadoop-yarn/bin/stop-yarn.sh
@@ -43,9 +43,11 @@ else
   exit 1
 fi
 
-# start resourceManager
+# stop resourceManager
+echo "Stopping resourcemanager"
 "${HADOOP_YARN_HOME}/bin/yarn" --config "${YARN_CONF_DIR}" --daemon stop resourcemanager
-# start nodeManager
+# stop nodeManager
+echo "Stopping nodemanagers"
 "${bin}/yarn-daemons.sh" --config "${YARN_CONF_DIR}"  stop nodemanager
-# start proxyserver
+# stop proxyserver
 #"${HADOOP_YARN_HOME}/bin/yarn" --config "${YARN_CONF_DIR}" --daemon stop proxyserver


[16/50] [abbrv] hadoop git commit: HADOOP-11381. Fix findbugs warnings in hadoop-distcp, hadoop-aws, hadoop-azure, and hadoop-openstack. Contributed by Li Lu.

Posted by ka...@apache.org.
HADOOP-11381. Fix findbugs warnings in hadoop-distcp, hadoop-aws, hadoop-azure, and hadoop-openstack. Contributed by Li Lu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e98ad34
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e98ad34
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e98ad34

Branch: refs/heads/YARN-2139
Commit: 2e98ad34ce64a9e5184c53447004de20a637f927
Parents: 195f31a
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Dec 9 20:45:21 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Tue Dec 9 20:45:21 2014 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt               | 2 ++
 .../src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java | 4 ++++
 .../org/apache/hadoop/fs/azure/NativeAzureFileSystem.java     | 5 +++--
 .../java/org/apache/hadoop/fs/azure/SelfRenewingLease.java    | 7 ++++---
 .../java/org/apache/hadoop/tools/FileBasedCopyListing.java    | 4 +++-
 .../hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java   | 7 ++++---
 .../java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java  | 4 ++--
 7 files changed, 22 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e98ad34/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0019b3a..9065ff5 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -555,6 +555,8 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11349. RawLocalFileSystem leaks file descriptor while creating a
     file if creat succeeds but chmod fails. (Varun Saxena via Colin P. McCabe)
 
+    HADOOP-11381. Fix findbugs warnings in hadoop-distcp, hadoop-aws,
+    hadoop-azure, and hadoop-openstack. (Li Lu via wheat9)
 
 Release 2.6.0 - 2014-11-18
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e98ad34/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 6bdd233..457351d 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -875,6 +875,8 @@ public class S3AFileSystem extends FileSystem {
           case ProgressEvent.PART_COMPLETED_EVENT_CODE:
             statistics.incrementWriteOps(1);
             break;
+          default:
+            break;
         }
       }
     };
@@ -933,6 +935,8 @@ public class S3AFileSystem extends FileSystem {
           case ProgressEvent.PART_COMPLETED_EVENT_CODE:
             statistics.incrementWriteOps(1);
             break;
+          default:
+            break;
         }
       }
     };

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e98ad34/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index ad2e2e6..c136002 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -25,6 +25,7 @@ import java.io.InputStream;
 import java.io.OutputStream;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.nio.charset.Charset;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Date;
@@ -153,7 +154,7 @@ public class NativeAzureFileSystem extends FileSystem {
             "Error reading pending rename file contents -- "
                 + "maximum file size exceeded");
       }
-      String contents = new String(bytes, 0, l);
+      String contents = new String(bytes, 0, l, Charset.forName("UTF-8"));
 
       // parse the JSON
       ObjectMapper objMapper = new ObjectMapper();
@@ -253,7 +254,7 @@ public class NativeAzureFileSystem extends FileSystem {
       // Write file.
       try {
         output = fs.create(path);
-        output.write(contents.getBytes());
+        output.write(contents.getBytes(Charset.forName("UTF-8")));
       } catch (IOException e) {
         throw new IOException("Unable to write RenamePending file for folder rename from "
             + srcKey + " to " + dstKey, e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e98ad34/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
index 2d5c0c8..bda6006 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.fs.azure;
 
-import org.apache.commons.lang.exception.ExceptionUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper;
@@ -27,6 +26,8 @@ import com.microsoft.windowsazure.storage.AccessCondition;
 import com.microsoft.windowsazure.storage.StorageException;
 import com.microsoft.windowsazure.storage.blob.CloudBlob;
 
+import java.util.concurrent.atomic.AtomicInteger;
+
 /**
  * An Azure blob lease that automatically renews itself indefinitely
  * using a background thread. Use it to synchronize distributed processes,
@@ -56,7 +57,7 @@ public class SelfRenewingLease {
   private static final Log LOG = LogFactory.getLog(SelfRenewingLease.class);
 
   // Used to allocate thread serial numbers in thread name
-  private static volatile int threadNumber = 0;
+  private static AtomicInteger threadNumber = new AtomicInteger(0);
 
 
   // Time to wait to retry getting the lease in milliseconds
@@ -99,7 +100,7 @@ public class SelfRenewingLease {
 
     // A Renewer running should not keep JVM from exiting, so make it a daemon.
     renewer.setDaemon(true);
-    renewer.setName("AzureLeaseRenewer-" + threadNumber++);
+    renewer.setName("AzureLeaseRenewer-" + threadNumber.getAndIncrement());
     renewer.start();
     LOG.debug("Acquired lease " + leaseID + " on " + blob.getUri()
         + " managed by thread " + renewer.getName());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e98ad34/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/FileBasedCopyListing.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/FileBasedCopyListing.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/FileBasedCopyListing.java
index 0fe93c2..2bc343e 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/FileBasedCopyListing.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/FileBasedCopyListing.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.security.Credentials;
 import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.InputStreamReader;
+import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -74,7 +75,8 @@ public class FileBasedCopyListing extends CopyListing {
     FileSystem fs = sourceListing.getFileSystem(getConf());
     BufferedReader input = null;
     try {
-      input = new BufferedReader(new InputStreamReader(fs.open(sourceListing)));
+      input = new BufferedReader(new InputStreamReader(fs.open(sourceListing),
+          Charset.forName("UTF-8")));
       String line = input.readLine();
       while (line != null) {
         result.add(new Path(line));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e98ad34/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java
index b3e6b94..0138eae 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystemStore.java
@@ -45,6 +45,7 @@ import java.io.InputStream;
 import java.io.InterruptedIOException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.nio.charset.Charset;
 import java.text.ParseException;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
@@ -352,8 +353,8 @@ public class SwiftNativeFileSystemStore {
     final CollectionType collectionType = JSONUtil.getJsonMapper().getTypeFactory().
             constructCollectionType(List.class, SwiftObjectFileStatus.class);
 
-    final List<SwiftObjectFileStatus> fileStatusList =
-            JSONUtil.toObject(new String(bytes), collectionType);
+    final List<SwiftObjectFileStatus> fileStatusList = JSONUtil.toObject(
+        new String(bytes, Charset.forName("UTF-8")), collectionType);
 
     //this can happen if user lists file /data/files/file
     //in this case swift will return empty array
@@ -447,7 +448,7 @@ public class SwiftNativeFileSystemStore {
       //no object location, return an empty list
       return new LinkedList<URI>();
     }
-    return extractUris(new String(objectLocation), path);
+    return extractUris(new String(objectLocation, Charset.forName("UTF-8")), path);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e98ad34/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java
index 7e850e7..c9e26ac 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/util/SwiftTestUtils.java
@@ -219,9 +219,9 @@ public class SwiftTestUtils extends org.junit.Assert {
         byte actual = dest[i];
         byte expected = src[i];
         String letter = toChar(actual);
-        String line = String.format("[%04d] %2x %s\n", i, actual, letter);
+        String line = String.format("[%04d] %2x %s%n", i, actual, letter);
         if (expected != actual) {
-          line = String.format("[%04d] %2x %s -expected %2x %s\n",
+          line = String.format("[%04d] %2x %s -expected %2x %s%n",
                                i,
                                actual,
                                letter,
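
One of the patterns in this patch, restated as a standalone, illustrative sketch (class
and method names are mine): incrementing a volatile int is a read-modify-write and is not
atomic, so the thread serial number moves to an AtomicInteger.

    import java.util.concurrent.atomic.AtomicInteger;

    public class RenewerThreads {
      private static final AtomicInteger THREAD_NUMBER = new AtomicInteger(0);

      public static Thread newRenewer(Runnable task) {
        Thread t = new Thread(task);
        t.setDaemon(true);
        // getAndIncrement() is a single atomic step, so two concurrent
        // callers can never be handed the same serial number.
        t.setName("AzureLeaseRenewer-" + THREAD_NUMBER.getAndIncrement());
        return t;
      }
    }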


[06/50] [abbrv] hadoop git commit: HADOOP-11273. TestMiniKdc failure: login options not compatible with IBM JDK. Contributed by Gao Zhong Liang.

Posted by ka...@apache.org.
HADOOP-11273. TestMiniKdc failure: login options not compatible with IBM JDK. Contributed by Gao Zhong Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d08fc9ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d08fc9ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d08fc9ac

Branch: refs/heads/YARN-2139
Commit: d08fc9aca807af5a240f5e1904d9c0ba027196b8
Parents: d8352b9
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Dec 9 10:57:32 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Tue Dec 9 10:58:34 2014 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../org/apache/hadoop/minikdc/TestMiniKdc.java  | 22 +++++++++++++-------
 2 files changed, 17 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d08fc9ac/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4b23471..b030bf7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -545,6 +545,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11372. Fix new findbugs warnings in mapreduce-examples.
     (Li Lu via wheat9)
 
+    HADOOP-11273. TestMiniKdc failure: login options not compatible with IBM
+    JDK. (Gao Zhong Liang via wheat9)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d08fc9ac/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java b/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java
index c052bb1..fac7f0f 100644
--- a/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java
+++ b/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java
@@ -37,7 +37,8 @@ import java.util.HashMap;
 import java.util.Arrays;
 
 public class TestMiniKdc extends KerberosSecurityTestcase {
-
+  private static final boolean IBM_JAVA = System.getProperty("java.vendor")
+      .contains("IBM");
   @Test
   public void testMiniKdcStart() {
     MiniKdc kdc = getKdc();
@@ -94,15 +95,20 @@ public class TestMiniKdc extends KerberosSecurityTestcase {
     @Override
     public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
       Map<String, String> options = new HashMap<String, String>();
-      options.put("keyTab", keytab);
       options.put("principal", principal);
-      options.put("useKeyTab", "true");
-      options.put("storeKey", "true");
-      options.put("doNotPrompt", "true");
-      options.put("useTicketCache", "true");
-      options.put("renewTGT", "true");
       options.put("refreshKrb5Config", "true");
-      options.put("isInitiator", Boolean.toString(isInitiator));
+      if (IBM_JAVA) {
+        options.put("useKeytab", keytab);
+        options.put("credsType", "both");
+      } else {
+        options.put("keyTab", keytab);
+        options.put("useKeyTab", "true");
+        options.put("storeKey", "true");
+        options.put("doNotPrompt", "true");
+        options.put("useTicketCache", "true");
+        options.put("renewTGT", "true");
+        options.put("isInitiator", Boolean.toString(isInitiator));
+      }
       String ticketCache = System.getenv("KRB5CCNAME");
       if (ticketCache != null) {
         options.put("ticketCache", ticketCache);

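A side note on the incompatibility being fixed here (an illustration, not part of the patch): the IBM JDK ships its own Kerberos login module that expects different JAAS option names than the Oracle/OpenJDK module, so code that builds the option map has to branch on the JVM vendor, exactly as the hunk above does. A trimmed, self-contained sketch of that branching; KerberosLoginOptions and forKeytab are hypothetical names, and several of the non-IBM options are omitted for brevity.

import java.util.HashMap;
import java.util.Map;

public class KerberosLoginOptions {
  private static final boolean IBM_JAVA =
      System.getProperty("java.vendor").contains("IBM");

  // Build keytab-based JAAS login options for the running JVM vendor.
  public static Map<String, String> forKeytab(String keytab, String principal) {
    Map<String, String> options = new HashMap<String, String>();
    options.put("principal", principal);
    options.put("refreshKrb5Config", "true");
    if (IBM_JAVA) {
      // The IBM module takes the keytab path directly plus a credsType.
      options.put("useKeytab", keytab);
      options.put("credsType", "both");
    } else {
      // The Oracle/OpenJDK module spreads the same intent across several flags.
      options.put("keyTab", keytab);
      options.put("useKeyTab", "true");
      options.put("storeKey", "true");
      options.put("doNotPrompt", "true");
    }
    return options;
  }

  public static void main(String[] args) {
    System.out.println(forKeytab("/tmp/test.keytab", "client@EXAMPLE.COM"));
  }
}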

[07/50] [abbrv] hadoop git commit: HDFS-7498. Simplify the logic in INodesInPath. Contributed by Jing Zhao.

Posted by ka...@apache.org.
HDFS-7498. Simplify the logic in INodesInPath. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5776a41d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5776a41d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5776a41d

Branch: refs/heads/YARN-2139
Commit: 5776a41da08af653206bb94d7c76c9c4dcce059a
Parents: d08fc9a
Author: Jing Zhao <ji...@apache.org>
Authored: Tue Dec 9 11:37:39 2014 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Tue Dec 9 11:37:39 2014 -0800

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/fs/Path.java    |   1 -
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 +
 .../java/org/apache/hadoop/hdfs/DFSUtil.java    |  15 +-
 .../server/namenode/EncryptionZoneManager.java  |  10 +-
 .../hdfs/server/namenode/FSDirConcatOp.java     |   5 +-
 .../hdfs/server/namenode/FSDirMkdirOp.java      |  42 ++--
 .../hdfs/server/namenode/FSDirRenameOp.java     |  51 ++--
 .../hdfs/server/namenode/FSDirSnapshotOp.java   |   2 +-
 .../server/namenode/FSDirStatAndListingOp.java  |  10 +-
 .../hdfs/server/namenode/FSDirectory.java       |  95 ++++----
 .../hdfs/server/namenode/FSEditLogLoader.java   |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java      |   8 +-
 .../server/namenode/FSPermissionChecker.java    |  51 ++--
 .../hdfs/server/namenode/INodesInPath.java      | 240 +++++++++----------
 .../server/namenode/TestSnapshotPathINodes.java | 134 +++++------
 15 files changed, 320 insertions(+), 350 deletions(-)
----------------------------------------------------------------------
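Before the per-file diffs, a rough sketch of the direction this change takes (an editorial illustration, not the real class): INodesInPath becomes an immutable holder of the resolved inodes, exposing length(), an index accessor where negative indices count from the end, a read-only list view, and a replace() that returns a fresh instance instead of mutating the array in place. ResolvedPath below is a hypothetical stand-in that uses plain Strings.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Simplified, hypothetical stand-in for the immutable INodesInPath shape.
final class ResolvedPath {
  private final String[] nodes;

  ResolvedPath(String[] nodes) {
    this.nodes = nodes.clone();   // defensive copy on construction
  }

  int length() {
    return nodes.length;
  }

  // Negative indices count from the end, e.g. getNode(-1) is the last node.
  String getNode(int i) {
    return nodes[i >= 0 ? i : nodes.length + i];
  }

  List<String> readOnlyNodes() {
    return Collections.unmodifiableList(Arrays.asList(nodes));
  }

  // Replacing an element returns a new instance; the original is untouched.
  ResolvedPath replace(int pos, String node) {
    String[] copy = nodes.clone();
    copy[pos] = node;
    return new ResolvedPath(copy);
  }

  public static void main(String[] args) {
    ResolvedPath p = new ResolvedPath(new String[] {"", "dir", null});
    ResolvedPath q = p.replace(2, "file");
    System.out.println(p.getNode(-1) + " vs " + q.getNode(-1));   // prints: null vs file
  }
}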


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5776a41d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
index 54ddeda..caeb7a1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
@@ -60,7 +60,6 @@ public class Path implements Comparable {
 
   /**
    * Pathnames with scheme and relative path are illegal.
-   * @param path to be checked
    */
   void checkNotSchemeWithRelative() {
     if (toUri().isAbsolute() && !isUriPathAbsolute()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5776a41d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 626d90a..9398429 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -447,6 +447,8 @@ Release 2.7.0 - UNRELEASED
     HDFS-7486. Consolidate XAttr-related implementation into a single class.
     (wheat9)
 
+    HDFS-7498. Simplify the logic in INodesInPath. (jing9)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5776a41d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index f1bfcb4..8b3f512 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -341,15 +341,20 @@ public class DFSUtil {
   /**
    * Given a list of path components returns a path as a UTF8 String
    */
-  public static String byteArray2PathString(byte[][] pathComponents) {
+  public static String byteArray2PathString(byte[][] pathComponents,
+      int offset, int length) {
     if (pathComponents.length == 0) {
       return "";
-    } else if (pathComponents.length == 1
+    }
+    Preconditions.checkArgument(offset >= 0 && offset < pathComponents.length);
+    Preconditions.checkArgument(length >= 0 && offset + length <=
+        pathComponents.length);
+    if (pathComponents.length == 1
         && (pathComponents[0] == null || pathComponents[0].length == 0)) {
       return Path.SEPARATOR;
     }
     StringBuilder result = new StringBuilder();
-    for (int i = 0; i < pathComponents.length; i++) {
+    for (int i = offset; i < offset + length; i++) {
       result.append(new String(pathComponents[i], Charsets.UTF_8));
       if (i < pathComponents.length - 1) {
         result.append(Path.SEPARATOR_CHAR);
@@ -358,6 +363,10 @@ public class DFSUtil {
     return result.toString();
   }
 
+  public static String byteArray2PathString(byte[][] pathComponents) {
+    return byteArray2PathString(pathComponents, 0, pathComponents.length);
+  }
+
   /**
    * Converts a list of path components into a path using Path.SEPARATOR.
    * 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5776a41d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index faab1f0..5c4f39d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -199,9 +199,9 @@ public class EncryptionZoneManager {
   private EncryptionZoneInt getEncryptionZoneForPath(INodesInPath iip) {
     assert dir.hasReadLock();
     Preconditions.checkNotNull(iip);
-    final INode[] inodes = iip.getINodes();
-    for (int i = inodes.length - 1; i >= 0; i--) {
-      final INode inode = inodes[i];
+    List<INode> inodes = iip.getReadOnlyINodes();
+    for (int i = inodes.size() - 1; i >= 0; i--) {
+      final INode inode = inodes.get(i);
       if (inode != null) {
         final EncryptionZoneInt ezi = encryptionZones.get(inode.getId());
         if (ezi != null) {
@@ -259,9 +259,7 @@ public class EncryptionZoneManager {
       }
     }
 
-    if (srcInEZ || dstInEZ) {
-      Preconditions.checkState(srcEZI != null, "couldn't find src EZ?");
-      Preconditions.checkState(dstEZI != null, "couldn't find dst EZ?");
+    if (srcInEZ) {
       if (srcEZI != dstEZI) {
         final String srcEZPath = getFullPathName(srcEZI);
         final String dstEZPath = getFullPathName(dstEZI);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5776a41d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index c2e0f08..f7e57be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -187,9 +187,8 @@ class FSDirConcatOp {
     // do the move
 
     final INodesInPath trgIIP = fsd.getINodesInPath4Write(target, true);
-    final INode[] trgINodes = trgIIP.getINodes();
     final INodeFile trgInode = trgIIP.getLastINode().asFile();
-    INodeDirectory trgParent = trgINodes[trgINodes.length-2].asDirectory();
+    INodeDirectory trgParent = trgIIP.getINode(-2).asDirectory();
     final int trgLatestSnapshot = trgIIP.getLatestSnapshotId();
 
     final INodeFile [] allSrcInodes = new INodeFile[srcs.length];
@@ -229,6 +228,6 @@ class FSDirConcatOp {
     trgInode.setModificationTime(timestamp, trgLatestSnapshot);
     trgParent.updateModificationTime(timestamp, trgLatestSnapshot);
     // update quota on the parent directory ('count' files removed, 0 space)
-    FSDirectory.unprotectedUpdateCount(trgIIP, trgINodes.length - 1, -count, 0);
+    FSDirectory.unprotectedUpdateCount(trgIIP, trgIIP.length() - 1, -count, 0);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5776a41d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index af9e925..c8c5cb2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -85,12 +85,11 @@ class FSDirMkdirOp {
       throws QuotaExceededException, UnresolvedLinkException, AclException {
     assert fsd.hasWriteLock();
     byte[][] components = INode.getPathComponents(src);
-    INodesInPath iip = fsd.getExistingPathINodes(components);
-    INode[] inodes = iip.getINodes();
-    final int pos = inodes.length - 1;
-    unprotectedMkdir(fsd, inodeId, iip, pos, components[pos], permissions,
-        aclEntries, timestamp);
-    return inodes[pos];
+    final INodesInPath iip = fsd.getExistingPathINodes(components);
+    final int pos = iip.length() - 1;
+    final INodesInPath newiip = unprotectedMkdir(fsd, inodeId, iip, pos,
+        components[pos], permissions, aclEntries, timestamp);
+    return newiip.getINode(pos);
   }
 
   /**
@@ -129,17 +128,17 @@ class FSDirMkdirOp {
         throw new SnapshotAccessControlException(
                 "Modification on RO snapshot is disallowed");
       }
-      INode[] inodes = iip.getINodes();
-
+      final int length = iip.length();
       // find the index of the first null in inodes[]
       StringBuilder pathbuilder = new StringBuilder();
       int i = 1;
-      for(; i < inodes.length && inodes[i] != null; i++) {
+      INode curNode;
+      for(; i < length && (curNode = iip.getINode(i)) != null; i++) {
         pathbuilder.append(Path.SEPARATOR).append(names[i]);
-        if (!inodes[i].isDirectory()) {
+        if (!curNode.isDirectory()) {
           throw new FileAlreadyExistsException(
                   "Parent path is not a directory: "
-                  + pathbuilder + " "+inodes[i].getLocalName());
+                  + pathbuilder + " " + curNode.getLocalName());
         }
       }
 
@@ -152,8 +151,8 @@ class FSDirMkdirOp {
         // if inheriting (ie. creating a file or symlink), use the parent dir,
         // else the supplied permissions
         // NOTE: the permissions of the auto-created directories violate posix
-        FsPermission parentFsPerm = inheritPermission
-                ? inodes[i-1].getFsPermission() : permissions.getPermission();
+        FsPermission parentFsPerm = inheritPermission ?
+            iip.getINode(i-1).getFsPermission() : permissions.getPermission();
 
         // ensure that the permissions allow user write+execute
         if (!parentFsPerm.getUserAction().implies(FsAction.WRITE_EXECUTE)) {
@@ -176,11 +175,12 @@ class FSDirMkdirOp {
       }
 
       // create directories beginning from the first null index
-      for(; i < inodes.length; i++) {
+      for(; i < length; i++) {
         pathbuilder.append(Path.SEPARATOR).append(names[i]);
-        unprotectedMkdir(fsd, fsd.allocateNewInodeId(), iip, i, components[i],
-            (i < lastInodeIndex) ? parentPermissions : permissions, null, now);
-        if (inodes[i] == null) {
+        iip = unprotectedMkdir(fsd, fsd.allocateNewInodeId(), iip, i,
+            components[i], (i < lastInodeIndex) ? parentPermissions :
+                permissions, null, now);
+        if (iip.getINode(i) == null) {
           return false;
         }
         // Directory creation also count towards FilesCreated
@@ -188,7 +188,7 @@ class FSDirMkdirOp {
         NameNode.getNameNodeMetrics().incrFilesCreated();
 
         final String cur = pathbuilder.toString();
-        fsd.getEditLog().logMkDir(cur, inodes[i]);
+        fsd.getEditLog().logMkDir(cur, iip.getINode(i));
         if(NameNode.stateChangeLog.isDebugEnabled()) {
           NameNode.stateChangeLog.debug(
                   "mkdirs: created directory " + cur);
@@ -219,7 +219,7 @@ class FSDirMkdirOp {
    * The parent path to the directory is at [0, pos-1].
    * All ancestors exist. Newly created one stored at index pos.
    */
-  private static void unprotectedMkdir(
+  private static INodesInPath unprotectedMkdir(
       FSDirectory fsd, long inodeId, INodesInPath inodesInPath, int pos,
       byte[] name, PermissionStatus permission, List<AclEntry> aclEntries,
       long timestamp)
@@ -231,7 +231,9 @@ class FSDirMkdirOp {
       if (aclEntries != null) {
         AclStorage.updateINodeAcl(dir, aclEntries, Snapshot.CURRENT_STATE_ID);
       }
-      inodesInPath.setINode(pos, dir);
+      return INodesInPath.replace(inodesInPath, pos, dir);
+    } else {
+      return inodesInPath;
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5776a41d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 9f3983a..511de7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
@@ -42,6 +41,8 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 
+import static org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
+import static org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
 import static org.apache.hadoop.util.Time.now;
 
 class FSDirRenameOp {
@@ -77,44 +78,40 @@ class FSDirRenameOp {
    * Verify quota for rename operation where srcInodes[srcInodes.length-1] moves
    * dstInodes[dstInodes.length-1]
    */
-  static void verifyQuotaForRename(FSDirectory fsd,
-      INode[] src, INode[] dst)
-      throws QuotaExceededException {
+  private static void verifyQuotaForRename(FSDirectory fsd, INodesInPath src,
+      INodesInPath dst) throws QuotaExceededException {
     if (!fsd.getFSNamesystem().isImageLoaded() || fsd.shouldSkipQuotaChecks()) {
       // Do not check quota if edits log is still being processed
       return;
     }
     int i = 0;
-    while(src[i] == dst[i]) { i++; }
+    while(src.getINode(i) == dst.getINode(i)) { i++; }
     // src[i - 1] is the last common ancestor.
 
-    final Quota.Counts delta = src[src.length - 1].computeQuotaUsage();
+    final Quota.Counts delta = src.getLastINode().computeQuotaUsage();
 
     // Reduce the required quota by dst that is being removed
-    final int dstIndex = dst.length - 1;
-    if (dst[dstIndex] != null) {
-      delta.subtract(dst[dstIndex].computeQuotaUsage());
+    final INode dstINode = dst.getLastINode();
+    if (dstINode != null) {
+      delta.subtract(dstINode.computeQuotaUsage());
     }
-    FSDirectory.verifyQuota(dst, dstIndex, delta.get(Quota.NAMESPACE),
-        delta.get(Quota.DISKSPACE), src[i - 1]);
+    FSDirectory.verifyQuota(dst, dst.length() - 1, delta.get(Quota.NAMESPACE),
+        delta.get(Quota.DISKSPACE), src.getINode(i - 1));
   }
 
   /**
    * Checks file system limits (max component length and max directory items)
    * during a rename operation.
    */
-  static void verifyFsLimitsForRename(FSDirectory fsd,
-      INodesInPath srcIIP,
+  static void verifyFsLimitsForRename(FSDirectory fsd, INodesInPath srcIIP,
       INodesInPath dstIIP)
-      throws FSLimitException.PathComponentTooLongException,
-          FSLimitException.MaxDirectoryItemsExceededException {
+      throws PathComponentTooLongException, MaxDirectoryItemsExceededException {
     byte[] dstChildName = dstIIP.getLastLocalName();
-    INode[] dstInodes = dstIIP.getINodes();
-    int pos = dstInodes.length - 1;
-    fsd.verifyMaxComponentLength(dstChildName, dstInodes, pos);
+    final String parentPath = dstIIP.getParentPath();
+    fsd.verifyMaxComponentLength(dstChildName, parentPath);
     // Do not enforce max directory items if renaming within same directory.
     if (srcIIP.getINode(-2) != dstIIP.getINode(-2)) {
-      fsd.verifyMaxDirItems(dstInodes, pos);
+      fsd.verifyMaxDirItems(dstIIP.getINode(-2).asDirectory(), parentPath);
     }
   }
 
@@ -176,7 +173,7 @@ class FSDirRenameOp {
     fsd.ezManager.checkMoveValidity(srcIIP, dstIIP, src);
     // Ensure dst has quota to accommodate rename
     verifyFsLimitsForRename(fsd, srcIIP, dstIIP);
-    verifyQuotaForRename(fsd, srcIIP.getINodes(), dstIIP.getINodes());
+    verifyQuotaForRename(fsd, srcIIP, dstIIP);
 
     RenameOperation tx = new RenameOperation(fsd, src, dst, srcIIP, dstIIP);
 
@@ -184,7 +181,7 @@ class FSDirRenameOp {
 
     try {
       // remove src
-      final long removedSrc = fsd.removeLastINode(srcIIP);
+      final long removedSrc = fsd.removeLastINode(tx.srcIIP);
       if (removedSrc == -1) {
         NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
             + "failed to rename " + src + " to " + dst + " because the source" +
@@ -326,7 +323,7 @@ class FSDirRenameOp {
     validateDestination(src, dst, srcInode);
 
     INodesInPath dstIIP = fsd.getINodesInPath4Write(dst, false);
-    if (dstIIP.getINodes().length == 1) {
+    if (dstIIP.length() == 1) {
       error = "rename destination cannot be the root";
       NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " +
           error);
@@ -357,12 +354,12 @@ class FSDirRenameOp {
 
     // Ensure dst has quota to accommodate rename
     verifyFsLimitsForRename(fsd, srcIIP, dstIIP);
-    verifyQuotaForRename(fsd, srcIIP.getINodes(), dstIIP.getINodes());
+    verifyQuotaForRename(fsd, srcIIP, dstIIP);
 
     RenameOperation tx = new RenameOperation(fsd, src, dst, srcIIP, dstIIP);
 
     boolean undoRemoveSrc = true;
-    final long removedSrc = fsd.removeLastINode(srcIIP);
+    final long removedSrc = fsd.removeLastINode(tx.srcIIP);
     if (removedSrc == -1) {
       error = "Failed to rename " + src + " to " + dst +
           " because the source can not be removed";
@@ -594,7 +591,7 @@ class FSDirRenameOp {
           + error);
       throw new FileNotFoundException(error);
     }
-    if (srcIIP.getINodes().length == 1) {
+    if (srcIIP.length() == 1) {
       error = "rename source cannot be the root";
       NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
           + error);
@@ -624,7 +621,6 @@ class FSDirRenameOp {
                     INodesInPath srcIIP, INodesInPath dstIIP)
         throws QuotaExceededException {
       this.fsd = fsd;
-      this.srcIIP = srcIIP;
       this.dstIIP = dstIIP;
       this.src = src;
       this.dst = dst;
@@ -652,7 +648,7 @@ class FSDirRenameOp {
                 srcChild, srcIIP.getLatestSnapshotId());
         withCount = (INodeReference.WithCount) withName.getReferredINode();
         srcChild = withName;
-        srcIIP.setLastINode(srcChild);
+        srcIIP = INodesInPath.replace(srcIIP, srcIIP.length() - 1, srcChild);
         // get the counts before rename
         withCount.getReferredINode().computeQuotaUsage(oldSrcCounts, true);
       } else if (srcChildIsReference) {
@@ -662,6 +658,7 @@ class FSDirRenameOp {
       } else {
         withCount = null;
       }
+      this.srcIIP = srcIIP;
     }
 
     boolean addSourceToDestination() {

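The quota check above walks both resolved paths in lockstep to find the last ancestor they share; since source and destination both resolve from the same root, and this class rejects renaming the root itself, the scan finds a common prefix before running off either path. A small standalone sketch of that prefix scan; CommonAncestorExample is a hypothetical name, and the real code compares INode references rather than strings.

import java.util.Arrays;
import java.util.List;

public class CommonAncestorExample {
  // Index of the last element shared by two resolved paths that start at the
  // same root; a bounds check is added here since these are arbitrary lists.
  static int lastCommonIndex(List<String> src, List<String> dst) {
    int i = 0;
    int limit = Math.min(src.size(), dst.size());
    while (i < limit && src.get(i).equals(dst.get(i))) {
      i++;
    }
    return i - 1;   // the element just before the first mismatch
  }

  public static void main(String[] args) {
    List<String> src = Arrays.asList("/", "a", "b", "x");
    List<String> dst = Arrays.asList("/", "a", "c");
    System.out.println(lastCommonIndex(src, dst));   // prints 1, i.e. component "a"
  }
}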
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5776a41d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
index f295e06..ea7dc24 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
@@ -45,7 +45,7 @@ class FSDirSnapshotOp {
     }
     final byte[] bytes = DFSUtil.string2Bytes(snapshotName);
     fsd.verifyINodeName(bytes);
-    fsd.verifyMaxComponentLength(bytes, path, 0);
+    fsd.verifyMaxComponentLength(bytes, path);
   }
 
   /** Allow snapshot on a directory. */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5776a41d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index a8c3c16..2e7ed6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -122,9 +122,7 @@ class FSDirStatAndListingOp {
     if (fsd.isPermissionEnabled()) {
       fsd.checkTraverse(pc, iip);
     }
-    INode[] inodes = iip.getINodes();
-    return !INodeFile.valueOf(inodes[inodes.length - 1],
-        src).isUnderConstruction();
+    return !INodeFile.valueOf(iip.getLastINode(), src).isUnderConstruction();
   }
 
   static ContentSummary getContentSummary(
@@ -167,9 +165,8 @@ class FSDirStatAndListingOp {
         return getSnapshotsListing(fsd, srcs, startAfter);
       }
       final INodesInPath inodesInPath = fsd.getINodesInPath(srcs, true);
-      final INode[] inodes = inodesInPath.getINodes();
       final int snapshot = inodesInPath.getPathSnapshotId();
-      final INode targetNode = inodes[inodes.length - 1];
+      final INode targetNode = inodesInPath.getLastINode();
       if (targetNode == null)
         return null;
       byte parentStoragePolicy = isSuperUser ?
@@ -278,8 +275,7 @@ class FSDirStatAndListingOp {
         return getFileInfo4DotSnapshot(fsd, srcs);
       }
       final INodesInPath inodesInPath = fsd.getINodesInPath(srcs, resolveLink);
-      final INode[] inodes = inodesInPath.getINodes();
-      final INode i = inodes[inodes.length - 1];
+      final INode i = inodesInPath.getLastINode();
       byte policyId = includeStoragePolicy && i != null && !i.isSymlink() ?
           i.getStoragePolicyID() : BlockStoragePolicySuite.ID_UNSPECIFIED;
       return i == null ? null : createFileStatus(fsd,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5776a41d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index e802627..81b0eb6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -642,8 +642,7 @@ public class FSDirectory implements Closeable {
    * @param path the file path
    * @return the block size of the file. 
    */
-  long getPreferredBlockSize(String path) throws UnresolvedLinkException,
-      FileNotFoundException, IOException {
+  long getPreferredBlockSize(String path) throws IOException {
     readLock();
     try {
       return INodeFile.valueOf(getNode(path, false), path
@@ -740,15 +739,13 @@ public class FSDirectory implements Closeable {
   
   private static boolean deleteAllowed(final INodesInPath iip,
       final String src) {
-    final INode[] inodes = iip.getINodes(); 
-    if (inodes == null || inodes.length == 0
-        || inodes[inodes.length - 1] == null) {
+    if (iip.length() < 1 || iip.getLastINode() == null) {
       if(NameNode.stateChangeLog.isDebugEnabled()) {
         NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
             + "failed to remove " + src + " because it does not exist");
       }
       return false;
-    } else if (inodes.length == 1) { // src is the root
+    } else if (iip.length() == 1) { // src is the root
       NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: "
           + "failed to remove " + src
           + " because the root is not allowed to be deleted");
@@ -763,8 +760,7 @@ public class FSDirectory implements Closeable {
   boolean isNonEmptyDirectory(INodesInPath inodesInPath) {
     readLock();
     try {
-      final INode[] inodes = inodesInPath.getINodes();
-      final INode inode = inodes[inodes.length - 1];
+      final INode inode = inodesInPath.getLastINode();
       if (inode == null || !inode.isDirectory()) {
         //not found or not a directory
         return false;
@@ -991,7 +987,7 @@ public class FSDirectory implements Closeable {
   
   private void updateCount(INodesInPath iip, long nsDelta, long dsDelta,
       boolean checkQuota) throws QuotaExceededException {
-    updateCount(iip, iip.getINodes().length - 1, nsDelta, dsDelta, checkQuota);
+    updateCount(iip, iip.length() - 1, nsDelta, dsDelta, checkQuota);
   }
 
   /** update count of each inode with quota
@@ -1011,12 +1007,11 @@ public class FSDirectory implements Closeable {
       //still initializing. do not check or update quotas.
       return;
     }
-    final INode[] inodes = iip.getINodes();
-    if (numOfINodes > inodes.length) {
-      numOfINodes = inodes.length;
+    if (numOfINodes > iip.length()) {
+      numOfINodes = iip.length();
     }
     if (checkQuota && !skipQuotaCheck) {
-      verifyQuota(inodes, numOfINodes, nsDelta, dsDelta, null);
+      verifyQuota(iip, numOfINodes, nsDelta, dsDelta, null);
     }
     unprotectedUpdateCount(iip, numOfINodes, nsDelta, dsDelta);
   }
@@ -1039,11 +1034,11 @@ public class FSDirectory implements Closeable {
    * updates quota without verification
    * callers responsibility is to make sure quota is not exceeded
    */
-  static void unprotectedUpdateCount(INodesInPath inodesInPath, int numOfINodes, long nsDelta, long dsDelta) {
-    final INode[] inodes = inodesInPath.getINodes();
+  static void unprotectedUpdateCount(INodesInPath inodesInPath,
+      int numOfINodes, long nsDelta, long dsDelta) {
     for(int i=0; i < numOfINodes; i++) {
-      if (inodes[i].isQuotaSet()) { // a directory with quota
-        inodes[i].asDirectory().getDirectoryWithQuotaFeature()
+      if (inodesInPath.getINode(i).isQuotaSet()) { // a directory with quota
+        inodesInPath.getINode(i).asDirectory().getDirectoryWithQuotaFeature()
             .addSpaceConsumed2Cache(nsDelta, dsDelta);
       }
     }
@@ -1105,14 +1100,15 @@ public class FSDirectory implements Closeable {
    * @param src The full path name of the child node.
    * @throws QuotaExceededException is thrown if it violates quota limit
    */
-  private boolean addINode(String src, INode child
-      ) throws QuotaExceededException, UnresolvedLinkException {
+  private boolean addINode(String src, INode child)
+      throws QuotaExceededException, UnresolvedLinkException {
     byte[][] components = INode.getPathComponents(src);
     child.setLocalName(components[components.length-1]);
     cacheName(child);
     writeLock();
     try {
-      return addLastINode(getExistingPathINodes(components), child, true);
+      final INodesInPath iip = getExistingPathINodes(components);
+      return addLastINode(iip, child, true);
     } finally {
       writeUnlock();
     }
@@ -1122,7 +1118,7 @@ public class FSDirectory implements Closeable {
    * Verify quota for adding or moving a new INode with required 
    * namespace and diskspace to a given position.
    *  
-   * @param inodes INodes corresponding to a path
+   * @param iip INodes corresponding to a path
    * @param pos position where a new INode will be added
    * @param nsDelta needed namespace
    * @param dsDelta needed diskspace
@@ -1131,7 +1127,7 @@ public class FSDirectory implements Closeable {
    *          Pass null if a node is not being moved.
    * @throws QuotaExceededException if quota limit is exceeded.
    */
-  static void verifyQuota(INode[] inodes, int pos, long nsDelta,
+  static void verifyQuota(INodesInPath iip, int pos, long nsDelta,
       long dsDelta, INode commonAncestor) throws QuotaExceededException {
     if (nsDelta <= 0 && dsDelta <= 0) {
       // if quota is being freed or not being consumed
@@ -1139,18 +1135,20 @@ public class FSDirectory implements Closeable {
     }
 
     // check existing components in the path
-    for(int i = (pos > inodes.length? inodes.length: pos) - 1; i >= 0; i--) {
-      if (commonAncestor == inodes[i]) {
+    for(int i = (pos > iip.length() ? iip.length(): pos) - 1; i >= 0; i--) {
+      if (commonAncestor == iip.getINode(i)) {
         // Stop checking for quota when common ancestor is reached
         return;
       }
       final DirectoryWithQuotaFeature q
-          = inodes[i].asDirectory().getDirectoryWithQuotaFeature();
+          = iip.getINode(i).asDirectory().getDirectoryWithQuotaFeature();
       if (q != null) { // a directory with quota
         try {
           q.verifyQuota(nsDelta, dsDelta);
         } catch (QuotaExceededException e) {
-          e.setPathName(getFullPathName(inodes, i));
+          List<INode> inodes = iip.getReadOnlyINodes();
+          final String path = getFullPathName(inodes.toArray(new INode[inodes.size()]), i);
+          e.setPathName(path);
           throw e;
         }
       }
@@ -1172,22 +1170,20 @@ public class FSDirectory implements Closeable {
    * Verify child's name for fs limit.
    *
    * @param childName byte[] containing new child name
-   * @param parentPath Object either INode[] or String containing parent path
-   * @param pos int position of new child in path
+   * @param parentPath String containing parent path
    * @throws PathComponentTooLongException child's name is too long.
    */
-  void verifyMaxComponentLength(byte[] childName, Object parentPath,
-      int pos) throws PathComponentTooLongException {
+  void verifyMaxComponentLength(byte[] childName, String parentPath)
+      throws PathComponentTooLongException {
     if (maxComponentLength == 0) {
       return;
     }
 
     final int length = childName.length;
     if (length > maxComponentLength) {
-      final String p = parentPath instanceof INode[]?
-          getFullPathName((INode[])parentPath, pos - 1): (String)parentPath;
       final PathComponentTooLongException e = new PathComponentTooLongException(
-          maxComponentLength, length, p, DFSUtil.bytes2String(childName));
+          maxComponentLength, length, parentPath,
+          DFSUtil.bytes2String(childName));
       if (namesystem.isImageLoaded()) {
         throw e;
       } else {
@@ -1200,20 +1196,16 @@ public class FSDirectory implements Closeable {
   /**
    * Verify children size for fs limit.
    *
-   * @param pathComponents INode[] containing full path of inodes to new child
-   * @param pos int position of new child in pathComponents
    * @throws MaxDirectoryItemsExceededException too many children.
    */
-  void verifyMaxDirItems(INode[] pathComponents, int pos)
+  void verifyMaxDirItems(INodeDirectory parent, String parentPath)
       throws MaxDirectoryItemsExceededException {
-
-    final INodeDirectory parent = pathComponents[pos-1].asDirectory();
     final int count = parent.getChildrenList(Snapshot.CURRENT_STATE_ID).size();
     if (count >= maxDirItems) {
       final MaxDirectoryItemsExceededException e
           = new MaxDirectoryItemsExceededException(maxDirItems, count);
       if (namesystem.isImageLoaded()) {
-        e.setPathName(getFullPathName(pathComponents, pos - 1));
+        e.setPathName(parentPath);
         throw e;
       } else {
         // Do not throw if edits log is still being processed
@@ -1227,9 +1219,9 @@ public class FSDirectory implements Closeable {
    * The same as {@link #addChild(INodesInPath, int, INode, boolean)}
    * with pos = length - 1.
    */
-  private boolean addLastINode(INodesInPath inodesInPath,
-      INode inode, boolean checkQuota) throws QuotaExceededException {
-    final int pos = inodesInPath.getINodes().length - 1;
+  private boolean addLastINode(INodesInPath inodesInPath, INode inode,
+      boolean checkQuota) throws QuotaExceededException {
+    final int pos = inodesInPath.length() - 1;
     return addChild(inodesInPath, pos, inode, checkQuota);
   }
 
@@ -1241,18 +1233,18 @@ public class FSDirectory implements Closeable {
    */
   boolean addChild(INodesInPath iip, int pos, INode child, boolean checkQuota)
       throws QuotaExceededException {
-    final INode[] inodes = iip.getINodes();
     // Disallow creation of /.reserved. This may be created when loading
     // editlog/fsimage during upgrade since /.reserved was a valid name in older
     // release. This may also be called when a user tries to create a file
     // or directory /.reserved.
-    if (pos == 1 && inodes[0] == rootDir && isReservedName(child)) {
+    if (pos == 1 && iip.getINode(0) == rootDir && isReservedName(child)) {
       throw new HadoopIllegalArgumentException(
           "File name \"" + child.getLocalName() + "\" is reserved and cannot "
               + "be created. If this is during upgrade change the name of the "
               + "existing file or directory to another name before upgrading "
               + "to the new release.");
     }
+    final INodeDirectory parent = iip.getINode(pos-1).asDirectory();
     // The filesystem limits are not really quotas, so this check may appear
     // odd. It's because a rename operation deletes the src, tries to add
     // to the dest, if that fails, re-adds the src from whence it came.
@@ -1260,8 +1252,9 @@ public class FSDirectory implements Closeable {
     // original location because a quota violation would cause the item
     // to go "poof".  The fs limits must be bypassed for the same reason.
     if (checkQuota) {
-      verifyMaxComponentLength(child.getLocalNameBytes(), inodes, pos);
-      verifyMaxDirItems(inodes, pos);
+      final String parentPath = iip.getPath(pos - 1);
+      verifyMaxComponentLength(child.getLocalNameBytes(), parentPath);
+      verifyMaxDirItems(parent, parentPath);
     }
     // always verify inode name
     verifyINodeName(child.getLocalNameBytes());
@@ -1270,7 +1263,6 @@ public class FSDirectory implements Closeable {
     updateCount(iip, pos,
         counts.get(Quota.NAMESPACE), counts.get(Quota.DISKSPACE), checkQuota);
     boolean isRename = (child.getParent() != null);
-    final INodeDirectory parent = inodes[pos-1].asDirectory();
     boolean added;
     try {
       added = parent.addChild(child, true, iip.getLatestSnapshotId());
@@ -1283,7 +1275,6 @@ public class FSDirectory implements Closeable {
       updateCountNoQuotaCheck(iip, pos,
           -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
     } else {
-      iip.setINode(pos - 1, child.getParent());
       if (!isRename) {
         AclStorage.copyINodeDefaultAcl(child);
       }
@@ -1320,7 +1311,7 @@ public class FSDirectory implements Closeable {
     
     if (!last.isInLatestSnapshot(latestSnapshot)) {
       final Quota.Counts counts = last.computeQuotaUsage();
-      updateCountNoQuotaCheck(iip, iip.getINodes().length - 1,
+      updateCountNoQuotaCheck(iip, iip.length() - 1,
           -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
 
       if (INodeReference.tryRemoveReference(last) > 0) {
@@ -1715,10 +1706,10 @@ public class FSDirectory implements Closeable {
 
   static INode resolveLastINode(String src, INodesInPath iip)
       throws FileNotFoundException {
-    INode[] inodes = iip.getINodes();
-    INode inode = inodes[inodes.length - 1];
-    if (inode == null)
+    INode inode = iip.getLastINode();
+    if (inode == null) {
       throw new FileNotFoundException("cannot find " + src);
+    }
     return inode;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5776a41d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index d12ae15..2721f85 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -343,9 +343,7 @@ public class FSEditLogLoader {
 
       // See if the file already exists (persistBlocks call)
       final INodesInPath iip = fsDir.getINodesInPath(path, true);
-      final INode[] inodes = iip.getINodes();
-      INodeFile oldFile = INodeFile.valueOf(
-          inodes[inodes.length - 1], path, true);
+      INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path, true);
       if (oldFile != null && addCloseOp.overwrite) {
         // This is OP_ADD with overwrite
         fsDir.unprotectedDelete(path, addCloseOp.mtime);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5776a41d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 2b530fa..30ac941 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1861,9 +1861,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
           doAccessTime = false;
         }
 
-        final INode[] inodes = iip.getINodes();
-        final INodeFile inode = INodeFile.valueOf(
-            inodes[inodes.length - 1], src);
+        final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
         if (isPermissionEnabled) {
           checkUnreadableBySuperuser(pc, inode, iip.getPathSnapshotId());
         }
@@ -8027,8 +8025,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       checkOperation(OperationCategory.READ);
       src = FSDirectory.resolvePath(src, pathComponents, dir);
       final INodesInPath iip = dir.getINodesInPath(src, true);
-      INode[] inodes = iip.getINodes();
-      if (inodes[inodes.length - 1] == null) {
+      INode inode = iip.getLastINode();
+      if (inode == null) {
         throw new FileNotFoundException("Path not found");
       }
       if (isPermissionEnabled) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5776a41d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index a0455dc..8de8c54 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Set;
 import java.util.Stack;
 
@@ -144,22 +145,25 @@ class FSPermissionChecker {
     // check if (parentAccess != null) && file exists, then check sb
     // If resolveLink, the check is performed on the link target.
     final int snapshotId = inodesInPath.getPathSnapshotId();
-    final INode[] inodes = inodesInPath.getINodes();
-    int ancestorIndex = inodes.length - 2;
-    for(; ancestorIndex >= 0 && inodes[ancestorIndex] == null;
-        ancestorIndex--);
-    checkTraverse(inodes, ancestorIndex, snapshotId);
+    final int length = inodesInPath.length();
+    final INode last = length > 0 ? inodesInPath.getLastINode() : null;
+    final INode parent = length > 1 ? inodesInPath.getINode(-2) : null;
+
+    checkTraverse(inodesInPath, snapshotId);
 
-    final INode last = inodes[inodes.length - 1];
     if (parentAccess != null && parentAccess.implies(FsAction.WRITE)
-        && inodes.length > 1 && last != null) {
-      checkStickyBit(inodes[inodes.length - 2], last, snapshotId);
+        && length > 1 && last != null) {
+      checkStickyBit(parent, last, snapshotId);
     }
-    if (ancestorAccess != null && inodes.length > 1) {
-      check(inodes, ancestorIndex, snapshotId, ancestorAccess);
+    if (ancestorAccess != null && length > 1) {
+      List<INode> inodes = inodesInPath.getReadOnlyINodes();
+      INode ancestor = null;
+      for (int i = inodes.size() - 2; i >= 0 && (ancestor = inodes.get(i)) ==
+          null; i--);
+      check(ancestor, snapshotId, ancestorAccess);
     }
-    if (parentAccess != null && inodes.length > 1) {
-      check(inodes, inodes.length - 2, snapshotId, parentAccess);
+    if (parentAccess != null && length > 1 && parent != null) {
+      check(parent, snapshotId, parentAccess);
     }
     if (access != null) {
       check(last, snapshotId, access);
@@ -184,10 +188,15 @@ class FSPermissionChecker {
   }
 
   /** Guarded by {@link FSNamesystem#readLock()} */
-  private void checkTraverse(INode[] inodes, int last, int snapshotId
-      ) throws AccessControlException {
-    for(int j = 0; j <= last; j++) {
-      check(inodes[j], snapshotId, FsAction.EXECUTE);
+  private void checkTraverse(INodesInPath iip, int snapshotId)
+      throws AccessControlException {
+    List<INode> inodes = iip.getReadOnlyINodes();
+    for (int i = 0; i < inodes.size() - 1; i++) {
+      INode inode = inodes.get(i);
+      if (inode == null) {
+        break;
+      }
+      check(inode, snapshotId, FsAction.EXECUTE);
     }
   }
 
@@ -215,14 +224,8 @@ class FSPermissionChecker {
   }
 
   /** Guarded by {@link FSNamesystem#readLock()} */
-  private void check(INode[] inodes, int i, int snapshotId, FsAction access
-      ) throws AccessControlException {
-    check(i >= 0? inodes[i]: null, snapshotId, access);
-  }
-
-  /** Guarded by {@link FSNamesystem#readLock()} */
-  private void check(INode inode, int snapshotId, FsAction access
-      ) throws AccessControlException {
+  private void check(INode inode, int snapshotId, FsAction access)
+      throws AccessControlException {
     if (inode == null) {
       return;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5776a41d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index 58f5f3d..1501fce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -18,6 +18,9 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.NoSuchElementException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -31,6 +34,9 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 
 import com.google.common.base.Preconditions;
 
+import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID;
+import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.ID_INTEGER_COMPARATOR;
+
 /**
  * Contains INodes information resolved from a given path.
  */
@@ -54,7 +60,6 @@ public class INodesInPath {
     }
     final byte[][] path = new byte[depth][];
     final INode[] inodes = new INode[depth];
-    final INodesInPath iip = new INodesInPath(path, depth);
     tmp = inode;
     index = depth;
     while (tmp != null) {
@@ -63,8 +68,7 @@ public class INodesInPath {
       inodes[index] = tmp;
       tmp = tmp.getParent();
     }
-    iip.setINodes(inodes);
-    return iip;
+    return new INodesInPath(inodes, path);
   }
 
   /**
@@ -134,30 +138,34 @@ public class INodesInPath {
    * @return the specified number of existing INodes in the path
    */
   static INodesInPath resolve(final INodeDirectory startingDir,
-      final byte[][] components, final int numOfINodes, 
+      final byte[][] components, final int numOfINodes,
       final boolean resolveLink) throws UnresolvedLinkException {
     Preconditions.checkArgument(startingDir.compareTo(components[0]) == 0);
 
     INode curNode = startingDir;
-    final INodesInPath existing = new INodesInPath(components, numOfINodes);
     int count = 0;
-    int index = numOfINodes - components.length;
-    if (index > 0) {
-      index = 0;
-    }
+    int index = numOfINodes <= components.length ?
+        numOfINodes - components.length : 0;
+    int inodeNum = 0;
+    int capacity = numOfINodes;
+    INode[] inodes = new INode[numOfINodes];
+    boolean isSnapshot = false;
+    int snapshotId = CURRENT_STATE_ID;
+
     while (count < components.length && curNode != null) {
       final boolean lastComp = (count == components.length - 1);      
       if (index >= 0) {
-        existing.addNode(curNode);
+        inodes[inodeNum++] = curNode;
       }
       final boolean isRef = curNode.isReference();
       final boolean isDir = curNode.isDirectory();
       final INodeDirectory dir = isDir? curNode.asDirectory(): null;  
       if (!isRef && isDir && dir.isWithSnapshot()) {
         //if the path is a non-snapshot path, update the latest snapshot.
-        if (!existing.isSnapshot()) {
-          existing.updateLatestSnapshotId(dir.getDirectoryWithSnapshotFeature()
-              .getLastSnapshotId());
+        if (!isSnapshot && shouldUpdateLatestId(
+            dir.getDirectoryWithSnapshotFeature().getLastSnapshotId(),
+            snapshotId)) {
+          snapshotId = dir.getDirectoryWithSnapshotFeature().getLastSnapshotId();
         }
       } else if (isRef && isDir && !lastComp) {
         // If the curNode is a reference node, need to check its dstSnapshot:
@@ -170,19 +178,18 @@ public class INodesInPath {
         // the latest snapshot if lastComp is true. In case of the operation is
         // a modification operation, we do a similar check in corresponding 
         // recordModification method.
-        if (!existing.isSnapshot()) {
+        if (!isSnapshot) {
           int dstSnapshotId = curNode.asReference().getDstSnapshotId();
-          int latest = existing.getLatestSnapshotId();
-          if (latest == Snapshot.CURRENT_STATE_ID || // no snapshot in dst tree of rename
-              (dstSnapshotId != Snapshot.CURRENT_STATE_ID && 
-                dstSnapshotId >= latest)) { // the above scenario 
-            int lastSnapshot = Snapshot.CURRENT_STATE_ID;
+          if (snapshotId == CURRENT_STATE_ID || // no snapshot in dst tree of rename
+              (dstSnapshotId != CURRENT_STATE_ID &&
+               dstSnapshotId >= snapshotId)) { // the above scenario
+            int lastSnapshot = CURRENT_STATE_ID;
             DirectoryWithSnapshotFeature sf;
             if (curNode.isDirectory() && 
                 (sf = curNode.asDirectory().getDirectoryWithSnapshotFeature()) != null) {
               lastSnapshot = sf.getLastSnapshotId();
             }
-            existing.setSnapshotId(lastSnapshot);
+            snapshotId = lastSnapshot;
           }
         }
       }
@@ -211,9 +218,9 @@ public class INodesInPath {
         // skip the ".snapshot" in components
         count++;
         index++;
-        existing.isSnapshot = true;
+        isSnapshot = true;
         if (index >= 0) { // decrease the capacity by 1 to account for .snapshot
-          existing.capacity--;
+          capacity--;
         }
         // check if ".snapshot" is the last element of components
         if (count == components.length - 1) {
@@ -222,65 +229,82 @@ public class INodesInPath {
         // Resolve snapshot root
         final Snapshot s = dir.getSnapshot(components[count + 1]);
         if (s == null) {
-          //snapshot not found
-          curNode = null;
+          curNode = null; // snapshot not found
         } else {
           curNode = s.getRoot();
-          existing.setSnapshotId(s.getId());
-        }
-        if (index >= -1) {
-          existing.snapshotRootIndex = existing.numNonNull;
+          snapshotId = s.getId();
         }
       } else {
         // normal case, and also for resolving file/dir under snapshot root
-        curNode = dir.getChild(childName, existing.getPathSnapshotId());
+        curNode = dir.getChild(childName,
+            isSnapshot ? snapshotId : CURRENT_STATE_ID);
       }
       count++;
       index++;
     }
-    return existing;
+    if (isSnapshot && capacity < numOfINodes &&
+        !isDotSnapshotDir(components[components.length - 1])) {
+      // for snapshot path shrink the inode array. however, for path ending with
+      // .snapshot, still keep last the null inode in the array
+      INode[] newNodes = new INode[capacity];
+      System.arraycopy(inodes, 0, newNodes, 0, capacity);
+      inodes = newNodes;
+    }
+    return new INodesInPath(inodes, components, isSnapshot, snapshotId);
+  }
+
+  private static boolean shouldUpdateLatestId(int sid, int snapshotId) {
+    return snapshotId == CURRENT_STATE_ID || (sid != CURRENT_STATE_ID &&
+        ID_INTEGER_COMPARATOR.compare(snapshotId, sid) < 0);
   }
 
-  private final byte[][] path;
-  /**
-   * Array with the specified number of INodes resolved for a given path.
-   */
-  private INode[] inodes;
   /**
-   * Indicate the number of non-null elements in {@link #inodes}
+   * Replace an inode of the given INodesInPath in the given position. We do a
+   * deep copy of the INode array.
+   * @param pos the position of the replacement
+   * @param inode the new inode
+   * @return a new INodesInPath instance
    */
-  private int numNonNull;
+  public static INodesInPath replace(INodesInPath iip, int pos, INode inode) {
+    Preconditions.checkArgument(iip.length() > 0 && pos > 0 // no for root
+        && pos < iip.length());
+    if (iip.getINode(pos) == null) {
+      Preconditions.checkState(iip.getINode(pos - 1) != null);
+    }
+    INode[] inodes = new INode[iip.inodes.length];
+    System.arraycopy(iip.inodes, 0, inodes, 0, inodes.length);
+    inodes[pos] = inode;
+    return new INodesInPath(inodes, iip.path, iip.isSnapshot, iip.snapshotId);
+  }
+
+  private final byte[][] path;
   /**
-   * The path for a snapshot file/dir contains the .snapshot thus makes the
-   * length of the path components larger the number of inodes. We use
-   * the capacity to control this special case.
+   * Array with the specified number of INodes resolved for a given path.
    */
-  private int capacity;
+  private final INode[] inodes;
   /**
    * true if this path corresponds to a snapshot
    */
-  private boolean isSnapshot;
-  /**
-   * index of the {@link Snapshot.Root} node in the inodes array,
-   * -1 for non-snapshot paths.
-   */
-  private int snapshotRootIndex;
+  private final boolean isSnapshot;
   /**
    * For snapshot paths, it is the id of the snapshot; or 
    * {@link Snapshot#CURRENT_STATE_ID} if the snapshot does not exist. For 
    * non-snapshot paths, it is the id of the latest snapshot found in the path;
    * or {@link Snapshot#CURRENT_STATE_ID} if no snapshot is found.
    */
-  private int snapshotId = Snapshot.CURRENT_STATE_ID; 
+  private final int snapshotId;
 
-  private INodesInPath(byte[][] path, int number) {
+  private INodesInPath(INode[] inodes, byte[][] path, boolean isSnapshot,
+      int snapshotId) {
+    Preconditions.checkArgument(inodes != null && path != null);
+    this.inodes = inodes;
     this.path = path;
-    assert (number >= 0);
-    inodes = new INode[number];
-    capacity = number;
-    numNonNull = 0;
-    isSnapshot = false;
-    snapshotRootIndex = -1;
+    this.isSnapshot = isSnapshot;
+    this.snapshotId = snapshotId;
+  }
+
+  private INodesInPath(INode[] inodes, byte[][] path) {
+    this(inodes, path, false, CURRENT_STATE_ID);
   }
 
   /**
@@ -296,49 +320,28 @@ public class INodesInPath {
    * For non-snapshot paths, return {@link Snapshot#CURRENT_STATE_ID}.
    */
   public int getPathSnapshotId() {
-    return isSnapshot ? snapshotId : Snapshot.CURRENT_STATE_ID;
-  }
-
-  private void setSnapshotId(int sid) {
-    snapshotId = sid;
+    return isSnapshot ? snapshotId : CURRENT_STATE_ID;
   }
-  
-  private void updateLatestSnapshotId(int sid) {
-    if (snapshotId == Snapshot.CURRENT_STATE_ID
-        || (sid != Snapshot.CURRENT_STATE_ID && Snapshot.ID_INTEGER_COMPARATOR
-            .compare(snapshotId, sid) < 0)) {
-      snapshotId = sid;
-    }
-  }
-
-  /**
-   * @return a new array of inodes excluding the null elements introduced by
-   * snapshot path elements. E.g., after resolving path "/dir/.snapshot",
-   * {@link #inodes} is {/, dir, null}, while the returned array only contains
-   * inodes of "/" and "dir". Note the length of the returned array is always
-   * equal to {@link #capacity}.
-   */
-  INode[] getINodes() {
-    if (capacity == inodes.length) {
-      return inodes;
-    }
 
-    INode[] newNodes = new INode[capacity];
-    System.arraycopy(inodes, 0, newNodes, 0, capacity);
-    return newNodes;
-  }
-  
   /**
    * @return the i-th inode if i >= 0;
    *         otherwise, i < 0, return the (length + i)-th inode.
    */
   public INode getINode(int i) {
-    return inodes[i >= 0? i: inodes.length + i];
+    if (inodes == null || inodes.length == 0) {
+      throw new NoSuchElementException("inodes is null or empty");
+    }
+    int index = i >= 0 ? i : inodes.length + i;
+    if (index < inodes.length && index >= 0) {
+      return inodes[index];
+    } else {
+      throw new NoSuchElementException("inodes.length == " + inodes.length);
+    }
   }
   
   /** @return the last inode. */
   public INode getLastINode() {
-    return inodes[inodes.length - 1];
+    return getINode(-1);
   }
 
   byte[] getLastLocalName() {
@@ -350,48 +353,29 @@ public class INodesInPath {
     return DFSUtil.byteArray2PathString(path);
   }
 
-  /**
-   * @return index of the {@link Snapshot.Root} node in the inodes array,
-   * -1 for non-snapshot paths.
-   */
-  int getSnapshotRootIndex() {
-    return this.snapshotRootIndex;
-  }
-  
-  /**
-   * @return isSnapshot true for a snapshot path
-   */
-  boolean isSnapshot() {
-    return this.isSnapshot;
-  }
-  
-  /**
-   * Add an INode at the end of the array
-   */
-  private void addNode(INode node) {
-    inodes[numNonNull++] = node;
+  public String getParentPath() {
+    return getPath(path.length - 1);
   }
 
-  private void setINodes(INode inodes[]) {
-    this.inodes = inodes;
-    this.numNonNull = this.inodes.length;
+  public String getPath(int pos) {
+    return DFSUtil.byteArray2PathString(path, 0, pos);
   }
-  
-  void setINode(int i, INode inode) {
-    inodes[i >= 0? i: inodes.length + i] = inode;
+
+  public int length() {
+    return inodes.length;
   }
-  
-  void setLastINode(INode last) {
-    inodes[inodes.length - 1] = last;
+
+  public List<INode> getReadOnlyINodes() {
+    return Collections.unmodifiableList(Arrays.asList(inodes));
   }
-  
+
   /**
-   * @return The number of non-null elements
+   * @return isSnapshot true for a snapshot path
    */
-  int getNumNonNull() {
-    return numNonNull;
+  boolean isSnapshot() {
+    return this.isSnapshot;
   }
-  
+
   private static String toString(INode inode) {
     return inode == null? null: inode.getLocalName();
   }
@@ -420,20 +404,16 @@ public class INodesInPath {
       }
       b.append("], length=").append(inodes.length);
     }
-    b.append("\n  numNonNull = ").append(numNonNull)
-     .append("\n  capacity   = ").append(capacity)
-     .append("\n  isSnapshot        = ").append(isSnapshot)
-     .append("\n  snapshotRootIndex = ").append(snapshotRootIndex)
+    b.append("\n  isSnapshot        = ").append(isSnapshot)
      .append("\n  snapshotId        = ").append(snapshotId);
     return b.toString();
   }
 
   void validate() {
-    // check parent up to snapshotRootIndex or numNonNull
-    final int n = snapshotRootIndex >= 0? snapshotRootIndex + 1: numNonNull;  
     // check parent links for the inodes along the resolved path
     int i = 0;
     if (inodes[i] != null) {
-      for(i++; i < n && inodes[i] != null; i++) {
+      for(i++; i < inodes.length && inodes[i] != null; i++) {
         final INodeDirectory parent_i = inodes[i].getParent();
         final INodeDirectory parent_i_1 = inodes[i-1].getParent();
         if (parent_i != inodes[i-1] &&
@@ -447,8 +427,8 @@ public class INodesInPath {
         }
       }
     }
-    if (i != n) {
-      throw new AssertionError("i = " + i + " != " + n
+    if (i != inodes.length) {
+      throw new AssertionError("i = " + i + " != " + inodes.length
           + ", this=" + toString(false));
     }
   }
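
A minimal standalone sketch (not Hadoop code; class and array names are
illustrative) of the negative-index convention used by the new getINode(int)
above: a non-negative index counts from the front of the resolved path, a
negative index counts from the back, and out-of-range lookups fail fast with
NoSuchElementException, so getLastINode() is simply getINode(-1).

import java.util.NoSuchElementException;

public class NegativeIndexSketch {
  // mirrors the lookup rule shown in the patch, applied to a plain array
  static String get(String[] items, int i) {
    if (items == null || items.length == 0) {
      throw new NoSuchElementException("array is null or empty");
    }
    int index = i >= 0 ? i : items.length + i;
    if (index < 0 || index >= items.length) {
      throw new NoSuchElementException("length == " + items.length);
    }
    return items[index];
  }

  public static void main(String[] args) {
    String[] path = {"/", "dir", "file1"};
    System.out.println(get(path, 0));   // "/"
    System.out.println(get(path, -1));  // "file1", i.e. the last element
    System.out.println(get(path, -2));  // "dir"
  }
}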

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5776a41d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
index d1a2377..354bff1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.FileNotFoundException;
+import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -104,19 +105,18 @@ public class TestSnapshotPathINodes {
     }
   }
   
-  static Snapshot getSnapshot(INodesInPath inodesInPath, String name) {
+  static Snapshot getSnapshot(INodesInPath inodesInPath, String name,
+      int index) {
     if (name == null) {
       return null;
     }
-    final int i = inodesInPath.getSnapshotRootIndex() - 1;
-    final INode inode = inodesInPath.getINodes()[i];
+    final INode inode = inodesInPath.getINode(index - 1);
     return inode.asDirectory().getSnapshot(DFSUtil.string2Bytes(name));
   }
 
   static void assertSnapshot(INodesInPath inodesInPath, boolean isSnapshot,
       final Snapshot snapshot, int index) {
     assertEquals(isSnapshot, inodesInPath.isSnapshot());
-    assertEquals(index, inodesInPath.getSnapshotRootIndex());
     assertEquals(Snapshot.getSnapshotId(isSnapshot ? snapshot : null),
         inodesInPath.getPathSnapshotId());
     if (!isSnapshot) {
@@ -124,7 +124,7 @@ public class TestSnapshotPathINodes {
           inodesInPath.getLatestSnapshotId());
     }
     if (isSnapshot && index >= 0) {
-      assertEquals(Snapshot.Root.class, inodesInPath.getINodes()[index].getClass());
+      assertEquals(Snapshot.Root.class, inodesInPath.getINode(index).getClass());
     }
   }
 
@@ -142,38 +142,35 @@ public class TestSnapshotPathINodes {
     String[] names = INode.getPathNames(file1.toString());
     byte[][] components = INode.getPathComponents(names);
     INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
-    INode[] inodes = nodesInPath.getINodes();
     // The number of inodes should be equal to components.length
-    assertEquals(inodes.length, components.length);
+    assertEquals(nodesInPath.length(), components.length);
     // The returned nodesInPath should be non-snapshot
     assertSnapshot(nodesInPath, false, null, -1);
 
     // The last INode should be associated with file1
     assertTrue("file1=" + file1 + ", nodesInPath=" + nodesInPath,
-        inodes[components.length - 1] != null);
-    assertEquals(inodes[components.length - 1].getFullPathName(),
+        nodesInPath.getINode(components.length - 1) != null);
+    assertEquals(nodesInPath.getINode(components.length - 1).getFullPathName(),
         file1.toString());
-    assertEquals(inodes[components.length - 2].getFullPathName(),
+    assertEquals(nodesInPath.getINode(components.length - 2).getFullPathName(),
         sub1.toString());
-    assertEquals(inodes[components.length - 3].getFullPathName(),
+    assertEquals(nodesInPath.getINode(components.length - 3).getFullPathName(),
         dir.toString());
     
     // Call getExistingPathINodes and request only one INode. This is used
     // when identifying the INode for a given path.
     nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 1, false);
-    inodes = nodesInPath.getINodes();
-    assertEquals(inodes.length, 1);
+    assertEquals(nodesInPath.length(), 1);
     assertSnapshot(nodesInPath, false, null, -1);
-    assertEquals(inodes[0].getFullPathName(), file1.toString());
+    assertEquals(nodesInPath.getINode(0).getFullPathName(), file1.toString());
     
     // Call getExistingPathINodes and request 2 INodes. This is usually used
     // when identifying the parent INode of a given path.
     nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 2, false);
-    inodes = nodesInPath.getINodes();
-    assertEquals(inodes.length, 2);
+    assertEquals(nodesInPath.length(), 2);
     assertSnapshot(nodesInPath, false, null, -1);
-    assertEquals(inodes[1].getFullPathName(), file1.toString());
-    assertEquals(inodes[0].getFullPathName(), sub1.toString());
+    assertEquals(nodesInPath.getINode(1).getFullPathName(), file1.toString());
+    assertEquals(nodesInPath.getINode(0).getFullPathName(), sub1.toString());
   }
   
   /** 
@@ -191,53 +188,49 @@ public class TestSnapshotPathINodes {
     String[] names = INode.getPathNames(snapshotPath);
     byte[][] components = INode.getPathComponents(names);
     INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
-    INode[] inodes = nodesInPath.getINodes();
     // Length of inodes should be (components.length - 1), since we will ignore
     // ".snapshot" 
-    assertEquals(inodes.length, components.length - 1);
+    assertEquals(nodesInPath.length(), components.length - 1);
     // SnapshotRootIndex should be 3: {root, Testsnapshot, sub1, s1, file1}
-    final Snapshot snapshot = getSnapshot(nodesInPath, "s1");
+    final Snapshot snapshot = getSnapshot(nodesInPath, "s1", 3);
     assertSnapshot(nodesInPath, true, snapshot, 3);
     // Check the INode for file1 (snapshot file)
-    INode snapshotFileNode = inodes[inodes.length - 1]; 
+    INode snapshotFileNode = nodesInPath.getLastINode();
     assertINodeFile(snapshotFileNode, file1);
     assertTrue(snapshotFileNode.getParent().isWithSnapshot());
     
     // Call getExistingPathINodes and request only one INode.
     nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 1, false);
-    inodes = nodesInPath.getINodes();
-    assertEquals(inodes.length, 1);
+    assertEquals(nodesInPath.length(), 1);
     // The snapshotroot (s1) is not included in inodes. Thus the
     // snapshotRootIndex should be -1.
     assertSnapshot(nodesInPath, true, snapshot, -1);
     // Check the INode for file1 (snapshot file)
-    assertINodeFile(inodes[inodes.length - 1], file1);
+    assertINodeFile(nodesInPath.getLastINode(), file1);
     
     // Call getExistingPathINodes and request 2 INodes.
     nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, 2, false);
-    inodes = nodesInPath.getINodes();
-    assertEquals(inodes.length, 2);
+    assertEquals(nodesInPath.length(), 2);
     // There should be two INodes in inodes: s1 and snapshot of file1. Thus the
     // SnapshotRootIndex should be 0.
     assertSnapshot(nodesInPath, true, snapshot, 0);
-    assertINodeFile(inodes[inodes.length - 1], file1);
+    assertINodeFile(nodesInPath.getLastINode(), file1);
     
     // Resolve the path "/TestSnapshot/sub1/.snapshot"  
     String dotSnapshotPath = sub1.toString() + "/.snapshot";
     names = INode.getPathNames(dotSnapshotPath);
     components = INode.getPathComponents(names);
     nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
-    inodes = nodesInPath.getINodes();
-    // The number of INodes returned should be components.length - 1 since we
-    // will ignore ".snapshot"
-    assertEquals(inodes.length, components.length - 1);
+    // The number of INodes returned should still be components.length
+    // since we put a null in the inode array for ".snapshot"
+    assertEquals(nodesInPath.length(), components.length);
 
     // No SnapshotRoot dir is included in the resolved inodes  
     assertSnapshot(nodesInPath, true, snapshot, -1);
-    // The last INode should be the INode for sub1
-    final INode last = inodes[inodes.length - 1];
-    assertEquals(last.getFullPathName(), sub1.toString());
-    assertFalse(last instanceof INodeFile);
+    // The last INode should be null, and the second-to-last should be sub1
+    assertNull(nodesInPath.getLastINode());
+    assertEquals(nodesInPath.getINode(-2).getFullPathName(), sub1.toString());
+    assertTrue(nodesInPath.getINode(-2).isDirectory());
     
     String[] invalidPathComponent = {"invalidDir", "foo", ".snapshot", "bar"};
     Path invalidPath = new Path(invalidPathComponent[0]);
@@ -275,16 +268,15 @@ public class TestSnapshotPathINodes {
       String[] names = INode.getPathNames(snapshotPath);
       byte[][] components = INode.getPathComponents(names);
       INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
-      INode[] inodes = nodesInPath.getINodes();
       // Length of inodes should be (components.length - 1), since we will ignore
       // ".snapshot" 
-      assertEquals(inodes.length, components.length - 1);
+      assertEquals(nodesInPath.length(), components.length - 1);
       // SnapshotRootIndex should be 3: {root, Testsnapshot, sub1, s2, file1}
-      snapshot = getSnapshot(nodesInPath, "s2");
+      snapshot = getSnapshot(nodesInPath, "s2", 3);
       assertSnapshot(nodesInPath, true, snapshot, 3);
   
       // Check the INode for file1 (snapshot file)
-      final INode inode = inodes[inodes.length - 1];
+      final INode inode = nodesInPath.getLastINode();
       assertEquals(file1.getName(), inode.getLocalName());
       assertTrue(inode.asFile().isWithSnapshot());
     }
@@ -293,25 +285,34 @@ public class TestSnapshotPathINodes {
     String[] names = INode.getPathNames(file1.toString());
     byte[][] components = INode.getPathComponents(names);
     INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
-    INode[] inodes = nodesInPath.getINodes();
     // The length of inodes should be equal to components.length
-    assertEquals(inodes.length, components.length);
+    assertEquals(nodesInPath.length(), components.length);
     // The number of non-null elements should be components.length - 1 since
     // file1 has been deleted
-    assertEquals(nodesInPath.getNumNonNull(), components.length - 1);
+    assertEquals(getNumNonNull(nodesInPath), components.length - 1);
     // The returned nodesInPath should be non-snapshot
     assertSnapshot(nodesInPath, false, snapshot, -1);
     // The last INode should be null, and the one before should be associated
     // with sub1
-    assertNull(inodes[components.length - 1]);
-    assertEquals(inodes[components.length - 2].getFullPathName(),
+    assertNull(nodesInPath.getINode(components.length - 1));
+    assertEquals(nodesInPath.getINode(components.length - 2).getFullPathName(),
         sub1.toString());
-    assertEquals(inodes[components.length - 3].getFullPathName(),
+    assertEquals(nodesInPath.getINode(components.length - 3).getFullPathName(),
         dir.toString());
     hdfs.deleteSnapshot(sub1, "s2");
     hdfs.disallowSnapshot(sub1);
   }
 
+  private int getNumNonNull(INodesInPath iip) {
+    List<INode> inodes = iip.getReadOnlyINodes();
+    for (int i = inodes.size() - 1; i >= 0; i--) {
+      if (inodes.get(i) != null) {
+        return i+1;
+      }
+    }
+    return 0;
+  }
+
   /**
    * for snapshot file while adding a new file after snapshot.
    */
@@ -333,39 +334,37 @@ public class TestSnapshotPathINodes {
       String[] names = INode.getPathNames(snapshotPath);
       byte[][] components = INode.getPathComponents(names);
       INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
-      INode[] inodes = nodesInPath.getINodes();
       // Length of inodes should be (components.length - 1), since we will ignore
       // ".snapshot" 
-      assertEquals(inodes.length, components.length - 1);
+      assertEquals(nodesInPath.length(), components.length - 1);
       // The number of non-null inodes should be components.length - 2, since
       // snapshot of file3 does not exist
-      assertEquals(nodesInPath.getNumNonNull(), components.length - 2);
-      s4 = getSnapshot(nodesInPath, "s4");
+      assertEquals(getNumNonNull(nodesInPath), components.length - 2);
+      s4 = getSnapshot(nodesInPath, "s4", 3);
 
       // SnapshotRootIndex should still be 3: {root, Testsnapshot, sub1, s4, null}
       assertSnapshot(nodesInPath, true, s4, 3);
   
       // Check the last INode in inodes, which should be null
-      assertNull(inodes[inodes.length - 1]);
+      assertNull(nodesInPath.getINode(nodesInPath.length() - 1));
     }
 
     // Check the inodes for /TestSnapshot/sub1/file3
     String[] names = INode.getPathNames(file3.toString());
     byte[][] components = INode.getPathComponents(names);
     INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
-    INode[] inodes = nodesInPath.getINodes();
     // The number of inodes should be equal to components.length
-    assertEquals(inodes.length, components.length);
+    assertEquals(nodesInPath.length(), components.length);
 
     // The returned nodesInPath should be non-snapshot
     assertSnapshot(nodesInPath, false, s4, -1);
 
     // The last INode should be associated with file3
-    assertEquals(inodes[components.length - 1].getFullPathName(),
+    assertEquals(nodesInPath.getINode(components.length - 1).getFullPathName(),
         file3.toString());
-    assertEquals(inodes[components.length - 2].getFullPathName(),
+    assertEquals(nodesInPath.getINode(components.length - 2).getFullPathName(),
         sub1.toString());
-    assertEquals(inodes[components.length - 3].getFullPathName(),
+    assertEquals(nodesInPath.getINode(components.length - 3).getFullPathName(),
         dir.toString());
     hdfs.deleteSnapshot(sub1, "s4");
     hdfs.disallowSnapshot(sub1);
@@ -380,15 +379,15 @@ public class TestSnapshotPathINodes {
     String[] names = INode.getPathNames(file1.toString());
     byte[][] components = INode.getPathComponents(names);
     INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
-    INode[] inodes = nodesInPath.getINodes();
     // The number of inodes should be equal to components.length
-    assertEquals(inodes.length, components.length);
+    assertEquals(nodesInPath.length(), components.length);
 
     // The last INode should be associated with file1
-    assertEquals(inodes[components.length - 1].getFullPathName(),
+    assertEquals(nodesInPath.getINode(components.length - 1).getFullPathName(),
         file1.toString());
     // record the modification time of the inode
-    final long modTime = inodes[inodes.length - 1].getModificationTime();
+    final long modTime = nodesInPath.getINode(nodesInPath.length() - 1)
+        .getModificationTime();
     
     // Create a snapshot for the dir, and check the inodes for the path
     // pointing to a snapshot file
@@ -403,14 +402,13 @@ public class TestSnapshotPathINodes {
     names = INode.getPathNames(snapshotPath);
     components = INode.getPathComponents(names);
     INodesInPath ssNodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
-    INode[] ssInodes = ssNodesInPath.getINodes();
     // Length of ssInodes should be (components.length - 1), since we will
     // ignore ".snapshot" 
-    assertEquals(ssInodes.length, components.length - 1);
-    final Snapshot s3 = getSnapshot(ssNodesInPath, "s3");
+    assertEquals(ssNodesInPath.length(), components.length - 1);
+    final Snapshot s3 = getSnapshot(ssNodesInPath, "s3", 3);
     assertSnapshot(ssNodesInPath, true, s3, 3);
     // Check the INode for snapshot of file1
-    INode snapshotFileNode = ssInodes[ssInodes.length - 1]; 
+    INode snapshotFileNode = ssNodesInPath.getLastINode();
     assertEquals(snapshotFileNode.getLocalName(), file1.getName());
     assertTrue(snapshotFileNode.asFile().isWithSnapshot());
     // The modification time of the snapshot INode should be the same with the
@@ -423,14 +421,14 @@ public class TestSnapshotPathINodes {
     components = INode.getPathComponents(names);
     INodesInPath newNodesInPath = INodesInPath.resolve(fsdir.rootDir, components);
     assertSnapshot(newNodesInPath, false, s3, -1);
-    INode[] newInodes = newNodesInPath.getINodes();
     // The number of inodes should be equal to components.length
-    assertEquals(newInodes.length, components.length);
+    assertEquals(newNodesInPath.length(), components.length);
     // The last INode should be associated with file1
     final int last = components.length - 1;
-    assertEquals(newInodes[last].getFullPathName(), file1.toString());
+    assertEquals(newNodesInPath.getINode(last).getFullPathName(),
+        file1.toString());
     // The modification time of the INode for file1 should have been changed
-    Assert.assertFalse(modTime == newInodes[last].getModificationTime());
+    Assert.assertFalse(modTime == newNodesInPath.getINode(last).getModificationTime());
     hdfs.deleteSnapshot(sub1, "s3");
     hdfs.disallowSnapshot(sub1);
   }


[31/50] [abbrv] hadoop git commit: HADOOP-11211. mapreduce.job.classloader.system.classes semantics should be order-independent. (Yitong Zhou via gera)

Posted by ka...@apache.org.
HADOOP-11211. mapreduce.job.classloader.system.classes semantics should be order-independent. (Yitong Zhou via gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0bcea111
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0bcea111
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0bcea111

Branch: refs/heads/YARN-2139
Commit: 0bcea111e5daa9a4315346cf6919a4cfc8d90e0d
Parents: b9f6d0c
Author: Gera Shegalov <ge...@apache.org>
Authored: Thu Dec 11 12:25:25 2014 -0800
Committer: Gera Shegalov <ge...@apache.org>
Committed: Thu Dec 11 13:12:13 2014 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../hadoop/util/ApplicationClassLoader.java     | 25 ++++++++++++++++----
 .../hadoop/util/TestApplicationClassLoader.java |  8 +++++--
 .../src/main/resources/mapred-default.xml       | 21 ++++++++++++----
 4 files changed, 46 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bcea111/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 47d36e4..d923b87 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -571,6 +571,9 @@ Release 2.7.0 - UNRELEASED
 
     HADOOP-11386. Replace \n by %n in format hadoop-common format strings.
     (Li Lu via wheat9)
+
+    HADOOP-11211. mapreduce.job.classloader.system.classes semantics should
+    be order-independent. (Yitong Zhou via gera)
     
 Release 2.6.0 - 2014-11-18
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bcea111/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
index d2ab015..9f16b61 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
@@ -216,28 +216,43 @@ public class ApplicationClassLoader extends URLClassLoader {
     return c;
   }
 
+  /**
+   * Checks if a class should be included as a system class.
+   *
+   * A class is a system class if and only if it matches one of the positive
+   * patterns and none of the negative ones.
+   *
+   * @param name the class name to check
+   * @param systemClasses a list of system class configurations.
+   * @return true if the class is a system class
+   */
   public static boolean isSystemClass(String name, List<String> systemClasses) {
+    boolean result = false;
     if (systemClasses != null) {
       String canonicalName = name.replace('/', '.');
       while (canonicalName.startsWith(".")) {
         canonicalName=canonicalName.substring(1);
       }
       for (String c : systemClasses) {
-        boolean result = true;
+        boolean shouldInclude = true;
         if (c.startsWith("-")) {
           c = c.substring(1);
-          result = false;
+          shouldInclude = false;
         }
         if (canonicalName.startsWith(c)) {
           if (   c.endsWith(".")                                   // package
               || canonicalName.length() == c.length()              // class
               ||    canonicalName.length() > c.length()            // nested
                  && canonicalName.charAt(c.length()) == '$' ) {
-            return result;
+            if (shouldInclude) {
+              result = true;
+            } else {
+              return false;
+            }
           }
         }
       }
     }
-    return false;
+    return result;
   }
-}
\ No newline at end of file
+}
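
A quick sketch of the new semantics (assuming hadoop-common with this patch on
the classpath; the org.example names are illustrative): with the fix, a
negative pattern excludes a class regardless of where it appears in the list,
so the two configurations below behave identically instead of depending on
pattern order.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.util.ApplicationClassLoader;

public class SystemClassOrderSketch {
  public static void main(String[] args) {
    List<String> excludeFirst = Arrays.asList("-org.example.Foo", "org.example.");
    List<String> excludeLast  = Arrays.asList("org.example.", "-org.example.Foo");

    // Both print false: Foo matches the positive package pattern but also the
    // negative class pattern, and a negative match always wins.
    System.out.println(
        ApplicationClassLoader.isSystemClass("org.example.Foo", excludeFirst));
    System.out.println(
        ApplicationClassLoader.isSystemClass("org.example.Foo", excludeLast));

    // Prints true: Bar only matches the positive package pattern.
    System.out.println(
        ApplicationClassLoader.isSystemClass("org.example.Bar", excludeLast));
  }
}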

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bcea111/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestApplicationClassLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestApplicationClassLoader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestApplicationClassLoader.java
index cc16493..be8e61e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestApplicationClassLoader.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestApplicationClassLoader.java
@@ -87,7 +87,7 @@ public class TestApplicationClassLoader {
     assertEquals(jarFile.toURI().toURL(), urls[2]);
     // nofile should be ignored
   }
-  
+
   @Test
   public void testIsSystemClass() {
     testIsSystemClassInternal("");
@@ -112,8 +112,12 @@ public class TestApplicationClassLoader {
         classes("-org.example.Foo,org.example.")));
     assertTrue(isSystemClass("org.example.Bar" + nestedClass,
         classes("-org.example.Foo.,org.example.")));
+    assertFalse(isSystemClass("org.example.Foo" + nestedClass,
+        classes("org.example.,-org.example.Foo")));
+    assertFalse(isSystemClass("org.example.Foo" + nestedClass,
+        classes("org.example.Foo,-org.example.Foo")));
   }
-  
+
   private List<String> classes(String classes) {
     return Lists.newArrayList(Splitter.on(',').split(classes));
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bcea111/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 00a89c9..6e0deaa 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1363,10 +1363,23 @@
    <value></value>
   <description>Used to override the default definition of the system classes for
     the job classloader. The system classes are a comma-separated list of
-    classes that should be loaded from the system classpath, not the
-    user-supplied JARs, when mapreduce.job.classloader is enabled. Names ending
-    in '.' (period) are treated as package names, and names starting with a '-'
-    are treated as negative matches.
+    patterns that indicate whether to load a class from the system classpath
+    instead of from the user-supplied JARs, when mapreduce.job.classloader is
+    enabled.
+
+    A positive pattern is defined as:
+        1. A single class name 'C' that matches 'C' and transitively all nested
+            classes 'C$*' defined in C;
+        2. A package name ending with a '.' (e.g., "com.example.") that matches
+            all classes from that package.
+    A negative pattern is defined by a '-' in front of a positive pattern
+    (e.g., "-com.example.").
+
+    A class is considered a system class if and only if it matches one of the
+    positive patterns and none of the negative ones. More formally:
+    A class is a member of the inclusion set I if it matches one of the positive
+    patterns. A class is a member of the exclusion set E if it matches one of
+    the negative patterns. The set of system classes S = I \ E.
   </description>
 </property>
 


[46/50] [abbrv] hadoop git commit: HADOOP-11396. Provide navigation in the site documentation linking to the Hadoop Compatible File Systems. Contributed by Chris Nauroth.

Posted by ka...@apache.org.
HADOOP-11396. Provide navigation in the site documentation linking to the Hadoop Compatible File Systems. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cbfb996f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cbfb996f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cbfb996f

Branch: refs/heads/YARN-2139
Commit: cbfb996fb4787e9d4bbaee1d01e3eca2caf0ce55
Parents: 9458cd5
Author: cnauroth <cn...@apache.org>
Authored: Fri Dec 12 23:32:37 2014 -0800
Committer: cnauroth <cn...@apache.org>
Committed: Fri Dec 12 23:32:37 2014 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 hadoop-project/src/site/site.xml                | 5 +++++
 2 files changed, 8 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbfb996f/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 729a456..40a088b 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -582,6 +582,9 @@ Release 2.7.0 - UNRELEASED
     (wheat9)
 
     HADOOP-11394. hadoop-aws documentation missing. (cnauroth)
+
+    HADOOP-11396. Provide navigation in the site documentation linking to the
+    Hadoop Compatible File Systems. (cnauroth)
     
 Release 2.6.0 - 2014-11-18
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbfb996f/hadoop-project/src/site/site.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index 4a2c2f8..0146ff2 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -133,6 +133,11 @@
       <item name="Node Manager" href="hadoop-yarn/hadoop-yarn-site/NodeManagerRest.html"/>
     </menu>
     
+    <menu name="Hadoop Compatible File Systems" inherit="top">
+      <item name="Amazon S3" href="hadoop-aws/tools/hadoop-aws/index.html"/>
+      <item name="OpenStack Swift" href="hadoop-openstack/index.html"/>
+    </menu>
+
     <menu name="Auth" inherit="top">
       <item name="Overview" href="hadoop-auth/index.html"/>
       <item name="Examples" href="hadoop-auth/Examples.html"/>


[29/50] [abbrv] hadoop git commit: YARN-2917. Fixed potential deadlock when system.exit is called in AsyncDispatcher. Contributed by Rohith Sharmaks

Posted by ka...@apache.org.
YARN-2917. Fixed potential deadlock when system.exit is called in AsyncDispatcher. Contributed by Rohith Sharmaks


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/614b6afe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/614b6afe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/614b6afe

Branch: refs/heads/YARN-2139
Commit: 614b6afea450ebb897fbb2519c6f02e13b9bd12d
Parents: 8e9a266
Author: Jian He <ji...@apache.org>
Authored: Thu Dec 11 11:16:45 2014 -0800
Committer: Jian He <ji...@apache.org>
Committed: Thu Dec 11 11:17:54 2014 -0800

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                      |  3 +++
 .../apache/hadoop/yarn/event/AsyncDispatcher.java    | 15 +++++++++++++--
 2 files changed, 16 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/614b6afe/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 832efee..3432f6f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -214,6 +214,9 @@ Release 2.7.0 - UNRELEASED
     YARN-2924. Fixed RMAdminCLI to not convert node labels to lower case.
     (Wangda Tan via jianhe)
 
+    YARN-2917. Fixed potential deadlock when system.exit is called in AsyncDispatcher
+    (Rohith Sharmaks via jianhe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/614b6afe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
index 370b0f7..28be6ac 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
@@ -181,8 +181,9 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
       if (exitOnDispatchException
           && (ShutdownHookManager.get().isShutdownInProgress()) == false
           && stopped == false) {
-        LOG.info("Exiting, bbye..");
-        System.exit(-1);
+        Thread shutDownThread = new Thread(createShutDownThread());
+        shutDownThread.setName("AsyncDispatcher ShutDown handler");
+        shutDownThread.start();
       }
     }
   }
@@ -271,4 +272,14 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
     }
 
   }
+
+  Runnable createShutDownThread() {
+    return new Runnable() {
+      @Override
+      public void run() {
+        LOG.info("Exiting, bbye..");
+        System.exit(-1);
+      }
+    };
+  }
 }
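
A standalone sketch (not Hadoop code) of the deadlock pattern the patch avoids:
if the dispatching thread calls System.exit() itself, the JVM waits for shutdown
hooks to finish, and a hook that in turn waits for the dispatcher (as a
service-stop routine would) can never complete. Spawning a dedicated thread for
System.exit(), as the patch does, lets the dispatcher leave its loop and be
joined normally. The class and thread names below are illustrative, and running
the sketch hangs on purpose to show the bug.

public class ExitDeadlockSketch {
  public static void main(String[] args) {
    final Thread worker = new Thread(new Runnable() {
      @Override
      public void run() {
        System.exit(-1); // blocks until all shutdown hooks have completed
      }
    }, "event-dispatcher");

    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
      @Override
      public void run() {
        try {
          worker.join(); // waits for the worker, which is waiting for this hook
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
      }
    }, "service-stop-hook"));

    worker.start();
  }
}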


[22/50] [abbrv] hadoop git commit: HADOOP-11386. Replace \n by %n in format hadoop-common format strings. Contributed by Li Lu.

Posted by ka...@apache.org.
HADOOP-11386. Replace \n by %n in format hadoop-common format strings. Contributed by Li Lu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84d50003
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84d50003
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84d50003

Branch: refs/heads/YARN-2139
Commit: 84d50003f6e46f9f9ac2b9d7bb937de757be161b
Parents: 59cb8b9
Author: Haohui Mai <wh...@apache.org>
Authored: Wed Dec 10 14:37:31 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Wed Dec 10 14:37:52 2014 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt       |  3 +++
 .../apache/hadoop/conf/ReconfigurationServlet.java    |  4 ++--
 .../main/java/org/apache/hadoop/fs/shell/Display.java |  4 ++--
 .../java/org/apache/hadoop/io/file/tfile/TFile.java   |  2 +-
 .../org/apache/hadoop/io/file/tfile/TFileDumper.java  |  8 ++++----
 .../apache/hadoop/security/ShellBasedIdMapping.java   |  2 +-
 .../org/apache/hadoop/util/NativeLibraryChecker.java  | 14 +++++++-------
 7 files changed, 20 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84d50003/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 6e1cc11..8a4f13c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -567,6 +567,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11388. Remove deprecated o.a.h.metrics.file.FileContext.
     (Li Lu via wheat9)
 
+    HADOOP-11386. Replace \n by %n in format hadoop-common format strings.
+    (Li Lu via wheat9)
+    
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84d50003/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
index eb1fb6b..bb221ee 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
@@ -71,10 +71,10 @@ public class ReconfigurationServlet extends HttpServlet {
 
   private void printHeader(PrintWriter out, String nodeName) {
     out.print("<html><head>");
-    out.printf("<title>%s Reconfiguration Utility</title>\n", 
+    out.printf("<title>%s Reconfiguration Utility</title>%n",
                StringEscapeUtils.escapeHtml(nodeName));
     out.print("</head><body>\n");
-    out.printf("<h1>%s Reconfiguration Utility</h1>\n",
+    out.printf("<h1>%s Reconfiguration Utility</h1>%n",
                StringEscapeUtils.escapeHtml(nodeName));
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84d50003/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
index d437a66..ba65cd2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
@@ -195,11 +195,11 @@ class Display extends FsCommand {
 
       FileChecksum checksum = item.fs.getFileChecksum(item.path);
       if (checksum == null) {
-        out.printf("%s\tNONE\t\n", item.toString());
+        out.printf("%s\tNONE\t%n", item.toString());
       } else {
         String checksumString = StringUtils.byteToHexString(
             checksum.getBytes(), 0, checksum.getLength());
-        out.printf("%s\t%s\t%s\n",
+        out.printf("%s\t%s\t%s%n",
             item.toString(), checksum.getAlgorithmName(),
             checksumString);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84d50003/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java
index c11678d..f17be1a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFile.java
@@ -2341,7 +2341,7 @@ public class TFile {
    *          A list of TFile paths.
    */
   public static void main(String[] args) {
-    System.out.printf("TFile Dumper (TFile %s, BCFile %s)\n", TFile.API_VERSION
+    System.out.printf("TFile Dumper (TFile %s, BCFile %s)%n", TFile.API_VERSION
         .toString(), BCFile.API_VERSION.toString());
     if (args.length == 0) {
       System.out

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84d50003/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java
index 829a1c6..ad94c42 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java
@@ -176,7 +176,7 @@ class TFileDumper {
       for (Iterator<Map.Entry<String, String>> it = entrySet.iterator(); it
           .hasNext();) {
         Map.Entry<String, String> e = it.next();
-        out.printf("%s : %s\n", Align.format(e.getKey(), maxKeyLength,
+        out.printf("%s : %s%n", Align.format(e.getKey(), maxKeyLength,
             Align.LEFT), e.getValue());
       }
       out.println();
@@ -200,7 +200,7 @@ class TFileDumper {
         String endKey = "End-Key";
         int endKeyWidth = Math.max(endKey.length(), maxKeySampleLen * 2 + 5);
 
-        out.printf("%s %s %s %s %s %s\n", Align.format(blkID, blkIDWidth,
+        out.printf("%s %s %s %s %s %s%n", Align.format(blkID, blkIDWidth,
             Align.CENTER), Align.format(offset, offsetWidth, Align.CENTER),
             Align.format(blkLen, blkLenWidth, Align.CENTER), Align.format(
                 rawSize, rawSizeWidth, Align.CENTER), Align.format(records,
@@ -267,7 +267,7 @@ class TFileDumper {
                 * 10);
         String compression = "Compression";
         int compressionWidth = compression.length();
-        out.printf("%s %s %s %s %s\n", Align.format(name, nameWidth,
+        out.printf("%s %s %s %s %s%n", Align.format(name, nameWidth,
             Align.CENTER), Align.format(offset, offsetWidth, Align.CENTER),
             Align.format(blkLen, blkLenWidth, Align.CENTER), Align.format(
                 rawSize, rawSizeWidth, Align.CENTER), Align.format(compression,
@@ -280,7 +280,7 @@ class TFileDumper {
           BlockRegion region = e.getValue().getRegion();
           String blkCompression =
               e.getValue().getCompressionAlgorithm().getName();
-          out.printf("%s %s %s %s %s\n", Align.format(blkName, nameWidth,
+          out.printf("%s %s %s %s %s%n", Align.format(blkName, nameWidth,
               Align.LEFT), Align.format(region.getOffset(), offsetWidth,
               Align.LEFT), Align.format(region.getCompressedSize(),
               blkLenWidth, Align.LEFT), Align.format(region.getRawSize(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84d50003/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java
index 768294d..e152d46 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java
@@ -181,7 +181,7 @@ public class ShellBasedIdMapping implements IdMappingServiceProvider {
       final Integer key, final String value,
       final Integer ekey, final String evalue) {    
       LOG.warn("\n" + header + String.format(
-          "new entry (%d, %s), existing entry: (%d, %s).\n%s\n%s",
+          "new entry (%d, %s), existing entry: (%d, %s).%n%s%n%s",
           key, value, ekey, evalue,
           "The new entry is to be ignored for the following reason.",
           DUPLICATE_NAME_ID_DEBUG_INFO));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84d50003/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java
index 6416355..81448ab 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java
@@ -108,14 +108,14 @@ public class NativeLibraryChecker {
     }
 
     System.out.println("Native library checking:");
-    System.out.printf("hadoop:  %b %s\n", nativeHadoopLoaded, hadoopLibraryName);
-    System.out.printf("zlib:    %b %s\n", zlibLoaded, zlibLibraryName);
-    System.out.printf("snappy:  %b %s\n", snappyLoaded, snappyLibraryName);
-    System.out.printf("lz4:     %b %s\n", lz4Loaded, lz4LibraryName);
-    System.out.printf("bzip2:   %b %s\n", bzip2Loaded, bzip2LibraryName);
-    System.out.printf("openssl: %b %s\n", openSslLoaded, openSslDetail);
+    System.out.printf("hadoop:  %b %s%n", nativeHadoopLoaded, hadoopLibraryName);
+    System.out.printf("zlib:    %b %s%n", zlibLoaded, zlibLibraryName);
+    System.out.printf("snappy:  %b %s%n", snappyLoaded, snappyLibraryName);
+    System.out.printf("lz4:     %b %s%n", lz4Loaded, lz4LibraryName);
+    System.out.printf("bzip2:   %b %s%n", bzip2Loaded, bzip2LibraryName);
+    System.out.printf("openssl: %b %s%n", openSslLoaded, openSslDetail);
     if (Shell.WINDOWS) {
-      System.out.printf("winutils: %b %s\n", winutilsExists, winutilsPath);
+      System.out.printf("winutils: %b %s%n", winutilsExists, winutilsPath);
     }
 
     if ((!nativeHadoopLoaded) || (Shell.WINDOWS && (!winutilsExists)) ||
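
A small standalone sketch (not part of the patch) of why the substitution
matters: in java.util.Formatter strings, "%n" expands to the platform line
separator, while a literal "\n" is always a bare line feed, which is the kind
of issue findbugs flags in printf-style format strings.

public class NewlineFormatSketch {
  public static void main(String[] args) {
    System.out.printf("with %%n%n");  // ends with the platform line separator
    System.out.printf("with \\n\n");  // always ends with '\n'
    // String.format("%n") and System.lineSeparator() agree on every platform
    System.out.println(
        String.format("%n").equals(System.lineSeparator()));  // true
  }
}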


[28/50] [abbrv] hadoop git commit: HADOOP-11353. Add support for .hadooprc (aw)

Posted by ka...@apache.org.
HADOOP-11353. Add support for .hadooprc (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e9a2668
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e9a2668
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e9a2668

Branch: refs/heads/YARN-2139
Commit: 8e9a2668195d69e30cfce51fda6ad2bd6c1466ed
Parents: 390642a
Author: Allen Wittenauer <aw...@apache.org>
Authored: Thu Dec 11 09:00:35 2014 -0800
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Thu Dec 11 09:00:35 2014 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt          |  2 ++
 .../hadoop-common/src/main/bin/hadoop-config.sh          |  2 ++
 .../hadoop-common/src/main/bin/hadoop-functions.sh       | 11 +++++++++++
 3 files changed, 15 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e9a2668/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8a4f13c..47d36e4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -26,6 +26,8 @@ Trunk (Unreleased)
     Alexander Stojanovich, Brian Swan, and Min Wei via cnauroth)
 
     HADOOP-6590. Add a username check for hadoop sub-commands (John Smith via aw)
+
+    HADOOP-11353. Add support for .hadooprc (aw)
     
   IMPROVEMENTS
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e9a2668/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
index 89b0c93..06fb0ef 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
@@ -168,6 +168,8 @@ hadoop_exec_userfuncs
 # IMPORTANT! User provided code is now available!
 #
 
+hadoop_exec_hadooprc
+
 # do all the OS-specific startup bits here
 # this allows us to get a decent JAVA_HOME,
 # call crle for LD_LIBRARY_PATH, etc.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e9a2668/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index dfd7315..af45cec 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -113,6 +113,17 @@ function hadoop_exec_userfuncs
   fi
 }
 
+function hadoop_exec_hadooprc
+{
+  # Read the user's settings.  This allows users to override
+  # and/or append hadoop-env.sh. It is not meant as a complete system override.
+
+  if [[ -f "${HOME}/.hadooprc" ]]; then
+    hadoop_debug "Applying the user's .hadooprc"
+    . "${HOME}/.hadooprc"
+  fi
+}
+
 function hadoop_basic_init
 {
   # Some of these are also set in hadoop-env.sh.


[37/50] [abbrv] hadoop git commit: YARN-2912 Jersey Tests failing with port in use. (varun saxena via stevel)

Posted by ka...@apache.org.
 YARN-2912 Jersey Tests failing with port in use. (varun saxena via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3681de20
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3681de20
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3681de20

Branch: refs/heads/YARN-2139
Commit: 3681de203949f84c9fa4a6df49948dbf6980c9ba
Parents: bda748a
Author: Steve Loughran <st...@apache.org>
Authored: Fri Dec 12 17:10:54 2014 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Fri Dec 12 17:11:07 2014 +0000

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  2 +
 .../hadoop-yarn/hadoop-yarn-common/pom.xml      |  5 +++
 .../hadoop/yarn/webapp/JerseyTestBase.java      | 42 ++++++++++++++++++++
 .../webapp/TestAHSWebServices.java              |  4 +-
 .../webapp/TestTimelineWebServices.java         |  4 +-
 .../nodemanager/webapp/TestNMWebServices.java   |  4 +-
 .../webapp/TestNMWebServicesApps.java           |  4 +-
 .../webapp/TestNMWebServicesContainers.java     |  4 +-
 .../webapp/TestRMWebServices.java               |  4 +-
 .../webapp/TestRMWebServicesApps.java           |  4 +-
 .../TestRMWebServicesAppsModification.java      |  4 +-
 .../webapp/TestRMWebServicesCapacitySched.java  |  4 +-
 .../TestRMWebServicesDelegationTokens.java      |  3 +-
 .../webapp/TestRMWebServicesFairScheduler.java  |  4 +-
 .../webapp/TestRMWebServicesNodeLabels.java     |  4 +-
 .../webapp/TestRMWebServicesNodes.java          |  4 +-
 16 files changed, 75 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3681de20/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 58d28b8..cd0bf7c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -220,6 +220,8 @@ Release 2.7.0 - UNRELEASED
     YARN-2243. Order of arguments for Preconditions.checkNotNull() is wrong in 
     SchedulerApplicationAttempt ctor. (devaraj)
 
+    YARN-2912 Jersey Tests failing with port in use. (varun saxena via stevel)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3681de20/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index 3adfe8b..2301399 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -143,6 +143,11 @@
       <scope>test</scope>
     </dependency>
     <dependency>
+      <groupId>com.sun.jersey.jersey-test-framework</groupId>
+      <artifactId>jersey-test-framework-grizzly2</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
       <groupId>commons-io</groupId>
       <artifactId>commons-io</artifactId>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3681de20/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/JerseyTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/JerseyTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/JerseyTestBase.java
new file mode 100644
index 0000000..0b177f9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/JerseyTestBase.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.webapp;
+
+import org.junit.Before;
+import com.sun.jersey.test.framework.JerseyTest;
+import com.sun.jersey.test.framework.WebAppDescriptor;
+
+public abstract class JerseyTestBase extends JerseyTest {
+  public JerseyTestBase(WebAppDescriptor appDescriptor) {
+    super(appDescriptor);
+  }
+
+  @Before
+  public void initializeJerseyPort() {
+    int jerseyPort = 9998;
+    String port = System.getProperty("jersey.test.port");
+    if(null != port) {
+      jerseyPort = Integer.parseInt(port) + 10;
+      if(jerseyPort > 65535) {
+        jerseyPort = 9998;
+      }
+    }
+    System.setProperty("jersey.test.port", Integer.toString(jerseyPort));
+  }
+}
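
For context, a minimal sketch of how a converted test picks up the new base class; the class name and resource package below are hypothetical and not part of this patch. Extending JerseyTestBase instead of JerseyTest is all that is needed for the @Before hook above to shift jersey.test.port before the test container binds:

  // Hypothetical illustration only; not included in the commit.
  package org.apache.hadoop.yarn.webapp;

  import com.sun.jersey.test.framework.WebAppDescriptor;

  public class ExampleWebServicesTest extends JerseyTestBase {
    public ExampleWebServicesTest() {
      // Same descriptor a plain JerseyTest subclass would build; only the
      // port initialization inherited from JerseyTestBase is new.
      super(new WebAppDescriptor.Builder("org.apache.hadoop.yarn.webapp.example")
          .build());
    }
  }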

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3681de20/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
index 76bf8c3..41dda91 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestAHSWebServices.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.yarn.server.timeline.TimelineDataManager;
 import org.apache.hadoop.yarn.server.timeline.TimelineStore;
 import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
 import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.codehaus.jettison.json.JSONArray;
@@ -73,11 +74,10 @@ import com.sun.jersey.api.client.ClientResponse.Status;
 import com.sun.jersey.api.client.UniformInterfaceException;
 import com.sun.jersey.api.client.WebResource;
 import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
 import com.sun.jersey.test.framework.WebAppDescriptor;
 
 @RunWith(Parameterized.class)
-public class TestAHSWebServices extends JerseyTest {
+public class TestAHSWebServices extends JerseyTestBase {
 
   private static ApplicationHistoryManagerOnTimelineStore historyManager;
   private static final String[] USERS = new String[] { "foo" , "bar" };

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3681de20/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServices.java
index fe2ed5c..7e96d2a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServices.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.yarn.server.timeline.TimelineStore;
 import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
 import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilter;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
 import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
 import org.junit.Assert;
 import org.junit.Test;
@@ -72,10 +73,9 @@ import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.api.client.WebResource;
 import com.sun.jersey.api.client.config.DefaultClientConfig;
 import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
 import com.sun.jersey.test.framework.WebAppDescriptor;
 
-public class TestTimelineWebServices extends JerseyTest {
+public class TestTimelineWebServices extends JerseyTestBase {
 
   private static TimelineStore store;
   private static TimelineACLsManager timelineACLsManager;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3681de20/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
index 61bdf10..7caad4a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.YarnVersionInfo;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
 import org.apache.hadoop.yarn.webapp.WebApp;
 import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
 import org.codehaus.jettison.json.JSONException;
@@ -78,13 +79,12 @@ import com.sun.jersey.api.client.ClientResponse.Status;
 import com.sun.jersey.api.client.UniformInterfaceException;
 import com.sun.jersey.api.client.WebResource;
 import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
 import com.sun.jersey.test.framework.WebAppDescriptor;
 
 /**
  * Test the nodemanager node info web services api's
  */
-public class TestNMWebServices extends JerseyTest {
+public class TestNMWebServices extends JerseyTestBase {
 
   private static Context nmContext;
   private static ResourceView resourceView;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3681de20/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java
index 87aa852..3e7aac8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.yarn.server.nodemanager.webapp.WebServer.NMWebApp;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
 import org.apache.hadoop.yarn.webapp.WebApp;
 import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
 import org.codehaus.jettison.json.JSONArray;
@@ -73,10 +74,9 @@ import com.sun.jersey.api.client.ClientResponse.Status;
 import com.sun.jersey.api.client.UniformInterfaceException;
 import com.sun.jersey.api.client.WebResource;
 import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
 import com.sun.jersey.test.framework.WebAppDescriptor;
 
-public class TestNMWebServicesApps extends JerseyTest {
+public class TestNMWebServicesApps extends JerseyTestBase {
 
   private static Context nmContext;
   private static ResourceView resourceView;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3681de20/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesContainers.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesContainers.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesContainers.java
index 62d9cb7..ceb1d57 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesContainers.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesContainers.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
 import org.apache.hadoop.yarn.webapp.WebApp;
 import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
 import org.codehaus.jettison.json.JSONArray;
@@ -73,10 +74,9 @@ import com.sun.jersey.api.client.ClientResponse.Status;
 import com.sun.jersey.api.client.UniformInterfaceException;
 import com.sun.jersey.api.client.WebResource;
 import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
 import com.sun.jersey.test.framework.WebAppDescriptor;
 
-public class TestNMWebServicesContainers extends JerseyTest {
+public class TestNMWebServicesContainers extends JerseyTestBase {
 
   private static Context nmContext;
   private static ResourceView resourceView;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3681de20/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
index 9f091d2..5e1ab74 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.util.YarnVersionInfo;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
 import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
@@ -61,10 +62,9 @@ import com.sun.jersey.api.client.ClientResponse.Status;
 import com.sun.jersey.api.client.UniformInterfaceException;
 import com.sun.jersey.api.client.WebResource;
 import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
 import com.sun.jersey.test.framework.WebAppDescriptor;
 
-public class TestRMWebServices extends JerseyTest {
+public class TestRMWebServices extends JerseyTestBase {
 
   private static MockRM rm;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3681de20/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
index 23ea22e..705fd31 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptS
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
 import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
 import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
@@ -67,10 +68,9 @@ import com.sun.jersey.api.client.UniformInterfaceException;
 import com.sun.jersey.api.client.WebResource;
 import com.sun.jersey.core.util.MultivaluedMapImpl;
 import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
 import com.sun.jersey.test.framework.WebAppDescriptor;
 
-public class TestRMWebServicesApps extends JerseyTest {
+public class TestRMWebServicesApps extends JerseyTestBase {
 
   private static MockRM rm;
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3681de20/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java
index df23e85..632eeb8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java
@@ -74,6 +74,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CredentialsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LocalResourceInfo;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
 import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
@@ -102,11 +103,10 @@ import com.sun.jersey.api.client.filter.LoggingFilter;
 import com.sun.jersey.api.json.JSONJAXBContext;
 import com.sun.jersey.api.json.JSONMarshaller;
 import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
 import com.sun.jersey.test.framework.WebAppDescriptor;
 
 @RunWith(Parameterized.class)
-public class TestRMWebServicesAppsModification extends JerseyTest {
+public class TestRMWebServicesAppsModification extends JerseyTestBase {
   private static MockRM rm;
 
   private static final int CONTAINER_MB = 1024;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3681de20/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
index 87bacc6..c7c403d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Capacity
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
 import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
 import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
@@ -55,10 +56,9 @@ import com.google.inject.servlet.ServletModule;
 import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.api.client.WebResource;
 import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
 import com.sun.jersey.test.framework.WebAppDescriptor;
 
-public class TestRMWebServicesCapacitySched extends JerseyTest {
+public class TestRMWebServicesCapacitySched extends JerseyTestBase {
 
   private static MockRM rm;
   private CapacitySchedulerConfiguration csConf;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3681de20/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokens.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokens.java
index c5c048f..dab8343 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokens.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokens.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.DelegationToken;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
 import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
@@ -87,7 +88,7 @@ import com.sun.jersey.test.framework.JerseyTest;
 import com.sun.jersey.test.framework.WebAppDescriptor;
 
 @RunWith(Parameterized.class)
-public class TestRMWebServicesDelegationTokens extends JerseyTest {
+public class TestRMWebServicesDelegationTokens extends JerseyTestBase {
 
   private static File testRootDir;
   private static File httpSpnegoKeytabFile = new File(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3681de20/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
index 9de3f76..21ca6a7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.Test;
@@ -39,10 +40,9 @@ import com.google.inject.servlet.ServletModule;
 import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.api.client.WebResource;
 import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
 import com.sun.jersey.test.framework.WebAppDescriptor;
 
-public class TestRMWebServicesFairScheduler extends JerseyTest {
+public class TestRMWebServicesFairScheduler extends JerseyTestBase {
   private static MockRM rm;
   private YarnConfiguration conf;
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3681de20/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodeLabels.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodeLabels.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodeLabels.java
index df5aecb..ae27c02 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodeLabels.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodeLabels.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeLabelsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsInfo;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
 import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
@@ -51,10 +52,9 @@ import com.sun.jersey.api.json.JSONJAXBContext;
 import com.sun.jersey.api.json.JSONMarshaller;
 import com.sun.jersey.api.json.JSONUnmarshaller;
 import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
 import com.sun.jersey.test.framework.WebAppDescriptor;
 
-public class TestRMWebServicesNodeLabels extends JerseyTest {
+public class TestRMWebServicesNodeLabels extends JerseyTestBase {
 
   private static final Log LOG = LogFactory
       .getLog(TestRMWebServicesNodeLabels.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3681de20/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
index e685f22..f507e17 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeStatusEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
 import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
 import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
@@ -64,10 +65,9 @@ import com.sun.jersey.api.client.ClientResponse.Status;
 import com.sun.jersey.api.client.UniformInterfaceException;
 import com.sun.jersey.api.client.WebResource;
 import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
 import com.sun.jersey.test.framework.WebAppDescriptor;
 
-public class TestRMWebServicesNodes extends JerseyTest {
+public class TestRMWebServicesNodes extends JerseyTestBase {
 
   private static MockRM rm;
 


[36/50] [abbrv] hadoop git commit: YARN-2243. Order of arguments for Preconditions.checkNotNull() is wrong in SchedulerApplicationAttempt ctor. Contributed by Devaraj K.

Posted by ka...@apache.org.
YARN-2243. Order of arguments for Preconditions.checkNotNull() is wrong in
SchedulerApplicationAttempt ctor. Contributed by Devaraj K.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bda748ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bda748ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bda748ac

Branch: refs/heads/YARN-2139
Commit: bda748ac3abf30f6cd4c0e22c80c73396abc59fb
Parents: 0bd0229
Author: Devaraj K <de...@apache.org>
Authored: Fri Dec 12 12:34:43 2014 +0530
Committer: Devaraj K <de...@apache.org>
Committed: Fri Dec 12 12:34:43 2014 +0530

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                                   | 3 +++
 .../resourcemanager/scheduler/SchedulerApplicationAttempt.java    | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bda748ac/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3432f6f..58d28b8 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -217,6 +217,9 @@ Release 2.7.0 - UNRELEASED
     YARN-2917. Fixed potential deadlock when system.exit is called in AsyncDispatcher
     (Rohith Sharmaks via jianhe)
 
+    YARN-2243. Order of arguments for Preconditions.checkNotNull() is wrong in 
+    SchedulerApplicationAttempt ctor. (devaraj)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bda748ac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index 84975b6..d5b6ce6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -125,7 +125,7 @@ public class SchedulerApplicationAttempt {
   public SchedulerApplicationAttempt(ApplicationAttemptId applicationAttemptId, 
       String user, Queue queue, ActiveUsersManager activeUsersManager,
       RMContext rmContext) {
-    Preconditions.checkNotNull("RMContext should not be null", rmContext);
+    Preconditions.checkNotNull(rmContext, "RMContext should not be null");
     this.rmContext = rmContext;
     this.appSchedulingInfo = 
         new AppSchedulingInfo(applicationAttemptId, user, queue,  
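
As a side note on why the argument order matters: Guava's Preconditions.checkNotNull(reference, errorMessage) validates its first argument, so with the old order the non-null message string was what got checked and a null RMContext slipped through silently. A small hedged sketch, standalone and not taken from the patch:

  import com.google.common.base.Preconditions;

  public class CheckNotNullOrder {
    static void accept(Object rmContext) {
      // Old order: the literal string is what gets checked, so this never
      // throws, even when rmContext is null.
      //   Preconditions.checkNotNull("RMContext should not be null", rmContext);

      // Fixed order: the value under test first, the message second.
      Preconditions.checkNotNull(rmContext, "RMContext should not be null");
    }

    public static void main(String[] args) {
      accept(new Object()); // passes
      accept(null);         // now fails fast with a NullPointerException
    }
  }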


[05/50] [abbrv] hadoop git commit: Incorrect locking in FsVolumeList#checkDirs can hang datanodes (Noah Lorang via Colin P. McCabe)

Posted by ka...@apache.org.
Incorrect locking in FsVolumeList#checkDirs can hang datanodes (Noah Lorang via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8352b9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8352b9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8352b9b

Branch: refs/heads/YARN-2139
Commit: d8352b9b2b99aa46679c5880a724ba3f0ceb41ff
Parents: be86237
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Tue Dec 9 10:55:17 2014 -0800
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Tue Dec 9 10:56:46 2014 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../datanode/fsdataset/impl/FsVolumeList.java   | 56 ++++++++++----------
 2 files changed, 31 insertions(+), 28 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8352b9b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 55026a2..626d90a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -574,6 +574,9 @@ Release 2.6.1 - UNRELEASED
     HDFS-4882. Prevent the Namenode's LeaseManager from looping forever in
     checkLeases (Ravi Prakash via Colin P. McCabe)
 
+    HDFS-7489. Incorrect locking in FsVolumeList#checkDirs can hang datanodes
+    (Noah Lorang via Colin P. McCabe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8352b9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index 837ddf7..55329ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -36,6 +36,7 @@ class FsVolumeList {
    * This list is replaced on modification holding "this" lock.
    */
   volatile List<FsVolumeImpl> volumes = null;
+  private Object checkDirsMutex = new Object();
 
   private final VolumeChoosingPolicy<FsVolumeImpl> blockChooser;
   private volatile int numFailedVolumes;
@@ -167,40 +168,39 @@ class FsVolumeList {
    * Calls {@link FsVolumeImpl#checkDirs()} on each volume, removing any
    * volumes from the active list that result in a DiskErrorException.
    * 
-   * This method is synchronized to allow only one instance of checkDirs() 
-   * call
+   * Use checkDirsMutex to allow only one instance of checkDirs() call
+   *
    * @return list of all the removed volumes.
    */
-  synchronized List<FsVolumeImpl> checkDirs() {
-    ArrayList<FsVolumeImpl> removedVols = null;
-    
-    // Make a copy of volumes for performing modification 
-    final List<FsVolumeImpl> volumeList = new ArrayList<FsVolumeImpl>(volumes);
+  List<FsVolumeImpl> checkDirs() {
+    synchronized(checkDirsMutex) {
+      ArrayList<FsVolumeImpl> removedVols = null;
+      
+      // Make a copy of volumes for performing modification 
+      final List<FsVolumeImpl> volumeList = new ArrayList<FsVolumeImpl>(volumes);
 
-    for(Iterator<FsVolumeImpl> i = volumeList.iterator(); i.hasNext(); ) {
-      final FsVolumeImpl fsv = i.next();
-      try {
-        fsv.checkDirs();
-      } catch (DiskErrorException e) {
-        FsDatasetImpl.LOG.warn("Removing failed volume " + fsv + ": ",e);
-        if (removedVols == null) {
-          removedVols = new ArrayList<FsVolumeImpl>(1);
+      for(Iterator<FsVolumeImpl> i = volumeList.iterator(); i.hasNext(); ) {
+        final FsVolumeImpl fsv = i.next();
+        try {
+          fsv.checkDirs();
+        } catch (DiskErrorException e) {
+          FsDatasetImpl.LOG.warn("Removing failed volume " + fsv + ": ",e);
+          if (removedVols == null) {
+            removedVols = new ArrayList<FsVolumeImpl>(1);
+          }
+          removedVols.add(fsv);
+          removeVolume(fsv.getBasePath());
+          numFailedVolumes++;
         }
-        removedVols.add(fsv);
-        fsv.shutdown(); 
-        i.remove(); // Remove the volume
-        numFailedVolumes++;
       }
-    }
-    
-    if (removedVols != null && removedVols.size() > 0) {
-      // Replace volume list
-      volumes = Collections.unmodifiableList(volumeList);
-      FsDatasetImpl.LOG.warn("Completed checkDirs. Removed " + removedVols.size()
-          + " volumes. Current volumes: " + this);
-    }
+      
+      if (removedVols != null && removedVols.size() > 0) {
+        FsDatasetImpl.LOG.warn("Completed checkDirs. Removed " + removedVols.size()
+            + " volumes. Current volumes: " + this);
+      }
 
-    return removedVols;
+      return removedVols;
+    }
   }
 
   @Override
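
The shape of the fix: checkDirs() no longer synchronizes on the FsVolumeList instance but serializes on a dedicated mutex, so threads that briefly synchronize on "this" (for example to swap the volatile volumes list) are not stuck behind slow disk checks. A hedged, self-contained sketch of that locking pattern with illustrative names, not the HDFS code:

  import java.util.ArrayList;
  import java.util.Collections;
  import java.util.List;

  class VolumeSet {
    // Readers see a consistent snapshot; writers replace it under "this".
    private volatile List<String> volumes = Collections.emptyList();
    // Dedicated lock: only one slow check runs at a time, without holding
    // the instance lock for the whole duration.
    private final Object checkDirsMutex = new Object();

    synchronized void addVolume(String v) {
      List<String> copy = new ArrayList<String>(volumes);
      copy.add(v);
      volumes = Collections.unmodifiableList(copy);
    }

    List<String> checkDirs() {
      synchronized (checkDirsMutex) {
        List<String> removed = new ArrayList<String>();
        for (String v : new ArrayList<String>(volumes)) {
          if (isBroken(v)) {          // stands in for slow disk I/O
            removed.add(v);
          }
        }
        // Only the quick list swap takes the instance lock.
        synchronized (this) {
          List<String> copy = new ArrayList<String>(volumes);
          copy.removeAll(removed);
          volumes = Collections.unmodifiableList(copy);
        }
        return removed;
      }
    }

    private boolean isBroken(String v) {
      return false; // placeholder for FsVolumeImpl#checkDirs()
    }
  }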


[27/50] [abbrv] hadoop git commit: HDFS-7503. Namenode restart after large deletions can cause slow processReport (Arpit Agarwal)

Posted by ka...@apache.org.
HDFS-7503. Namenode restart after large deletions can cause slow processReport (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/390642ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/390642ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/390642ac

Branch: refs/heads/YARN-2139
Commit: 390642acf35f3d599271617d30ba26c2f6406fc1
Parents: d693a25
Author: arp <ar...@apache.org>
Authored: Wed Dec 10 23:37:26 2014 -0800
Committer: arp <ar...@apache.org>
Committed: Wed Dec 10 23:44:28 2014 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../server/blockmanagement/BlockManager.java    | 22 ++++++++++++++------
 2 files changed, 19 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/390642ac/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fd48605..9049083 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -593,6 +593,9 @@ Release 2.6.1 - UNRELEASED
     HDFS-7489. Incorrect locking in FsVolumeList#checkDirs can hang datanodes
     (Noah Lorang via Colin P. McCabe)
 
+    HDFS-7503. Namenode restart after large deletions can cause slow
+    processReport. (Arpit Agarwal)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/390642ac/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 2676696..5f718e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1785,6 +1785,8 @@ public class BlockManager {
     final long startTime = Time.now(); //after acquiring write lock
     final long endTime;
     DatanodeDescriptor node;
+    Collection<Block> invalidatedBlocks = null;
+
     try {
       node = datanodeManager.getDatanode(nodeID);
       if (node == null || !node.isAlive) {
@@ -1813,7 +1815,7 @@ public class BlockManager {
         // ordinary block reports.  This shortens restart times.
         processFirstBlockReport(storageInfo, newReport);
       } else {
-        processReport(storageInfo, newReport);
+        invalidatedBlocks = processReport(storageInfo, newReport);
       }
       
       // Now that we have an up-to-date block report, we know that any
@@ -1832,6 +1834,14 @@ public class BlockManager {
       namesystem.writeUnlock();
     }
 
+    if (invalidatedBlocks != null) {
+      for (Block b : invalidatedBlocks) {
+        blockLog.info("BLOCK* processReport: " + b + " on " + node
+                          + " size " + b.getNumBytes()
+                          + " does not belong to any file");
+      }
+    }
+
     // Log the block report processing stats from Namenode perspective
     final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
     if (metrics != null) {
@@ -1875,8 +1885,9 @@ public class BlockManager {
     }
   }
   
-  private void processReport(final DatanodeStorageInfo storageInfo,
-                             final BlockListAsLongs report) throws IOException {
+  private Collection<Block> processReport(
+      final DatanodeStorageInfo storageInfo,
+      final BlockListAsLongs report) throws IOException {
     // Normal case:
     // Modify the (block-->datanode) map, according to the difference
     // between the old and new block report.
@@ -1907,14 +1918,13 @@ public class BlockManager {
           + " of " + numBlocksLogged + " reported.");
     }
     for (Block b : toInvalidate) {
-      blockLog.info("BLOCK* processReport: "
-          + b + " on " + node + " size " + b.getNumBytes()
-          + " does not belong to any file");
       addToInvalidates(b, node);
     }
     for (BlockToMarkCorrupt b : toCorrupt) {
       markBlockAsCorrupt(b, storageInfo, node);
     }
+
+    return toInvalidate;
   }
 
   /**
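
The gist of the change: the per-block "does not belong to any file" logging is moved out of the section that holds the namesystem write lock; processReport() now returns the invalidated blocks and the caller logs them after writeUnlock(), so a block report that follows a large deletion no longer stalls other operations. A minimal hedged sketch of the collect-then-log-after-unlock pattern, with illustrative names only:

  import java.util.ArrayList;
  import java.util.List;
  import java.util.concurrent.locks.ReentrantReadWriteLock;

  class ReportProcessor {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

    void handleReport(List<String> reportedBlocks) {
      List<String> invalidated = new ArrayList<String>();
      lock.writeLock().lock();
      try {
        // Heavy work happens under the lock, but no per-item logging here.
        for (String b : reportedBlocks) {
          if (!belongsToAFile(b)) {
            invalidated.add(b);
          }
        }
      } finally {
        lock.writeLock().unlock();
      }
      // Logging happens only after the lock has been released.
      for (String b : invalidated) {
        System.out.println("processReport: " + b + " does not belong to any file");
      }
    }

    private boolean belongsToAFile(String block) {
      return block.startsWith("live-"); // placeholder for the blocks map lookup
    }
  }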


[26/50] [abbrv] hadoop git commit: HDFS-7463. Simplify FSNamesystem#getBlockLocationsUpdateTimes. Contributed by Haohui Mai.

Posted by ka...@apache.org.
HDFS-7463. Simplify FSNamesystem#getBlockLocationsUpdateTimes. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d693a252
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d693a252
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d693a252

Branch: refs/heads/YARN-2139
Commit: d693a252bd0041c2493e7e07a3bf8bcf28e1923c
Parents: cb99f43
Author: Haohui Mai <wh...@apache.org>
Authored: Wed Dec 10 23:01:17 2014 -0800
Committer: Haohui Mai <wh...@apache.org>
Committed: Wed Dec 10 23:01:17 2014 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 +
 .../hdfs/server/namenode/FSNamesystem.java      | 218 +++++++++----------
 .../hdfs/server/namenode/NamenodeFsck.java      |   9 +-
 .../org/apache/hadoop/hdfs/TestGetBlocks.java   |   4 +-
 .../hdfs/server/namenode/NameNodeAdapter.java   |   4 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   |   6 +-
 6 files changed, 118 insertions(+), 125 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d693a252/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7b4e0c5..fd48605 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -449,6 +449,8 @@ Release 2.7.0 - UNRELEASED
 
     HDFS-7498. Simplify the logic in INodesInPath. (jing9)
 
+    HDFS-7463. Simplify FSNamesystem#getBlockLocationsUpdateTimes. (wheat9)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d693a252/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 30ac941..c17c4f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1749,27 +1749,76 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     logAuditEvent(true, "setOwner", srcArg, null, resultingStat);
   }
 
+  static class GetBlockLocationsResult {
+    final INodesInPath iip;
+    final LocatedBlocks blocks;
+    boolean updateAccessTime() {
+      return iip != null;
+    }
+    private GetBlockLocationsResult(INodesInPath iip, LocatedBlocks blocks) {
+      this.iip = iip;
+      this.blocks = blocks;
+    }
+  }
+
   /**
    * Get block locations within the specified range.
    * @see ClientProtocol#getBlockLocations(String, long, long)
    */
   LocatedBlocks getBlockLocations(String clientMachine, String src,
-      long offset, long length) throws AccessControlException,
-      FileNotFoundException, UnresolvedLinkException, IOException {
-    LocatedBlocks blocks = getBlockLocations(src, offset, length, true, true,
-        true);
+      long offset, long length) throws IOException {
+    checkOperation(OperationCategory.READ);
+    GetBlockLocationsResult res = null;
+    readLock();
+    try {
+      checkOperation(OperationCategory.READ);
+      res = getBlockLocations(src, offset, length, true, true);
+    } catch (AccessControlException e) {
+      logAuditEvent(false, "open", src);
+      throw e;
+    } finally {
+      readUnlock();
+    }
+
+    logAuditEvent(true, "open", src);
+
+    if (res == null) {
+      return null;
+    }
+
+    if (res.updateAccessTime()) {
+      writeLock();
+      final long now = now();
+      try {
+        checkOperation(OperationCategory.WRITE);
+        INode inode = res.iip.getLastINode();
+        boolean updateAccessTime = now > inode.getAccessTime() +
+            getAccessTimePrecision();
+        if (!isInSafeMode() && updateAccessTime) {
+          boolean changed = dir.setTimes(
+              inode, -1, now, false, res.iip.getLatestSnapshotId());
+          if (changed) {
+            getEditLog().logTimes(src, -1, now);
+          }
+        }
+      } catch (Throwable e) {
+        LOG.warn("Failed to update the access time of " + src, e);
+      } finally {
+        writeUnlock();
+      }
+    }
+
+    LocatedBlocks blocks = res.blocks;
     if (blocks != null) {
-      blockManager.getDatanodeManager().sortLocatedBlocks(clientMachine,
-          blocks.getLocatedBlocks());
+      blockManager.getDatanodeManager().sortLocatedBlocks(
+          clientMachine, blocks.getLocatedBlocks());
 
       // lastBlock is not part of getLocatedBlocks(), might need to sort it too
       LocatedBlock lastBlock = blocks.getLastLocatedBlock();
       if (lastBlock != null) {
-        ArrayList<LocatedBlock> lastBlockList =
-            Lists.newArrayListWithCapacity(1);
-        lastBlockList.add(lastBlock);
-        blockManager.getDatanodeManager().sortLocatedBlocks(clientMachine,
-            lastBlockList);
+        ArrayList<LocatedBlock> lastBlockList = Lists.newArrayList(lastBlock);
+        blockManager.getDatanodeManager().sortLocatedBlocks(
+            clientMachine, lastBlockList);
       }
     }
     return blocks;
@@ -1778,24 +1827,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   /**
    * Get block locations within the specified range.
    * @see ClientProtocol#getBlockLocations(String, long, long)
-   * @throws FileNotFoundException, UnresolvedLinkException, IOException
+   * @throws IOException
    */
-  LocatedBlocks getBlockLocations(String src, long offset, long length,
-      boolean doAccessTime, boolean needBlockToken, boolean checkSafeMode)
-      throws FileNotFoundException, UnresolvedLinkException, IOException {
-    try {
-      return getBlockLocationsInt(src, offset, length, doAccessTime,
-                                  needBlockToken, checkSafeMode);
-    } catch (AccessControlException e) {
-      logAuditEvent(false, "open", src);
-      throw e;
-    }
-  }
-
-  private LocatedBlocks getBlockLocationsInt(String src, long offset,
-      long length, boolean doAccessTime, boolean needBlockToken,
-      boolean checkSafeMode)
-      throws FileNotFoundException, UnresolvedLinkException, IOException {
+  GetBlockLocationsResult getBlockLocations(
+      String src, long offset, long length, boolean needBlockToken,
+      boolean checkSafeMode) throws IOException {
     if (offset < 0) {
       throw new HadoopIllegalArgumentException(
           "Negative offset is not supported. File: " + src);
@@ -1804,16 +1840,16 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       throw new HadoopIllegalArgumentException(
           "Negative length is not supported. File: " + src);
     }
-    final LocatedBlocks ret = getBlockLocationsUpdateTimes(src,
-        offset, length, doAccessTime, needBlockToken);  
-    logAuditEvent(true, "open", src);
+    final GetBlockLocationsResult ret = getBlockLocationsInt(
+        src, offset, length, needBlockToken);
+
     if (checkSafeMode && isInSafeMode()) {
-      for (LocatedBlock b : ret.getLocatedBlocks()) {
+      for (LocatedBlock b : ret.blocks.getLocatedBlocks()) {
         // if safemode & no block locations yet then throw safemodeException
         if ((b.getLocations() == null) || (b.getLocations().length == 0)) {
           SafeModeException se = new SafeModeException(
               "Zero blocklocations for " + src, safeMode);
-          if (haEnabled && haContext != null && 
+          if (haEnabled && haContext != null &&
               haContext.getState().getServiceState() == HAServiceState.ACTIVE) {
             throw new RetriableException(se);
           } else {
@@ -1825,95 +1861,49 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return ret;
   }
 
-  /*
-   * Get block locations within the specified range, updating the
-   * access times if necessary. 
-   */
-  private LocatedBlocks getBlockLocationsUpdateTimes(final String srcArg,
-      long offset, long length, boolean doAccessTime, boolean needBlockToken)
+  private GetBlockLocationsResult getBlockLocationsInt(
+      final String srcArg, long offset, long length, boolean needBlockToken)
       throws IOException {
     String src = srcArg;
     FSPermissionChecker pc = getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    for (int attempt = 0; attempt < 2; attempt++) {
-      boolean isReadOp = (attempt == 0);
-      if (isReadOp) { // first attempt is with readlock
-        checkOperation(OperationCategory.READ);
-        readLock();
-      }  else { // second attempt is with  write lock
-        checkOperation(OperationCategory.WRITE);
-        writeLock(); // writelock is needed to set accesstime
-      }
-      try {
-        if (isReadOp) {
-          checkOperation(OperationCategory.READ);
-        } else {
-          checkOperation(OperationCategory.WRITE);
-        }
-        src = dir.resolvePath(pc, src, pathComponents);
-        final INodesInPath iip = dir.getINodesInPath(src, true);
-        if (isPermissionEnabled) {
-          dir.checkPathAccess(pc, iip, FsAction.READ);
-        }
+    src = dir.resolvePath(pc, src, pathComponents);
+    final INodesInPath iip = dir.getINodesInPath(src, true);
+    final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
+    if (isPermissionEnabled) {
+      dir.checkPathAccess(pc, iip, FsAction.READ);
+      checkUnreadableBySuperuser(pc, inode, iip.getPathSnapshotId());
+    }
 
-        // if the namenode is in safemode, then do not update access time
-        if (isInSafeMode()) {
-          doAccessTime = false;
-        }
+    final long fileSize = iip.isSnapshot()
+        ? inode.computeFileSize(iip.getPathSnapshotId())
+        : inode.computeFileSizeNotIncludingLastUcBlock();
+    boolean isUc = inode.isUnderConstruction();
+    if (iip.isSnapshot()) {
+      // if src indicates a snapshot file, we need to make sure the returned
+      // blocks do not exceed the size of the snapshot file.
+      length = Math.min(length, fileSize - offset);
+      isUc = false;
+    }
 
-        final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
-        if (isPermissionEnabled) {
-          checkUnreadableBySuperuser(pc, inode, iip.getPathSnapshotId());
-        }
-        if (!iip.isSnapshot() //snapshots are readonly, so don't update atime.
-            && doAccessTime && isAccessTimeSupported()) {
-          final long now = now();
-          if (now > inode.getAccessTime() + getAccessTimePrecision()) {
-            // if we have to set access time but we only have the readlock, then
-            // restart this entire operation with the writeLock.
-            if (isReadOp) {
-              continue;
-            }
-            boolean changed = dir.setTimes(inode, -1, now, false,
-                    iip.getLatestSnapshotId());
-            if (changed) {
-              getEditLog().logTimes(src, -1, now);
-            }
-          }
-        }
-        final long fileSize = iip.isSnapshot() ?
-            inode.computeFileSize(iip.getPathSnapshotId())
-            : inode.computeFileSizeNotIncludingLastUcBlock();
-        boolean isUc = inode.isUnderConstruction();
-        if (iip.isSnapshot()) {
-          // if src indicates a snapshot file, we need to make sure the returned
-          // blocks do not exceed the size of the snapshot file.
-          length = Math.min(length, fileSize - offset);
-          isUc = false;
-        }
+    final FileEncryptionInfo feInfo =
+        FSDirectory.isReservedRawName(srcArg) ? null
+            : dir.getFileEncryptionInfo(inode, iip.getPathSnapshotId(), iip);
 
-        final FileEncryptionInfo feInfo =
-          FSDirectory.isReservedRawName(srcArg) ?
-          null : dir.getFileEncryptionInfo(inode, iip.getPathSnapshotId(),
-              iip);
-
-        final LocatedBlocks blocks =
-          blockManager.createLocatedBlocks(inode.getBlocks(), fileSize,
-            isUc, offset, length, needBlockToken, iip.isSnapshot(), feInfo);
-        // Set caching information for the located blocks.
-        for (LocatedBlock lb: blocks.getLocatedBlocks()) {
-          cacheManager.setCachedLocations(lb);
-        }
-        return blocks;
-      } finally {
-        if (isReadOp) {
-          readUnlock();
-        } else {
-          writeUnlock();
-        }
-      }
+    final LocatedBlocks blocks = blockManager.createLocatedBlocks(
+        inode.getBlocks(), fileSize, isUc, offset, length, needBlockToken,
+        iip.isSnapshot(), feInfo);
+
+    // Set caching information for the located blocks.
+    for (LocatedBlock lb : blocks.getLocatedBlocks()) {
+      cacheManager.setCachedLocations(lb);
     }
-    return null; // can never reach here
+
+    final long now = now();
+    boolean updateAccessTime = isAccessTimeSupported() && !isInSafeMode()
+        && !iip.isSnapshot()
+        && now > inode.getAccessTime() + getAccessTimePrecision();
+    return new GetBlockLocationsResult(updateAccessTime ? iip : null, blocks);
   }
 
   /**
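
The refactored getBlockLocations above no longer retries the whole operation under the write lock when an access-time update is due; it decides under the read lock whether an update is needed and reports that through the returned GetBlockLocationsResult, leaving the actual atime write to the caller. A minimal sketch of that "decide under the read lock, apply later under the write lock" shape is below; AccessTimeSketch and its Result type are hypothetical stand-ins, not HDFS classes.

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Hypothetical sketch: decide whether an access-time update is needed while
    // holding only the read lock, and let the caller apply it under the write lock.
    class AccessTimeSketch {
      static final class Result {
        final boolean updateAccessTime;   // true if the caller should set atime
        final String blockReport;         // stands in for LocatedBlocks
        Result(boolean updateAccessTime, String blockReport) {
          this.updateAccessTime = updateAccessTime;
          this.blockReport = blockReport;
        }
      }

      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private final long accessTimePrecisionMs = 3600_000L;  // assumed 1h precision
      private long lastAccessTime;                           // atime of one fake file

      Result getBlockLocations(boolean inSafeMode) {
        lock.readLock().lock();
        try {
          long now = System.currentTimeMillis();
          boolean update = !inSafeMode
              && now > lastAccessTime + accessTimePrecisionMs;
          return new Result(update, "blocks@" + now);
        } finally {
          lock.readLock().unlock();
        }
      }

      void applyAccessTimeUpdate() {   // called only when Result.updateAccessTime is true
        lock.writeLock().lock();
        try {
          lastAccessTime = System.currentTimeMillis();
        } finally {
          lock.writeLock().unlock();
        }
      }
    }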

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d693a252/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index f82f0ea..bab8f5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -443,12 +443,15 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
     long fileLen = file.getLen();
     // Get block locations without updating the file access time 
     // and without block access tokens
-    LocatedBlocks blocks;
+    LocatedBlocks blocks = null;
+    FSNamesystem fsn = namenode.getNamesystem();
+    fsn.readLock();
     try {
-      blocks = namenode.getNamesystem().getBlockLocations(path, 0,
-          fileLen, false, false, false);
+      blocks = fsn.getBlockLocations(path, 0, fileLen, false, false).blocks;
     } catch (FileNotFoundException fnfe) {
       blocks = null;
+    } finally {
+      fsn.readUnlock();
     }
     if (blocks == null) { // the file is deleted
       return;
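
With getBlockLocations no longer taking the namesystem lock on the caller's behalf, fsck now brackets the call with readLock()/readUnlock() itself, as the hunk above shows. The usual shape of that idiom, sketched with a plain ReentrantReadWriteLock rather than FSNamesystem:

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class ReadLockSketch {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      String lookup(String path) {
        String result;
        lock.readLock().lock();
        try {
          result = "blocks-for:" + path;   // stands in for fsn.getBlockLocations(...)
        } finally {
          lock.readLock().unlock();        // always released, even on exceptions
        }
        return result;
      }
    }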

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d693a252/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
index 2af86bd..cc89852 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
@@ -167,9 +167,7 @@ public class TestGetBlocks {
       if (stm != null) {
         stm.close();
       }
-      if (client != null) {
-        client.close();
-      }
+      client.close();
       cluster.shutdown();
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d693a252/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
index 61e7f14..7aad378 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
@@ -64,8 +64,8 @@ public class NameNodeAdapter {
    */
   public static LocatedBlocks getBlockLocations(NameNode namenode,
       String src, long offset, long length) throws IOException {
-    return namenode.getNamesystem().getBlockLocations(
-        src, offset, length, false, true, true);
+    return namenode.getNamesystem().getBlockLocations("foo",
+        src, offset, length);
   }
   
   public static HdfsFileStatus getFileInfo(NameNode namenode, String src,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d693a252/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index ef7de0d..aecf55e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -996,9 +996,9 @@ public class TestFsck {
     DatanodeManager dnManager = mock(DatanodeManager.class);
     
     when(namenode.getNamesystem()).thenReturn(fsName);
-    when(fsName.getBlockLocations(anyString(), anyLong(), anyLong(),
-        anyBoolean(), anyBoolean(), anyBoolean())).
-        thenThrow(new FileNotFoundException()) ;
+    when(fsName.getBlockLocations(
+        anyString(), anyLong(), anyLong(), anyBoolean(), anyBoolean()))
+        .thenThrow(new FileNotFoundException());
     when(fsName.getBlockManager()).thenReturn(blockManager);
     when(blockManager.getDatanodeManager()).thenReturn(dnManager);
 


[41/50] [abbrv] hadoop git commit: HDFS-7509. Avoid resolving path multiple times. Contributed by Jing Zhao.

Posted by ka...@apache.org.
HDFS-7509. Avoid resolving path multiple times. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c78e3a7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c78e3a7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c78e3a7c

Branch: refs/heads/YARN-2139
Commit: c78e3a7cdd10c40454e9acb06986ba6d8573cb19
Parents: 7784b10
Author: Jing Zhao <ji...@apache.org>
Authored: Fri Dec 12 14:15:06 2014 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Fri Dec 12 15:13:35 2014 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 +
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java |  27 +--
 .../hdfs/server/namenode/FSDirMkdirOp.java      |  33 ++-
 .../hdfs/server/namenode/FSDirRenameOp.java     | 173 ++++++-------
 .../server/namenode/FSDirStatAndListingOp.java  |  34 +--
 .../hdfs/server/namenode/FSDirXAttrOp.java      |  11 +-
 .../hdfs/server/namenode/FSDirectory.java       | 243 +++++++------------
 .../hdfs/server/namenode/FSEditLogLoader.java   |  37 +--
 .../hdfs/server/namenode/FSImageFormat.java     |   6 +-
 .../hdfs/server/namenode/FSNamesystem.java      | 200 ++++++++-------
 .../hdfs/server/namenode/INodesInPath.java      | 137 +++++++----
 .../hdfs/server/namenode/LeaseManager.java      |   6 +-
 .../hdfs/server/namenode/FSAclBaseTest.java     |   6 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   |   4 +-
 .../hdfs/server/namenode/TestLeaseManager.java  |   2 +
 .../server/namenode/TestSnapshotPathINodes.java |  64 ++---
 .../snapshot/TestOpenFilesWithSnapshot.java     |   3 +-
 .../snapshot/TestSnapshotReplication.java       |  11 +-
 18 files changed, 473 insertions(+), 526 deletions(-)
----------------------------------------------------------------------
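
The common thread in this commit is resolving a path into its INodesInPath once, near the RPC entry point, and handing that resolved object to internal helpers instead of letting each helper re-walk the string path. A toy illustration of the idea, using a hypothetical in-memory Node tree rather than the real FSDirectory API:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Hypothetical sketch of "resolve once, pass the resolved path down".
    class ResolveOnceSketch {
      static final class Node {
        final String name;
        final Map<String, Node> children = new HashMap<>();
        Node(String name) { this.name = name; }
      }

      /** Rough analogue of INodesInPath: every component, existing or not (null). */
      static List<Node> resolve(Node root, String path) {
        List<Node> nodes = new ArrayList<>();
        Node cur = root;
        nodes.add(root);
        for (String component : path.split("/")) {
          if (component.isEmpty()) continue;
          cur = (cur == null) ? null : cur.children.get(component);
          nodes.add(cur);                       // null for missing components
        }
        return nodes;
      }

      // Helpers take the resolved list; none of them re-parse the string path.
      static boolean exists(List<Node> resolved) {
        return resolved.get(resolved.size() - 1) != null;
      }

      static Node lastExistingAncestor(List<Node> resolved) {
        Node last = null;
        for (Node n : resolved) {
          if (n == null) break;
          last = n;
        }
        return last;
      }

      public static void main(String[] args) {
        Node root = new Node("/");
        root.children.put("user", new Node("user"));
        List<Node> resolved = resolve(root, "/user/jing/file");
        System.out.println(exists(resolved));                     // false
        System.out.println(lastExistingAncestor(resolved).name);  // user
      }
    }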


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d635400..eeedb0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -453,6 +453,8 @@ Release 2.7.0 - UNRELEASED
 
     HDFS-7463. Simplify FSNamesystem#getBlockLocationsUpdateTimes. (wheat9)
 
+    HDFS-7509. Avoid resolving path multiple times. (jing9)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index c2dee20..0d2b34c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -46,7 +46,7 @@ class FSDirAclOp {
       INodesInPath iip = fsd.getINodesInPath4Write(
           FSDirectory.normalizePath(src), true);
       fsd.checkOwner(pc, iip);
-      INode inode = FSDirectory.resolveLastINode(src, iip);
+      INode inode = FSDirectory.resolveLastINode(iip);
       int snapshotId = iip.getLatestSnapshotId();
       List<AclEntry> existingAcl = AclStorage.readINodeLogicalAcl(inode);
       List<AclEntry> newAcl = AclTransformation.mergeAclEntries(
@@ -72,7 +72,7 @@ class FSDirAclOp {
       INodesInPath iip = fsd.getINodesInPath4Write(
           FSDirectory.normalizePath(src), true);
       fsd.checkOwner(pc, iip);
-      INode inode = FSDirectory.resolveLastINode(src, iip);
+      INode inode = FSDirectory.resolveLastINode(iip);
       int snapshotId = iip.getLatestSnapshotId();
       List<AclEntry> existingAcl = AclStorage.readINodeLogicalAcl(inode);
       List<AclEntry> newAcl = AclTransformation.filterAclEntriesByAclSpec(
@@ -97,7 +97,7 @@ class FSDirAclOp {
       INodesInPath iip = fsd.getINodesInPath4Write(
           FSDirectory.normalizePath(src), true);
       fsd.checkOwner(pc, iip);
-      INode inode = FSDirectory.resolveLastINode(src, iip);
+      INode inode = FSDirectory.resolveLastINode(iip);
       int snapshotId = iip.getLatestSnapshotId();
       List<AclEntry> existingAcl = AclStorage.readINodeLogicalAcl(inode);
       List<AclEntry> newAcl = AclTransformation.filterDefaultAclEntries(
@@ -121,7 +121,7 @@ class FSDirAclOp {
     try {
       INodesInPath iip = fsd.getINodesInPath4Write(src);
       fsd.checkOwner(pc, iip);
-      unprotectedRemoveAcl(fsd, src);
+      unprotectedRemoveAcl(fsd, iip);
     } finally {
       fsd.writeUnlock();
     }
@@ -168,7 +168,7 @@ class FSDirAclOp {
       if (fsd.isPermissionEnabled()) {
         fsd.checkTraverse(pc, iip);
       }
-      INode inode = FSDirectory.resolveLastINode(srcs, iip);
+      INode inode = FSDirectory.resolveLastINode(iip);
       int snapshotId = iip.getPathSnapshotId();
       List<AclEntry> acl = AclStorage.readINodeAcl(inode, snapshotId);
       FsPermission fsPermission = inode.getFsPermission(snapshotId);
@@ -185,16 +185,17 @@ class FSDirAclOp {
   static List<AclEntry> unprotectedSetAcl(
       FSDirectory fsd, String src, List<AclEntry> aclSpec)
       throws IOException {
+    assert fsd.hasWriteLock();
+    final INodesInPath iip = fsd.getINodesInPath4Write(
+        FSDirectory.normalizePath(src), true);
+
     // ACL removal is logged to edits as OP_SET_ACL with an empty list.
     if (aclSpec.isEmpty()) {
-      unprotectedRemoveAcl(fsd, src);
+      unprotectedRemoveAcl(fsd, iip);
       return AclFeature.EMPTY_ENTRY_LIST;
     }
 
-    assert fsd.hasWriteLock();
-    INodesInPath iip = fsd.getINodesInPath4Write(FSDirectory.normalizePath
-        (src), true);
-    INode inode = FSDirectory.resolveLastINode(src, iip);
+    INode inode = FSDirectory.resolveLastINode(iip);
     int snapshotId = iip.getLatestSnapshotId();
     List<AclEntry> existingAcl = AclStorage.readINodeLogicalAcl(inode);
     List<AclEntry> newAcl = AclTransformation.replaceAclEntries(existingAcl,
@@ -212,12 +213,10 @@ class FSDirAclOp {
     }
   }
 
-  private static void unprotectedRemoveAcl(FSDirectory fsd, String src)
+  private static void unprotectedRemoveAcl(FSDirectory fsd, INodesInPath iip)
       throws IOException {
     assert fsd.hasWriteLock();
-    INodesInPath iip = fsd.getINodesInPath4Write(
-        FSDirectory.normalizePath(src), true);
-    INode inode = FSDirectory.resolveLastINode(src, iip);
+    INode inode = FSDirectory.resolveLastINode(iip);
     int snapshotId = iip.getLatestSnapshotId();
     AclFeature f = inode.getAclFeature();
     if (f == null) {
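
In unprotectedSetAcl above, the path is resolved into an iip first, and an empty ACL spec is then treated as removal (the edit log records removal as OP_SET_ACL with an empty list), with both branches operating on the same resolved object. A compact sketch of that dispatch, with a hypothetical map standing in for the inode's AclFeature:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Hypothetical sketch: an empty spec means "remove", and both branches work
    // on a target the caller resolved exactly once.
    class AclSetSketch {
      private final Map<String, List<String>> aclsByNode = new HashMap<>();

      List<String> setAcl(String resolvedNode, List<String> aclSpec) {
        if (aclSpec.isEmpty()) {          // removal is modeled as "set to empty"
          aclsByNode.remove(resolvedNode);
          return Collections.emptyList();
        }
        aclsByNode.put(resolvedNode, aclSpec);
        return aclSpec;
      }
    }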

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index c8c5cb2..7e62d2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -50,8 +50,7 @@ class FSDirMkdirOp {
       throw new InvalidPathException(src);
     }
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath
-        (src);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     src = fsd.resolvePath(pc, src, pathComponents);
     INodesInPath iip = fsd.getINodesInPath4Write(src);
     if (fsd.isPermissionEnabled()) {
@@ -72,7 +71,7 @@ class FSDirMkdirOp {
       // create multiple inodes.
       fsn.checkFsObjectLimit();
 
-      if (!mkdirsRecursively(fsd, src, permissions, false, now())) {
+      if (mkdirsRecursively(fsd, iip, permissions, false, now()) == null) {
         throw new IOException("Failed to create directory: " + src);
       }
     }
@@ -97,33 +96,34 @@ class FSDirMkdirOp {
    * If ancestor directories do not exist, automatically create them.
 
    * @param fsd FSDirectory
-   * @param src string representation of the path to the directory
+   * @param iip the INodesInPath instance containing all the existing INodes
+   *            and null elements for non-existing components in the path
    * @param permissions the permission of the directory
    * @param inheritPermission
    *   if the permission of the directory should inherit from its parent or not.
    *   u+wx is implicitly added to the automatically created directories,
    *   and to the given directory if inheritPermission is true
    * @param now creation time
-   * @return true if the operation succeeds false otherwise
+   * @return non-null INodesInPath instance if operation succeeds
    * @throws QuotaExceededException if directory creation violates
    *                                any quota limit
    * @throws UnresolvedLinkException if a symlink is encountered in src.
    * @throws SnapshotAccessControlException if path is in RO snapshot
    */
-  static boolean mkdirsRecursively(
-      FSDirectory fsd, String src, PermissionStatus permissions,
-      boolean inheritPermission, long now)
+  static INodesInPath mkdirsRecursively(FSDirectory fsd, INodesInPath iip,
+      PermissionStatus permissions, boolean inheritPermission, long now)
       throws FileAlreadyExistsException, QuotaExceededException,
              UnresolvedLinkException, SnapshotAccessControlException,
              AclException {
-    src = FSDirectory.normalizePath(src);
-    String[] names = INode.getPathNames(src);
-    byte[][] components = INode.getPathComponents(names);
-    final int lastInodeIndex = components.length - 1;
+    final int lastInodeIndex = iip.length() - 1;
+    final byte[][] components = iip.getPathComponents();
+    final String[] names = new String[components.length];
+    for (int i = 0; i < components.length; i++) {
+      names[i] = DFSUtil.bytes2String(components[i]);
+    }
 
     fsd.writeLock();
     try {
-      INodesInPath iip = fsd.getExistingPathINodes(components);
       if (iip.isSnapshot()) {
         throw new SnapshotAccessControlException(
                 "Modification on RO snapshot is disallowed");
@@ -136,8 +136,7 @@ class FSDirMkdirOp {
       for(; i < length && (curNode = iip.getINode(i)) != null; i++) {
         pathbuilder.append(Path.SEPARATOR).append(names[i]);
         if (!curNode.isDirectory()) {
-          throw new FileAlreadyExistsException(
-                  "Parent path is not a directory: "
+          throw new FileAlreadyExistsException("Parent path is not a directory: "
                   + pathbuilder + " " + curNode.getLocalName());
         }
       }
@@ -181,7 +180,7 @@ class FSDirMkdirOp {
             components[i], (i < lastInodeIndex) ? parentPermissions :
                 permissions, null, now);
         if (iip.getINode(i) == null) {
-          return false;
+          return null;
         }
         // Directory creation also count towards FilesCreated
         // to match count of FilesDeleted metric.
@@ -197,7 +196,7 @@ class FSDirMkdirOp {
     } finally {
       fsd.writeUnlock();
     }
-    return true;
+    return iip;
   }
 
   /**
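
The mkdirsRecursively javadoc above describes the walk over the already-resolved components: every existing component must be a directory, the missing tail is created, and success is now reported by returning the updated INodesInPath rather than a boolean. A self-contained sketch of that walk over a toy tree (Entry and the "blocked by a file" check are stand-ins, not the HDFS classes):

    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical sketch of "create every missing ancestor, fail if a
    // non-directory is in the way".
    class MkdirsSketch {
      static final class Entry {
        final boolean isDir;
        final Map<String, Entry> children = new HashMap<>();
        Entry(boolean isDir) { this.isDir = isDir; }
      }

      /** Returns the deepest entry on success, or null if blocked by a file. */
      static Entry mkdirsRecursively(Entry root, String path) {
        Entry cur = root;
        for (String component : path.split("/")) {
          if (component.isEmpty()) continue;
          Entry next = cur.children.get(component);
          if (next == null) {
            next = new Entry(true);            // create the missing directory
            cur.children.put(component, next);
          } else if (!next.isDir) {
            return null;                       // "parent path is not a directory"
          }
          cur = next;
        }
        return cur;
      }
    }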

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index c62c88e..e3020ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ChunkedArrayList;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.util.Time;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -43,9 +44,9 @@ import java.util.Map;
 
 import static org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import static org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
-import static org.apache.hadoop.util.Time.now;
 
 class FSDirRenameOp {
+  @Deprecated
   static RenameOldResult renameToInt(
       FSDirectory fsd, final String srcArg, final String dstArg,
       boolean logRetryCache)
@@ -67,7 +68,7 @@ class FSDirRenameOp {
     src = fsd.resolvePath(pc, src, srcComponents);
     dst = fsd.resolvePath(pc, dst, dstComponents);
     @SuppressWarnings("deprecation")
-    final boolean status = renameToInternal(fsd, pc, src, dst, logRetryCache);
+    final boolean status = renameTo(fsd, pc, src, dst, logRetryCache);
     if (status) {
       resultingStat = fsd.getAuditFileInfo(dst, false);
     }
@@ -116,6 +117,22 @@ class FSDirRenameOp {
   }
 
   /**
+   * <br>
+   * Note: This is to be used by {@link FSEditLogLoader} only.
+   * <br>
+   */
+  @Deprecated
+  static boolean unprotectedRenameTo(FSDirectory fsd, String src, String dst,
+      long timestamp) throws IOException {
+    if (fsd.isDir(dst)) {
+      dst += Path.SEPARATOR + new Path(src).getName();
+    }
+    final INodesInPath srcIIP = fsd.getINodesInPath4Write(src, false);
+    final INodesInPath dstIIP = fsd.getINodesInPath4Write(dst, false);
+    return unprotectedRenameTo(fsd, src, dst, srcIIP, dstIIP, timestamp);
+  }
+
+  /**
    * Change a path name
    *
    * @param fsd FSDirectory
@@ -126,24 +143,19 @@ class FSDirRenameOp {
    * boolean, Options.Rename...)}
    */
   @Deprecated
-  static boolean unprotectedRenameTo(
-      FSDirectory fsd, String src, String dst, long timestamp)
+  static boolean unprotectedRenameTo(FSDirectory fsd, String src, String dst,
+      final INodesInPath srcIIP, final INodesInPath dstIIP, long timestamp)
       throws IOException {
     assert fsd.hasWriteLock();
-    INodesInPath srcIIP = fsd.getINodesInPath4Write(src, false);
     final INode srcInode = srcIIP.getLastINode();
     try {
-      validateRenameSource(src, srcIIP);
+      validateRenameSource(srcIIP);
     } catch (SnapshotException e) {
       throw e;
     } catch (IOException ignored) {
       return false;
     }
 
-    if (fsd.isDir(dst)) {
-      dst += Path.SEPARATOR + new Path(src).getName();
-    }
-
     // validate the destination
     if (dst.equals(src)) {
       return true;
@@ -155,7 +167,6 @@ class FSDirRenameOp {
       return false;
     }
 
-    INodesInPath dstIIP = fsd.getINodesInPath4Write(dst, false);
     if (dstIIP.getLastINode() != null) {
       NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " +
           "failed to rename " + src + " to " + dst + " because destination " +
@@ -234,8 +245,7 @@ class FSDirRenameOp {
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
     src = fsd.resolvePath(pc, src, srcComponents);
     dst = fsd.resolvePath(pc, dst, dstComponents);
-    renameToInternal(fsd, pc, src, dst, logRetryCache, collectedBlocks,
-        options);
+    renameTo(fsd, pc, src, dst, collectedBlocks, logRetryCache, options);
     HdfsFileStatus resultingStat = fsd.getAuditFileInfo(dst, false);
 
     return new AbstractMap.SimpleImmutableEntry<BlocksMapUpdateInfo,
@@ -246,29 +256,44 @@ class FSDirRenameOp {
    * @see #unprotectedRenameTo(FSDirectory, String, String, long,
    * org.apache.hadoop.fs.Options.Rename...)
    */
-  static void renameTo(
-      FSDirectory fsd, String src, String dst, long mtime,
-      BlocksMapUpdateInfo collectedBlocks, Options.Rename... options)
-      throws IOException {
+  static void renameTo(FSDirectory fsd, FSPermissionChecker pc, String src,
+      String dst, BlocksMapUpdateInfo collectedBlocks, boolean logRetryCache,
+      Options.Rename... options) throws IOException {
+    final INodesInPath srcIIP = fsd.getINodesInPath4Write(src, false);
+    final INodesInPath dstIIP = fsd.getINodesInPath4Write(dst, false);
+    if (fsd.isPermissionEnabled()) {
+      // Rename does not operate on link targets
+      // Do not resolveLink when checking permissions of src and dst
+      // Check write access to parent of src
+      fsd.checkPermission(pc, srcIIP, false, null, FsAction.WRITE, null, null,
+          false);
+      // Check write access to ancestor of dst
+      fsd.checkPermission(pc, dstIIP, false, FsAction.WRITE, null, null, null,
+          false);
+    }
+
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* FSDirectory.renameTo: " + src + " to "
           + dst);
     }
+    final long mtime = Time.now();
     fsd.writeLock();
     try {
-      if (unprotectedRenameTo(fsd, src, dst, mtime, collectedBlocks, options)) {
+      if (unprotectedRenameTo(fsd, src, dst, srcIIP, dstIIP, mtime,
+          collectedBlocks, options)) {
         fsd.getFSNamesystem().incrDeletedFileCount(1);
       }
     } finally {
       fsd.writeUnlock();
     }
+    fsd.getEditLog().logRename(src, dst, mtime, logRetryCache, options);
   }
 
   /**
    * Rename src to dst.
    * <br>
    * Note: This is to be used by {@link org.apache.hadoop.hdfs.server
-   * .namenode.FSEditLog} only.
+   * .namenode.FSEditLogLoader} only.
    * <br>
    *
    * @param fsd       FSDirectory
@@ -282,7 +307,9 @@ class FSDirRenameOp {
       Options.Rename... options)
       throws IOException {
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
-    boolean ret = unprotectedRenameTo(fsd, src, dst, timestamp,
+    final INodesInPath srcIIP = fsd.getINodesInPath4Write(src, false);
+    final INodesInPath dstIIP = fsd.getINodesInPath4Write(dst, false);
+    boolean ret = unprotectedRenameTo(fsd, src, dst, srcIIP, dstIIP, timestamp,
         collectedBlocks, options);
     if (!collectedBlocks.getToDeleteList().isEmpty()) {
       fsd.getFSNamesystem().removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
@@ -302,8 +329,8 @@ class FSDirRenameOp {
    * @param collectedBlocks blocks to be removed
    * @param options         Rename options
    */
-  static boolean unprotectedRenameTo(
-      FSDirectory fsd, String src, String dst, long timestamp,
+  static boolean unprotectedRenameTo(FSDirectory fsd, String src, String dst,
+      final INodesInPath srcIIP, final INodesInPath dstIIP, long timestamp,
       BlocksMapUpdateInfo collectedBlocks, Options.Rename... options)
       throws IOException {
     assert fsd.hasWriteLock();
@@ -311,9 +338,8 @@ class FSDirRenameOp {
         && Arrays.asList(options).contains(Options.Rename.OVERWRITE);
 
     final String error;
-    final INodesInPath srcIIP = fsd.getINodesInPath4Write(src, false);
     final INode srcInode = srcIIP.getLastINode();
-    validateRenameSource(src, srcIIP);
+    validateRenameSource(srcIIP);
 
     // validate the destination
     if (dst.equals(src)) {
@@ -322,7 +348,6 @@ class FSDirRenameOp {
     }
     validateDestination(src, dst, srcInode);
 
-    INodesInPath dstIIP = fsd.getINodesInPath4Write(dst, false);
     if (dstIIP.length() == 1) {
       error = "rename destination cannot be the root";
       NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " +
@@ -373,8 +398,8 @@ class FSDirRenameOp {
     long removedNum = 0;
     try {
       if (dstInode != null) { // dst exists remove it
-        if ((removedNum = fsd.removeLastINode(dstIIP)) != -1) {
-          removedDst = dstIIP.getLastINode();
+        if ((removedNum = fsd.removeLastINode(tx.dstIIP)) != -1) {
+          removedDst = tx.dstIIP.getLastINode();
           undoRemoveDst = true;
         }
       }
@@ -395,13 +420,13 @@ class FSDirRenameOp {
           undoRemoveDst = false;
           if (removedNum > 0) {
             List<INode> removedINodes = new ChunkedArrayList<INode>();
-            if (!removedDst.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
+            if (!removedDst.isInLatestSnapshot(tx.dstIIP.getLatestSnapshotId())) {
               removedDst.destroyAndCollectBlocks(collectedBlocks,
                   removedINodes);
               filesDeleted = true;
             } else {
               filesDeleted = removedDst.cleanSubtree(
-                  Snapshot.CURRENT_STATE_ID, dstIIP.getLatestSnapshotId(),
+                  Snapshot.CURRENT_STATE_ID, tx.dstIIP.getLatestSnapshotId(),
                   collectedBlocks, removedINodes, true)
                   .get(Quota.NAMESPACE) >= 0;
             }
@@ -431,7 +456,7 @@ class FSDirRenameOp {
           dstParent.asDirectory().undoRename4DstParent(removedDst,
               dstIIP.getLatestSnapshotId());
         } else {
-          fsd.addLastINodeNoQuotaCheck(dstIIP, removedDst);
+          fsd.addLastINodeNoQuotaCheck(tx.dstIIP, removedDst);
         }
         if (removedDst.isReference()) {
           final INodeReference removedDstRef = removedDst.asReference();
@@ -447,59 +472,41 @@ class FSDirRenameOp {
   }
 
   /**
-   * @see #unprotectedRenameTo(FSDirectory, String, String, long)
    * @deprecated Use {@link #renameToInt(FSDirectory, String, String,
    * boolean, Options.Rename...)}
    */
   @Deprecated
   @SuppressWarnings("deprecation")
-  private static boolean renameTo(
-      FSDirectory fsd, String src, String dst, long mtime)
-      throws IOException {
+  private static boolean renameTo(FSDirectory fsd, FSPermissionChecker pc,
+      String src, String dst, boolean logRetryCache) throws IOException {
+    // Rename does not operate on link targets
+    // Do not resolveLink when checking permissions of src and dst
+    // Check write access to parent of src
+    final INodesInPath srcIIP = fsd.getINodesInPath4Write(src, false);
+    // Note: We should not be doing this.  This is move() not renameTo().
+    final String actualDst = fsd.isDir(dst) ?
+        dst + Path.SEPARATOR + new Path(src).getName() : dst;
+    final INodesInPath dstIIP = fsd.getINodesInPath4Write(actualDst, false);
+    if (fsd.isPermissionEnabled()) {
+      fsd.checkPermission(pc, srcIIP, false, null, FsAction.WRITE, null, null,
+          false);
+      // Check write access to ancestor of dst
+      fsd.checkPermission(pc, dstIIP, false, FsAction.WRITE, null, null,
+          null, false);
+    }
+
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* FSDirectory.renameTo: " + src + " to "
           + dst);
     }
+    final long mtime = Time.now();
     boolean stat = false;
     fsd.writeLock();
     try {
-      stat = unprotectedRenameTo(fsd, src, dst, mtime);
+      stat = unprotectedRenameTo(fsd, src, actualDst, srcIIP, dstIIP, mtime);
     } finally {
       fsd.writeUnlock();
     }
-    return stat;
-  }
-
-  /**
-   * @deprecated See {@link #renameTo(FSDirectory, String, String, long)}
-   */
-  @Deprecated
-  private static boolean renameToInternal(
-      FSDirectory fsd, FSPermissionChecker pc, String src, String dst,
-      boolean logRetryCache)
-      throws IOException {
-    if (fsd.isPermissionEnabled()) {
-      //We should not be doing this.  This is move() not renameTo().
-      //but for now,
-      //NOTE: yes, this is bad!  it's assuming much lower level behavior
-      //      of rewriting the dst
-      String actualdst = fsd.isDir(dst) ? dst + Path.SEPARATOR + new Path
-          (src).getName() : dst;
-      // Rename does not operates on link targets
-      // Do not resolveLink when checking permissions of src and dst
-      // Check write access to parent of src
-      INodesInPath srcIIP = fsd.getINodesInPath(src, false);
-      fsd.checkPermission(pc, srcIIP, false, null, FsAction.WRITE, null, null,
-          false);
-      INodesInPath dstIIP = fsd.getINodesInPath(actualdst, false);
-      // Check write access to ancestor of dst
-      fsd.checkPermission(pc, dstIIP, false, FsAction.WRITE, null, null,
-          null, false);
-    }
-
-    long mtime = now();
-    @SuppressWarnings("deprecation")
-    final boolean stat = renameTo(fsd, src, dst, mtime);
     if (stat) {
       fsd.getEditLog().logRename(src, dst, mtime, logRetryCache);
       return true;
@@ -507,29 +514,6 @@ class FSDirRenameOp {
     return false;
   }
 
-  private static void renameToInternal(
-      FSDirectory fsd, FSPermissionChecker pc, String src, String dst,
-      boolean logRetryCache, BlocksMapUpdateInfo collectedBlocks,
-      Options.Rename... options)
-      throws IOException {
-    if (fsd.isPermissionEnabled()) {
-      // Rename does not operates on link targets
-      // Do not resolveLink when checking permissions of src and dst
-      // Check write access to parent of src
-      INodesInPath srcIIP = fsd.getINodesInPath(src, false);
-      fsd.checkPermission(pc, srcIIP, false, null, FsAction.WRITE, null, null,
-          false);
-      // Check write access to ancestor of dst
-      INodesInPath dstIIP = fsd.getINodesInPath(dst, false);
-      fsd.checkPermission(pc, dstIIP, false, FsAction.WRITE, null, null, null,
-          false);
-    }
-
-    long mtime = now();
-    renameTo(fsd, src, dst, mtime, collectedBlocks, options);
-    fsd.getEditLog().logRename(src, dst, mtime, logRetryCache, options);
-  }
-
   private static void validateDestination(
       String src, String dst, INode srcInode)
       throws IOException {
@@ -579,13 +563,13 @@ class FSDirRenameOp {
     }
   }
 
-  private static void validateRenameSource(String src, INodesInPath srcIIP)
+  private static void validateRenameSource(INodesInPath srcIIP)
       throws IOException {
     String error;
     final INode srcInode = srcIIP.getLastINode();
     // validate source
     if (srcInode == null) {
-      error = "rename source " + src + " is not found.";
+      error = "rename source " + srcIIP.getPath() + " is not found.";
       NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
           + error);
       throw new FileNotFoundException(error);
@@ -625,8 +609,7 @@ class FSDirRenameOp {
       this.dst = dst;
       srcChild = srcIIP.getLastINode();
       srcChildName = srcChild.getLocalNameBytes();
-      isSrcInSnapshot = srcChild.isInLatestSnapshot(srcIIP
-          .getLatestSnapshotId());
+      isSrcInSnapshot = srcChild.isInLatestSnapshot(srcIIP.getLatestSnapshotId());
       srcChildIsReference = srcChild.isReference();
       srcParent = srcIIP.getINode(-2).asDirectory();
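
The renameTo variants above now resolve src and dst once and, when permissions are enabled, check write access on the parent of src and on the nearest existing ancestor of dst before doing the move. A small sketch of those two pre-checks against a toy tree; Node, deepestExisting and the SecurityException are simplifications, not the HDFS permission checker:

    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical sketch of the two rename pre-checks: write on src's parent,
    // write on dst's nearest existing ancestor.
    class RenameCheckSketch {
      static final class Node {
        final boolean writable;
        final Map<String, Node> children = new HashMap<>();
        Node(boolean writable) { this.writable = writable; }
      }

      /** Deepest existing node among the first `end` components (root if none). */
      static Node deepestExisting(Node root, String[] parts, int end) {
        Node cur = root, last = root;
        for (int i = 0; i < end && cur != null; i++) {
          if (parts[i].isEmpty()) continue;
          cur = cur.children.get(parts[i]);
          if (cur != null) last = cur;
        }
        return last;
      }

      static void checkRename(Node root, String src, String dst) {
        String[] s = src.split("/");
        String[] d = dst.split("/");
        if (!deepestExisting(root, s, s.length - 1).writable) {
          throw new SecurityException("no write access to parent of " + src);
        }
        if (!deepestExisting(root, d, d.length - 1).writable) {
          throw new SecurityException("no write access to ancestor of " + dst);
        }
      }
    }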
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 0f94171..5bc790e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -72,14 +72,14 @@ class FSDirStatAndListingOp {
 
     boolean isSuperUser = true;
     if (fsd.isPermissionEnabled()) {
-      if (fsd.isDir(src)) {
+      if (iip.getLastINode() != null && iip.getLastINode().isDirectory()) {
         fsd.checkPathAccess(pc, iip, FsAction.READ_EXECUTE);
       } else {
         fsd.checkTraverse(pc, iip);
       }
       isSuperUser = pc.isSuperUser();
     }
-    return getListing(fsd, src, startAfter, needLocation, isSuperUser);
+    return getListing(fsd, iip, src, startAfter, needLocation, isSuperUser);
   }
 
   /**
@@ -131,12 +131,12 @@ class FSDirStatAndListingOp {
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     FSPermissionChecker pc = fsd.getPermissionChecker();
     src = fsd.resolvePath(pc, src, pathComponents);
-    final INodesInPath iip = fsd.getINodesInPath(src, true);
+    final INodesInPath iip = fsd.getINodesInPath(src, false);
     if (fsd.isPermissionEnabled()) {
       fsd.checkPermission(pc, iip, false, null, null, null,
           FsAction.READ_EXECUTE);
     }
-    return getContentSummaryInt(fsd, src);
+    return getContentSummaryInt(fsd, iip);
   }
 
   /**
@@ -148,14 +148,15 @@ class FSDirStatAndListingOp {
    * that at least this.lsLimit block locations are in the response
    *
    * @param fsd FSDirectory
+   * @param iip the INodesInPath instance containing all the INodes along the
+   *            path
    * @param src the directory name
    * @param startAfter the name to start listing after
    * @param needLocation if block locations are returned
    * @return a partial listing starting after startAfter
    */
-  private static DirectoryListing getListing(
-      FSDirectory fsd, String src, byte[] startAfter, boolean needLocation,
-      boolean isSuperUser)
+  private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip,
+      String src, byte[] startAfter, boolean needLocation, boolean isSuperUser)
       throws IOException {
     String srcs = FSDirectory.normalizePath(src);
     final boolean isRawPath = FSDirectory.isReservedRawName(src);
@@ -165,9 +166,8 @@ class FSDirStatAndListingOp {
       if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
         return getSnapshotsListing(fsd, srcs, startAfter);
       }
-      final INodesInPath inodesInPath = fsd.getINodesInPath(srcs, true);
-      final int snapshot = inodesInPath.getPathSnapshotId();
-      final INode targetNode = inodesInPath.getLastINode();
+      final int snapshot = iip.getPathSnapshotId();
+      final INode targetNode = iip.getLastINode();
       if (targetNode == null)
         return null;
       byte parentStoragePolicy = isSuperUser ?
@@ -178,7 +178,7 @@ class FSDirStatAndListingOp {
         return new DirectoryListing(
             new HdfsFileStatus[]{createFileStatus(fsd,
                 HdfsFileStatus.EMPTY_NAME, targetNode, needLocation,
-                parentStoragePolicy, snapshot, isRawPath, inodesInPath)}, 0);
+                parentStoragePolicy, snapshot, isRawPath, iip)}, 0);
       }
 
       final INodeDirectory dirInode = targetNode.asDirectory();
@@ -196,7 +196,8 @@ class FSDirStatAndListingOp {
             cur.getLocalStoragePolicyID():
             BlockStoragePolicySuite.ID_UNSPECIFIED;
         listing[i] = createFileStatus(fsd, cur.getLocalNameBytes(), cur,
-            needLocation, fsd.getStoragePolicyID(curPolicy, parentStoragePolicy), snapshot, isRawPath, inodesInPath);
+            needLocation, fsd.getStoragePolicyID(curPolicy,
+                parentStoragePolicy), snapshot, isRawPath, iip);
         listingCnt++;
         if (needLocation) {
             // Once we  hit lsLimit locations, stop.
@@ -453,14 +454,13 @@ class FSDirStatAndListingOp {
     return perm;
   }
 
-  private static ContentSummary getContentSummaryInt(
-      FSDirectory fsd, String src) throws IOException {
-    String srcs = FSDirectory.normalizePath(src);
+  private static ContentSummary getContentSummaryInt(FSDirectory fsd,
+      INodesInPath iip) throws IOException {
     fsd.readLock();
     try {
-      INode targetNode = fsd.getNode(srcs, false);
+      INode targetNode = iip.getLastINode();
       if (targetNode == null) {
-        throw new FileNotFoundException("File does not exist: " + srcs);
+        throw new FileNotFoundException("File does not exist: " + iip.getPath());
       }
       else {
         // Make it relinquish locks everytime contentCountLimit entries are
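
getListing above returns a partial directory listing that starts after a given child name and is capped by lsLimit. The same "resume after a key, up to a limit" behavior can be sketched with a sorted map; TreeMap.tailMap with an exclusive lower bound handles the startAfter part directly:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.TreeMap;

    // Hypothetical sketch of a paged listing: start strictly after `startAfter`
    // and return at most `limit` names.
    class PartialListingSketch {
      static List<String> listAfter(TreeMap<String, Long> dir,
                                    String startAfter, int limit) {
        List<String> page = new ArrayList<>(limit);
        // tailMap(key, false) = entries with names strictly greater than startAfter
        for (String name : dir.tailMap(startAfter, false).keySet()) {
          page.add(name);
          if (page.size() >= limit) {
            break;            // caller issues the next call with the last name seen
          }
        }
        return page;
      }

      public static void main(String[] args) {
        TreeMap<String, Long> dir = new TreeMap<>();
        for (String n : new String[] {"a", "b", "c", "d", "e"}) dir.put(n, 0L);
        System.out.println(listAfter(dir, "b", 2));   // [c, d]
      }
    }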

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 303b9e3..47a995d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -191,7 +191,7 @@ class FSDirXAttrOp {
     assert fsd.hasWriteLock();
     INodesInPath iip = fsd.getINodesInPath4Write(
         FSDirectory.normalizePath(src), true);
-    INode inode = FSDirectory.resolveLastINode(src, iip);
+    INode inode = FSDirectory.resolveLastINode(iip);
     int snapshotId = iip.getLatestSnapshotId();
     List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
     List<XAttr> removedXAttrs = Lists.newArrayListWithCapacity(toRemove.size());
@@ -260,8 +260,9 @@ class FSDirXAttrOp {
       final EnumSet<XAttrSetFlag> flag)
       throws IOException {
     assert fsd.hasWriteLock();
-    INodesInPath iip = fsd.getINodesInPath4Write(FSDirectory.normalizePath(src), true);
-    INode inode = FSDirectory.resolveLastINode(src, iip);
+    INodesInPath iip = fsd.getINodesInPath4Write(FSDirectory.normalizePath(src),
+        true);
+    INode inode = FSDirectory.resolveLastINode(iip);
     int snapshotId = iip.getLatestSnapshotId();
     List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
     List<XAttr> newXAttrs = setINodeXAttrs(fsd, existingXAttrs, xAttrs, flag);
@@ -444,8 +445,8 @@ class FSDirXAttrOp {
     String srcs = FSDirectory.normalizePath(src);
     fsd.readLock();
     try {
-      INodesInPath iip = fsd.getLastINodeInPath(srcs, true);
-      INode inode = FSDirectory.resolveLastINode(src, iip);
+      INodesInPath iip = fsd.getINodesInPath(srcs, true);
+      INode inode = FSDirectory.resolveLastINode(iip);
       int snapshotId = iip.getPathSnapshotId();
       return XAttrStorage.readINodeXAttrs(inode, snapshotId);
     } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 81b0eb6..ee9bdd0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -335,8 +335,8 @@ public class FSDirectory implements Closeable {
 
   private static INodeFile newINodeFile(long id, PermissionStatus permissions,
       long mtime, long atime, short replication, long preferredBlockSize) {
-    return newINodeFile(id, permissions, mtime, atime, replication, preferredBlockSize,
-        (byte)0);
+    return newINodeFile(id, permissions, mtime, atime, replication,
+        preferredBlockSize, (byte)0);
   }
 
   private static INodeFile newINodeFile(long id, PermissionStatus permissions,
@@ -354,20 +354,21 @@ public class FSDirectory implements Closeable {
    * @throws UnresolvedLinkException
    * @throws SnapshotAccessControlException 
    */
-  INodeFile addFile(String path, PermissionStatus permissions,
+  INodeFile addFile(INodesInPath iip, String path, PermissionStatus permissions,
                     short replication, long preferredBlockSize,
                     String clientName, String clientMachine)
     throws FileAlreadyExistsException, QuotaExceededException,
       UnresolvedLinkException, SnapshotAccessControlException, AclException {
 
     long modTime = now();
-    INodeFile newNode = newINodeFile(allocateNewInodeId(), permissions, modTime, modTime, replication, preferredBlockSize);
+    INodeFile newNode = newINodeFile(allocateNewInodeId(), permissions, modTime,
+        modTime, replication, preferredBlockSize);
     newNode.toUnderConstruction(clientName, clientMachine);
 
     boolean added = false;
     writeLock();
     try {
-      added = addINode(path, newNode);
+      added = addINode(iip, newNode);
     } finally {
       writeUnlock();
     }
@@ -382,8 +383,8 @@ public class FSDirectory implements Closeable {
     return newNode;
   }
 
-  INodeFile unprotectedAddFile( long id,
-                            String path, 
+  INodeFile unprotectedAddFile(long id,
+                            INodesInPath iip,
                             PermissionStatus permissions,
                             List<AclEntry> aclEntries,
                             List<XAttr> xAttrs,
@@ -401,14 +402,13 @@ public class FSDirectory implements Closeable {
       newNode = newINodeFile(id, permissions, modificationTime,
           modificationTime, replication, preferredBlockSize, storagePolicyId);
       newNode.toUnderConstruction(clientName, clientMachine);
-
     } else {
       newNode = newINodeFile(id, permissions, modificationTime, atime,
           replication, preferredBlockSize, storagePolicyId);
     }
 
     try {
-      if (addINode(path, newNode)) {
+      if (addINode(iip, newNode)) {
         if (aclEntries != null) {
           AclStorage.updateINodeAcl(newNode, aclEntries,
             Snapshot.CURRENT_STATE_ID);
@@ -422,8 +422,8 @@ public class FSDirectory implements Closeable {
     } catch (IOException e) {
       if(NameNode.stateChangeLog.isDebugEnabled()) {
         NameNode.stateChangeLog.debug(
-            "DIR* FSDirectory.unprotectedAddFile: exception when add " + path
-                + " to the file system", e);
+            "DIR* FSDirectory.unprotectedAddFile: exception when add "
+                + iip.getPath() + " to the file system", e);
       }
     }
     return null;
@@ -468,18 +468,18 @@ public class FSDirectory implements Closeable {
    * Remove a block from the file.
    * @return Whether the block exists in the corresponding file
    */
-  boolean removeBlock(String path, INodeFile fileNode, Block block)
-      throws IOException {
+  boolean removeBlock(String path, INodesInPath iip, INodeFile fileNode,
+      Block block) throws IOException {
     Preconditions.checkArgument(fileNode.isUnderConstruction());
     writeLock();
     try {
-      return unprotectedRemoveBlock(path, fileNode, block);
+      return unprotectedRemoveBlock(path, iip, fileNode, block);
     } finally {
       writeUnlock();
     }
   }
   
-  boolean unprotectedRemoveBlock(String path,
+  boolean unprotectedRemoveBlock(String path, INodesInPath iip,
       INodeFile fileNode, Block block) throws IOException {
     // modify file-> block and blocksMap
     // fileNode should be under construction
@@ -496,7 +496,6 @@ public class FSDirectory implements Closeable {
     }
 
     // update space consumed
-    final INodesInPath iip = getINodesInPath4Write(path, true);
     updateCount(iip, 0, -fileNode.getBlockDiskspace(), true);
     return true;
   }
@@ -638,20 +637,6 @@ public class FSDirectory implements Closeable {
     XAttrStorage.updateINodeXAttrs(inode, newXAttrs, latestSnapshotId);
   }
 
-  /**
-   * @param path the file path
-   * @return the block size of the file. 
-   */
-  long getPreferredBlockSize(String path) throws IOException {
-    readLock();
-    try {
-      return INodeFile.valueOf(getNode(path, false), path
-          ).getPreferredBlockSize();
-    } finally {
-      readUnlock();
-    }
-  }
-
   void setPermission(String src, FsPermission permission)
       throws FileNotFoundException, UnresolvedLinkException,
       QuotaExceededException, SnapshotAccessControlException {
@@ -706,28 +691,26 @@ public class FSDirectory implements Closeable {
 
   /**
    * Delete the target directory and collect the blocks under it
-   * 
-   * @param src Path of a directory to delete
+   *
+   * @param iip the INodesInPath instance containing all the INodes for the path
    * @param collectedBlocks Blocks under the deleted directory
    * @param removedINodes INodes that should be removed from {@link #inodeMap}
    * @return the number of files that have been removed
    */
-  long delete(String src, BlocksMapUpdateInfo collectedBlocks,
+  long delete(INodesInPath iip, BlocksMapUpdateInfo collectedBlocks,
               List<INode> removedINodes, long mtime) throws IOException {
     if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + src);
+      NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + iip.getPath());
     }
     final long filesRemoved;
     writeLock();
     try {
-      final INodesInPath inodesInPath = getINodesInPath4Write(
-          normalizePath(src), false);
-      if (!deleteAllowed(inodesInPath, src) ) {
+      if (!deleteAllowed(iip, iip.getPath()) ) {
         filesRemoved = -1;
       } else {
         List<INodeDirectory> snapshottableDirs = new ArrayList<INodeDirectory>();
-        FSDirSnapshotOp.checkSnapshot(inodesInPath.getLastINode(), snapshottableDirs);
-        filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks,
+        FSDirSnapshotOp.checkSnapshot(iip.getLastINode(), snapshottableDirs);
+        filesRemoved = unprotectedDelete(iip, collectedBlocks,
             removedINodes, mtime);
         namesystem.removeSnapshottableDirs(snapshottableDirs);
       }
@@ -863,88 +846,15 @@ public class FSDirectory implements Closeable {
         parentPolicy;
   }
 
-  INode getINode4DotSnapshot(String src) throws UnresolvedLinkException {
-    Preconditions.checkArgument(
-        src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
-        "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
-    
-    final String dirPath = normalizePath(src.substring(0,
-        src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));
-    
-    final INode node = this.getINode(dirPath);
-    if (node != null && node.isDirectory()
-        && node.asDirectory().isSnapshottable()) {
-      return node;
-    }
-    return null;
-  }
-
-  INodesInPath getExistingPathINodes(byte[][] components)
-      throws UnresolvedLinkException {
-    return INodesInPath.resolve(rootDir, components);
-  }
-
-  /**
-   * Get {@link INode} associated with the file / directory.
-   */
-  public INode getINode(String src) throws UnresolvedLinkException {
-    return getLastINodeInPath(src).getINode(0);
-  }
-
-  /**
-   * Get {@link INode} associated with the file / directory.
-   */
-  public INodesInPath getLastINodeInPath(String src)
-       throws UnresolvedLinkException {
-    readLock();
-    try {
-      return getLastINodeInPath(src, true);
-    } finally {
-      readUnlock();
-    }
-  }
-
-  /**
-   * Get {@link INode} associated with the file / directory.
-   */
-  public INodesInPath getINodesInPath4Write(String src
-      ) throws UnresolvedLinkException, SnapshotAccessControlException {
-    readLock();
-    try {
-      return getINodesInPath4Write(src, true);
-    } finally {
-      readUnlock();
-    }
-  }
-
-  /**
-   * Get {@link INode} associated with the file / directory.
-   * @throws SnapshotAccessControlException if path is in RO snapshot
-   */
-  public INode getINode4Write(String src) throws UnresolvedLinkException,
-      SnapshotAccessControlException {
-    readLock();
-    try {
-      return getINode4Write(src, true);
-    } finally {
-      readUnlock();
-    }
-  }
-
   /** 
    * Check whether the filepath could be created
    * @throws SnapshotAccessControlException if path is in RO snapshot
    */
-  boolean isValidToCreate(String src) throws UnresolvedLinkException,
-      SnapshotAccessControlException {
+  boolean isValidToCreate(String src, INodesInPath iip)
+      throws SnapshotAccessControlException {
     String srcs = normalizePath(src);
-    readLock();
-    try {
-      return srcs.startsWith("/") && !srcs.endsWith("/")
-              && getINode4Write(srcs, false) == null;
-    } finally {
-      readUnlock();
-    }
+    return srcs.startsWith("/") && !srcs.endsWith("/") &&
+        iip.getLastINode() == null;
   }
 
   /**
@@ -954,7 +864,7 @@ public class FSDirectory implements Closeable {
     src = normalizePath(src);
     readLock();
     try {
-      INode node = getNode(src, false);
+      INode node = getINode(src, false);
       return node != null && node.isDirectory();
     } finally {
       readUnlock();
@@ -963,21 +873,21 @@ public class FSDirectory implements Closeable {
 
   /** Updates namespace and diskspace consumed for all
    * directories until the parent directory of file represented by path.
-   * 
-   * @param path path for the file.
+   *
+   * @param iip the INodesInPath instance containing all the INodes for
+   *            updating quota usage
    * @param nsDelta the delta change of namespace
    * @param dsDelta the delta change of diskspace
    * @throws QuotaExceededException if the new count violates any quota limit
    * @throws FileNotFoundException if path does not exist.
    */
-  void updateSpaceConsumed(String path, long nsDelta, long dsDelta)
+  void updateSpaceConsumed(INodesInPath iip, long nsDelta, long dsDelta)
       throws QuotaExceededException, FileNotFoundException,
           UnresolvedLinkException, SnapshotAccessControlException {
     writeLock();
     try {
-      final INodesInPath iip = getINodesInPath4Write(path, false);
       if (iip.getLastINode() == null) {
-        throw new FileNotFoundException("Path not found: " + path);
+        throw new FileNotFoundException("Path not found: " + iip.getPath());
       }
       updateCount(iip, nsDelta, dsDelta, true);
     } finally {
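
As its javadoc above says, updateSpaceConsumed charges a namespace and diskspace delta to the directories from the file's parent upward, failing with QuotaExceededException if a quota on the way would be violated. A toy sketch of that upward propagation with a hypothetical verify-then-apply quota check (not the HDFS quota machinery):

    import java.util.List;

    // Hypothetical sketch: apply (nsDelta, dsDelta) to every ancestor, verifying
    // quotas first so a failure leaves nothing half-applied.
    class QuotaSketch {
      static final class Dir {
        long nsUsed, dsUsed;
        long nsQuota = Long.MAX_VALUE, dsQuota = Long.MAX_VALUE;
      }

      /** ancestors are ordered root-first, ending at the file's parent. */
      static void updateSpaceConsumed(List<Dir> ancestors, long nsDelta, long dsDelta) {
        for (Dir d : ancestors) {          // verify pass: no mutation yet
          if (d.nsUsed + nsDelta > d.nsQuota || d.dsUsed + dsDelta > d.dsQuota) {
            throw new IllegalStateException("quota exceeded");
          }
        }
        for (Dir d : ancestors) {          // apply pass
          d.nsUsed += nsDelta;
          d.dsUsed += dsDelta;
        }
      }
    }
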
@@ -1097,17 +1007,15 @@ public class FSDirectory implements Closeable {
 
   /**
    * Add the given child to the namespace.
-   * @param src The full path name of the child node.
+   * @param iip the INodesInPath instance containing all the ancestral INodes
    * @throws QuotaExceededException is thrown if it violates quota limit
    */
-  private boolean addINode(String src, INode child)
+  private boolean addINode(INodesInPath iip, INode child)
       throws QuotaExceededException, UnresolvedLinkException {
-    byte[][] components = INode.getPathComponents(src);
-    child.setLocalName(components[components.length-1]);
+    child.setLocalName(iip.getLastLocalName());
     cacheName(child);
     writeLock();
     try {
-      final INodesInPath iip = getExistingPathINodes(components);
       return addLastINode(iip, child, true);
     } finally {
       writeUnlock();
@@ -1504,7 +1412,7 @@ public class FSDirectory implements Closeable {
   boolean unprotectedSetTimes(String src, long mtime, long atime, boolean force) 
       throws UnresolvedLinkException, QuotaExceededException {
     assert hasWriteLock();
-    final INodesInPath i = getLastINodeInPath(src); 
+    final INodesInPath i = getINodesInPath(src, true);
     return unprotectedSetTimes(i.getLastINode(), mtime, atime, force,
         i.getLatestSnapshotId());
   }
@@ -1551,24 +1459,24 @@ public class FSDirectory implements Closeable {
   /**
    * Add the specified path into the namespace.
    */
-  INodeSymlink addSymlink(long id, String path, String target,
+  INodeSymlink addSymlink(INodesInPath iip, long id, String target,
                           long mtime, long atime, PermissionStatus perm)
           throws UnresolvedLinkException, QuotaExceededException {
     writeLock();
     try {
-      return unprotectedAddSymlink(id, path, target, mtime, atime, perm);
+      return unprotectedAddSymlink(iip, id, target, mtime, atime, perm);
     } finally {
       writeUnlock();
     }
   }
 
-  INodeSymlink unprotectedAddSymlink(long id, String path, String target,
+  INodeSymlink unprotectedAddSymlink(INodesInPath iip, long id, String target,
       long mtime, long atime, PermissionStatus perm)
       throws UnresolvedLinkException, QuotaExceededException {
     assert hasWriteLock();
     final INodeSymlink symlink = new INodeSymlink(id, null, perm, mtime, atime,
         target);
-    return addINode(path, symlink) ? symlink : null;
+    return addINode(iip, symlink) ? symlink : null;
   }
 
   boolean isInAnEZ(INodesInPath iip)
@@ -1704,11 +1612,10 @@ public class FSDirectory implements Closeable {
     }
   }
 
-  static INode resolveLastINode(String src, INodesInPath iip)
-      throws FileNotFoundException {
+  static INode resolveLastINode(INodesInPath iip) throws FileNotFoundException {
     INode inode = iip.getLastINode();
     if (inode == null) {
-      throw new FileNotFoundException("cannot find " + src);
+      throw new FileNotFoundException("cannot find " + iip.getPath());
     }
     return inode;
   }
@@ -1885,36 +1792,62 @@ public class FSDirectory implements Closeable {
     return path.toString();
   }
 
-  /** @return the {@link INodesInPath} containing only the last inode. */
-  INodesInPath getLastINodeInPath(
-      String path, boolean resolveLink) throws UnresolvedLinkException {
-    return INodesInPath.resolve(rootDir, INode.getPathComponents(path), 1,
-            resolveLink);
+  INode getINode4DotSnapshot(String src) throws UnresolvedLinkException {
+    Preconditions.checkArgument(
+        src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
+        "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
+
+    final String dirPath = normalizePath(src.substring(0,
+        src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));
+
+    final INode node = this.getINode(dirPath);
+    if (node != null && node.isDirectory()
+        && node.asDirectory().isSnapshottable()) {
+      return node;
+    }
+    return null;
+  }
+
+  INodesInPath getExistingPathINodes(byte[][] components)
+      throws UnresolvedLinkException {
+    return INodesInPath.resolve(rootDir, components, false);
+  }
+
+  /**
+   * Get {@link INode} associated with the file / directory.
+   */
+  public INodesInPath getINodesInPath4Write(String src)
+      throws UnresolvedLinkException, SnapshotAccessControlException {
+    return getINodesInPath4Write(src, true);
+  }
+
+  /**
+   * Get {@link INode} associated with the file / directory.
+   * @throws SnapshotAccessControlException if path is in RO snapshot
+   */
+  public INode getINode4Write(String src) throws UnresolvedLinkException,
+      SnapshotAccessControlException {
+    return getINodesInPath4Write(src, true).getLastINode();
   }
 
   /** @return the {@link INodesInPath} containing all inodes in the path. */
-  INodesInPath getINodesInPath(String path, boolean resolveLink
-  ) throws UnresolvedLinkException {
+  public INodesInPath getINodesInPath(String path, boolean resolveLink)
+      throws UnresolvedLinkException {
     final byte[][] components = INode.getPathComponents(path);
-    return INodesInPath.resolve(rootDir, components, components.length,
-            resolveLink);
+    return INodesInPath.resolve(rootDir, components, resolveLink);
   }
 
   /** @return the last inode in the path. */
-  INode getNode(String path, boolean resolveLink)
-          throws UnresolvedLinkException {
-    return getLastINodeInPath(path, resolveLink).getINode(0);
+  INode getINode(String path, boolean resolveLink)
+      throws UnresolvedLinkException {
+    return getINodesInPath(path, resolveLink).getLastINode();
   }
 
   /**
-   * @return the INode of the last component in src, or null if the last
-   * component does not exist.
-   * @throws UnresolvedLinkException if symlink can't be resolved
-   * @throws SnapshotAccessControlException if path is in RO snapshot
+   * Get {@link INode} associated with the file / directory.
    */
-  INode getINode4Write(String src, boolean resolveLink)
-          throws UnresolvedLinkException, SnapshotAccessControlException {
-    return getINodesInPath4Write(src, resolveLink).getLastINode();
+  public INode getINode(String src) throws UnresolvedLinkException {
+    return getINode(src, true);
   }
 
   /**
@@ -1926,7 +1859,7 @@ public class FSDirectory implements Closeable {
           throws UnresolvedLinkException, SnapshotAccessControlException {
     final byte[][] components = INode.getPathComponents(src);
     INodesInPath inodesInPath = INodesInPath.resolve(rootDir, components,
-            components.length, resolveLink);
+        resolveLink);
     if (inodesInPath.isSnapshot()) {
       throw new SnapshotAccessControlException(
               "Modification on a read-only snapshot is disallowed");

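The net effect of the FSDirectory hunks above is that a path string is resolved
once into an INodesInPath and subsequent steps work off that object instead of
re-parsing the path. A minimal sketch of the calling pattern, assuming it lives
in the org.apache.hadoop.hdfs.server.namenode package so the package-private
resolveLastINode is visible; the class and variable names here are hypothetical,
only the FSDirectory/INodesInPath calls come from the diff:

package org.apache.hadoop.hdfs.server.namenode;

import java.io.FileNotFoundException;

import org.apache.hadoop.fs.UnresolvedLinkException;

class PathResolutionSketch {
  static INode lookupLast(FSDirectory fsd, String path)
      throws UnresolvedLinkException, FileNotFoundException {
    // Resolve the whole path once, following symlinks (second argument).
    INodesInPath iip = fsd.getINodesInPath(path, true);
    // Reuse the resolved iip everywhere; resolveLastINode throws
    // FileNotFoundException when the last component does not exist.
    return FSDirectory.resolveLastINode(iip);
  }
}
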
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 2721f85..833b9db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -342,7 +342,7 @@ public class FSEditLogLoader {
       // 3. OP_ADD to open file for append
 
       // See if the file already exists (persistBlocks call)
-      final INodesInPath iip = fsDir.getINodesInPath(path, true);
+      INodesInPath iip = fsDir.getINodesInPath(path, true);
       INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path, true);
       if (oldFile != null && addCloseOp.overwrite) {
         // This is OP_ADD with overwrite
@@ -361,11 +361,12 @@ public class FSEditLogLoader {
         inodeId = getAndUpdateLastInodeId(addCloseOp.inodeId, logVersion,
             lastInodeId);
         newFile = fsDir.unprotectedAddFile(inodeId,
-            path, addCloseOp.permissions, addCloseOp.aclEntries,
+            iip, addCloseOp.permissions, addCloseOp.aclEntries,
             addCloseOp.xAttrs,
             replication, addCloseOp.mtime, addCloseOp.atime,
             addCloseOp.blockSize, true, addCloseOp.clientName,
             addCloseOp.clientMachine, addCloseOp.storagePolicyId);
+        iip = INodesInPath.replace(iip, iip.length() - 1, newFile);
         fsNamesys.leaseManager.addLease(addCloseOp.clientName, path);
 
         // add the op into retry cache if necessary
@@ -384,10 +385,10 @@ public class FSEditLogLoader {
             FSNamesystem.LOG.debug("Reopening an already-closed file " +
                 "for append");
           }
-          LocatedBlock lb = fsNamesys.prepareFileForWrite(path,
-              oldFile, addCloseOp.clientName, addCloseOp.clientMachine, false, iip.getLatestSnapshotId(), false);
-          newFile = INodeFile.valueOf(fsDir.getINode(path),
-              path, true);
+          // Note we do not replace the INodeFile when converting it to
+          // under-construction
+          LocatedBlock lb = fsNamesys.prepareFileForWrite(path, iip,
+              addCloseOp.clientName, addCloseOp.clientMachine, false, false);
           
           // add the op into retry cache is necessary
           if (toAddRetryCache) {
@@ -408,7 +409,7 @@ public class FSEditLogLoader {
       // Update the salient file attributes.
       newFile.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
       newFile.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID);
-      updateBlocks(fsDir, addCloseOp, newFile);
+      updateBlocks(fsDir, addCloseOp, iip, newFile);
       break;
     }
     case OP_CLOSE: {
@@ -422,13 +423,13 @@ public class FSEditLogLoader {
             " clientMachine " + addCloseOp.clientMachine);
       }
 
-      final INodesInPath iip = fsDir.getLastINodeInPath(path);
-      final INodeFile file = INodeFile.valueOf(iip.getINode(0), path);
+      final INodesInPath iip = fsDir.getINodesInPath(path, true);
+      final INodeFile file = INodeFile.valueOf(iip.getLastINode(), path);
 
       // Update the salient file attributes.
       file.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
       file.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID);
-      updateBlocks(fsDir, addCloseOp, file);
+      updateBlocks(fsDir, addCloseOp, iip, file);
 
       // Now close the file
       if (!file.isUnderConstruction() &&
@@ -455,10 +456,10 @@ public class FSEditLogLoader {
         FSNamesystem.LOG.debug(op.opCode + ": " + path +
             " numblocks : " + updateOp.blocks.length);
       }
-      INodeFile oldFile = INodeFile.valueOf(fsDir.getINode(path),
-          path);
+      INodesInPath iip = fsDir.getINodesInPath(path, true);
+      INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path);
       // Update in-memory data structures
-      updateBlocks(fsDir, updateOp, oldFile);
+      updateBlocks(fsDir, updateOp, iip, oldFile);
       
       if (toAddRetryCache) {
         fsNamesys.addCacheEntry(updateOp.rpcClientId, updateOp.rpcCallId);
@@ -587,8 +588,10 @@ public class FSEditLogLoader {
       SymlinkOp symlinkOp = (SymlinkOp)op;
       inodeId = getAndUpdateLastInodeId(symlinkOp.inodeId, logVersion,
           lastInodeId);
-      fsDir.unprotectedAddSymlink(inodeId,
-          renameReservedPathsOnUpgrade(symlinkOp.path, logVersion),
+      final String path = renameReservedPathsOnUpgrade(symlinkOp.path,
+          logVersion);
+      final INodesInPath iip = fsDir.getINodesInPath(path, false);
+      fsDir.unprotectedAddSymlink(iip, inodeId,
           symlinkOp.value, symlinkOp.mtime, symlinkOp.atime,
           symlinkOp.permissionStatus);
       
@@ -922,7 +925,7 @@ public class FSEditLogLoader {
    * @throws IOException
    */
   private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
-      INodeFile file) throws IOException {
+      INodesInPath iip, INodeFile file) throws IOException {
     // Update its block list
     BlockInfo[] oldBlocks = file.getBlocks();
     Block[] newBlocks = op.getBlocks();
@@ -976,7 +979,7 @@ public class FSEditLogLoader {
             + path);
       }
       Block oldBlock = oldBlocks[oldBlocks.length - 1];
-      boolean removed = fsDir.unprotectedRemoveBlock(path, file, oldBlock);
+      boolean removed = fsDir.unprotectedRemoveBlock(path, iip, file, oldBlock);
       if (!removed && !(op instanceof UpdateBlocksOp)) {
         throw new IOException("Trying to delete non-existant block " + oldBlock);
       }
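
A detail worth noting in the OP_ADD hunk above: once unprotectedAddFile has
attached the new INodeFile, the previously resolved INodesInPath is stale in its
last slot, so the loader swaps the new inode in with INodesInPath.replace before
handing iip to updateBlocks. A rough sketch of that single step, in the same
package-private context; the helper class and names are hypothetical:

package org.apache.hadoop.hdfs.server.namenode;

// Illustrates only the refresh step; everything around it is elided.
class IipRefreshSketch {
  static INodesInPath refreshLast(INodesInPath iip, INodeFile newFile) {
    // iip.length() - 1 is the index of the last inode; replace the stale
    // slot so iip.getLastINode() returns the inode that was just added.
    return INodesInPath.replace(iip, iip.length() - 1, newFile);
  }
}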

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index e26f052..0a92054 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -596,7 +596,7 @@ public class FSImageFormat {
      // Rename .snapshot paths if we're doing an upgrade
      parentPath = renameReservedPathsOnUpgrade(parentPath, getLayoutVersion());
      final INodeDirectory parent = INodeDirectory.valueOf(
-         namesystem.dir.getNode(parentPath, true), parentPath);
+         namesystem.dir.getINode(parentPath, true), parentPath);
      return loadChildren(parent, in, counter);
    }
 
@@ -940,8 +940,8 @@ public class FSImageFormat {
           inSnapshot = true;
         } else {
           path = renameReservedPathsOnUpgrade(path, getLayoutVersion());
-          final INodesInPath iip = fsDir.getLastINodeInPath(path);
-          oldnode = INodeFile.valueOf(iip.getINode(0), path);
+          final INodesInPath iip = fsDir.getINodesInPath(path, true);
+          oldnode = INodeFile.valueOf(iip.getLastINode(), path);
         }
 
         FileUnderConstructionFeature uc = cons.getFileUnderConstructionFeature();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c78e3a7c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 5dd5920..b4b897a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2028,7 +2028,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       if (!createParent) {
         dir.verifyParentDir(iip, link);
       }
-      if (!dir.isValidToCreate(link)) {
+      if (!dir.isValidToCreate(link, iip)) {
         throw new IOException("failed to create link " + link 
             +" either because the filename is invalid or the file exists");
       }
@@ -2039,7 +2039,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       checkFsObjectLimit();
 
       // add symbolic link to namespace
-      addSymlink(link, target, dirPerms, createParent, logRetryCache);
+      addSymlink(link, iip, target, dirPerms, createParent, logRetryCache);
       resultingStat = getAuditFileInfo(link, false);
     } finally {
       writeUnlock();
@@ -2191,11 +2191,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     try {
       checkOperation(OperationCategory.READ);
       filename = dir.resolvePath(pc, filename, pathComponents);
-      final INodesInPath iip = dir.getINodesInPath(filename, true);
+      final INodesInPath iip = dir.getINodesInPath(filename, false);
       if (isPermissionEnabled) {
         dir.checkTraverse(pc, iip);
       }
-      return dir.getPreferredBlockSize(filename);
+      return INodeFile.valueOf(iip.getLastINode(), filename)
+          .getPreferredBlockSize();
     } finally {
       readUnlock();
     }
@@ -2491,14 +2492,14 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         if (overwrite) {
           toRemoveBlocks = new BlocksMapUpdateInfo();
           List<INode> toRemoveINodes = new ChunkedArrayList<INode>();
-          long ret = dir.delete(src, toRemoveBlocks, toRemoveINodes, now());
+          long ret = dir.delete(iip, toRemoveBlocks, toRemoveINodes, now());
           if (ret >= 0) {
             incrDeletedFileCount(ret);
             removePathAndBlocks(src, null, toRemoveINodes, true);
           }
         } else {
           // If lease soft limit time is expired, recover the lease
-          recoverLeaseInternal(myFile, src, holder, clientMachine, false);
+          recoverLeaseInternal(iip, src, holder, clientMachine, false);
           throw new FileAlreadyExistsException(src + " for client " +
               clientMachine + " already exists");
         }
@@ -2508,10 +2509,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       INodeFile newNode = null;
 
       // Always do an implicit mkdirs for parent directory tree.
-      Path parent = new Path(src).getParent();
-      if (parent != null && FSDirMkdirOp.mkdirsRecursively(dir,
-          parent.toString(), permissions, true, now())) {
-        newNode = dir.addFile(src, permissions, replication, blockSize,
+      INodesInPath parentIIP = iip.getParentINodesInPath();
+      if (parentIIP != null && (parentIIP = FSDirMkdirOp.mkdirsRecursively(dir,
+          parentIIP, permissions, true, now())) != null) {
+        iip = INodesInPath.append(parentIIP, newNode, iip.getLastLocalName());
+        newNode = dir.addFile(iip, src, permissions, replication, blockSize,
                               holder, clientMachine);
       }
 
@@ -2621,12 +2623,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
             "Cannot append to lazy persist file " + src);
       }
       // Opening an existing file for write - may need to recover lease.
-      recoverLeaseInternal(myFile, src, holder, clientMachine, false);
+      recoverLeaseInternal(iip, src, holder, clientMachine, false);
       
-      // recoverLeaseInternal may create a new InodeFile via 
-      // finalizeINodeFileUnderConstruction so we need to refresh 
-      // the referenced file.  
-      myFile = INodeFile.valueOf(dir.getINode(src), src, true);
       final BlockInfo lastBlock = myFile.getLastBlock();
       // Check that the block has at least minimum replication.
       if(lastBlock != null && lastBlock.isComplete() &&
@@ -2634,8 +2632,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         throw new IOException("append: lastBlock=" + lastBlock +
             " of src=" + src + " is not sufficiently replicated yet.");
       }
-      return prepareFileForWrite(src, myFile, holder, clientMachine, true,
-              iip.getLatestSnapshotId(), logRetryCache);
+      return prepareFileForWrite(src, iip, holder, clientMachine, true,
+              logRetryCache);
     } catch (IOException ie) {
       NameNode.stateChangeLog.warn("DIR* NameSystem.append: " +ie.getMessage());
       throw ie;
@@ -2643,11 +2641,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
   
   /**
-   * Replace current node with a INodeUnderConstruction.
+   * Convert current node to under construction.
    * Recreate in-memory lease record.
    * 
    * @param src path to the file
-   * @param file existing file object
    * @param leaseHolder identifier of the lease holder on this file
    * @param clientMachine identifier of the client machine
    * @param writeToEditLog whether to persist this change to the edit log
@@ -2657,26 +2654,25 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * @throws UnresolvedLinkException
    * @throws IOException
    */
-  LocatedBlock prepareFileForWrite(String src, INodeFile file,
-                                   String leaseHolder, String clientMachine,
-                                   boolean writeToEditLog,
-                                   int latestSnapshot, boolean logRetryCache)
-      throws IOException {
-    file.recordModification(latestSnapshot);
-    final INodeFile cons = file.toUnderConstruction(leaseHolder, clientMachine);
+  LocatedBlock prepareFileForWrite(String src, INodesInPath iip,
+      String leaseHolder, String clientMachine, boolean writeToEditLog,
+      boolean logRetryCache) throws IOException {
+    final INodeFile file = iip.getLastINode().asFile();
+    file.recordModification(iip.getLatestSnapshotId());
+    file.toUnderConstruction(leaseHolder, clientMachine);
 
-    leaseManager.addLease(cons.getFileUnderConstructionFeature()
-        .getClientName(), src);
+    leaseManager.addLease(
+        file.getFileUnderConstructionFeature().getClientName(), src);
     
-    LocatedBlock ret = blockManager.convertLastBlockToUnderConstruction(cons);
+    LocatedBlock ret = blockManager.convertLastBlockToUnderConstruction(file);
     if (ret != null) {
       // update the quota: use the preferred block size for UC block
       final long diff = file.getPreferredBlockSize() - ret.getBlockSize();
-      dir.updateSpaceConsumed(src, 0, diff * file.getBlockReplication());
+      dir.updateSpaceConsumed(iip, 0, diff * file.getBlockReplication());
     }
 
     if (writeToEditLog) {
-      getEditLog().logOpenFile(src, cons, false, logRetryCache);
+      getEditLog().logOpenFile(src, file, false, logRetryCache);
     }
     return ret;
   }
@@ -2716,7 +2712,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         dir.checkPathAccess(pc, iip, FsAction.WRITE);
       }
   
-      recoverLeaseInternal(inode, src, holder, clientMachine, true);
+      recoverLeaseInternal(iip, src, holder, clientMachine, true);
     } catch (StandbyException se) {
       skipSync = true;
       throw se;
@@ -2731,11 +2727,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return false;
   }
 
-  private void recoverLeaseInternal(INodeFile fileInode, 
+  private void recoverLeaseInternal(INodesInPath iip,
       String src, String holder, String clientMachine, boolean force)
       throws IOException {
     assert hasWriteLock();
-    if (fileInode != null && fileInode.isUnderConstruction()) {
+    INodeFile file = iip.getLastINode().asFile();
+    if (file != null && file.isUnderConstruction()) {
       //
       // If the file is under construction , then it must be in our
       // leases. Find the appropriate lease record.
@@ -2758,7 +2755,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       //
       // Find the original holder.
       //
-      FileUnderConstructionFeature uc = fileInode.getFileUnderConstructionFeature();
+      FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
       String clientName = uc.getClientName();
       lease = leaseManager.getLease(clientName);
       if (lease == null) {
@@ -2772,7 +2769,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         // close only the file src
         LOG.info("recoverLease: " + lease + ", src=" + src +
           " from client " + clientName);
-        internalReleaseLease(lease, src, holder);
+        internalReleaseLease(lease, src, iip, holder);
       } else {
         assert lease.getHolder().equals(clientName) :
           "Current lease holder " + lease.getHolder() +
@@ -2784,13 +2781,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         if (lease.expiredSoftLimit()) {
           LOG.info("startFile: recover " + lease + ", src=" + src + " client "
               + clientName);
-          boolean isClosed = internalReleaseLease(lease, src, null);
+          boolean isClosed = internalReleaseLease(lease, src, iip, null);
           if(!isClosed)
             throw new RecoveryInProgressException(
                 "Failed to close file " + src +
                 ". Lease recovery is in progress. Try again later.");
         } else {
-          final BlockInfo lastBlock = fileInode.getLastBlock();
+          final BlockInfo lastBlock = file.getLastBlock();
           if (lastBlock != null
               && lastBlock.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
             throw new RecoveryInProgressException("Recovery in progress, file ["
@@ -2822,10 +2819,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 
   private LastBlockWithStatus appendFileInt(final String srcArg, String holder,
-      String clientMachine, boolean logRetryCache)
-      throws AccessControlException, SafeModeException,
-      FileAlreadyExistsException, FileNotFoundException,
-      ParentNotDirectoryException, IOException {
+      String clientMachine, boolean logRetryCache) throws IOException {
     String src = srcArg;
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: src=" + src
@@ -2892,10 +2886,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   LocatedBlock getAdditionalBlock(String src, long fileId, String clientName,
       ExtendedBlock previous, Set<Node> excludedNodes, 
-      List<String> favoredNodes)
-      throws LeaseExpiredException, NotReplicatedYetException,
-      QuotaExceededException, SafeModeException, UnresolvedLinkException,
-      IOException {
+      List<String> favoredNodes) throws IOException {
     final long blockSize;
     final int replication;
     final byte storagePolicyID;
@@ -2983,7 +2974,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       }
 
       // commit the last block and complete it if it has minimum replicas
-      commitOrCompleteLastBlock(pendingFile,
+      commitOrCompleteLastBlock(pendingFile, fileState.iip,
                                 ExtendedBlock.getLocalBlock(previous));
 
       // allocate new block, record block locations in INode.
@@ -3023,10 +3014,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   static class FileState {
     public final INodeFile inode;
     public final String path;
+    public final INodesInPath iip;
 
-    public FileState(INodeFile inode, String fullPath) {
+    public FileState(INodeFile inode, String fullPath, INodesInPath iip) {
       this.inode = inode;
       this.path = fullPath;
+      this.iip = iip;
     }
   }
 
@@ -3046,18 +3039,22 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     checkFsObjectLimit();
 
     Block previousBlock = ExtendedBlock.getLocalBlock(previous);
-    INode inode;
+    final INode inode;
+    final INodesInPath iip;
     if (fileId == INodeId.GRANDFATHER_INODE_ID) {
       // Older clients may not have given us an inode ID to work with.
       // In this case, we have to try to resolve the path and hope it
       // hasn't changed or been deleted since the file was opened for write.
-      final INodesInPath iip = dir.getINodesInPath4Write(src);
+      iip = dir.getINodesInPath4Write(src);
       inode = iip.getLastINode();
     } else {
       // Newer clients pass the inode ID, so we can just get the inode
       // directly.
       inode = dir.getInode(fileId);
-      if (inode != null) src = inode.getFullPathName();
+      iip = INodesInPath.fromINode(inode);
+      if (inode != null) {
+        src = iip.getPath();
+      }
     }
     final INodeFile pendingFile = checkLease(src, clientName, inode, fileId);
     BlockInfo lastBlockInFile = pendingFile.getLastBlock();
@@ -3117,7 +3114,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         onRetryBlock[0] = makeLocatedBlock(lastBlockInFile,
             ((BlockInfoUnderConstruction)lastBlockInFile).getExpectedStorageLocations(),
             offset);
-        return new FileState(pendingFile, src);
+        return new FileState(pendingFile, src, iip);
       } else {
         // Case 3
         throw new IOException("Cannot allocate block in " + src + ": " +
@@ -3130,7 +3127,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     if (!checkFileProgress(src, pendingFile, false)) {
       throw new NotReplicatedYetException("Not replicated yet: " + src);
     }
-    return new FileState(pendingFile, src);
+    return new FileState(pendingFile, src, iip);
   }
 
   LocatedBlock makeLocatedBlock(Block blk, DatanodeStorageInfo[] locs,
@@ -3208,8 +3205,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * The client would like to let go of the given block
    */
   boolean abandonBlock(ExtendedBlock b, long fileId, String src, String holder)
-      throws LeaseExpiredException, FileNotFoundException,
-      UnresolvedLinkException, IOException {
+      throws IOException {
     if(NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: " + b
           + "of file " + src);
@@ -3225,21 +3221,24 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       src = dir.resolvePath(pc, src, pathComponents);
 
       final INode inode;
+      final INodesInPath iip;
       if (fileId == INodeId.GRANDFATHER_INODE_ID) {
         // Older clients may not have given us an inode ID to work with.
         // In this case, we have to try to resolve the path and hope it
         // hasn't changed or been deleted since the file was opened for write.
-        inode = dir.getINode(src);
+        iip = dir.getINodesInPath(src, true);
+        inode = iip.getLastINode();
       } else {
         inode = dir.getInode(fileId);
-        if (inode != null) src = inode.getFullPathName();
+        iip = INodesInPath.fromINode(inode);
+        if (inode != null) {
+          src = iip.getPath();
+        }
       }
       final INodeFile file = checkLease(src, holder, inode, fileId);
 
-      //
       // Remove the block from the pending creates list
-      //
-      boolean removed = dir.removeBlock(src, file,
+      boolean removed = dir.removeBlock(src, iip, file,
           ExtendedBlock.getLocalBlock(b));
       if (!removed) {
         return true;
@@ -3258,8 +3257,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 
   private INodeFile checkLease(String src, String holder, INode inode,
-                               long fileId)
-      throws LeaseExpiredException, FileNotFoundException {
+      long fileId) throws LeaseExpiredException, FileNotFoundException {
     assert hasReadLock();
     final String ident = src + " (inode " + fileId + ")";
     if (inode == null) {
@@ -3336,29 +3334,30 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return success;
   }
 
-  private boolean completeFileInternal(String src, 
-      String holder, Block last, long fileId) throws SafeModeException,
-      UnresolvedLinkException, IOException {
+  private boolean completeFileInternal(String src, String holder, Block last,
+      long fileId) throws IOException {
     assert hasWriteLock();
     final INodeFile pendingFile;
+    final INodesInPath iip;
+    INode inode = null;
     try {
-      final INode inode;
       if (fileId == INodeId.GRANDFATHER_INODE_ID) {
         // Older clients may not have given us an inode ID to work with.
         // In this case, we have to try to resolve the path and hope it
         // hasn't changed or been deleted since the file was opened for write.
-        final INodesInPath iip = dir.getLastINodeInPath(src);
-        inode = iip.getINode(0);
+        iip = dir.getINodesInPath(src, true);
+        inode = iip.getLastINode();
       } else {
         inode = dir.getInode(fileId);
-        if (inode != null) src = inode.getFullPathName();
+        iip = INodesInPath.fromINode(inode);
+        if (inode != null) {
+          src = iip.getPath();
+        }
       }
       pendingFile = checkLease(src, holder, inode, fileId);
     } catch (LeaseExpiredException lee) {
-      final INode inode = dir.getINode(src);
-      if (inode != null
-          && inode.isFile()
-          && !inode.asFile().isUnderConstruction()) {
+      if (inode != null && inode.isFile() &&
+          !inode.asFile().isUnderConstruction()) {
         // This could be a retry RPC - i.e the client tried to close
         // the file, but missed the RPC response. Thus, it is trying
         // again to close the file. If the file still exists and
@@ -3383,7 +3382,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
 
     // commit the last block and complete it if it has minimum replicas
-    commitOrCompleteLastBlock(pendingFile, last);
+    commitOrCompleteLastBlock(pendingFile, iip, last);
 
     if (!checkFileProgress(src, pendingFile, true)) {
       return false;
@@ -3618,7 +3617,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
       long mtime = now();
       // Unlink the target directory from directory tree
-      long filesRemoved = dir.delete(src, collectedBlocks, removedINodes,
+      long filesRemoved = dir.delete(iip, collectedBlocks, removedINodes,
               mtime);
       if (filesRemoved < 0) {
         return false;
@@ -3885,7 +3884,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * @throws IOException if path does not exist
    */
   void fsync(String src, long fileId, String clientName, long lastBlockLength)
-      throws IOException, UnresolvedLinkException {
+      throws IOException {
     NameNode.stateChangeLog.info("BLOCK* fsync: " + src + " for " + clientName);
     checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
@@ -3933,15 +3932,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    *         false if block recovery has been initiated. Since the lease owner
    *         has been changed and logged, caller should call logSync().
    */
-  boolean internalReleaseLease(Lease lease, String src, 
-      String recoveryLeaseHolder) throws AlreadyBeingCreatedException, 
-      IOException, UnresolvedLinkException {
+  boolean internalReleaseLease(Lease lease, String src, INodesInPath iip,
+      String recoveryLeaseHolder) throws IOException {
     LOG.info("Recovering " + lease + ", src=" + src);
     assert !isInSafeMode();
     assert hasWriteLock();
 
-    final INodesInPath iip = dir.getLastINodeInPath(src);
-    final INodeFile pendingFile = iip.getINode(0).asFile();
+    final INodeFile pendingFile = iip.getLastINode().asFile();
     int nrBlocks = pendingFile.numBlocks();
     BlockInfo[] blocks = pendingFile.getBlocks();
 
@@ -4070,7 +4067,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 
   private void commitOrCompleteLastBlock(final INodeFile fileINode,
-      final Block commitBlock) throws IOException {
+      final INodesInPath iip, final Block commitBlock) throws IOException {
     assert hasWriteLock();
     Preconditions.checkArgument(fileINode.isUnderConstruction());
     if (!blockManager.commitOrCompleteLastBlock(fileINode, commitBlock)) {
@@ -4081,8 +4078,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final long diff = fileINode.getPreferredBlockSize() - commitBlock.getNumBytes();    
     if (diff > 0) {
       try {
-        String path = fileINode.getFullPathName();
-        dir.updateSpaceConsumed(path, 0, -diff*fileINode.getFileReplication());
+        dir.updateSpaceConsumed(iip, 0, -diff*fileINode.getFileReplication());
       } catch (IOException e) {
         LOG.warn("Unexpected exception while updating disk space.", e);
       }
@@ -4090,8 +4086,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 
   private void finalizeINodeFileUnderConstruction(String src,
-      INodeFile pendingFile, int latestSnapshot) throws IOException,
-      UnresolvedLinkException {
+      INodeFile pendingFile, int latestSnapshot) throws IOException {
     assert hasWriteLock();
 
     FileUnderConstructionFeature uc = pendingFile.getFileUnderConstructionFeature();
@@ -4103,13 +4098,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     // The file is no longer pending.
     // Create permanent INode, update blocks. No need to replace the inode here
     // since we just remove the uc feature from pendingFile
-    final INodeFile newFile = pendingFile.toCompleteFile(now());
+    pendingFile.toCompleteFile(now());
 
     waitForLoadingFSImage();
     // close file and persist block allocations for this file
-    closeFile(src, newFile);
+    closeFile(src, pendingFile);
 
-    blockManager.checkReplication(newFile);
+    blockManager.checkReplication(pendingFile);
   }
 
   @VisibleForTesting
@@ -4126,11 +4121,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       return false;
     }
 
-    INodeFile inodeUC = (INodeFile) bc;
-    String fullName = inodeUC.getName();
+    String fullName = bc.getName();
     try {
       if (fullName != null && fullName.startsWith(Path.SEPARATOR)
-          && dir.getINode(fullName) == inodeUC) {
+          && dir.getINode(fullName) == bc) {
         // If file exists in normal path then no need to look in snapshot
         return false;
       }
@@ -4139,7 +4133,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       return false;
     }
     /*
-     * 1. if bc is an instance of INodeFileUnderConstructionWithSnapshot, and
+     * 1. if bc is under construction and also with snapshot, and
      * bc is not in the current fsdirectory tree, bc must represent a snapshot
      * file. 
      * 2. if fullName is not an absolute path, bc cannot be existent in the 
@@ -4153,8 +4147,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   void commitBlockSynchronization(ExtendedBlock lastblock,
       long newgenerationstamp, long newlength,
       boolean closeFile, boolean deleteblock, DatanodeID[] newtargets,
-      String[] newtargetstorages)
-      throws IOException, UnresolvedLinkException {
+      String[] newtargetstorages) throws IOException {
     LOG.info("commitBlockSynchronization(lastblock=" + lastblock
              + ", newgenerationstamp=" + newgenerationstamp
              + ", newlength=" + newlength
@@ -4312,10 +4305,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   @VisibleForTesting
   String closeFileCommitBlocks(INodeFile pendingFile, BlockInfo storedBlock)
       throws IOException {
-    String src = pendingFile.getFullPathName();
+    final INodesInPath iip = INodesInPath.fromINode(pendingFile);
+    final String src = iip.getPath();
 
     // commit the last block and complete it if it has minimum replicas
-    commitOrCompleteLastBlock(pendingFile, storedBlock);
+    commitOrCompleteLastBlock(pendingFile, iip, storedBlock);
 
     //remove lease, close file
     finalizeINodeFileUnderConstruction(src, pendingFile,
@@ -4515,7 +4509,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   /**
    * Add the given symbolic link to the fs. Record it in the edits log.
    */
-  private INodeSymlink addSymlink(String path, String target,
+  private INodeSymlink addSymlink(String path, INodesInPath iip, String target,
                                   PermissionStatus dirPerms,
                                   boolean createParent, boolean logRetryCache)
       throws UnresolvedLinkException, FileAlreadyExistsException,
@@ -4524,15 +4518,17 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
     final long modTime = now();
     if (createParent) {
-      final String parent = new Path(path).getParent().toString();
-      if (!FSDirMkdirOp.mkdirsRecursively(dir, parent, dirPerms, true,
-          modTime)) {
+      INodesInPath parentIIP = iip.getParentINodesInPath();
+      if (parentIIP == null || (parentIIP = FSDirMkdirOp.mkdirsRecursively(dir,
+          parentIIP, dirPerms, true, modTime)) == null) {
         return null;
+      } else {
+        iip = INodesInPath.append(parentIIP, null, iip.getLastLocalName());
       }
     }
     final String userName = dirPerms.getUserName();
     long id = dir.allocateNewInodeId();
-    INodeSymlink newNode = dir.addSymlink(id, path, target, modTime, modTime,
+    INodeSymlink newNode = dir.addSymlink(iip, id, target, modTime, modTime,
             new PermissionStatus(userName, null, FsPermission.getDefault()));
     if (newNode == null) {
       NameNode.stateChangeLog.info("addSymlink: failed to add " + path);
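
The same resolution branch appears three times in the FSNamesystem hunks above
(getAdditionalBlock, abandonBlock, completeFileInternal): clients that still send
INodeId.GRANDFATHER_INODE_ID are resolved by path, newer clients by inode id, and
either way the method now ends up holding an INodesInPath rather than a bare path
string. A condensed sketch of that branch as it appears in abandonBlock and
completeFileInternal (getAdditionalBlock resolves with getINodesInPath4Write
instead), again assuming package-private access; the helper class and names are
hypothetical:

package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.fs.UnresolvedLinkException;

class ResolveByIdSketch {
  static INodesInPath resolve(FSDirectory dir, String src, long fileId)
      throws UnresolvedLinkException {
    if (fileId == INodeId.GRANDFATHER_INODE_ID) {
      // Older clients did not send an inode id: fall back to the path and
      // hope it has not been renamed or deleted since the file was opened.
      return dir.getINodesInPath(src, true);
    }
    // Newer clients send the inode id: look the inode up directly and let
    // INodesInPath.fromINode rebuild the ancestry (callers then refresh
    // src from iip.getPath(), as the hunks above do).
    INode inode = dir.getInode(fileId);
    return INodesInPath.fromINode(inode);
  }
}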


[03/50] [abbrv] hadoop git commit: HADOOP-11368. Fix SSLFactory truststore reloader thread leak in KMSClientProvider. Contributed by Arun Suresh.

Posted by ka...@apache.org.
HADOOP-11368. Fix SSLFactory truststore reloader thread leak in KMSClientProvider. Contributed by Arun Suresh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74d4bfde
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74d4bfde
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74d4bfde

Branch: refs/heads/YARN-2139
Commit: 74d4bfded98239507511dedb515bc6a54958d5a8
Parents: d777a1e
Author: Andrew Wang <wa...@apache.org>
Authored: Tue Dec 9 10:46:50 2014 -0800
Committer: Andrew Wang <wa...@apache.org>
Committed: Tue Dec 9 10:47:24 2014 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../crypto/key/kms/KMSClientProvider.java       |  4 +++
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 26 ++++++++++++++++++++
 3 files changed, 33 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74d4bfde/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5e2ff8d..2051698 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -539,6 +539,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11369. Fix new findbugs warnings in hadoop-mapreduce-client,
     non-core directories. (Li Lu via wheat9)
 
+    HADOOP-11368. Fix SSLFactory truststore reloader thread leak in
+    KMSClientProvider. (Arun Suresh via wang)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74d4bfde/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index cb03683..50dd1ad 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -827,6 +827,10 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
       encKeyVersionQueue.shutdown();
     } catch (Exception e) {
       throw new IOException(e);
+    } finally {
+      if (sslFactory != null) {
+        sslFactory.destroy();
+      }
     }
   }
 }
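
The fix above follows a standard close() shape: the background "Truststore
reloader thread" is owned by the SSLFactory, so the factory must be destroyed
even when shutting down the encrypted key version queue throws. A minimal,
self-contained sketch of that shape (not the actual KMSClientProvider code;
the types and field names below are stand-ins for ValueQueue and SSLFactory):

import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.ExecutorService;

class LeakFreeClient implements Closeable {
  private final ExecutorService encKeyVersionQueue; // stand-in for the EEK queue
  private final AutoCloseable sslFactory;           // stand-in for SSLFactory, may be null

  LeakFreeClient(ExecutorService queue, AutoCloseable factory) {
    this.encKeyVersionQueue = queue;
    this.sslFactory = factory;
  }

  @Override
  public void close() throws IOException {
    try {
      encKeyVersionQueue.shutdown();
    } catch (Exception e) {
      throw new IOException(e);
    } finally {
      // Always stop the factory so its reloader thread cannot leak,
      // mirroring the sslFactory.destroy() call added in the patch.
      if (sslFactory != null) {
        try {
          sslFactory.close();
        } catch (Exception e) {
          // Best-effort cleanup; do not mask the original error.
        }
      }
    }
  }
}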

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74d4bfde/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index 61ce807..f487e98 100644
--- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -303,6 +303,32 @@ public class TestKMS {
             url.getProtocol().equals("https"));
         final URI uri = createKMSUri(getKMSUrl());
 
+        if (ssl) {
+          KeyProvider testKp = new KMSClientProvider(uri, conf);
+          ThreadGroup threadGroup = Thread.currentThread().getThreadGroup();
+          while (threadGroup.getParent() != null) {
+            threadGroup = threadGroup.getParent();
+          }
+          Thread[] threads = new Thread[threadGroup.activeCount()];
+          threadGroup.enumerate(threads);
+          Thread reloaderThread = null;
+          for (Thread thread : threads) {
+            if ((thread.getName() != null)
+                && (thread.getName().contains("Truststore reloader thread"))) {
+              reloaderThread = thread;
+            }
+          }
+          Assert.assertTrue("Reloader is not alive", reloaderThread.isAlive());
+          testKp.close();
+          boolean reloaderStillAlive = true;
+          for (int i = 0; i < 10; i++) {
+            reloaderStillAlive = reloaderThread.isAlive();
+            if (!reloaderStillAlive) break;
+            Thread.sleep(1000);
+          }
+          Assert.assertFalse("Reloader is still alive", reloaderStillAlive);
+        }
+
         if (kerberos) {
           for (String user : new String[]{"client", "client/host"}) {
             doAs(user, new PrivilegedExceptionAction<Void>() {