Posted to common-commits@hadoop.apache.org by ae...@apache.org on 2018/02/15 23:50:30 UTC

[01/50] [abbrv] hadoop git commit: HDFS-12935. Get ambiguous result for DFSAdmin command in HA mode when only one namenode is up. Contributed by Jianfei Jiang.

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 fc84744f7 -> a2ffd9cea


HDFS-12935. Get ambiguous result for DFSAdmin command in HA mode when only one namenode is up. Contributed by Jianfei Jiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01bd6ab1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01bd6ab1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01bd6ab1

Branch: refs/heads/HDFS-7240
Commit: 01bd6ab18fa48f4c7cac1497905b52e547962599
Parents: 266da25
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Wed Feb 7 23:10:33 2018 +0530
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Wed Feb 7 23:10:33 2018 +0530

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/HAUtil.java     |   9 +-
 .../hdfs/server/namenode/FSNamesystem.java      |   2 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 192 +++++---
 .../hadoop/hdfs/tools/TestDFSAdminWithHA.java   | 464 ++++++++++++++++++-
 4 files changed, 602 insertions(+), 65 deletions(-)
----------------------------------------------------------------------
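A minimal, self-contained sketch of the error-handling pattern the diffs below apply to each HA-aware DFSAdmin subcommand: try the operation on every NameNode, record per-node failures, and only fail the whole command afterwards, so a single down NameNode no longer masks the result from the other. The AdminOp interface, runOnAll helper, and endpoint strings are illustrative names, not Hadoop APIs; MultipleIOException is the real helper the patch relies on.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.io.MultipleIOException;

public class PerNameNodeAggregationSketch {

  /** Hypothetical per-NameNode action, e.g. refreshNodes() on one proxy. */
  interface AdminOp {
    void run(String endpoint) throws IOException;
  }

  static void runOnAll(List<String> endpoints, AdminOp op) throws IOException {
    List<IOException> exceptions = new ArrayList<>();
    for (String endpoint : endpoints) {
      try {
        op.run(endpoint);
        System.out.println("Operation successful for " + endpoint);
      } catch (IOException ioe) {
        // Keep going so the remaining NameNodes still receive the request.
        System.out.println("Operation failed for " + endpoint);
        exceptions.add(ioe);
      }
    }
    if (!exceptions.isEmpty()) {
      // Report every failure at once instead of aborting on the first one.
      throw MultipleIOException.createIOException(exceptions);
    }
  }
}

The same shape recurs in the saveNamespace, restoreFailedStorage, refreshNodes, finalizeUpgrade, metaSave, and refresh* command paths in the DFSAdmin diff below.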


http://git-wip-us.apache.org/repos/asf/hadoop/blob/01bd6ab1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
index 3556086..1d294be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
+import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.StandbyException;
@@ -325,6 +326,7 @@ public class HAUtil {
    */
   public static boolean isAtLeastOneActive(List<ClientProtocol> namenodes)
       throws IOException {
+    List<IOException> exceptions = new ArrayList<>();
     for (ClientProtocol namenode : namenodes) {
       try {
         namenode.getFileInfo("/");
@@ -334,10 +336,15 @@ public class HAUtil {
         if (cause instanceof StandbyException) {
           // This is expected to happen for a standby NN.
         } else {
-          throw re;
+          exceptions.add(re);
         }
+      } catch (IOException ioe) {
+        exceptions.add(ioe);
       }
     }
+    if(!exceptions.isEmpty()){
+      throw MultipleIOException.createIOException(exceptions);
+    }
     return false;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01bd6ab1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index ece649d..0c9b875 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4437,7 +4437,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
   void setBalancerBandwidth(long bandwidth) throws IOException {
     String operationName = "setBalancerBandwidth";
-    checkOperation(OperationCategory.UNCHECKED);
+    checkOperation(OperationCategory.WRITE);
     checkSuperuserPrivilege(operationName);
     getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth);
     logAuditEvent(true, operationName, null);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01bd6ab1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 1bedd82..023fea9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -50,7 +50,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.FsStatus;
-import org.apache.hadoop.fs.FsTracer;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.shell.Command;
@@ -86,6 +85,7 @@ import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
+import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
@@ -811,16 +811,26 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<ClientProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
           nsId, ClientProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
-        boolean saved = proxy.getProxy().saveNamespace(timeWindow, txGap);
-        if (saved) {
-          System.out.println("Save namespace successful for " +
+        try{
+          boolean saved = proxy.getProxy().saveNamespace(timeWindow, txGap);
+          if (saved) {
+            System.out.println("Save namespace successful for " +
+                proxy.getAddress());
+          } else {
+            System.out.println("No extra checkpoint has been made for "
+                + proxy.getAddress());
+          }
+        }catch (IOException ioe){
+          System.out.println("Save namespace failed for " +
               proxy.getAddress());
-        } else {
-          System.out.println("No extra checkpoint has been made for "
-              + proxy.getAddress());
+          exceptions.add(ioe);
         }
       }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
+      }
     } else {
       boolean saved = dfs.saveNamespace(timeWindow, txGap);
       if (saved) {
@@ -863,10 +873,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<ClientProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
           nsId, ClientProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
-        Boolean res = proxy.getProxy().restoreFailedStorage(arg);
-        System.out.println("restoreFailedStorage is set to " + res + " for "
-            + proxy.getAddress());
+        try{
+          Boolean res = proxy.getProxy().restoreFailedStorage(arg);
+          System.out.println("restoreFailedStorage is set to " + res + " for "
+              + proxy.getAddress());
+        } catch (IOException ioe){
+          System.out.println("restoreFailedStorage failed for "
+              + proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       Boolean res = dfs.restoreFailedStorage(arg);
@@ -896,10 +916,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<ClientProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
           nsId, ClientProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<ClientProtocol> proxy: proxies) {
-        proxy.getProxy().refreshNodes();
-        System.out.println("Refresh nodes successful for " +
-            proxy.getAddress());
+        try{
+          proxy.getProxy().refreshNodes();
+          System.out.println("Refresh nodes successful for " +
+              proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Refresh nodes failed for " +
+              proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       dfs.refreshNodes();
@@ -944,21 +974,14 @@ public class DFSAdmin extends FsShell {
     EnumSet<OpenFilesType> openFilesTypes = EnumSet.copyOf(types);
 
     DistributedFileSystem dfs = getDFS();
-    Configuration dfsConf = dfs.getConf();
-    URI dfsUri = dfs.getUri();
-    boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
-
     RemoteIterator<OpenFileEntry> openFilesRemoteIterator;
-    if (isHaEnabled) {
-      ProxyAndInfo<ClientProtocol> proxy = NameNodeProxies.createNonHAProxy(
-          dfsConf, HAUtil.getAddressOfActive(getDFS()), ClientProtocol.class,
-          UserGroupInformation.getCurrentUser(), false);
-      openFilesRemoteIterator = new OpenFilesIterator(proxy.getProxy(),
-          FsTracer.get(dfsConf), openFilesTypes, path);
-    } else {
+    try{
       openFilesRemoteIterator = dfs.listOpenFiles(openFilesTypes, path);
+      printOpenFiles(openFilesRemoteIterator);
+    } catch (IOException ioe){
+      System.out.println("List open files failed.");
+      throw ioe;
     }
-    printOpenFiles(openFilesRemoteIterator);
     return 0;
   }
 
@@ -976,8 +999,7 @@ public class DFSAdmin extends FsShell {
   }
 
   /**
-   * Command to ask the namenode to set the balancer bandwidth for all of the
-   * datanodes.
+   * Command to ask the active namenode to set the balancer bandwidth.
    * Usage: hdfs dfsadmin -setBalancerBandwidth bandwidth
   * @param argv List of command line parameters.
    * @param idx The index of the command that is being processed.
@@ -1008,23 +1030,12 @@ public class DFSAdmin extends FsShell {
     }
 
     DistributedFileSystem dfs = (DistributedFileSystem) fs;
-    Configuration dfsConf = dfs.getConf();
-    URI dfsUri = dfs.getUri();
-    boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
-
-    if (isHaEnabled) {
-      String nsId = dfsUri.getHost();
-      List<ProxyAndInfo<ClientProtocol>> proxies =
-          HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
-          nsId, ClientProtocol.class);
-      for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
-        proxy.getProxy().setBalancerBandwidth(bandwidth);
-        System.out.println("Balancer bandwidth is set to " + bandwidth +
-            " for " + proxy.getAddress());
-      }
-    } else {
+    try{
       dfs.setBalancerBandwidth(bandwidth);
       System.out.println("Balancer bandwidth is set to " + bandwidth);
+    } catch (IOException ioe){
+      System.err.println("Balancer bandwidth is set failed.");
+      throw ioe;
     }
     exitCode = 0;
 
@@ -1382,10 +1393,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<ClientProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
           nsId, ClientProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
-        proxy.getProxy().finalizeUpgrade();
-        System.out.println("Finalize upgrade successful for " +
-            proxy.getAddress());
+        try{
+          proxy.getProxy().finalizeUpgrade();
+          System.out.println("Finalize upgrade successful for " +
+              proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Finalize upgrade failed for " +
+              proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       dfs.finalizeUpgrade();
@@ -1415,10 +1436,21 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<ClientProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
           nsId, ClientProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
-        proxy.getProxy().metaSave(pathname);
-        System.out.println("Created metasave file " + pathname + " in the log "
-            + "directory of namenode " + proxy.getAddress());
+        try{
+          proxy.getProxy().metaSave(pathname);
+          System.out.println("Created metasave file " + pathname
+              + " in the log directory of namenode " + proxy.getAddress());
+        } catch (IOException ioe){
+          System.out.println("Created metasave file " + pathname
+              + " in the log directory of namenode " + proxy.getAddress()
+              + " failed");
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       dfs.metaSave(pathname);
@@ -1503,10 +1535,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<RefreshAuthorizationPolicyProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
               RefreshAuthorizationPolicyProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<RefreshAuthorizationPolicyProtocol> proxy : proxies) {
-        proxy.getProxy().refreshServiceAcl();
-        System.out.println("Refresh service acl successful for "
-            + proxy.getAddress());
+        try{
+          proxy.getProxy().refreshServiceAcl();
+          System.out.println("Refresh service acl successful for "
+              + proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Refresh service acl failed for "
+              + proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()) {
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       // Create the client
@@ -1546,10 +1588,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
               RefreshUserMappingsProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
-        proxy.getProxy().refreshUserToGroupsMappings();
-        System.out.println("Refresh user to groups mapping successful for "
-            + proxy.getAddress());
+        try{
+          proxy.getProxy().refreshUserToGroupsMappings();
+          System.out.println("Refresh user to groups mapping successful for "
+              + proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Refresh user to groups mapping failed for "
+              + proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       // Create the client
@@ -1591,10 +1643,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
               RefreshUserMappingsProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
-        proxy.getProxy().refreshSuperUserGroupsConfiguration();
-        System.out.println("Refresh super user groups configuration " +
-            "successful for " + proxy.getAddress());
+        try{
+          proxy.getProxy().refreshSuperUserGroupsConfiguration();
+          System.out.println("Refresh super user groups configuration " +
+              "successful for " + proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Refresh super user groups configuration " +
+              "failed for " + proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       // Create the client
@@ -1630,10 +1692,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<RefreshCallQueueProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
               RefreshCallQueueProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<RefreshCallQueueProtocol> proxy : proxies) {
-        proxy.getProxy().refreshCallQueue();
-        System.out.println("Refresh call queue successful for "
-            + proxy.getAddress());
+        try{
+          proxy.getProxy().refreshCallQueue();
+          System.out.println("Refresh call queue successful for "
+              + proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Refresh call queue failed for "
+              + proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       // Create the client

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01bd6ab1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
index 74f5e7a..97daf09 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
@@ -33,6 +33,7 @@ import org.junit.After;
 import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -50,7 +51,7 @@ public class TestDFSAdminWithHA {
   private static String newLine = System.getProperty("line.separator");
 
   private void assertOutputMatches(String string) {
-    String errOutput = new String(out.toByteArray(), Charsets.UTF_8);
+    String errOutput = new String(err.toByteArray(), Charsets.UTF_8);
     String output = new String(out.toByteArray(), Charsets.UTF_8);
 
     if (!errOutput.matches(string) && !output.matches(string)) {
@@ -156,6 +157,60 @@ public class TestDFSAdminWithHA {
   }
 
   @Test (timeout = 30000)
+  public void testSaveNamespaceNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    // Safe mode should be turned ON in order to create namespace image.
+    int exitCode = admin.run(new String[] {"-safemode", "enter"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    String message = "Safe mode is ON in.*";
+    assertOutputMatches(message + newLine + message + newLine);
+
+    cluster.getDfsCluster().shutdownNameNode(1);
+//
+    exitCode = admin.run(new String[] {"-saveNamespace"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "Save namespace successful for.*" + newLine
+        + "Save namespace failed for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testSaveNamespaceNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    // Safe mode should be turned ON in order to create namespace image.
+    int exitCode = admin.run(new String[] {"-safemode", "enter"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    String message = "Safe mode is ON in.*";
+    assertOutputMatches(message + newLine + message + newLine);
+
+    cluster.getDfsCluster().shutdownNameNode(0);
+
+    exitCode = admin.run(new String[] {"-saveNamespace"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "Save namespace failed for.*" + newLine
+        + "Save namespace successful for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testSaveNamespaceNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    // Safe mode should be turned ON in order to create namespace image.
+    int exitCode = admin.run(new String[] {"-safemode", "enter"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    String message = "Safe mode is ON in.*";
+    assertOutputMatches(message + newLine + message + newLine);
+
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+
+    exitCode = admin.run(new String[] {"-saveNamespace"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "Save namespace failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
   public void testRestoreFailedStorage() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"});
@@ -176,6 +231,76 @@ public class TestDFSAdminWithHA {
   }
 
   @Test (timeout = 30000)
+  public void testRestoreFailedStorageNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "restoreFailedStorage is set to false for.*" + newLine
+        + "restoreFailedStorage failed for.*" + newLine;
+    // Default is false
+    assertOutputMatches(message);
+
+    exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "restoreFailedStorage is set to true for.*" + newLine
+        + "restoreFailedStorage failed for.*" + newLine;
+    assertOutputMatches(message);
+
+    exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "restoreFailedStorage is set to false for.*" + newLine
+        + "restoreFailedStorage failed for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRestoreFailedStorageNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "restoreFailedStorage failed for.*" + newLine
+        + "restoreFailedStorage is set to false for.*" + newLine;
+    // Default is false
+    assertOutputMatches(message);
+
+    exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "restoreFailedStorage failed for.*" + newLine
+        + "restoreFailedStorage is set to true for.*" + newLine;
+    assertOutputMatches(message);
+
+    exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "restoreFailedStorage failed for.*" + newLine
+        + "restoreFailedStorage is set to false for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRestoreFailedStorageNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "restoreFailedStorage failed for.*";
+    // Default is false
+    assertOutputMatches(message + newLine + message + newLine);
+
+    exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "restoreFailedStorage failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+
+    exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "restoreFailedStorage failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
   public void testRefreshNodes() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-refreshNodes"});
@@ -185,12 +310,81 @@ public class TestDFSAdminWithHA {
   }
 
   @Test (timeout = 30000)
+  public void testRefreshNodesNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshNodes"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh nodes successful for.*" + newLine
+        + "Refresh nodes failed for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshNodesNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(new String[] {"-refreshNodes"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh nodes failed for.*" + newLine
+        + "Refresh nodes successful for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshNodesNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshNodes"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh nodes failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
   public void testSetBalancerBandwidth() throws Exception {
     setUpHaCluster(false);
+    cluster.getDfsCluster().transitionToActive(0);
+
     int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
     assertEquals(err.toString().trim(), 0, exitCode);
-    String message = "Balancer bandwidth is set to 10 for.*";
-    assertOutputMatches(message + newLine + message + newLine);
+    String message = "Balancer bandwidth is set to 10";
+    assertOutputMatches(message + newLine);
+  }
+
+  @Test (timeout = 30000)
+  public void testSetBalancerBandwidthNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    cluster.getDfsCluster().transitionToActive(0);
+    int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    String message = "Balancer bandwidth is set to 10";
+    assertOutputMatches(message + newLine);
+  }
+
+  @Test (timeout = 30000)
+  public void testSetBalancerBandwidthNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().transitionToActive(1);
+    int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    String message = "Balancer bandwidth is set to 10";
+    assertOutputMatches(message + newLine);
+  }
+
+  @Test
+  public void testSetBalancerBandwidthNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Balancer bandwidth is set failed." + newLine
+        + ".*" + newLine;
+    assertOutputMatches(message);
   }
 
   @Test (timeout = 30000)
@@ -211,6 +405,44 @@ public class TestDFSAdminWithHA {
   }
 
   @Test (timeout = 30000)
+  public void testMetaSaveNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Created metasave file dfs.meta in the log directory"
+        + " of namenode.*" + newLine
+        + "Created metasave file dfs.meta in the log directory"
+        + " of namenode.*failed" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testMetaSaveNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Created metasave file dfs.meta in the log directory"
+        + " of namenode.*failed" + newLine
+        + "Created metasave file dfs.meta in the log directory"
+        + " of namenode.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testMetaSaveNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Created metasave file dfs.meta in the log directory"
+        + " of namenode.*failed";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
   public void testRefreshServiceAcl() throws Exception {
     setUpHaCluster(true);
     int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
@@ -220,6 +452,40 @@ public class TestDFSAdminWithHA {
   }
 
   @Test (timeout = 30000)
+  public void testRefreshServiceAclNN1UpNN2Down() throws Exception {
+    setUpHaCluster(true);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh service acl successful for.*" + newLine
+        + "Refresh service acl failed for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshServiceAclNN1DownNN2Up() throws Exception {
+    setUpHaCluster(true);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh service acl failed for.*" + newLine
+        + "Refresh service acl successful for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshServiceAclNN1DownNN2Down() throws Exception {
+    setUpHaCluster(true);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh service acl failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+
+  @Test (timeout = 30000)
   public void testRefreshUserToGroupsMappings() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
@@ -229,6 +495,43 @@ public class TestDFSAdminWithHA {
   }
 
   @Test (timeout = 30000)
+  public void testRefreshUserToGroupsMappingsNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh user to groups mapping successful for.*"
+        + newLine
+        + "Refresh user to groups mapping failed for.*"
+        + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshUserToGroupsMappingsNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh user to groups mapping failed for.*"
+        + newLine
+        + "Refresh user to groups mapping successful for.*"
+        + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshUserToGroupsMappingsNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh user to groups mapping failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
   public void testRefreshSuperUserGroupsConfiguration() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(
@@ -239,6 +542,49 @@ public class TestDFSAdminWithHA {
   }
 
   @Test (timeout = 30000)
+  public void testRefreshSuperUserGroupsConfigurationNN1UpNN2Down()
+      throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(
+        new String[] {"-refreshSuperUserGroupsConfiguration"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh super user groups configuration successful for.*"
+        + newLine
+        + "Refresh super user groups configuration failed for.*"
+        + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshSuperUserGroupsConfigurationNN1DownNN2Up()
+      throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(
+        new String[] {"-refreshSuperUserGroupsConfiguration"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh super user groups configuration failed for.*"
+        + newLine
+        + "Refresh super user groups configuration successful for.*"
+        + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshSuperUserGroupsConfigurationNN1DownNN2Down()
+      throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(
+        new String[] {"-refreshSuperUserGroupsConfiguration"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh super user groups configuration failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
   public void testRefreshCallQueue() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-refreshCallQueue"});
@@ -246,4 +592,116 @@ public class TestDFSAdminWithHA {
     String message = "Refresh call queue successful for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
+
+  @Test (timeout = 30000)
+  public void testRefreshCallQueueNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshCallQueue"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh call queue successful for.*" + newLine
+        + "Refresh call queue failed for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshCallQueueNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(new String[] {"-refreshCallQueue"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh call queue failed for.*" + newLine
+        + "Refresh call queue successful for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshCallQueueNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshCallQueue"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh call queue failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
+  public void testFinalizeUpgrade() throws Exception {
+    setUpHaCluster(false);
+    int exitCode = admin.run(new String[] {"-finalizeUpgrade"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = ".*Cannot finalize with no NameNode active";
+    assertOutputMatches(message + newLine);
+
+    cluster.getDfsCluster().transitionToActive(0);
+    exitCode = admin.run(new String[] {"-finalizeUpgrade"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    message = "Finalize upgrade successful for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
+  public void testFinalizeUpgradeNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    cluster.getDfsCluster().transitionToActive(0);
+    int exitCode = admin.run(new String[] {"-finalizeUpgrade"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Finalize upgrade successful for .*" + newLine
+        + "Finalize upgrade failed for .*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testFinalizeUpgradeNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().transitionToActive(1);
+    int exitCode = admin.run(new String[] {"-finalizeUpgrade"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Finalize upgrade failed for .*" + newLine
+        + "Finalize upgrade successful for .*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testFinalizeUpgradeNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-finalizeUpgrade"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = ".*2 exceptions.*";
+    assertOutputMatches(message + newLine);
+  }
+
+  @Test (timeout = 30000)
+  public void testListOpenFilesNN1UpNN2Down() throws Exception{
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    cluster.getDfsCluster().transitionToActive(0);
+    int exitCode = admin.run(new String[] {"-listOpenFiles"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+  }
+
+  @Test (timeout = 30000)
+  public void testListOpenFilesNN1DownNN2Up() throws Exception{
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().transitionToActive(1);
+    int exitCode = admin.run(new String[] {"-listOpenFiles"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+  }
+
+  @Test
+  public void testListOpenFilesNN1DownNN2Down() throws Exception{
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-listOpenFiles"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = ".*" + newLine + "List open files failed." + newLine;
+    assertOutputMatches(message);
+  }
 }




[30/50] [abbrv] hadoop git commit: HADOOP-12897. KerberosAuthenticator.authenticate to include URL on IO failures. Contributed by Ajay Kumar.

Posted by ae...@apache.org.
HADOOP-12897. KerberosAuthenticator.authenticate to include URL on IO failures. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/332269de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/332269de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/332269de

Branch: refs/heads/HDFS-7240
Commit: 332269de065d0f40eb54ee5e53b765217c24081e
Parents: c5e6e3d
Author: Arpit Agarwal <ar...@apache.org>
Authored: Tue Feb 13 10:14:16 2018 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Tue Feb 13 10:14:16 2018 -0800

----------------------------------------------------------------------
 .../client/KerberosAuthenticator.java           | 80 +++++++++++++-------
 .../client/TestKerberosAuthenticator.java       | 29 +++++++
 2 files changed, 82 insertions(+), 27 deletions(-)
----------------------------------------------------------------------
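A standalone sketch of the reflective re-wrapping trick the patch adds as wrapExceptionWithMessage: construct a new exception of the same concrete type whose message names the failing URL, chain the original as the cause, and fall back to the unmodified exception when the type has no (String) constructor. The class name, main method, and sample URL below are illustrative only.

import java.io.IOException;
import java.lang.reflect.Constructor;
import java.net.URL;

public final class WrapWithContextSketch {

  @SuppressWarnings("unchecked")
  static <T extends Exception> T wrap(T exception, String msg) {
    try {
      Constructor<? extends Throwable> ctor =
          exception.getClass().getConstructor(String.class);
      Throwable wrapped = ctor.newInstance(msg);
      // Keep the original exception reachable as the cause.
      return (T) wrapped.initCause(exception);
    } catch (ReflectiveOperationException e) {
      // e.g. CharacterCodingException has no (String) constructor: return as-is.
      return exception;
    }
  }

  public static void main(String[] args) throws Exception {
    URL url = new URL("http://localhost:8088/");  // illustrative endpoint
    IOException ex = wrap(new IOException("connection refused"),
        "Error while authenticating with endpoint: " + url);
    System.out.println(ex.getMessage() + " (cause: " + ex.getCause() + ")");
  }
}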


http://git-wip-us.apache.org/repos/asf/hadoop/blob/332269de/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
index 942d13c..64d4330 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
@@ -13,6 +13,8 @@
  */
 package org.apache.hadoop.security.authentication.client;
 
+import com.google.common.annotations.VisibleForTesting;
+import java.lang.reflect.Constructor;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.security.authentication.server.HttpConstants;
 import org.apache.hadoop.security.authentication.util.AuthToken;
@@ -177,41 +179,65 @@ public class KerberosAuthenticator implements Authenticator {
    */
   @Override
   public void authenticate(URL url, AuthenticatedURL.Token token)
-    throws IOException, AuthenticationException {
+      throws IOException, AuthenticationException {
     if (!token.isSet()) {
       this.url = url;
       base64 = new Base64(0);
-      HttpURLConnection conn = token.openConnection(url, connConfigurator);
-      conn.setRequestMethod(AUTH_HTTP_METHOD);
-      conn.connect();
-      
-      boolean needFallback = false;
-      if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
-        LOG.debug("JDK performed authentication on our behalf.");
-        // If the JDK already did the SPNEGO back-and-forth for
-        // us, just pull out the token.
-        AuthenticatedURL.extractToken(conn, token);
-        if (isTokenKerberos(token)) {
-          return;
+      try {
+        HttpURLConnection conn = token.openConnection(url, connConfigurator);
+        conn.setRequestMethod(AUTH_HTTP_METHOD);
+        conn.connect();
+
+        boolean needFallback = false;
+        if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
+          LOG.debug("JDK performed authentication on our behalf.");
+          // If the JDK already did the SPNEGO back-and-forth for
+          // us, just pull out the token.
+          AuthenticatedURL.extractToken(conn, token);
+          if (isTokenKerberos(token)) {
+            return;
+          }
+          needFallback = true;
         }
-        needFallback = true;
-      }
-      if (!needFallback && isNegotiate(conn)) {
-        LOG.debug("Performing our own SPNEGO sequence.");
-        doSpnegoSequence(token);
-      } else {
-        LOG.debug("Using fallback authenticator sequence.");
-        Authenticator auth = getFallBackAuthenticator();
-        // Make sure that the fall back authenticator have the same
-        // ConnectionConfigurator, since the method might be overridden.
-        // Otherwise the fall back authenticator might not have the information
-        // to make the connection (e.g., SSL certificates)
-        auth.setConnectionConfigurator(connConfigurator);
-        auth.authenticate(url, token);
+        if (!needFallback && isNegotiate(conn)) {
+          LOG.debug("Performing our own SPNEGO sequence.");
+          doSpnegoSequence(token);
+        } else {
+          LOG.debug("Using fallback authenticator sequence.");
+          Authenticator auth = getFallBackAuthenticator();
+          // Make sure that the fall back authenticator have the same
+          // ConnectionConfigurator, since the method might be overridden.
+          // Otherwise the fall back authenticator might not have the
+          // information to make the connection (e.g., SSL certificates)
+          auth.setConnectionConfigurator(connConfigurator);
+          auth.authenticate(url, token);
+        }
+      } catch (IOException ex){
+        throw wrapExceptionWithMessage(ex,
+            "Error while authenticating with endpoint: " + url);
+      } catch (AuthenticationException ex){
+        throw wrapExceptionWithMessage(ex,
+            "Error while authenticating with endpoint: " + url);
       }
     }
   }
 
+  @VisibleForTesting
+   static <T extends Exception> T wrapExceptionWithMessage(
+      T exception, String msg) {
+    Class<? extends Throwable> exceptionClass = exception.getClass();
+    try {
+      Constructor<? extends Throwable> ctor = exceptionClass
+          .getConstructor(String.class);
+      Throwable t = ctor.newInstance(msg);
+      return (T) (t.initCause(exception));
+    } catch (Throwable e) {
+      LOG.debug("Unable to wrap exception of type {}, it has "
+          + "no (String) constructor.", exceptionClass, e);
+      return exception;
+    }
+  }
+
   /**
    * If the specified URL does not support SPNEGO authentication, a fallback {@link Authenticator} will be used.
    * <p>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/332269de/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java
index 7db53ba..4aabb34 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java
@@ -20,6 +20,9 @@ import static org.apache.hadoop.security.authentication.server.KerberosAuthentic
 import static org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler.KEYTAB;
 import static org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler.NAME_RULES;
 
+import java.io.IOException;
+import java.nio.charset.CharacterCodingException;
+import javax.security.sasl.AuthenticationException;
 import org.apache.hadoop.minikdc.KerberosSecurityTestcase;
 import org.apache.hadoop.security.authentication.KerberosTestUtils;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
@@ -218,4 +221,30 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
     });
   }
 
+  @Test(timeout = 60000)
+  public void testWrapExceptionWithMessage() {
+    IOException ex;
+    ex = new IOException("Induced exception");
+    ex = KerberosAuthenticator.wrapExceptionWithMessage(ex, "Error while "
+        + "authenticating with endpoint: localhost");
+    Assert.assertEquals("Induced exception", ex.getCause().getMessage());
+    Assert.assertEquals("Error while authenticating with endpoint: localhost",
+        ex.getMessage());
+
+    ex = new AuthenticationException("Auth exception");
+    ex = KerberosAuthenticator.wrapExceptionWithMessage(ex, "Error while "
+        + "authenticating with endpoint: localhost");
+    Assert.assertEquals("Auth exception", ex.getCause().getMessage());
+    Assert.assertEquals("Error while authenticating with endpoint: localhost",
+        ex.getMessage());
+
+    // Test for Exception with  no (String) constructor
+    // redirect the LOG to and check log message
+    ex = new CharacterCodingException();
+    Exception ex2 = KerberosAuthenticator.wrapExceptionWithMessage(ex,
+        "Error while authenticating with endpoint: localhost");
+    Assert.assertTrue(ex instanceof CharacterCodingException);
+    Assert.assertTrue(ex.equals(ex2));
+  }
+
 }




[09/50] [abbrv] hadoop git commit: YARN-7827. Stop and Delete Yarn Service from RM UI fails with HTTP ERROR 404. Contributed by Sunil G

Posted by ae...@apache.org.
YARN-7827. Stop and Delete Yarn Service from RM UI fails with HTTP ERROR 404. Contributed by Sunil G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ddec08d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ddec08d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ddec08d7

Branch: refs/heads/HDFS-7240
Commit: ddec08d7ccc8e43492fca2784203bd8af5e968cc
Parents: 1bc03dd
Author: Jian He <ji...@apache.org>
Authored: Thu Feb 8 21:32:02 2018 -0800
Committer: Jian He <ji...@apache.org>
Committed: Thu Feb 8 21:32:40 2018 -0800

----------------------------------------------------------------------
 .../src/main/webapp/app/adapters/yarn-servicedef.js     |  9 ++++++---
 .../src/main/webapp/app/components/deploy-service.js    | 12 +++++++++---
 .../src/main/webapp/app/controllers/yarn-app.js         |  4 ++--
 .../src/main/webapp/app/controllers/yarn-app/info.js    |  4 ++--
 .../main/webapp/app/controllers/yarn-deploy-service.js  | 12 ++++++------
 .../webapp/app/templates/components/deploy-service.hbs  | 10 ++++++++++
 .../src/main/webapp/app/templates/yarn-app.hbs          |  4 ++--
 .../src/main/webapp/app/templates/yarn-app/info.hbs     |  4 ++--
 .../src/main/webapp/app/utils/info-seeder.js            |  3 ++-
 .../src/main/webapp/config/default-config.js            |  2 +-
 10 files changed, 42 insertions(+), 22 deletions(-)
----------------------------------------------------------------------
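For reference, a hypothetical Java rendering of the REST call the Ember adapter's stopService() issues after this patch: a PUT to <services-api>/<serviceName> carrying the user.name query parameter that the change threads through from the new userName field. The base URL, service name, and user below are assumptions for illustration; the UI derives the real URL from its "dashService" REST namespace.

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class StopServiceCallSketch {
  public static void main(String[] args) throws Exception {
    String base = "http://rm-host:8088/app/v1/services";   // assumed endpoint
    String serviceName = "sleeper-service";                // illustrative
    String user = "ambari-qa";                             // illustrative
    URL url = new URL(base + "/" + serviceName + "/?user.name=" + user);

    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("PUT");
    conn.setRequestProperty("Content-Type", "application/json");
    conn.setDoOutput(true);

    // Same body the adapter sends: {"state": "STOPPED", "name": serviceName}
    String body = "{\"state\":\"STOPPED\",\"name\":\"" + serviceName + "\"}";
    try (OutputStream os = conn.getOutputStream()) {
      os.write(body.getBytes(StandardCharsets.UTF_8));
    }
    System.out.println("HTTP " + conn.getResponseCode());
  }
}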


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddec08d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
index 3fb4a81..03685fb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js
@@ -24,21 +24,24 @@ export default RESTAbstractAdapter.extend({
   restNameSpace: "dashService",
   serverName: "DASH",
 
-  deployService(request) {
+  deployService(request, user) {
     var url = this.buildURL();
+    url += "/?user.name=" + user;
     return this.ajax(url, "POST", {data: request});
   },
 
-  stopService(serviceName) {
+  stopService(serviceName, user) {
     var url = this.buildURL();
     url += "/" + serviceName;
+    url += "/?user.name=" + user;
     var data = {"state": "STOPPED", "name": serviceName};
     return this.ajax(url, "PUT", {data: data});
   },
 
-  deleteService(serviceName) {
+  deleteService(serviceName, user) {
     var url = this.buildURL();
     url += "/" + serviceName;
+    url += "/?user.name=" + user;
     return this.ajax(url, "DELETE", {data: {}});
   }
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddec08d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js
index 90e10e5..36895d7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js
@@ -27,6 +27,7 @@ export default Ember.Component.extend({
   customServiceDef: '',
   serviceResp: null,
   isLoading: false,
+  userName: '',
 
   actions: {
     showSaveTemplateModal() {
@@ -36,11 +37,11 @@ export default Ember.Component.extend({
     deployService() {
       this.set('serviceResp', null);
       if (this.get('isStandardViewType')) {
-        this.sendAction("deployServiceDef", this.get('serviceDef'));
+        this.sendAction("deployServiceDef", this.get('serviceDef'), this.get('userName'));
       } else {
         try {
           var parsed = JSON.parse(this.get('customServiceDef'));
-          this.sendAction("deployServiceJson", parsed);
+          this.sendAction("deployServiceJson", parsed, this.get('userName'));
         } catch (err) {
           this.set('serviceResp', {type: 'error', message: 'Invalid JSON: ' + err.message});
           throw err;
@@ -148,16 +149,21 @@ export default Ember.Component.extend({
 
   isValidTemplateName: Ember.computed.notEmpty('savedTemplateName'),
 
+  isUserNameGiven: Ember.computed.empty('userName'),
+
   isValidServiceDef: Ember.computed('serviceDef.name', 'serviceDef.queue', 'serviceDef.serviceComponents.[]', function () {
     return this.get('serviceDef').isValidServiceDef();
   }),
 
   isValidCustomServiceDef: Ember.computed.notEmpty('customServiceDef'),
 
-  enableSaveOrDeployBtn: Ember.computed('isValidServiceDef', 'isValidCustomServiceDef', 'viewType', 'isLoading', function() {
+  enableSaveOrDeployBtn: Ember.computed('isValidServiceDef', 'isValidCustomServiceDef', 'viewType', 'isLoading', 'isUserNameGiven', function() {
     if (this.get('isLoading')) {
       return false;
     }
+    if (this.get('isUserNameGiven')) {
+      return false;
+    }
     if (this.get('isStandardViewType')) {
       return this.get('isValidServiceDef');
     } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddec08d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
index b2b99b1..d80f172 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
@@ -43,7 +43,7 @@ export default Ember.Controller.extend({
       Ember.$("#stopServiceConfirmDialog").modal('hide');
       var adapter = this.store.adapterFor('yarn-servicedef');
       self.set('isLoading', true);
-      adapter.stopService(this.model.serviceName).then(function () {
+      adapter.stopService(this.model.serviceName, this.get('model.app.user')).then(function () {
         self.set('actionResponse', { msg: 'Service stopped successfully. Auto refreshing in 5 seconds.', type: 'success' });
         Ember.run.later(self, function () {
           this.set('actionResponse', null);
@@ -67,7 +67,7 @@ export default Ember.Controller.extend({
       Ember.$("#deleteServiceConfirmDialog").modal('hide');
       var adapter = this.store.adapterFor('yarn-servicedef');
       self.set('isLoading', true);
-      adapter.deleteService(this.model.serviceName).then(function () {
+      adapter.deleteService(this.model.serviceName, this.get('model.app.user')).then(function () {
         self.set('actionResponse', { msg: 'Service deleted successfully. Redirecting to services in 5 seconds.', type: 'success' });
         Ember.run.later(self, function () {
           this.set('actionResponse', null);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddec08d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/info.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/info.js
index 3de6687..bd8d50a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/info.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/info.js
@@ -35,7 +35,7 @@ export default Ember.Controller.extend({
       Ember.$("#stopServiceConfirmDialog").modal('hide');
       var adapter = this.store.adapterFor('yarn-servicedef');
       self.set('isLoading', true);
-      adapter.stopService(this.get('service')).then(function() {
+      adapter.stopService(this.get('service'),  this.get('model.app.user')).then(function() {
         self.set('actionResponse', {msg: 'Service stopped successfully. Auto refreshing in 5 seconds.', type: 'success'});
         Ember.run.later(self, function() {
           this.set('actionResponse', null);
@@ -59,7 +59,7 @@ export default Ember.Controller.extend({
       Ember.$("#deleteServiceConfirmDialog").modal('hide');
       var adapter = this.store.adapterFor('yarn-servicedef');
       self.set('isLoading', true);
-      adapter.deleteService(this.get('service')).then(function() {
+      adapter.deleteService(this.get('service'),  this.get('model.app.user')).then(function() {
         self.set('actionResponse', {msg: 'Service deleted successfully. Redirecting to services in 5 seconds.', type: 'success'});
         Ember.run.later(self, function() {
           this.set('actionResponse', null);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddec08d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-deploy-service.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-deploy-service.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-deploy-service.js
index 25d575f..97cb66f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-deploy-service.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-deploy-service.js
@@ -36,13 +36,13 @@ export default Ember.Controller.extend({
   isLoading: false,
 
   actions: {
-    deployServiceDef(serviceDef) {
+    deployServiceDef(serviceDef, userName) {
       var defjson = serviceDef.getServiceJSON();
-      this.deployServiceApp(defjson);
+      this.deployServiceApp(defjson, userName);
     },
 
-    deployServiceJson(json) {
-      this.deployServiceApp(json);
+    deployServiceJson(json, userName) {
+      this.deployServiceApp(json, userName);
     }
   },
 
@@ -53,11 +53,11 @@ export default Ember.Controller.extend({
     }, 1000);
   },
 
-  deployServiceApp(requestJson) {
+  deployServiceApp(requestJson, userName) {
     var self = this;
     var adapter = this.store.adapterFor('yarn-servicedef');
     this.set('isLoading', true);
-    adapter.deployService(requestJson).then(function() {
+    adapter.deployService(requestJson, userName).then(function() {
       self.set('serviceResponse', {message: 'Service has been accepted successfully. Redirecting to services in a second.', type: 'success'});
       self.gotoServices();
     }, function(errmsg) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddec08d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/deploy-service.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/deploy-service.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/deploy-service.hbs
index a098ec3..720074e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/deploy-service.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/deploy-service.hbs
@@ -29,6 +29,16 @@
       </div>
     </div>
   {{/if}}
+  <div class="row">
+    <div class="col-md-4">
+      <div class="form-group shrink-height">
+        <label class="required">User Name for service</label>
+        <span class="glyphicon glyphicon-info-sign info-icon" data-info="userName"></span>
+        {{input type="text" class="form-control" placeholder="User Name" value=userName}}
+      </div>
+      <br>
+    </div>
+  </div>
   <div class="panel panel-default {{if isLoading 'loading-state'}}">
     {{#if isLoading}}
       <img src="assets/images/spinner.gif" alt="Loading...">

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddec08d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
index d29ec4d..a42dcd3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs
@@ -140,12 +140,12 @@
 
 {{confirm-dialog
   dialogId="stopServiceConfirmDialog"
-  message=(concat 'Are you sure you want to stop service "' model.serviceName '" ?')
+  message=(concat 'Are you sure you want to stop service "' model.serviceName '" for user "' model.app.user '" ?')
   action="stopService"
 }}
 
 {{confirm-dialog
   dialogId="deleteServiceConfirmDialog"
-  message=(concat 'Are you sure you want to delete service "' model.serviceName '" ?')
+  message=(concat 'Are you sure you want to delete service "' model.serviceName '" for user "' model.app.user '" ?')
   action="deleteService"
 }}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddec08d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs
index a4937cc..beae7d3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs
@@ -128,12 +128,12 @@
 
 {{confirm-dialog
   dialogId="stopServiceConfirmDialog"
-  message=(concat 'Are you sure you want to stop service "' model.serviceName '" ?')
+  message=(concat 'Are you sure you want to stop service "' model.serviceName '" for user "' model.app.user '" ?')
   action="stopService"
 }}
 
 {{confirm-dialog
   dialogId="deleteServiceConfirmDialog"
-  message=(concat 'Are you sure you want to delete service "' model.serviceName '" ?')
+  message=(concat 'Are you sure you want to delete service "' model.serviceName '" for user "' model.app.user '" ?')
   action="deleteService"
 }}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddec08d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/info-seeder.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/info-seeder.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/info-seeder.js
index d63b3c5..3d01391 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/info-seeder.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/info-seeder.js
@@ -22,5 +22,6 @@ export default {
   lifetime: "Life time (in seconds) of the application from the time it reaches the STARTED state (after which it is automatically destroyed by YARN). For unlimited lifetime do not set a lifetime value.",
   components: "One or more components of the application. If the application is HBase say, then the component can be a simple role like master or regionserver. If the application is a complex business webapp then a component can be other applications say Kafka or Storm. Thereby it opens up the support for complex and nested applications.",
   configurations: "Set of configuration properties that can be injected into the application components via envs, files and custom pluggable helper docker containers. Files of several standard formats like xml, properties, json, yaml and templates will be supported.",
-  fileConfigs: "Set of file configurations that needs to be created and made available as a volume in an application component container."
+  fileConfigs: "Set of file configurations that needs to be created and made available as a volume in an application component container.",
+  userName: "Name of the user who launches the service."
 };

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddec08d7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js
index e916361..ff95115 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js
@@ -30,7 +30,7 @@ module.exports = { // YARN UI App configurations
       cluster: 'ws/v1/cluster',
       metrics: 'ws/v1/cluster/metrics',
       timelineV2: 'ws/v2/timeline',
-      dashService: 'ws/v1/services',
+      dashService: 'app/v1/services',
       node: '{nodeAddress}/ws/v1/node'
     },
 };
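
For reference, the REST shape of these calls: the adapter now tags every request to the
services endpoint with the owning user. Below is a minimal sketch of the stop request,
assuming simple authentication (the user.name query parameter identifies the caller);
the ResourceManager address, service name and user are illustrative only:

  import java.io.OutputStream;
  import java.net.HttpURLConnection;
  import java.net.URL;
  import java.nio.charset.StandardCharsets;

  // Sketch of the PUT that the updated stopService() adapter method issues.
  public class StopServiceSketch {
    public static void main(String[] args) throws Exception {
      String rm = "http://rm-host:8088";          // assumed RM web address
      String service = "sleeper-service";         // assumed service name
      String user = "service-owner";              // user who launched the service
      URL url = new URL(rm + "/app/v1/services/" + service + "?user.name=" + user);
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      conn.setRequestMethod("PUT");
      conn.setRequestProperty("Content-Type", "application/json");
      conn.setDoOutput(true);
      byte[] body = ("{\"state\": \"STOPPED\", \"name\": \"" + service + "\"}")
          .getBytes(StandardCharsets.UTF_8);
      try (OutputStream out = conn.getOutputStream()) {
        out.write(body);
      }
      System.out.println("HTTP " + conn.getResponseCode());
    }
  }

Deploy (POST to app/v1/services) and delete (DELETE on the service URL) carry the same
user.name parameter, matching the adapter changes above.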




[05/50] [abbrv] hadoop git commit: HDFS-13120. Snapshot diff could be corrupted after concat. Contributed by Xiaoyu Yao.

Posted by ae...@apache.org.
HDFS-13120. Snapshot diff could be corrupted after concat. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8faf0b50
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8faf0b50
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8faf0b50

Branch: refs/heads/HDFS-7240
Commit: 8faf0b50d435039f69ea35f592856ca04d378809
Parents: f491f71
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Thu Feb 8 08:59:48 2018 -0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Feb 8 08:59:48 2018 -0800

----------------------------------------------------------------------
 .../hdfs/server/namenode/FSDirConcatOp.java     |   4 +-
 .../namenode/snapshot/TestSnapshotDeletion.java | 126 +++++++++++++++++++
 2 files changed, 129 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8faf0b50/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index 6a41cd8..4cc5389 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -253,7 +253,9 @@ class FSDirConcatOp {
     for (INodeFile nodeToRemove : srcList) {
       if(nodeToRemove != null) {
         nodeToRemove.clearBlocks();
-        nodeToRemove.getParent().removeChild(nodeToRemove);
+        // Ensure the nodeToRemove is cleared from snapshot diff list
+        nodeToRemove.getParent().removeChild(nodeToRemove,
+            targetIIP.getLatestSnapshotId());
         fsd.getINodeMap().remove(nodeToRemove);
         count++;
       }
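
The scenario behind the fix can be reproduced from the client API alone: sources created
after a snapshot are concatenated into a destination that already existed at snapshot
time, then the snapshot diff is read back. A minimal sketch mirroring the new tests,
assuming fs.defaultFS points at an HDFS cluster; paths, file sizes and snapshot names
are illustrative:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.DistributedFileSystem;
  import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;

  public class ConcatUnderSnapshotSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // Assumes the default filesystem is HDFS.
      DistributedFileSystem dfs =
          (DistributedFileSystem) new Path("/st").getFileSystem(conf);

      Path dir = new Path("/st");
      dfs.mkdirs(dir);
      dfs.allowSnapshot(dir);

      Path dest = new Path(dir, "dest.txt");
      dfs.createNewFile(dest);
      dfs.createSnapshot(dir, "ss");       // dest exists, sources do not

      Path[] srcs = new Path[3];
      for (int i = 0; i < srcs.length; i++) {
        srcs[i] = new Path(dir, i + ".txt");
        try (FSDataOutputStream out = dfs.create(srcs[i])) {
          out.write(new byte[1024]);       // small, non-empty sources
        }
      }

      dfs.concat(dest, srcs);              // sources are absorbed into dest
      dfs.createSnapshot(dir, "s0");

      // Before the fix, the removed sources could leave stale entries in the
      // snapshot diff list; afterwards the report stays consistent.
      SnapshotDiffReport report = dfs.getSnapshotDiffReport(dir, "s0", "ss");
      System.out.println(report);
    }
  }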

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8faf0b50/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index ca53788..8bd7967 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -26,18 +26,22 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.security.PrivilegedAction;
 
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -61,11 +65,15 @@ import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Tests snapshot deletion.
  */
 public class TestSnapshotDeletion {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestSnapshotDeletion.class);
   protected static final long seed = 0;
   protected static final short REPLICATION = 3;
   protected static final short REPLICATION_1 = 2;
@@ -1232,4 +1240,122 @@ public class TestSnapshotDeletion {
     // make sure bar has been cleaned from inodeMap
     Assert.assertNull(fsdir.getInode(fileId));
   }
+
+  @Test
+  public void testSnapshotWithConcatException() throws Exception {
+    final Path st = new Path("/st");
+    hdfs.mkdirs(st);
+    hdfs.allowSnapshot(st);
+
+    Path[] files = new Path[3];
+    for (int i = 0; i < 3; i++) {
+      files[i] = new Path(st, i+ ".txt");
+    }
+
+    Path dest = new Path(st, "dest.txt");
+    hdfs.createNewFile(dest);
+    hdfs.createSnapshot(st, "ss");
+
+    for (int j = 0; j < 3; j++) {
+      FileSystem fs = cluster.getFileSystem();
+      DFSTestUtil.createFile(fs, files[j], false, 1024,
+          1024, 512, (short) 1, RandomUtils.nextLong(1, 512), true);
+    }
+
+    hdfs.createSnapshot(st, "s0");
+
+    // Verify the SnapshotException is thrown as expected for HDFS-4529
+    exception.expect(RemoteException.class);
+    String error = "Concat: the source file /st/0.txt is in snapshot";
+    exception.expectMessage(error);
+    hdfs.concat(dest, files);
+
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.saveNamespace();
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+    cluster.restartNameNodes();
+  }
+
+  @Test
+  public void testSnapshotDeleteWithConcat() throws Exception {
+    final Path st = new Path("/st");
+    hdfs.mkdirs(st);
+    hdfs.allowSnapshot(st);
+
+    Path[] files = new Path[3];
+    for (int i = 0; i < 3; i++) {
+      files[i] = new Path(st, i+ ".txt");
+    }
+
+    Path dest = new Path(st, "dest.txt");
+    hdfs.createNewFile(dest);
+    hdfs.createSnapshot(st, "ss");
+
+    for (int i = 0; i < 3; i++) {
+      for (int j = 0; j < 3; j++) {
+        FileSystem fs = cluster.getFileSystem();
+        DFSTestUtil.createFile(fs, files[j], false, 1024,
+            1024, 512, (short) 1, RandomUtils.nextLong(1, 512), true);
+      }
+
+      hdfs.concat(dest, files);
+
+      hdfs.createSnapshot(st, "s" + i);
+    }
+
+
+    hdfs.deleteSnapshot(st, "s1");
+
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.saveNamespace();
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+    cluster.restartNameNodes();
+  }
+
+  @Test
+  public void testSnapshotDiffReportWithConcat() throws Exception {
+    final Path st = new Path("/st");
+    hdfs.mkdirs(st);
+    hdfs.allowSnapshot(st);
+
+    Path[] files = new Path[3];
+    for (int i = 0; i < 3; i++) {
+      files[i] = new Path(st, i+ ".txt");
+    }
+
+    Path dest = new Path(st, "dest.txt");
+    hdfs.createNewFile(dest);
+    hdfs.createSnapshot(st, "ss");
+
+    for (int i = 0; i < 3; i++) {
+      for (int j = 0; j < 3; j++) {
+        FileSystem fs = cluster.getFileSystem();
+        DFSTestUtil.createFile(fs, files[j], false, 1024,
+            1024, 512, (short) 1, RandomUtils.nextLong(1, 512), true);
+      }
+
+      hdfs.concat(dest, files);
+
+      hdfs.createSnapshot(st, "s" + i);
+
+      SnapshotDiffReport sdr = hdfs.getSnapshotDiffReport(st, "s" + i, "ss");
+      LOG.info("Snapshot Diff s{} to ss : {}", i, sdr);
+      Assert.assertEquals(sdr.getDiffList().size(), 1);
+      Assert.assertTrue(sdr.getDiffList().get(0).getType() ==
+          SnapshotDiffReport.DiffType.MODIFY);
+      Assert.assertTrue(new Path(st, DFSUtilClient.bytes2String(
+          sdr.getDiffList().get(0).getSourcePath())).equals(dest));
+    }
+
+    hdfs.deleteSnapshot(st, "s1");
+
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.saveNamespace();
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+    cluster.restartNameNodes();
+  }
+
 }




[42/50] [abbrv] hadoop git commit: HADOOP-15176. Enhance IAM Assumed Role support in S3A client. Contributed by Steve Loughran

Posted by ae...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestAssumeRole.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestAssumeRole.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestAssumeRole.java
deleted file mode 100644
index 7c8760b..0000000
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestAssumeRole.java
+++ /dev/null
@@ -1,324 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3a;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.nio.file.AccessDeniedException;
-import java.util.concurrent.Callable;
-
-import com.amazonaws.auth.AWSCredentials;
-import com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceException;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-import static org.apache.hadoop.fs.s3a.Constants.*;
-import static org.apache.hadoop.fs.s3a.S3ATestUtils.*;
-import static org.apache.hadoop.test.LambdaTestUtils.intercept;
-
-/**
- * Tests use of assumed roles.
- * Only run if an assumed role is provided.
- */
-public class ITestAssumeRole extends AbstractS3ATestBase {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ITestAssumeRole.class);
-
-  private static final String ARN_EXAMPLE
-      = "arn:aws:kms:eu-west-1:00000000000:key/" +
-      "0000000-16c9-4832-a1a9-c8bbef25ec8b";
-
-  private static final String E_BAD_ROLE
-      = "Not authorized to perform sts:AssumeRole";
-
-  /**
-   * This is AWS policy removes read access.
-   */
-  public static final String RESTRICTED_POLICY = "{\n"
-      + "   \"Version\": \"2012-10-17\",\n"
-      + "   \"Statement\": [{\n"
-      + "      \"Effect\": \"Deny\",\n"
-      + "      \"Action\": \"s3:ListObjects\",\n"
-      + "      \"Resource\": \"*\"\n"
-      + "    }\n"
-      + "   ]\n"
-      + "}";
-
-  private void assumeRoleTests() {
-    assume("No ARN for role tests", !getAssumedRoleARN().isEmpty());
-  }
-
-  private String getAssumedRoleARN() {
-    return getContract().getConf().getTrimmed(ASSUMED_ROLE_ARN, "");
-  }
-
-  /**
-   * Expect a filesystem to fail to instantiate.
-   * @param conf config to use
-   * @param clazz class of exception to expect
-   * @param text text in exception
-   * @param <E> type of exception as inferred from clazz
-   * @throws Exception if the exception was the wrong class
-   */
-  private <E extends Throwable> void expectFileSystemFailure(
-      Configuration conf,
-      Class<E> clazz,
-      String text) throws Exception {
-    interceptC(clazz,
-        text,
-        () -> new Path(getFileSystem().getUri()).getFileSystem(conf));
-  }
-
-  /**
-   * Experimental variant of intercept() which closes any Closeable
-   * returned.
-   */
-  private static <E extends Throwable> E interceptC(
-      Class<E> clazz, String text,
-      Callable<Closeable> eval)
-      throws Exception {
-
-    return intercept(clazz, text,
-        () -> {
-          try (Closeable c = eval.call()) {
-            return c.toString();
-          }
-        });
-  }
-
-  @Test
-  public void testCreateCredentialProvider() throws IOException {
-    assumeRoleTests();
-    describe("Create the credential provider");
-
-    String roleARN = getAssumedRoleARN();
-
-    Configuration conf = new Configuration(getContract().getConf());
-    conf.set(AWS_CREDENTIALS_PROVIDER, AssumedRoleCredentialProvider.NAME);
-    conf.set(ASSUMED_ROLE_ARN, roleARN);
-    conf.set(ASSUMED_ROLE_SESSION_NAME, "valid");
-    conf.set(ASSUMED_ROLE_SESSION_DURATION, "45m");
-    conf.set(ASSUMED_ROLE_POLICY, RESTRICTED_POLICY);
-    try (AssumedRoleCredentialProvider provider
-             = new AssumedRoleCredentialProvider(conf)) {
-      LOG.info("Provider is {}", provider);
-      AWSCredentials credentials = provider.getCredentials();
-      assertNotNull("Null credentials from " + provider, credentials);
-    }
-  }
-
-  @Test
-  public void testAssumeRoleCreateFS() throws IOException {
-    assumeRoleTests();
-    describe("Create an FS client with the role and do some basic IO");
-
-    String roleARN = getAssumedRoleARN();
-    Configuration conf = createAssumedRoleConfig(roleARN);
-    conf.set(ASSUMED_ROLE_SESSION_NAME, "valid");
-    conf.set(ASSUMED_ROLE_SESSION_DURATION, "45m");
-    Path path = new Path(getFileSystem().getUri());
-    LOG.info("Creating test FS and user {} with assumed role {}",
-        conf.get(ACCESS_KEY), roleARN);
-
-    try (FileSystem fs = path.getFileSystem(conf)) {
-      fs.getFileStatus(new Path("/"));
-      fs.mkdirs(path("testAssumeRoleFS"));
-    }
-  }
-
-  @Test
-  public void testAssumeRoleRestrictedPolicyFS() throws Exception {
-    assumeRoleTests();
-    describe("Restrict the policy for this session; verify that reads fail");
-
-    String roleARN = getAssumedRoleARN();
-    Configuration conf = createAssumedRoleConfig(roleARN);
-    conf.set(ASSUMED_ROLE_POLICY, RESTRICTED_POLICY);
-    Path path = new Path(getFileSystem().getUri());
-    try (FileSystem fs = path.getFileSystem(conf)) {
-      intercept(AccessDeniedException.class, "getFileStatus",
-          () -> fs.getFileStatus(new Path("/")));
-      intercept(AccessDeniedException.class, "getFileStatus",
-          () -> fs.listStatus(new Path("/")));
-      intercept(AccessDeniedException.class, "getFileStatus",
-          () -> fs.mkdirs(path("testAssumeRoleFS")));
-    }
-  }
-
-  @Test
-  public void testAssumeRoleFSBadARN() throws Exception {
-    assumeRoleTests();
-    describe("Attemnpt to create the FS with an invalid ARN");
-    Configuration conf = createAssumedRoleConfig(getAssumedRoleARN());
-    conf.set(ASSUMED_ROLE_ARN, ARN_EXAMPLE);
-    expectFileSystemFailure(conf, AccessDeniedException.class, E_BAD_ROLE);
-  }
-
-  @Test
-  public void testAssumeRoleNoARN() throws Exception {
-    assumeRoleTests();
-    describe("Attemnpt to create the FS with no ARN");
-    Configuration conf = createAssumedRoleConfig(getAssumedRoleARN());
-    conf.unset(ASSUMED_ROLE_ARN);
-    expectFileSystemFailure(conf,
-        IOException.class,
-        AssumedRoleCredentialProvider.E_NO_ROLE);
-  }
-
-  @Test
-  public void testAssumeRoleFSBadPolicy() throws Exception {
-    assumeRoleTests();
-    describe("Attemnpt to create the FS with malformed JSON");
-    Configuration conf = createAssumedRoleConfig(getAssumedRoleARN());
-    // add some malformed JSON
-    conf.set(ASSUMED_ROLE_POLICY, "}");
-    expectFileSystemFailure(conf,
-        AWSBadRequestException.class,
-        "JSON");
-  }
-
-  @Test
-  public void testAssumeRoleFSBadPolicy2() throws Exception {
-    assumeRoleTests();
-    describe("Attemnpt to create the FS with valid but non-compliant JSON");
-    Configuration conf = createAssumedRoleConfig(getAssumedRoleARN());
-    // add some invalid JSON
-    conf.set(ASSUMED_ROLE_POLICY, "{'json':'but not what AWS wants}");
-    expectFileSystemFailure(conf,
-        AWSBadRequestException.class,
-        "Syntax errors in policy");
-  }
-
-  @Test
-  public void testAssumeRoleCannotAuthAssumedRole() throws Exception {
-    assumeRoleTests();
-    describe("Assert that you can't use assumed roles to auth assumed roles");
-
-    Configuration conf = createAssumedRoleConfig(getAssumedRoleARN());
-    conf.set(ASSUMED_ROLE_CREDENTIALS_PROVIDER,
-        AssumedRoleCredentialProvider.NAME);
-    expectFileSystemFailure(conf,
-        IOException.class,
-        AssumedRoleCredentialProvider.E_FORBIDDEN_PROVIDER);
-  }
-
-  @Test
-  public void testAssumeRoleBadInnerAuth() throws Exception {
-    assumeRoleTests();
-    describe("Try to authenticate with a keypair with spaces");
-
-    Configuration conf = createAssumedRoleConfig(getAssumedRoleARN());
-    conf.set(ASSUMED_ROLE_CREDENTIALS_PROVIDER,
-        SimpleAWSCredentialsProvider.NAME);
-    conf.set(ACCESS_KEY, "not valid");
-    conf.set(SECRET_KEY, "not secret");
-    expectFileSystemFailure(conf, AWSBadRequestException.class, "not a valid " +
-        "key=value pair (missing equal-sign) in Authorization header");
-  }
-
-  @Test
-  public void testAssumeRoleBadInnerAuth2() throws Exception {
-    assumeRoleTests();
-    describe("Try to authenticate with an invalid keypair");
-
-    Configuration conf = createAssumedRoleConfig(getAssumedRoleARN());
-    conf.set(ASSUMED_ROLE_CREDENTIALS_PROVIDER,
-        SimpleAWSCredentialsProvider.NAME);
-    conf.set(ACCESS_KEY, "notvalid");
-    conf.set(SECRET_KEY, "notsecret");
-    expectFileSystemFailure(conf, AccessDeniedException.class,
-        "The security token included in the request is invalid");
-  }
-
-  @Test
-  public void testAssumeRoleBadSession() throws Exception {
-    assumeRoleTests();
-    describe("Try to authenticate with an invalid session");
-
-    Configuration conf = createAssumedRoleConfig(getAssumedRoleARN());
-    conf.set(ASSUMED_ROLE_SESSION_NAME,
-        "Session Names cannot Hava Spaces!");
-    expectFileSystemFailure(conf, AWSBadRequestException.class,
-        "Member must satisfy regular expression pattern");
-  }
-
-  /**
-   * Create a config for an assumed role; it also disables FS caching.
-   * @param roleARN ARN of role
-   * @return the configuration
-   */
-  private Configuration createAssumedRoleConfig(String roleARN) {
-    Configuration conf = new Configuration(getContract().getConf());
-    conf.set(AWS_CREDENTIALS_PROVIDER, AssumedRoleCredentialProvider.NAME);
-    conf.set(ASSUMED_ROLE_ARN, roleARN);
-    disableFilesystemCaching(conf);
-    return conf;
-  }
-
-  @Test
-  public void testAssumedRoleCredentialProviderValidation() throws Throwable {
-    Configuration conf = new Configuration();
-    conf.set(ASSUMED_ROLE_ARN, "");
-    interceptC(IOException.class,
-        AssumedRoleCredentialProvider.E_NO_ROLE,
-        () -> new AssumedRoleCredentialProvider(conf));
-  }
-
-  @Test
-  public void testAssumedDuration() throws Throwable {
-    assumeRoleTests();
-    describe("Expect the constructor to fail if the session is to short");
-    Configuration conf = new Configuration();
-    conf.set(ASSUMED_ROLE_SESSION_DURATION, "30s");
-    interceptC(IllegalArgumentException.class, "",
-        () -> new AssumedRoleCredentialProvider(conf));
-  }
-
-  @Test
-  public void testAssumedInvalidRole() throws Throwable {
-    assumeRoleTests();
-    describe("Expect the constructor to fail if the role is invalid");
-    Configuration conf = new Configuration();
-    conf.set(ASSUMED_ROLE_ARN, ARN_EXAMPLE);
-    interceptC(AWSSecurityTokenServiceException.class,
-        E_BAD_ROLE,
-        () -> new AssumedRoleCredentialProvider(conf));
-  }
-
-  /**
-   * This is here to check up on the S3ATestUtils probes themselves.
-   * @see S3ATestUtils#authenticationContains(Configuration, String).
-   */
-  @Test
-  public void testauthenticationContainsProbes() {
-    Configuration conf = new Configuration(false);
-    assertFalse("found AssumedRoleCredentialProvider",
-        authenticationContains(conf, AssumedRoleCredentialProvider.NAME));
-
-    conf.set(AWS_CREDENTIALS_PROVIDER, AssumedRoleCredentialProvider.NAME);
-    assertTrue("didn't find AssumedRoleCredentialProvider",
-        authenticationContains(conf, AssumedRoleCredentialProvider.NAME));
-  }
-}
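
The suite that replaces it lives in org.apache.hadoop.fs.s3a.auth and exercises the
relocated AssumedRoleCredentialProvider. For orientation, a minimal sketch of binding an
S3A client to that provider; the bucket, role ARN and values are illustrative, and the
key names are assumed to be the documented fs.s3a.assumed.role.* options backing the
ASSUMED_ROLE_* constants used in the tests:

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class AssumedRoleClientSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      conf.set("fs.s3a.aws.credentials.provider",
          "org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider");
      conf.set("fs.s3a.assumed.role.arn",
          "arn:aws:iam::123456789012:role/example-restricted-role");
      conf.set("fs.s3a.assumed.role.session.name", "example-session");
      conf.set("fs.s3a.assumed.role.session.duration", "45m");
      // Credentials used for the sts:AssumeRole call itself; per the tests this
      // must not be the assumed-role provider (E_FORBIDDEN_PROVIDER).
      conf.set("fs.s3a.assumed.role.credentials.provider",
          "org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider");

      try (FileSystem fs = FileSystem.newInstance(
          new URI("s3a://example-bucket/"), conf)) {
        fs.listStatus(new Path("/"));   // runs with the role's permissions
      }
    }
  }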

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
index d6533bf..da0060e 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
@@ -19,6 +19,8 @@
 package org.apache.hadoop.fs.s3a;
 
 import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -39,23 +41,28 @@ import org.junit.internal.AssumptionViolatedException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.Closeable;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.text.DateFormat;
 import java.text.SimpleDateFormat;
 import java.util.List;
+import java.util.concurrent.Callable;
 
 import static org.apache.hadoop.fs.contract.ContractTestUtils.skip;
 import static org.apache.hadoop.fs.s3a.InconsistentAmazonS3Client.*;
 import static org.apache.hadoop.fs.s3a.S3ATestConstants.*;
 import static org.apache.hadoop.fs.s3a.Constants.*;
 import static org.apache.hadoop.fs.s3a.S3AUtils.propagateBucketOptions;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 import static org.junit.Assert.*;
 
 /**
  * Utilities for the S3A tests.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 public final class S3ATestUtils {
   private static final Logger LOG = LoggerFactory.getLogger(
       S3ATestUtils.class);
@@ -456,6 +463,33 @@ public final class S3ATestUtils {
   }
 
   /**
+   * Variant of {@code LambdaTestUtils#intercept()} which closes the Closeable
+   * returned by the invoked operation, and using its toString() value
+   * for exception messages.
+   * @param clazz class of exception; the raised exception must be this class
+   * <i>or a subclass</i>.
+   * @param contained string which must be in the {@code toString()} value
+   * of the exception
+   * @param eval expression to eval
+   * @param <T> return type of expression
+   * @param <E> exception class
+   * @return the caught exception if it was of the expected type and contents
+   */
+  public static <E extends Throwable, T extends Closeable> E interceptClosing(
+      Class<E> clazz,
+      String contained,
+      Callable<T> eval)
+      throws Exception {
+
+    return intercept(clazz, contained,
+        () -> {
+          try (Closeable c = eval.call()) {
+            return c.toString();
+          }
+        });
+  }
+
+  /**
    * Helper class to do diffs of metrics.
    */
   public static final class MetricDiff {
@@ -762,21 +796,23 @@ public final class S3ATestUtils {
   }
 
   /**
-   * List a directory.
+   * List a directory/directory tree.
    * @param fileSystem FS
    * @param path path
+   * @param recursive do a recursive listing?
+   * @return the number of files found.
    * @throws IOException failure.
    */
-  public static void lsR(FileSystem fileSystem, Path path, boolean recursive)
+  public static long lsR(FileSystem fileSystem, Path path, boolean recursive)
       throws Exception {
     if (path == null) {
       // surfaces when someone calls getParent() on something at the top
       // of the path
       LOG.info("Empty path");
-      return;
+      return 0;
     }
-    S3AUtils.applyLocatedFiles(fileSystem.listFiles(path, recursive),
-        (status) -> LOG.info("  {}", status));
+    return S3AUtils.applyLocatedFiles(fileSystem.listFiles(path, recursive),
+        (status) -> LOG.info("{}", status));
   }
 
   /**

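A short usage sketch of the new helper, mirroring the ARN-validation case in the
rewritten ITestAssumeRole below (the fs.s3a.assumed.role.arn key name is an assumption
for the ASSUMED_ROLE_ARN constant):

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.s3a.S3ATestUtils;
  import org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider;

  public class InterceptClosingSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      conf.set("fs.s3a.assumed.role.arn", "");
      // The provider refuses to start without a role ARN; interceptClosing
      // asserts that and closes the provider if it is unexpectedly created.
      IOException ex = S3ATestUtils.interceptClosing(IOException.class,
          AssumedRoleCredentialProvider.E_NO_ROLE,
          () -> new AssumedRoleCredentialProvider(conf));
      System.out.println("caught as expected: " + ex);
    }
  }
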
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java
index a5be5de..d731ae7 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java
@@ -18,12 +18,6 @@
 
 package org.apache.hadoop.fs.s3a;
 
-import static org.apache.hadoop.fs.s3a.Constants.*;
-import static org.apache.hadoop.fs.s3a.S3ATestConstants.*;
-import static org.apache.hadoop.fs.s3a.S3ATestUtils.*;
-import static org.apache.hadoop.fs.s3a.S3AUtils.*;
-import static org.junit.Assert.*;
-
 import java.io.IOException;
 import java.net.URI;
 import java.util.Arrays;
@@ -33,13 +27,19 @@ import com.amazonaws.auth.AWSCredentials;
 import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.auth.EnvironmentVariableCredentialsProvider;
 import com.amazonaws.auth.InstanceProfileCredentialsProvider;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider;
 
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
+import static org.apache.hadoop.fs.s3a.Constants.*;
+import static org.apache.hadoop.fs.s3a.S3ATestConstants.*;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.*;
+import static org.apache.hadoop.fs.s3a.S3AUtils.*;
+import static org.junit.Assert.*;
 
 /**
  * Unit tests for {@link Constants#AWS_CREDENTIALS_PROVIDER} logic.
@@ -248,10 +248,10 @@ public class TestS3AAWSCredentialsProvider {
       AWSCredentialsProvider provider = providers.get(i);
       assertNotNull(
           String.format("At position %d, expected class is %s, but found null.",
-          i, expectedClass), provider);
+              i, expectedClass), provider);
       assertTrue(
           String.format("At position %d, expected class is %s, but found %s.",
-          i, expectedClass, provider.getClass()),
+              i, expectedClass, provider.getClass()),
           expectedClass.isAssignableFrom(provider.getClass()));
     }
   }
@@ -269,7 +269,23 @@ public class TestS3AAWSCredentialsProvider {
     assertNotNull(provider2);
     assertInstanceOf(InstanceProfileCredentialsProvider.class, provider2);
     assertSame("Expected all usage of InstanceProfileCredentialsProvider to "
-        + "share a singleton instance, but found unique instances.",
+            + "share a singleton instance, but found unique instances.",
         provider1, provider2);
   }
+
+  /**
+   * This is here to check up on the S3ATestUtils probes themselves.
+   * @see S3ATestUtils#authenticationContains(Configuration, String).
+   */
+  @Test
+  public void testAuthenticationContainsProbes() {
+    Configuration conf = new Configuration(false);
+    assertFalse("found AssumedRoleCredentialProvider",
+        authenticationContains(conf, AssumedRoleCredentialProvider.NAME));
+
+    conf.set(AWS_CREDENTIALS_PROVIDER, AssumedRoleCredentialProvider.NAME);
+    assertTrue("didn't find AssumedRoleCredentialProvider",
+        authenticationContains(conf, AssumedRoleCredentialProvider.NAME));
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java
new file mode 100644
index 0000000..08171b0
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java
@@ -0,0 +1,789 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.auth;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.file.AccessDeniedException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import com.amazonaws.auth.AWSCredentials;
+import com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceException;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.s3a.AWSBadRequestException;
+import org.apache.hadoop.fs.s3a.AbstractS3ATestBase;
+import org.apache.hadoop.fs.s3a.MultipartUtils;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.fs.s3a.S3AUtils;
+import org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider;
+import org.apache.hadoop.fs.s3a.commit.CommitConstants;
+import org.apache.hadoop.fs.s3a.commit.CommitOperations;
+import org.apache.hadoop.fs.s3a.commit.files.PendingSet;
+import org.apache.hadoop.fs.s3a.commit.files.SinglePendingCommit;
+
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertRenameOutcome;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
+import static org.apache.hadoop.fs.s3a.Constants.*;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.*;
+import static org.apache.hadoop.fs.s3a.S3AUtils.*;
+import static org.apache.hadoop.fs.s3a.auth.RoleTestUtils.*;
+import static org.apache.hadoop.fs.s3a.auth.RoleModel.*;
+import static org.apache.hadoop.fs.s3a.auth.RolePolicies.*;
+import static org.apache.hadoop.fs.s3a.auth.RoleTestUtils.forbidden;
+import static org.apache.hadoop.test.LambdaTestUtils.*;
+
+/**
+ * Tests use of assumed roles.
+ * Only run if an assumed role is provided.
+ */
+@SuppressWarnings({"IOResourceOpenedButNotSafelyClosed", "ThrowableNotThrown"})
+public class ITestAssumeRole extends AbstractS3ATestBase {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ITestAssumeRole.class);
+
+  private static final Path ROOT = new Path("/");
+
+  /**
+   * A role FS; if non-null it is closed in teardown.
+   */
+  private S3AFileSystem roleFS;
+
+  @Override
+  public void setup() throws Exception {
+    super.setup();
+    assumeRoleTests();
+  }
+
+  @Override
+  public void teardown() throws Exception {
+    S3AUtils.closeAll(LOG, roleFS);
+    super.teardown();
+  }
+
+  private void assumeRoleTests() {
+    assume("No ARN for role tests", !getAssumedRoleARN().isEmpty());
+  }
+
+  private String getAssumedRoleARN() {
+    return getContract().getConf().getTrimmed(ASSUMED_ROLE_ARN, "");
+  }
+
+  /**
+   * Expect a filesystem to fail to instantiate.
+   * @param conf config to use
+   * @param clazz class of exception to expect
+   * @param text text in exception
+   * @param <E> type of exception as inferred from clazz
+   * @throws Exception if the exception was the wrong class
+   */
+  private <E extends Throwable> void expectFileSystemCreateFailure(
+      Configuration conf,
+      Class<E> clazz,
+      String text) throws Exception {
+    interceptClosing(clazz,
+        text,
+        () -> new Path(getFileSystem().getUri()).getFileSystem(conf));
+  }
+
+  @Test
+  public void testCreateCredentialProvider() throws IOException {
+    describe("Create the credential provider");
+
+    String roleARN = getAssumedRoleARN();
+
+    Configuration conf = new Configuration(getContract().getConf());
+    conf.set(AWS_CREDENTIALS_PROVIDER, AssumedRoleCredentialProvider.NAME);
+    conf.set(ASSUMED_ROLE_ARN, roleARN);
+    conf.set(ASSUMED_ROLE_SESSION_NAME, "valid");
+    conf.set(ASSUMED_ROLE_SESSION_DURATION, "45m");
+    bindRolePolicy(conf, RESTRICTED_POLICY);
+    try (AssumedRoleCredentialProvider provider
+             = new AssumedRoleCredentialProvider(conf)) {
+      LOG.info("Provider is {}", provider);
+      AWSCredentials credentials = provider.getCredentials();
+      assertNotNull("Null credentials from " + provider, credentials);
+    }
+  }
+
+  @Test
+  public void testAssumedInvalidRole() throws Throwable {
+    Configuration conf = new Configuration();
+    conf.set(ASSUMED_ROLE_ARN, ROLE_ARN_EXAMPLE);
+    interceptClosing(AWSSecurityTokenServiceException.class,
+        E_BAD_ROLE,
+        () -> new AssumedRoleCredentialProvider(conf));
+  }
+
+  @Test
+  public void testAssumeRoleFSBadARN() throws Exception {
+    describe("Attemnpt to create the FS with an invalid ARN");
+    Configuration conf = createAssumedRoleConfig();
+    conf.set(ASSUMED_ROLE_ARN, ROLE_ARN_EXAMPLE);
+    expectFileSystemCreateFailure(conf, AccessDeniedException.class,
+        E_BAD_ROLE);
+  }
+
+  @Test
+  public void testAssumeRoleNoARN() throws Exception {
+    describe("Attemnpt to create the FS with no ARN");
+    Configuration conf = createAssumedRoleConfig();
+    conf.unset(ASSUMED_ROLE_ARN);
+    expectFileSystemCreateFailure(conf,
+        IOException.class,
+        AssumedRoleCredentialProvider.E_NO_ROLE);
+  }
+
+  @Test
+  public void testAssumeRoleFSBadPolicy() throws Exception {
+    describe("Attemnpt to create the FS with malformed JSON");
+    Configuration conf = createAssumedRoleConfig();
+    // add some malformed JSON
+    conf.set(ASSUMED_ROLE_POLICY,  "}");
+    expectFileSystemCreateFailure(conf,
+        AWSBadRequestException.class,
+        "JSON");
+  }
+
+  @Test
+  public void testAssumeRoleFSBadPolicy2() throws Exception {
+    describe("Attempt to create the FS with valid but non-compliant JSON");
+    Configuration conf = createAssumedRoleConfig();
+    // add some invalid JSON
+    conf.set(ASSUMED_ROLE_POLICY, "{'json':'but not what AWS wants}");
+    expectFileSystemCreateFailure(conf,
+        AWSBadRequestException.class,
+        "Syntax errors in policy");
+  }
+
+  @Test
+  public void testAssumeRoleCannotAuthAssumedRole() throws Exception {
+    describe("Assert that you can't use assumed roles to auth assumed roles");
+
+    Configuration conf = createAssumedRoleConfig();
+    conf.set(ASSUMED_ROLE_CREDENTIALS_PROVIDER,
+        AssumedRoleCredentialProvider.NAME);
+    expectFileSystemCreateFailure(conf,
+        IOException.class,
+        AssumedRoleCredentialProvider.E_FORBIDDEN_PROVIDER);
+  }
+
+  @Test
+  public void testAssumeRoleBadInnerAuth() throws Exception {
+    describe("Try to authenticate with a keypair with spaces");
+
+    Configuration conf = createAssumedRoleConfig();
+    conf.set(ASSUMED_ROLE_CREDENTIALS_PROVIDER,
+        SimpleAWSCredentialsProvider.NAME);
+    conf.set(ACCESS_KEY, "not valid");
+    conf.set(SECRET_KEY, "not secret");
+    expectFileSystemCreateFailure(conf,
+        AWSBadRequestException.class,
+        "not a valid " +
+        "key=value pair (missing equal-sign) in Authorization header");
+  }
+
+  @Test
+  public void testAssumeRoleBadInnerAuth2() throws Exception {
+    describe("Try to authenticate with an invalid keypair");
+
+    Configuration conf = createAssumedRoleConfig();
+    conf.set(ASSUMED_ROLE_CREDENTIALS_PROVIDER,
+        SimpleAWSCredentialsProvider.NAME);
+    conf.set(ACCESS_KEY, "notvalid");
+    conf.set(SECRET_KEY, "notsecret");
+    expectFileSystemCreateFailure(conf,
+        AccessDeniedException.class,
+        "The security token included in the request is invalid");
+  }
+
+  @Test
+  public void testAssumeRoleBadSession() throws Exception {
+    describe("Try to authenticate with an invalid session");
+
+    Configuration conf = createAssumedRoleConfig();
+    conf.set(ASSUMED_ROLE_SESSION_NAME,
+        "Session names cannot hava spaces!");
+    expectFileSystemCreateFailure(conf,
+        AWSBadRequestException.class,
+        "Member must satisfy regular expression pattern");
+  }
+
+
+  /**
+   * Create the assumed role configuration.
+   * @return a config bonded to the ARN of the assumed role
+   */
+  public Configuration createAssumedRoleConfig() {
+    return createAssumedRoleConfig(getAssumedRoleARN());
+  }
+
+  /**
+   * Create a config for an assumed role; it also disables FS caching.
+   * @param roleARN ARN of role
+   * @return the new configuration
+   */
+  private Configuration createAssumedRoleConfig(String roleARN) {
+    return newAssumedRoleConfig(getContract().getConf(), roleARN);
+  }
+
+  @Test
+  public void testAssumeRoleUndefined() throws Throwable {
+    describe("Verify that you cannot instantiate the"
+        + " AssumedRoleCredentialProvider without a role ARN");
+    Configuration conf = new Configuration();
+    conf.set(ASSUMED_ROLE_ARN, "");
+    interceptClosing(IOException.class,
+        AssumedRoleCredentialProvider.E_NO_ROLE,
+        () -> new AssumedRoleCredentialProvider(conf));
+  }
+
+  @Test
+  public void testAssumedIllegalDuration() throws Throwable {
+    describe("Expect the constructor to fail if the session is to short");
+    Configuration conf = new Configuration();
+    conf.set(ASSUMED_ROLE_SESSION_DURATION, "30s");
+    interceptClosing(IllegalArgumentException.class, "",
+        () -> new AssumedRoleCredentialProvider(conf));
+  }
+
+
+  @Test
+  public void testAssumeRoleCreateFS() throws IOException {
+    describe("Create an FS client with the role and do some basic IO");
+
+    String roleARN = getAssumedRoleARN();
+    Configuration conf = createAssumedRoleConfig(roleARN);
+    Path path = new Path(getFileSystem().getUri());
+    LOG.info("Creating test FS and user {} with assumed role {}",
+        conf.get(ACCESS_KEY), roleARN);
+
+    try (FileSystem fs = path.getFileSystem(conf)) {
+      fs.getFileStatus(new Path("/"));
+      fs.mkdirs(path("testAssumeRoleFS"));
+    }
+  }
+
+  @Test
+  public void testAssumeRoleRestrictedPolicyFS() throws Exception {
+    describe("Restrict the policy for this session; verify that reads fail");
+
+    Configuration conf = createAssumedRoleConfig();
+    bindRolePolicy(conf, RESTRICTED_POLICY);
+    Path path = new Path(getFileSystem().getUri());
+    try (FileSystem fs = path.getFileSystem(conf)) {
+      forbidden("getFileStatus",
+          () -> fs.getFileStatus(new Path("/")));
+      forbidden("getFileStatus",
+          () -> fs.listStatus(new Path("/")));
+      forbidden("getFileStatus",
+          () -> fs.mkdirs(path("testAssumeRoleFS")));
+    }
+  }
+
+  /**
+   * Tighten the extra policy on the assumed role call for torrent access,
+   * and verify that it blocks all other operations.
+   * That is: any non-empty policy in the assumeRole API call overrides
+   * all of the policies attached to the role beforehand, switching the
+   * role instance to only the policies supplied in the call.
+   */
+  @Test
+  public void testAssumeRolePoliciesOverrideRolePerms() throws Throwable {
+
+    describe("extra policies in assumed roles need;"
+        + " all required policies stated");
+    Configuration conf = createAssumedRoleConfig();
+
+    bindRolePolicy(conf,
+        policy(statement(false, S3_ALL_BUCKETS, S3_GET_OBJECT_TORRENT)));
+    Path path = path("testAssumeRoleStillIncludesRolePerms");
+    roleFS = (S3AFileSystem) path.getFileSystem(conf);
+    assertTouchForbidden(roleFS, path);
+  }
+
+  /**
+   * After blocking all write verbs used by S3A, try to write data (fail)
+   * and read data (succeed).
+   */
+  @Test
+  public void testReadOnlyOperations() throws Throwable {
+
+    describe("Restrict role to read only");
+    Configuration conf = createAssumedRoleConfig();
+
+    bindRolePolicy(conf,
+        policy(
+            statement(false, S3_ALL_BUCKETS, S3_PATH_WRITE_OPERATIONS),
+            STATEMENT_ALL_S3, STATEMENT_ALL_DDB));
+    Path path = methodPath();
+    roleFS = (S3AFileSystem) path.getFileSystem(conf);
+    // list the root path, expect happy
+    roleFS.listStatus(ROOT);
+
+    // touch will fail
+    assertTouchForbidden(roleFS, path);
+    // you can delete it, because it's not there and getFileStatus() is allowed
+    roleFS.delete(path, true);
+
+    //create it with the full FS
+    getFileSystem().mkdirs(path);
+
+    // and delete will not
+    assertDeleteForbidden(this.roleFS, path);
+
+    // list multipart uploads.
+    // This is part of the read policy.
+    int counter = 0;
+    MultipartUtils.UploadIterator iterator = roleFS.listUploads("/");
+    while (iterator.hasNext()) {
+      counter++;
+      iterator.next();
+    }
+    LOG.info("Found {} outstanding MPUs", counter);
+  }
+
+  /**
+   * Write successfully to the directory with full R/W access,
+   * fail to write or delete data elsewhere.
+   */
+  @SuppressWarnings("StringConcatenationMissingWhitespace")
+  @Test
+  public void testRestrictedWriteSubdir() throws Throwable {
+
+    describe("Attempt writing to paths where a role only has"
+        + " write access to a subdir of the bucket");
+    Path restrictedDir = methodPath();
+    Path child = new Path(restrictedDir, "child");
+    // the full FS
+    S3AFileSystem fs = getFileSystem();
+    fs.delete(restrictedDir, true);
+
+    Configuration conf = createAssumedRoleConfig();
+
+    bindRolePolicyStatements(conf,
+        STATEMENT_ALL_DDB,
+        statement(true, S3_ALL_BUCKETS, S3_ROOT_READ_OPERATIONS),
+        new Statement(Effects.Allow)
+          .addActions(S3_ALL_OPERATIONS)
+          .addResources(directory(restrictedDir)));
+    roleFS = (S3AFileSystem) restrictedDir.getFileSystem(conf);
+
+    roleFS.getFileStatus(ROOT);
+    roleFS.mkdirs(restrictedDir);
+    assertIsDirectory(restrictedDir);
+    // you can create an adjacent child
+    touch(roleFS, child);
+    assertIsFile(child);
+    // child delete rights
+    ContractTestUtils.assertDeleted(roleFS, child, true);
+    // parent delete rights
+    ContractTestUtils.assertDeleted(roleFS, restrictedDir, true);
+    // delete will try to create an empty parent directory marker, and may fail
+    roleFS.delete(restrictedDir, false);
+    // this sibling path has the same prefix as restrictedDir, but is
+    // adjacent. This verifies that a restrictedDir* pattern isn't matching
+    // siblings, so granting broader rights
+    Path sibling = new Path(restrictedDir.toUri() + "sibling");
+    touch(fs, sibling);
+    assertTouchForbidden(roleFS, sibling);
+    assertDeleteForbidden(roleFS, sibling);
+  }
+
+  public Path methodPath() throws IOException {
+    return path(getMethodName());
+  }
+
+  @Test
+  public void testRestrictedRename() throws Throwable {
+    describe("rename with parent paths not writeable");
+    executeRestrictedRename(createAssumedRoleConfig());
+  }
+
+  @Test
+  public void testRestrictedSingleDeleteRename() throws Throwable {
+    describe("rename with parent paths not writeable"
+        + " and multi-object delete disabled");
+    Configuration conf = createAssumedRoleConfig();
+    conf.setBoolean(ENABLE_MULTI_DELETE, false);
+    executeRestrictedRename(conf);
+  }
+
+  /**
+   * Execute a sequence of rename operations.
+   * @param conf FS configuration
+   */
+  public void executeRestrictedRename(final Configuration conf)
+      throws IOException {
+    Path basePath = methodPath();
+    Path restrictedDir = new Path(basePath, "renameSrc");
+    Path destPath = new Path(basePath, "renameDest");
+    Path child = new Path(restrictedDir, "child");
+    // the full FS
+    S3AFileSystem fs = getFileSystem();
+    fs.delete(basePath, true);
+
+    bindRolePolicyStatements(conf,
+        STATEMENT_ALL_DDB,
+        statement(true, S3_ALL_BUCKETS, S3_ROOT_READ_OPERATIONS),
+        new Statement(Effects.Allow)
+          .addActions(S3_PATH_RW_OPERATIONS)
+          .addResources(directory(restrictedDir))
+          .addResources(directory(destPath))
+    );
+    roleFS = (S3AFileSystem) restrictedDir.getFileSystem(conf);
+
+    roleFS.getFileStatus(ROOT);
+    roleFS.mkdirs(restrictedDir);
+    // you can create a child inside the source directory
+    touch(roleFS, child);
+
+    roleFS.delete(destPath, true);
+    // as dest doesn't exist, this will map child -> dest
+    assertRenameOutcome(roleFS, child, destPath, true);
+
+    assertIsFile(destPath);
+    assertIsDirectory(restrictedDir);
+    Path renamedDestPath = new Path(restrictedDir, destPath.getName());
+    assertRenameOutcome(roleFS, destPath, restrictedDir, true);
+    assertIsFile(renamedDestPath);
+    roleFS.delete(restrictedDir, true);
+    roleFS.delete(destPath, true);
+  }
+
+  @Test
+  public void testRestrictedRenameReadOnlyData() throws Throwable {
+    describe("rename with source read only, multidelete");
+    executeRenameReadOnlyData(createAssumedRoleConfig());
+  }
+
+  @Test
+  public void testRestrictedRenameReadOnlySingleDelete() throws Throwable {
+    describe("rename with source read only single delete");
+    Configuration conf = createAssumedRoleConfig();
+    conf.setBoolean(ENABLE_MULTI_DELETE, false);
+    executeRenameReadOnlyData(conf);
+  }
+
+  /**
+   * Execute a sequence of rename operations where the source
+   * data is read only to the client calling rename().
+   * This will cause the inner delete() operations to fail, whose outcomes
+   * are explored.
+   * Multiple files are created (in parallel) for some renames, so the outcome
+   * of bulk delete calls is also explored, including verifying that a
+   * MultiObjectDeleteException is translated to an AccessDeniedException.
+   * <ol>
+   *   <li>The exception raised is AccessDeniedException,
+   *   from single and multi DELETE calls.</li>
+   *   <li>It happens after the COPY. Not ideal, but, well, we can't pretend
+   *   it's a filesystem forever.</li>
+   * </ol>
+   * @param conf FS configuration
+   */
+  public void executeRenameReadOnlyData(final Configuration conf)
+      throws Exception {
+    assume("Does not work with S3Guard", !getFileSystem().hasMetadataStore());
+    Path basePath = methodPath();
+    Path destDir = new Path(basePath, "renameDest");
+    Path readOnlyDir = new Path(basePath, "readonlyDir");
+    Path readOnlyFile = new Path(readOnlyDir, "readonlyChild");
+
+    // the full FS
+    S3AFileSystem fs = getFileSystem();
+    fs.delete(basePath, true);
+
+    // this file is readable by the roleFS, but cannot be deleted
+    touch(fs, readOnlyFile);
+
+    bindRolePolicyStatements(conf,
+        STATEMENT_ALL_DDB,
+        statement(true, S3_ALL_BUCKETS, S3_ROOT_READ_OPERATIONS),
+          new Statement(Effects.Allow)
+            .addActions(S3_PATH_RW_OPERATIONS)
+            .addResources(directory(destDir))
+    );
+    roleFS = (S3AFileSystem) destDir.getFileSystem(conf);
+
+    roleFS.delete(destDir, true);
+    roleFS.mkdirs(destDir);
+    // rename will fail in the delete phase
+    forbidden(readOnlyFile.toString(),
+        () -> roleFS.rename(readOnlyFile, destDir));
+
+    // and the source file is still there
+    assertIsFile(readOnlyFile);
+
+    // but so is the copied version, because there's no attempt
+    // at rollback, or preflight checking on the delete permissions
+    Path renamedFile = new Path(destDir, readOnlyFile.getName());
+
+    assertIsFile(renamedFile);
+
+    ContractTestUtils.assertDeleted(roleFS, renamedFile, true);
+    assertFileCount("Empty Dest Dir", roleFS,
+        destDir, 0);
+    // create a set of files
+    // this is done in parallel as it is 10x faster on a long-haul test run.
+    int range = 10;
+    touchFiles(fs, readOnlyDir, range);
+    // don't forget about that original file!
+    final long createdFiles = range + 1;
+    // are they all there?
+    assertFileCount("files ready to rename", roleFS,
+        readOnlyDir, createdFiles);
+
+    // try to rename the directory
+    LOG.info("Renaming readonly files {} to {}", readOnlyDir, destDir);
+    AccessDeniedException ex = forbidden("",
+        () -> roleFS.rename(readOnlyDir, destDir));
+    LOG.info("Result of renaming read-only files is AccessDeniedException", ex);
+    assertFileCount("files copied to the destination", roleFS,
+        destDir, createdFiles);
+    assertFileCount("files in the source directory", roleFS,
+        readOnlyDir, createdFiles);
+
+    // and finally (so as to avoid the delay of POSTing some more objects),
+    // delete that r/o source
+    forbidden("", () -> roleFS.delete(readOnlyDir, true));
+  }
+
+  /**
+   * Parallel-touch a set of files in the destination directory.
+   * @param fs filesystem
+   * @param destDir destination
+   * @param range files are created for indices 1..range inclusive.
+   */
+  public void touchFiles(final S3AFileSystem fs,
+      final Path destDir,
+      final int range) {
+    IntStream.rangeClosed(1, range).parallel().forEach(
+        (i) -> eval(() -> touch(fs, new Path(destDir, "file-" + i))));
+  }
+
+  @Test
+  public void testRestrictedCommitActions() throws Throwable {
+    describe("Attempt commit operations against a path with restricted rights");
+    Configuration conf = createAssumedRoleConfig();
+    conf.setBoolean(CommitConstants.MAGIC_COMMITTER_ENABLED, true);
+    final int uploadPartSize = 5 * 1024 * 1024;
+
+    Path basePath = methodPath();
+    Path readOnlyDir = new Path(basePath, "readOnlyDir");
+    Path writeableDir = new Path(basePath, "writeableDir");
+    // the full FS
+    S3AFileSystem fs = getFileSystem();
+    fs.delete(basePath, true);
+    fs.mkdirs(readOnlyDir);
+
+    bindRolePolicyStatements(conf,
+        STATEMENT_ALL_DDB,
+        statement(true, S3_ALL_BUCKETS, S3_ROOT_READ_OPERATIONS),
+        new Statement(Effects.Allow)
+            .addActions(S3_PATH_RW_OPERATIONS)
+            .addResources(directory(writeableDir))
+    );
+    roleFS = (S3AFileSystem) writeableDir.getFileSystem(conf);
+    CommitOperations fullOperations = new CommitOperations(fs);
+    CommitOperations operations = new CommitOperations(roleFS);
+
+    File localSrc = File.createTempFile("source", "");
+    writeCSVData(localSrc);
+    Path uploadDest = new Path(readOnlyDir, "restricted.csv");
+
+    forbidden("initiate MultiPartUpload",
+        () -> {
+          return operations.uploadFileToPendingCommit(localSrc,
+              uploadDest, "", uploadPartSize);
+        });
+    // delete the file
+    localSrc.delete();
+    // create a directory there
+    localSrc.mkdirs();
+
+    // create some local files and upload them with the full-rights client
+
+    int range = 2;
+    IntStream.rangeClosed(1, range)
+        .parallel()
+        .forEach((i) -> eval(() -> {
+          String name = "part-000" + i;
+          File src = new File(localSrc, name);
+          Path dest = new Path(readOnlyDir, name);
+          writeCSVData(src);
+          SinglePendingCommit pending =
+              fullOperations.uploadFileToPendingCommit(src, dest, "",
+                  uploadPartSize);
+          pending.save(fs, new Path(readOnlyDir,
+              name + CommitConstants.PENDING_SUFFIX), true);
+          assertTrue(src.delete());
+        }));
+
+    try {
+      // we expect to be able to list all the files here
+      Pair<PendingSet, List<Pair<LocatedFileStatus, IOException>>>
+          pendingCommits = operations.loadSinglePendingCommits(readOnlyDir,
+          true);
+
+      // all those commits must fail
+      List<SinglePendingCommit> commits = pendingCommits.getLeft().getCommits();
+      assertEquals(range, commits.size());
+      commits.parallelStream().forEach(
+          (c) -> {
+            CommitOperations.MaybeIOE maybeIOE = operations.commit(c, "origin");
+            Path path = c.destinationPath();
+            assertCommitAccessDenied(path, maybeIOE);
+          });
+
+      // the list-and-abort of all .pending files must also fail.
+      LOG.info("abortAllSinglePendingCommits({})", readOnlyDir);
+      assertCommitAccessDenied(readOnlyDir,
+          operations.abortAllSinglePendingCommits(readOnlyDir, true));
+
+      // try writing a magic file
+      Path magicDestPath = new Path(readOnlyDir,
+          CommitConstants.MAGIC + "/" + "magic.txt");
+      forbidden("", () -> {
+        touch(roleFS, magicDestPath);
+        // shouldn't get here; if we do, return the status of the 0-byte
+        // dest file so the assertion shows what was created.
+        return fs.getFileStatus(magicDestPath);
+      });
+
+      // a recursive list and abort is blocked.
+      forbidden("",
+          () -> operations.abortPendingUploadsUnderPath(readOnlyDir));
+    } finally {
+      LOG.info("Cleanup");
+      fullOperations.abortPendingUploadsUnderPath(readOnlyDir);
+    }
+  }
+
+  /**
+   * Verifies that an operation returning a "MaybeIOE" failed
+   * with an AccessDeniedException in the maybe instance.
+   * @param path path operated on
+   * @param maybeIOE result to inspect
+   */
+  public void assertCommitAccessDenied(final Path path,
+      final CommitOperations.MaybeIOE maybeIOE) {
+    IOException ex = maybeIOE.getException();
+    assertNotNull("no IOE in " + maybeIOE + " for " + path, ex);
+    if (!(ex instanceof AccessDeniedException)) {
+      ContractTestUtils.fail("Wrong exception class for commit to "
+          + path, ex);
+    }
+  }
+
+  /**
+   * Write some CSV data to a local file.
+   * @param localSrc local file
+   * @throws IOException failure
+   */
+  public void writeCSVData(final File localSrc) throws IOException {
+    try (FileOutputStream fo = new FileOutputStream(localSrc)) {
+      fo.write("1, true".getBytes());
+    }
+  }
+
+  @Test
+  public void testPartialDelete() throws Throwable {
+    describe("delete with part of the child tree read only; multidelete");
+    executePartialDelete(createAssumedRoleConfig());
+  }
+
+  @Test
+  public void testPartialDeleteSingleDelete() throws Throwable {
+    describe("delete with part of the child tree read only");
+    Configuration conf = createAssumedRoleConfig();
+    conf.setBoolean(ENABLE_MULTI_DELETE, false);
+    executePartialDelete(conf);
+  }
+
+  /**
+   * Have a directory with full R/W permissions, but then remove
+   * write access underneath, and try to delete it.
+   * @param conf FS configuration
+   */
+  public void executePartialDelete(final Configuration conf)
+      throws Exception {
+    Path destDir = methodPath();
+    Path readOnlyDir = new Path(destDir, "readonlyDir");
+
+    // the full FS
+    S3AFileSystem fs = getFileSystem();
+    fs.delete(destDir, true);
+
+    bindRolePolicyStatements(conf,
+        STATEMENT_ALL_DDB,
+        statement(true, S3_ALL_BUCKETS, S3_ALL_OPERATIONS),
+        new Statement(Effects.Deny)
+            .addActions(S3_PATH_WRITE_OPERATIONS)
+            .addResources(directory(readOnlyDir))
+    );
+    roleFS = (S3AFileSystem) destDir.getFileSystem(conf);
+
+    int range = 10;
+    touchFiles(fs, readOnlyDir, range);
+    touchFiles(roleFS, destDir, range);
+    forbidden("", () -> roleFS.delete(readOnlyDir, true));
+    forbidden("", () -> roleFS.delete(destDir, true));
+
+    // and although you can't delete under the path, if the file doesn't
+    // exist, the delete call simply returns false.
+    Path pathWhichDoesntExist = new Path(readOnlyDir, "no-such-path");
+    assertFalse("deleting " + pathWhichDoesntExist,
+        roleFS.delete(pathWhichDoesntExist, true));
+  }
+
+  /**
+   * Assert that the number of files in a destination matches that expected.
+   * @param text text to use in the message
+   * @param fs filesystem
+   * @param path path to list (recursively)
+   * @param expected expected count
+   * @throws IOException IO problem
+   */
+  private static void assertFileCount(String text, FileSystem fs,
+      Path path, long expected)
+      throws IOException {
+    List<String> files = new ArrayList<>();
+    applyLocatedFiles(fs.listFiles(path, true),
+        (status) -> files.add(status.getPath().toString()));
+    long actual = files.size();
+    if (actual != expected) {
+      String ls = files.stream().collect(Collectors.joining("\n"));
+      fail(text + ": expected " + expected + " files in " + path
+          + " but got " + actual + "\n" + ls);
+    }
+  }
+}
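
The tests above all follow one pattern: bind a deliberately narrow policy to the assumed-role configuration, open an S3AFileSystem with it, and assert that the blocked calls surface as AccessDeniedException. A minimal sketch of that pattern, using only the RoleTestUtils helpers introduced later in this patch; the base configuration and method path come from the enclosing test class, so treat them as placeholders, and the whole fragment is assumed to live in a test method declared to throw Exception:

    // static imports, as used elsewhere in the patch:
    //   org.apache.hadoop.fs.contract.ContractTestUtils.touch
    //   org.apache.hadoop.fs.s3a.auth.RoleModel.*      (Statement, Effects, statement, directory)
    //   org.apache.hadoop.fs.s3a.auth.RolePolicies.*   (S3_ALL_BUCKETS, S3_ROOT_READ_OPERATIONS, ...)
    //   org.apache.hadoop.fs.s3a.auth.RoleTestUtils.*  (bindRolePolicyStatements, forbidden)

    Configuration conf = createAssumedRoleConfig();   // test-supplied base config
    Path writableDir = methodPath();                  // the only directory the role may write to
    bindRolePolicyStatements(conf,
        STATEMENT_ALL_DDB,
        statement(true, S3_ALL_BUCKETS, S3_ROOT_READ_OPERATIONS),
        new Statement(Effects.Allow)
            .addActions(S3_PATH_RW_OPERATIONS)
            .addResources(directory(writableDir)));
    S3AFileSystem roleFS = (S3AFileSystem) writableDir.getFileSystem(conf);

    // writes inside the granted directory are expected to succeed...
    roleFS.mkdirs(writableDir);
    touch(roleFS, new Path(writableDir, "allowed"));

    // ...while a write outside it is expected to fail with AccessDeniedException
    Path outside = new Path(writableDir.getParent(), "outside");
    forbidden("", () -> {
      touch(roleFS, outside);
      return roleFS.getFileStatus(outside);
    });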

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumedRoleCommitOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumedRoleCommitOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumedRoleCommitOperations.java
new file mode 100644
index 0000000..bb66268
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumedRoleCommitOperations.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.auth;
+
+import java.io.IOException;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.fs.s3a.S3AUtils;
+import org.apache.hadoop.fs.s3a.commit.ITestCommitOperations;
+
+import static org.apache.hadoop.fs.s3a.Constants.ASSUMED_ROLE_ARN;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.assume;
+import static org.apache.hadoop.fs.s3a.auth.RoleModel.*;
+import static org.apache.hadoop.fs.s3a.auth.RolePolicies.*;
+import static org.apache.hadoop.fs.s3a.auth.RoleTestUtils.*;
+
+/**
+ * Verify that the commit operations work with a restricted set of operations.
+ * The superclass, {@link ITestCommitOperations}, turns on an inconsistent client
+ * to see how things work in the presence of inconsistency.
+ * These tests disable it, to remove inconsistency as a factor, and verify
+ * that the policy settings enabling MPU list/commit/abort are all
+ * working properly.
+ */
+public class ITestAssumedRoleCommitOperations extends ITestCommitOperations {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ITestAssumedRoleCommitOperations.class);
+
+  /**
+   * The restricted directory.
+   */
+  private Path restrictedDir;
+
+  /**
+   * A role FS; if non-null it is closed in teardown.
+   */
+  private S3AFileSystem roleFS;
+
+  @Override
+  public boolean useInconsistentClient() {
+    return false;
+  }
+
+  @Override
+  public void setup() throws Exception {
+    super.setup();
+    assumeRoleTests();
+
+    restrictedDir = super.path("restricted");
+    Configuration conf = newAssumedRoleConfig(getConfiguration(),
+        getAssumedRoleARN());
+    bindRolePolicyStatements(conf,
+        STATEMENT_ALL_DDB,
+        statement(true, S3_ALL_BUCKETS, S3_ROOT_READ_OPERATIONS),
+        new RoleModel.Statement(RoleModel.Effects.Allow)
+            .addActions(S3_PATH_RW_OPERATIONS)
+            .addResources(directory(restrictedDir))
+    );
+    roleFS = (S3AFileSystem) restrictedDir.getFileSystem(conf);
+  }
+
+
+  @Override
+  public void teardown() throws Exception {
+    S3AUtils.closeAll(LOG, roleFS);
+    // switches getFileSystem() back to the full FS.
+    roleFS = null;
+    super.teardown();
+  }
+
+  private void assumeRoleTests() {
+    assume("No ARN for role tests", !getAssumedRoleARN().isEmpty());
+  }
+
+  /**
+   * The overridden operation returns the roleFS, so that test cases
+   * in the superclass run under restricted rights.
+   * There's special handling in startup to avoid NPEs.
+   * @return {@link #roleFS}
+   */
+  @Override
+  public S3AFileSystem getFileSystem() {
+    return roleFS != null ? roleFS : getFullFileSystem();
+  }
+
+  /**
+   * Get the FS with full access rights.
+   * @return the FS created by the superclass.
+   */
+  public S3AFileSystem getFullFileSystem() {
+    return super.getFileSystem();
+  }
+
+  /**
+   * Resolve all paths under the restricted directory.
+   * {@inheritDoc}
+   */
+  @Override
+  protected Path path(String filepath) throws IOException {
+    return new Path(restrictedDir, filepath);
+  }
+
+
+  private String getAssumedRoleARN() {
+    return getContract().getConf().getTrimmed(ASSUMED_ROLE_ARN, "");
+  }
+
+}
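
Because getFileSystem() is overridden to return roleFS, every test inherited from ITestCommitOperations transparently runs against the restricted role, while getFullFileSystem() keeps the full-rights client available for setup and cleanup. A hypothetical extra test sketching that split; the test name and file name are illustrative only and not part of this patch:

    // hypothetical example only; relies on the overrides defined above and on
    // static imports of ContractTestUtils.touch
    @Test
    public void testRoleClientCanReadFullClientData() throws Throwable {
      // path() resolves under restrictedDir, where the role has R/W access
      Path file = path("visible.txt");
      // write with the unrestricted filesystem...
      touch(getFullFileSystem(), file);
      // ...and read it back through the restricted roleFS returned by getFileSystem()
      assertIsFile(file);
    }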

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/RoleTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/RoleTestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/RoleTestUtils.java
new file mode 100644
index 0000000..9fa2600
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/RoleTestUtils.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.auth;
+
+import java.nio.file.AccessDeniedException;
+import java.util.concurrent.Callable;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.test.GenericTestUtils;
+
+import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
+import static org.apache.hadoop.fs.s3a.Constants.*;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.disableFilesystemCaching;
+import static org.apache.hadoop.fs.s3a.auth.RoleModel.*;
+import static org.apache.hadoop.fs.s3a.auth.RolePolicies.*;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+
+/**
+ * Helper class for testing roles.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class RoleTestUtils {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(RoleTestUtils.class);
+
+  private static final RoleModel MODEL = new RoleModel();
+
+
+  /** Example ARN of a role. */
+  public static final String ROLE_ARN_EXAMPLE
+      = "arn:aws:iam::9878543210123:role/role-s3-restricted";
+
+
+  /** Deny GET requests to all buckets. */
+  public static final Statement DENY_GET_ALL =
+      statement(false, S3_ALL_BUCKETS, S3_GET_OBJECT);
+
+  /**
+   * An AWS policy that removes read access.
+   */
+  public static final Policy RESTRICTED_POLICY = policy(DENY_GET_ALL);
+
+
+  /**
+   * Error message to get from the AWS SDK if you can't assume the role.
+   */
+  public static final String E_BAD_ROLE
+      = "Not authorized to perform sts:AssumeRole";
+
+  private RoleTestUtils() {
+  }
+
+  /**
+   * Bind the configuration's {@code ASSUMED_ROLE_POLICY} option to
+   * the given policy.
+   * @param conf configuration to patch
+   * @param policy policy to apply
+   * @return the modified configuration
+   * @throws JsonProcessingException JSON marshalling error
+   */
+  public static Configuration bindRolePolicy(final Configuration conf,
+      final Policy policy) throws JsonProcessingException {
+    String p = MODEL.toJson(policy);
+    LOG.info("Setting role policy to policy of size {}:\n{}", p.length(), p);
+    conf.set(ASSUMED_ROLE_POLICY, p);
+    return conf;
+  }
+
+  /**
+   * Wrap a set of statements with a policy and bind the configuration's
+   * {@code ASSUMED_ROLE_POLICY} option to it.
+   * @param conf configuration to patch
+   * @param statements statements to aggregate
+   * @return the modified configuration
+   * @throws JsonProcessingException JSON marshalling error
+   */
+  public static Configuration bindRolePolicyStatements(
+      final Configuration conf,
+      final Statement... statements) throws JsonProcessingException {
+    return bindRolePolicy(conf, policy(statements));
+  }
+
+
+  /**
+   * Try to delete a file, verify that it is not allowed.
+   * @param fs filesystem
+   * @param path path
+   */
+  public static void assertDeleteForbidden(final FileSystem fs, final Path path)
+      throws Exception {
+    intercept(AccessDeniedException.class, "",
+        () -> fs.delete(path, true));
+  }
+
+  /**
+   * Try to touch a file, verify that it is not allowed.
+   * @param fs filesystem
+   * @param path path
+   */
+  public static void assertTouchForbidden(final FileSystem fs, final Path path)
+      throws Exception {
+    intercept(AccessDeniedException.class, "",
+        "Caller could create file at " + path,
+        () -> {
+          touch(fs, path);
+          return fs.getFileStatus(path);
+        });
+  }
+
+  /**
+   * Create a config for an assumed role; it also disables FS caching.
+   * @param srcConf source config: this is not modified
+   * @param roleARN ARN of role
+   * @return the new configuration
+   */
+  public static Configuration newAssumedRoleConfig(
+      final Configuration srcConf,
+      final String roleARN) {
+    Configuration conf = new Configuration(srcConf);
+    conf.set(AWS_CREDENTIALS_PROVIDER, AssumedRoleCredentialProvider.NAME);
+    conf.set(ASSUMED_ROLE_ARN, roleARN);
+    conf.set(ASSUMED_ROLE_SESSION_NAME, "valid");
+    conf.set(ASSUMED_ROLE_SESSION_DURATION, "15m");
+    disableFilesystemCaching(conf);
+    return conf;
+  }
+
+  /**
+   * Assert that an operation is forbidden.
+   * @param contained contained text, may be null
+   * @param eval closure to evaluate
+   * @param <T> type of closure
+   * @return the access denied exception
+   * @throws Exception any other exception
+   */
+  public static <T> AccessDeniedException forbidden(
+      String contained,
+      Callable<T> eval)
+      throws Exception {
+    AccessDeniedException ex = intercept(AccessDeniedException.class, eval);
+    GenericTestUtils.assertExceptionContains(contained, ex);
+    return ex;
+  }
+
+}
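
These helpers are the building blocks used throughout the patch: newAssumedRoleConfig() switches the credential provider to AssumedRoleCredentialProvider and points it at a role ARN, bindRolePolicy() serializes a RoleModel policy into the ASSUMED_ROLE_POLICY option, and the assert/forbidden methods turn AccessDeniedException into test assertions. A short sketch of RESTRICTED_POLICY in use, assuming the usual S3A test-base helpers (getConfiguration(), getAssumedRoleARN(), path()), a bucket with a configured test role, and a test method declared to throw Exception:

    // sketch only: needs a real bucket plus a role the test credentials may assume
    Configuration conf = newAssumedRoleConfig(getConfiguration(), getAssumedRoleARN());
    bindRolePolicy(conf, RESTRICTED_POLICY);   // session policy contains only DENY_GET_ALL
    Path target = path("restricted.txt");
    FileSystem restrictedFS = target.getFileSystem(conf);
    // the policy denies s3:GetObject and grants nothing else, so both the write
    // and the delete (which needs getFileStatus) are expected to be denied
    assertTouchForbidden(restrictedFS, target);
    assertDeleteForbidden(restrictedFS, target);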

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java
index 04676db..4730a90 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java
@@ -208,7 +208,9 @@ public abstract class AbstractCommitITest extends AbstractS3ATestBase {
    * @param p probability of a throttling occurring: 0-1.0
    */
   protected void setThrottling(float p) {
-    inconsistentClient.setThrottleProbability(p);
+    if (inconsistentClient != null) {
+      inconsistentClient.setThrottleProbability(p);
+    }
   }
 
   /**
@@ -217,7 +219,9 @@ public abstract class AbstractCommitITest extends AbstractS3ATestBase {
    * @param limit limit to number of calls which fail
    */
   protected void setThrottling(float p, int limit) {
-    inconsistentClient.setThrottleProbability(p);
+    if (inconsistentClient != null) {
+      inconsistentClient.setThrottleProbability(p);
+    }
     setFailureLimit(limit);
   }
 
@@ -235,7 +239,9 @@ public abstract class AbstractCommitITest extends AbstractS3ATestBase {
    * @param limit limit to number of calls which fail
    */
   private void setFailureLimit(int limit) {
-    inconsistentClient.setFailureLimit(limit);
+    if (inconsistentClient != null) {
+      inconsistentClient.setFailureLimit(limit);
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java
index 2a98382..2886a99 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java
@@ -528,7 +528,9 @@ public class ITestCommitOperations extends AbstractCommitITest {
   @Test
   public void testWriteNormalStream() throws Throwable {
     S3AFileSystem fs = getFileSystem();
-    Assume.assumeTrue(fs.hasCapability(STREAM_CAPABILITY_MAGIC_OUTPUT));
+    Assume.assumeTrue(
+        "Filesystem does not have magic support enabled: " + fs,
+        fs.hasCapability(STORE_CAPABILITY_MAGIC_COMMITTER));
 
     Path destFile = path("normal");
     try (FSDataOutputStream out = fs.create(destFile, true)) {




[37/50] [abbrv] hadoop git commit: Revert "YARN-7813: Capacity Scheduler Intra-queue Preemption should be configurable for each queue"

Posted by ae...@apache.org.
Revert "YARN-7813: Capacity Scheduler Intra-queue Preemption should be configurable for each queue"

This reverts commit c5e6e3de1c31eda052f89eddd7bba288625936b9.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bddfe42e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bddfe42e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bddfe42e

Branch: refs/heads/HDFS-7240
Commit: bddfe42e2ccda2e6cc9d149461640ba36eca5922
Parents: 1f20f43
Author: Eric Payne <ep...@apache.org>
Authored: Wed Feb 14 14:29:20 2018 -0600
Committer: Eric Payne <ep...@apache.org>
Committed: Wed Feb 14 14:29:20 2018 -0600

----------------------------------------------------------------------
 .../hadoop/yarn/api/records/QueueInfo.java      | 35 -------
 .../src/main/proto/yarn_protos.proto            |  1 -
 .../apache/hadoop/yarn/client/cli/QueueCLI.java |  6 --
 .../hadoop/yarn/client/ProtocolHATestBase.java  |  3 +-
 .../hadoop/yarn/client/cli/TestYarnCLI.java     | 96 ++------------------
 .../api/records/impl/pb/QueueInfoPBImpl.java    | 13 ---
 .../hadoop/yarn/api/TestPBImplRecords.java      |  2 +-
 .../capacity/IntraQueueCandidatesSelector.java  |  4 +-
 .../scheduler/capacity/AbstractCSQueue.java     | 72 ++-------------
 .../scheduler/capacity/CSQueue.java             | 16 +---
 .../CapacitySchedulerConfiguration.java         | 15 ---
 .../webapp/CapacitySchedulerPage.java           |  5 +-
 .../dao/CapacitySchedulerLeafQueueInfo.java     |  6 --
 .../TestConfigurationMutationACLPolicies.java   |  2 +-
 .../TestSchedulerApplicationAttempt.java        |  2 +-
 .../scheduler/capacity/TestLeafQueue.java       |  2 +-
 .../src/site/markdown/CapacityScheduler.md      |  3 +-
 17 files changed, 26 insertions(+), 257 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddfe42e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
index 57ea9bf..897b442 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
@@ -94,26 +94,6 @@ public abstract class QueueInfo {
     return queueInfo;
   }
 
-  @Private
-  @Unstable
-  public static QueueInfo newInstance(String queueName, float capacity,
-      float maximumCapacity, float currentCapacity,
-      List<QueueInfo> childQueues, List<ApplicationReport> applications,
-      QueueState queueState, Set<String> accessibleNodeLabels,
-      String defaultNodeLabelExpression, QueueStatistics queueStatistics,
-      boolean preemptionDisabled,
-      Map<String, QueueConfigurations> queueConfigurations,
-      boolean intraQueuePreemptionDisabled) {
-    QueueInfo queueInfo = QueueInfo.newInstance(queueName, capacity,
-        maximumCapacity, currentCapacity,
-        childQueues, applications,
-        queueState, accessibleNodeLabels,
-        defaultNodeLabelExpression, queueStatistics,
-        preemptionDisabled, queueConfigurations);
-    queueInfo.setIntraQueuePreemptionDisabled(intraQueuePreemptionDisabled);
-    return queueInfo;
-  }
-
   /**
    * Get the <em>name</em> of the queue.
    * @return <em>name</em> of the queue
@@ -281,19 +261,4 @@ public abstract class QueueInfo {
   @Unstable
   public abstract void setQueueConfigurations(
       Map<String, QueueConfigurations> queueConfigurations);
-
-
-  /**
-   * Get the intra-queue preemption status of the queue.
-   * @return if property is not in proto, return null;
-   *        otherwise, return intra-queue preemption status of the queue
-   */
-  @Public
-  @Stable
-  public abstract Boolean getIntraQueuePreemptionDisabled();
-
-  @Private
-  @Unstable
-  public abstract void setIntraQueuePreemptionDisabled(
-      boolean intraQueuePreemptionDisabled);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddfe42e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index b978761..25c8569 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -569,7 +569,6 @@ message QueueInfoProto {
   optional QueueStatisticsProto queueStatistics = 10;
   optional bool preemptionDisabled = 11;
   repeated QueueConfigurationsMapProto queueConfigurationsMap = 12;
-  optional bool intraQueuePreemptionDisabled = 13;
 }
 
 message QueueConfigurationsProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddfe42e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java
index 2c3dfd0..330b081 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java
@@ -158,11 +158,5 @@ public class QueueCLI extends YarnCLI {
       writer.print("\tPreemption : ");
       writer.println(preemptStatus ? "disabled" : "enabled");
     }
-
-    Boolean intraQueuePreemption = queueInfo.getIntraQueuePreemptionDisabled();
-    if (intraQueuePreemption != null) {
-      writer.print("\tIntra-queue Preemption : ");
-      writer.println(intraQueuePreemption ? "disabled" : "enabled");
-    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddfe42e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
index 7937b15..54537ce 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
@@ -665,8 +665,7 @@ public abstract class ProtocolHATestBase extends ClientBaseWithFixes {
 
     public QueueInfo createFakeQueueInfo() {
       return QueueInfo.newInstance("root", 100f, 100f, 50f, null,
-          createFakeAppReports(), QueueState.RUNNING, null, null, null, false,
-          null, false);
+          createFakeAppReports(), QueueState.RUNNING, null, null, null, false);
     }
 
     public List<QueueUserACLInfo> createFakeQueueUserACLInfoList() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddfe42e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
index 1f6488d..fdd3fc8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
@@ -1712,8 +1712,7 @@ public class TestYarnCLI {
     nodeLabels.add("GPU");
     nodeLabels.add("JDK_7");
     QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f,
-        null, null, QueueState.RUNNING, nodeLabels, "GPU", null, false, null,
-        false);
+        null, null, QueueState.RUNNING, nodeLabels, "GPU", null, false, null);
     when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo);
     int result = cli.run(new String[] { "-status", "queueA" });
     assertEquals(0, result);
@@ -1729,83 +1728,12 @@ public class TestYarnCLI {
     pw.println("\tDefault Node Label expression : " + "GPU");
     pw.println("\tAccessible Node Labels : " + "JDK_7,GPU");
     pw.println("\tPreemption : " + "enabled");
-    pw.println("\tIntra-queue Preemption : " + "enabled");
     pw.close();
     String queueInfoStr = baos.toString("UTF-8");
     Assert.assertEquals(queueInfoStr, sysOutStream.toString());
   }
 
   @Test
-  public void testGetQueueInfoOverrideIntraQueuePreemption() throws Exception {
-    CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
-    ReservationSystemTestUtil.setupQueueConfiguration(conf);
-    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
-        ResourceScheduler.class);
-    conf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
-    conf.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
-        "org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity."
-        + "ProportionalCapacityPreemptionPolicy");
-    // Turn on cluster-wide intra-queue preemption
-    conf.setBoolean(
-        CapacitySchedulerConfiguration.INTRAQUEUE_PREEMPTION_ENABLED, true);
-    // Disable intra-queue preemption for all queues
-    conf.setBoolean(CapacitySchedulerConfiguration.PREFIX
-        + "root.intra-queue-preemption.disable_preemption", true);
-    // Enable intra-queue preemption for the a1 queue
-    conf.setBoolean(CapacitySchedulerConfiguration.PREFIX
-        + "root.a.a1.intra-queue-preemption.disable_preemption", false);
-    MiniYARNCluster cluster =
-        new MiniYARNCluster("testGetQueueInfoOverrideIntraQueuePreemption",
-            2, 1, 1);
-
-    YarnClient yarnClient = null;
-    try {
-      cluster.init(conf);
-      cluster.start();
-      final Configuration yarnConf = cluster.getConfig();
-      yarnClient = YarnClient.createYarnClient();
-      yarnClient.init(yarnConf);
-      yarnClient.start();
-
-      QueueCLI cli = new QueueCLI();
-      cli.setClient(yarnClient);
-      cli.setSysOutPrintStream(sysOut);
-      cli.setSysErrPrintStream(sysErr);
-      sysOutStream.reset();
-      // Get status for the root.a queue
-      int result = cli.run(new String[] { "-status", "a" });
-      assertEquals(0, result);
-      String queueStatusOut = sysOutStream.toString();
-      Assert.assertTrue(queueStatusOut
-          .contains("\tPreemption : enabled"));
-      // In-queue preemption is disabled at the "root.a" queue level
-      Assert.assertTrue(queueStatusOut
-          .contains("Intra-queue Preemption : disabled"));
-      cli = new QueueCLI();
-      cli.setClient(yarnClient);
-      cli.setSysOutPrintStream(sysOut);
-      cli.setSysErrPrintStream(sysErr);
-      sysOutStream.reset();
-      // Get status for the root.a.a1 queue
-      result = cli.run(new String[] { "-status", "a1" });
-      assertEquals(0, result);
-      queueStatusOut = sysOutStream.toString();
-      Assert.assertTrue(queueStatusOut
-          .contains("\tPreemption : enabled"));
-      // In-queue preemption is enabled at the "root.a.a1" queue level
-      Assert.assertTrue(queueStatusOut
-          .contains("Intra-queue Preemption : enabled"));
-    } finally {
-      // clean-up
-      if (yarnClient != null) {
-        yarnClient.stop();
-      }
-      cluster.stop();
-      cluster.close();
-    }
-  }
-
-  @Test
   public void testGetQueueInfoPreemptionEnabled() throws Exception {
     CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
     ReservationSystemTestUtil.setupQueueConfiguration(conf);
@@ -1815,10 +1743,9 @@ public class TestYarnCLI {
     conf.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
         "org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity."
         + "ProportionalCapacityPreemptionPolicy");
-    conf.setBoolean(
-        CapacitySchedulerConfiguration.INTRAQUEUE_PREEMPTION_ENABLED, true);
+    conf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
     MiniYARNCluster cluster =
-        new MiniYARNCluster("testGetQueueInfoPreemptionEnabled", 2, 1, 1);
+        new MiniYARNCluster("testReservationAPIs", 2, 1, 1);
 
     YarnClient yarnClient = null;
     try {
@@ -1836,11 +1763,8 @@ public class TestYarnCLI {
       sysOutStream.reset();
       int result = cli.run(new String[] { "-status", "a1" });
       assertEquals(0, result);
-      String queueStatusOut = sysOutStream.toString();
-      Assert.assertTrue(queueStatusOut
-          .contains("\tPreemption : enabled"));
-      Assert.assertTrue(queueStatusOut
-          .contains("Intra-queue Preemption : enabled"));
+      Assert.assertTrue(sysOutStream.toString()
+          .contains("Preemption : enabled"));
     } finally {
       // clean-up
       if (yarnClient != null) {
@@ -1880,11 +1804,8 @@ public class TestYarnCLI {
       sysOutStream.reset();
       int result = cli.run(new String[] { "-status", "a1" });
       assertEquals(0, result);
-      String queueStatusOut = sysOutStream.toString();
-      Assert.assertTrue(queueStatusOut
-          .contains("\tPreemption : disabled"));
-      Assert.assertTrue(queueStatusOut
-          .contains("Intra-queue Preemption : disabled"));
+      Assert.assertTrue(sysOutStream.toString()
+          .contains("Preemption : disabled"));
     }
   }
   
@@ -1892,7 +1813,7 @@ public class TestYarnCLI {
   public void testGetQueueInfoWithEmptyNodeLabel() throws Exception {
     QueueCLI cli = createAndGetQueueCLI();
     QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f,
-        null, null, QueueState.RUNNING, null, null, null, true, null, true);
+        null, null, QueueState.RUNNING, null, null, null, true, null);
     when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo);
     int result = cli.run(new String[] { "-status", "queueA" });
     assertEquals(0, result);
@@ -1909,7 +1830,6 @@ public class TestYarnCLI {
         + NodeLabel.DEFAULT_NODE_LABEL_PARTITION);
     pw.println("\tAccessible Node Labels : ");
     pw.println("\tPreemption : " + "disabled");
-    pw.println("\tIntra-queue Preemption : " + "disabled");
     pw.close();
     String queueInfoStr = baos.toString("UTF-8");
     Assert.assertEquals(queueInfoStr, sysOutStream.toString());
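
For reference, the per-queue override being reverted here was driven purely by configuration: the deleted test above enabled cluster-wide intra-queue preemption and then toggled it per queue with the disable_preemption suffix. A sketch of that wiring, using only the constants and key fragments visible in the removed code; note that after this revert the per-queue key is no longer read:

    CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
    // turn on intra-queue preemption for the whole cluster
    conf.setBoolean(
        CapacitySchedulerConfiguration.INTRAQUEUE_PREEMPTION_ENABLED, true);
    // disable it for the entire queue hierarchy...
    conf.setBoolean(CapacitySchedulerConfiguration.PREFIX
        + "root.intra-queue-preemption.disable_preemption", true);
    // ...then re-enable it for one leaf queue, root.a.a1
    conf.setBoolean(CapacitySchedulerConfiguration.PREFIX
        + "root.a.a1.intra-queue-preemption.disable_preemption", false);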

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddfe42e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java
index f735139..1d2a6dd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java
@@ -500,17 +500,4 @@ public class QueueInfoPBImpl extends QueueInfo {
     this.queueConfigurations.putAll(queueConfigurations);
   }
 
-  @Override
-  public Boolean getIntraQueuePreemptionDisabled() {
-    QueueInfoProtoOrBuilder p = viaProto ? proto : builder;
-    return (p.hasIntraQueuePreemptionDisabled()) ? p
-        .getIntraQueuePreemptionDisabled() : null;
-  }
-
-  @Override
-  public void setIntraQueuePreemptionDisabled(
-      boolean intraQueuePreemptionDisabled) {
-    maybeInitBuilder();
-    builder.setIntraQueuePreemptionDisabled(intraQueuePreemptionDisabled);
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddfe42e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
index 8c41906..ae80910 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
@@ -414,7 +414,7 @@ public class TestPBImplRecords extends BasePBImplRecordsTest {
     // it is recursive(has sub queues)
     typeValueCache.put(QueueInfo.class, QueueInfo.newInstance("root", 1.0f,
         1.0f, 0.1f, null, null, QueueState.RUNNING, ImmutableSet.of("x", "y"),
-        "x && y", null, false, null, false));
+        "x && y", null, false));
     generateByNewInstance(QueueStatistics.class);
     generateByNewInstance(QueueUserACLInfo.class);
     generateByNewInstance(YarnClusterMetrics.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddfe42e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
index 5b6932e..44fa736 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
@@ -114,8 +114,8 @@ public class IntraQueueCandidatesSelector extends PreemptionCandidatesSelector {
           continue;
         }
 
-        // Don't preempt if intra-queue preemption is disabled for this queue.
-        if (leafQueue.getIntraQueuePreemptionDisabled()) {
+        // Don't preempt if disabled for this queue.
+        if (leafQueue.getPreemptionDisabled()) {
           continue;
         }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddfe42e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index b29ee29..9afbdd5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -97,9 +97,6 @@ public abstract class AbstractCSQueue implements CSQueue {
       new HashMap<AccessType, AccessControlList>();
   volatile boolean reservationsContinueLooking;
   private volatile boolean preemptionDisabled;
-  // Indicates if the in-queue preemption setting is ever disabled within the
-  // hierarchy of this queue.
-  private boolean intraQueuePreemptionDisabledInHierarchy;
 
   // Track resource usage-by-label like used-resource/pending-resource, etc.
   volatile ResourceUsage queueUsage;
@@ -408,8 +405,6 @@ public abstract class AbstractCSQueue implements CSQueue {
 
       this.preemptionDisabled = isQueueHierarchyPreemptionDisabled(this,
           configuration);
-      this.intraQueuePreemptionDisabledInHierarchy =
-          isIntraQueueHierarchyPreemptionDisabled(this, configuration);
 
       this.priority = configuration.getQueuePriority(
           getQueuePath());
@@ -618,8 +613,6 @@ public abstract class AbstractCSQueue implements CSQueue {
     queueInfo.setCurrentCapacity(getUsedCapacity());
     queueInfo.setQueueStatistics(getQueueStatistics());
     queueInfo.setPreemptionDisabled(preemptionDisabled);
-    queueInfo.setIntraQueuePreemptionDisabled(
-        getIntraQueuePreemptionDisabled());
     queueInfo.setQueueConfigurations(getQueueConfigurations());
     return queueInfo;
   }
@@ -742,16 +735,6 @@ public abstract class AbstractCSQueue implements CSQueue {
   public boolean getPreemptionDisabled() {
     return preemptionDisabled;
   }
-
-  @Private
-  public boolean getIntraQueuePreemptionDisabled() {
-    return intraQueuePreemptionDisabledInHierarchy || preemptionDisabled;
-  }
-
-  @Private
-  public boolean getIntraQueuePreemptionDisabledInHierarchy() {
-    return intraQueuePreemptionDisabledInHierarchy;
-  }
   
   @Private
   public QueueCapacities getQueueCapacities() {
@@ -774,19 +757,17 @@ public abstract class AbstractCSQueue implements CSQueue {
   }
 
   /**
-   * The specified queue is cross-queue preemptable if system-wide cross-queue
-   * preemption is turned on unless any queue in the <em>qPath</em> hierarchy
-   * has explicitly turned cross-queue preemption off.
-   * NOTE: Cross-queue preemptability is inherited from a queue's parent.
-   *
-   * @param q queue to check preemption state
-   * @param configuration capacity scheduler config
-   * @return true if queue has cross-queue preemption disabled, false otherwise
+   * The specified queue is preemptable if system-wide preemption is turned on
+   * unless any queue in the <em>qPath</em> hierarchy has explicitly turned
+   * preemption off.
+   * NOTE: Preemptability is inherited from a queue's parent.
+   * 
+   * @return true if queue has preemption disabled, false otherwise
    */
   private boolean isQueueHierarchyPreemptionDisabled(CSQueue q,
       CapacitySchedulerConfiguration configuration) {
     boolean systemWidePreemption =
-        configuration
+        csContext.getConfiguration()
             .getBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS,
                        YarnConfiguration.DEFAULT_RM_SCHEDULER_ENABLE_MONITORS);
     CSQueue parentQ = q.getParent();
@@ -809,44 +790,7 @@ public abstract class AbstractCSQueue implements CSQueue {
     return configuration.getPreemptionDisabled(q.getQueuePath(),
                                         parentQ.getPreemptionDisabled());
   }
-
-  /**
-   * The specified queue is intra-queue preemptable if
-   * 1) system-wide intra-queue preemption is turned on
-   * 2) no queue in the <em>qPath</em> hierarchy has explicitly turned off intra
-   *    queue preemption.
-   * NOTE: Intra-queue preemptability is inherited from a queue's parent.
-   *
-   * @param q queue to check intra-queue preemption state
-   * @param configuration capacity scheduler config
-   * @return true if queue has intra-queue preemption disabled, false otherwise
-   */
-  private boolean isIntraQueueHierarchyPreemptionDisabled(CSQueue q,
-      CapacitySchedulerConfiguration configuration) {
-    boolean systemWideIntraQueuePreemption =
-        configuration.getBoolean(
-            CapacitySchedulerConfiguration.INTRAQUEUE_PREEMPTION_ENABLED,
-            CapacitySchedulerConfiguration
-            .DEFAULT_INTRAQUEUE_PREEMPTION_ENABLED);
-    // Intra-queue preemption is disabled for this queue if the system-wide
-    // intra-queue preemption flag is false
-    if (!systemWideIntraQueuePreemption) return true;
-
-    // Check if this is the root queue and the root queue's intra-queue
-    // preemption disable switch is set
-    CSQueue parentQ = q.getParent();
-    if (parentQ == null) {
-      return configuration
-          .getIntraQueuePreemptionDisabled(q.getQueuePath(), false);
-    }
-
-    // At this point, the master preemption switch is enabled down to this
-    // queue's level. Determine whether or not intra-queue preemption is enabled
-    // down to this queu's level and return that value.
-    return configuration.getIntraQueuePreemptionDisabled(q.getQueuePath(),
-        parentQ.getIntraQueuePreemptionDisabledInHierarchy());
-  }
-
+  
   private Resource getCurrentLimitResource(String nodePartition,
       Resource clusterResource, ResourceLimits currentResourceLimits,
       SchedulingMode schedulingMode) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddfe42e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
index 3963dc0..5dd307c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
@@ -276,21 +276,7 @@ public interface CSQueue extends SchedulerQueue<CSQueue> {
    * @return true if <em>disable_preemption</em> is set, false if not
    */
   public boolean getPreemptionDisabled();
-
-  /**
-   * Check whether intra-queue preemption is disabled for this queue
-   * @return true if either intra-queue preemption or inter-queue preemption
-   * is disabled for this queue, false if neither is disabled.
-   */
-  public boolean getIntraQueuePreemptionDisabled();
-
-  /**
-   * Determines whether or not the intra-queue preemption disabled switch is set
-   *  at any level in this queue's hierarchy.
-   * @return state of the intra-queue preemption switch at this queue level
-   */
-  public boolean getIntraQueuePreemptionDisabledInHierarchy();
-
+  
   /**
    * Get QueueCapacities of this queue
    * @return queueCapacities

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddfe42e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index a9cf714..00733a1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -1216,21 +1216,6 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
   }
 
   /**
-   * Indicates whether intra-queue preemption is disabled on the specified queue
-   *
-   * @param queue queue path to query
-   * @param defaultVal used as default if the property is not set in the
-   * configuration
-   * @return true if preemption is disabled on queue, false otherwise
-   */
-  public boolean getIntraQueuePreemptionDisabled(String queue,
-      boolean defaultVal) {
-    return
-        getBoolean(getQueuePrefix(queue) + INTRA_QUEUE_PREEMPTION_CONFIG_PREFIX
-            + QUEUE_PREEMPTION_DISABLED, defaultVal);
-  }
-
-  /**
    * Get configured node labels in a given queuePath
    */
   public Set<String> getConfiguredNodeLabels(String queuePath) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddfe42e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index ed2f64e..7f025a7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -200,10 +200,7 @@ class CapacitySchedulerPage extends RmView {
           __("Configured User Limit Factor:", lqinfo.getUserLimitFactor()).
           __("Accessible Node Labels:", StringUtils.join(",", lqinfo.getNodeLabels())).
           __("Ordering Policy: ", lqinfo.getOrderingPolicyInfo()).
-          __("Preemption:",
-              lqinfo.getPreemptionDisabled() ? "disabled" : "enabled").
-          __("Intra-queue Preemption:", lqinfo.getIntraQueuePreemptionDisabled()
-                  ? "disabled" : "enabled").
+          __("Preemption:", lqinfo.getPreemptionDisabled() ? "disabled" : "enabled").
           __("Default Node Label Expression:",
               lqinfo.getDefaultNodeLabelExpression() == null
                   ? NodeLabel.DEFAULT_NODE_LABEL_PARTITION

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddfe42e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
index a53e921..b5f4e79 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
@@ -49,7 +49,6 @@ public class CapacitySchedulerLeafQueueInfo extends CapacitySchedulerQueueInfo {
   protected ResourceInfo usedAMResource;
   protected ResourceInfo userAMResourceLimit;
   protected boolean preemptionDisabled;
-  protected boolean intraQueuePreemptionDisabled;
   protected String defaultNodeLabelExpression;
   protected int defaultPriority;
   protected boolean isAutoCreatedLeafQueue;
@@ -73,7 +72,6 @@ public class CapacitySchedulerLeafQueueInfo extends CapacitySchedulerQueueInfo {
     AMResourceLimit = new ResourceInfo(q.getAMResourceLimit());
     usedAMResource = new ResourceInfo(q.getQueueResourceUsage().getAMUsed());
     preemptionDisabled = q.getPreemptionDisabled();
-    intraQueuePreemptionDisabled = q.getIntraQueuePreemptionDisabled();
     orderingPolicyInfo = q.getOrderingPolicy().getInfo();
     defaultNodeLabelExpression = q.getDefaultNodeLabelExpression();
     defaultPriority = q.getDefaultApplicationPriority().getPriority();
@@ -152,10 +150,6 @@ public class CapacitySchedulerLeafQueueInfo extends CapacitySchedulerQueueInfo {
   public boolean getPreemptionDisabled() {
     return preemptionDisabled;
   }
-
-  public boolean getIntraQueuePreemptionDisabled() {
-    return intraQueuePreemptionDisabled;
-  }
   
   public String getOrderingPolicyInfo() {
     return orderingPolicyInfo;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddfe42e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
index 9a23c1d..398e909 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
@@ -67,7 +67,7 @@ public class TestConfigurationMutationACLPolicies {
   private void mockQueue(String queueName, MutableConfScheduler scheduler)
       throws IOException {
     QueueInfo queueInfo = QueueInfo.newInstance(queueName, 0, 0, 0, null, null,
-        null, null, null, null, false, null, false);
+        null, null, null, null, false);
     when(scheduler.getQueueInfo(eq(queueName), anyBoolean(), anyBoolean()))
         .thenReturn(queueInfo);
     Queue queue = mock(Queue.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddfe42e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
index 17f9d23..fa16eff 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
@@ -165,7 +165,7 @@ public class TestSchedulerApplicationAttempt {
   private Queue createQueue(String name, Queue parent, float capacity) {
     QueueMetrics metrics = QueueMetrics.forQueue(name, parent, false, conf);
     QueueInfo queueInfo = QueueInfo.newInstance(name, capacity, 1.0f, 0, null,
-        null, QueueState.RUNNING, null, "", null, false, null, false);
+        null, QueueState.RUNNING, null, "", null, false);
     ActiveUsersManager activeUsersManager = new ActiveUsersManager(metrics);
     Queue queue = mock(Queue.class);
     when(queue.getMetrics()).thenReturn(metrics);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddfe42e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index 04bb791..c45bdb4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -4103,7 +4103,7 @@ public class TestLeafQueue {
       float absCap, Resource res) {
     CSQueueMetrics metrics = CSQueueMetrics.forQueue(name, parent, false, cs.getConf());
     QueueInfo queueInfo = QueueInfo.newInstance(name, capacity, 1.0f, 0, null,
-        null, QueueState.RUNNING, null, "", null, false, null, false);
+        null, QueueState.RUNNING, null, "", null, false);
     ActiveUsersManager activeUsersManager = new ActiveUsersManager(metrics);
     AbstractCSQueue queue = mock(AbstractCSQueue.class);
     when(queue.getMetrics()).thenReturn(metrics);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddfe42e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
index 4ecc97a..87cfd39 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
@@ -236,7 +236,6 @@ The following configuration parameters can be configured in yarn-site.xml to con
 | Property | Description |
 |:---- |:---- |
 | `yarn.scheduler.capacity.<queue-path>.disable_preemption` | This configuration can be set to `true` to selectively disable preemption of application containers submitted to a given queue. This property applies only when system wide preemption is enabled by configuring `yarn.resourcemanager.scheduler.monitor.enable` to *true* and `yarn.resourcemanager.scheduler.monitor.policies` to *ProportionalCapacityPreemptionPolicy*. If this property is not set for a queue, then the property value is inherited from the queue's parent. Default value is false.
-| `yarn.scheduler.capacity.<queue-path>.intra-queue-preemption.disable_preemption` | This configuration can be set to *true* to selectively disable intra-queue preemption of application containers submitted to a given queue. This property applies only when system wide preemption is enabled by configuring `yarn.resourcemanager.scheduler.monitor.enable` to *true*, `yarn.resourcemanager.scheduler.monitor.policies` to *ProportionalCapacityPreemptionPolicy*, and `yarn.resourcemanager.monitor.capacity.preemption.intra-queue-preemption.enabled` to *true*. If this property is not set for a queue, then the property value is inherited from the queue's parent. Default value is *false*.
 
 ###Reservation Properties
 
@@ -478,4 +477,4 @@ Updating a Container (Experimental - API may change in the future)
   
   The **DECREASE_RESOURCE** and **DEMOTE_EXECUTION_TYPE** container updates are automatic - the AM does not explicitly have to ask the NM to decrease the resources of the container. The other update types require the AM to explicitly ask the NM to update the container.
   
-  If the **yarn.resourcemanager.auto-update.containers** configuration parameter is set to **true** (false by default), The RM will ensure that all container updates are automatic.
+  If the **yarn.resourcemanager.auto-update.containers** configuration parameter is set to **true** (false by default), The RM will ensure that all container updates are automatic.  
\ No newline at end of file
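
As a minimal illustration (not part of the patch above): the disable_preemption row kept in CapacityScheduler.md describes an ordinary per-queue boolean setting. A hypothetical sketch of setting and reading such a flag through org.apache.hadoop.conf.Configuration follows; the queue path root.prod is invented for the example, and the real scheduler reads the value through CapacitySchedulerConfiguration, falling back to the parent queue's value when the property is unset.

import org.apache.hadoop.conf.Configuration;

public class PreemptionFlagSketch {
  public static void main(String[] args) {
    // Hypothetical queue path; the property name follows the pattern shown in
    // the CapacityScheduler.md table row above.
    final String key = "yarn.scheduler.capacity.root.prod.disable_preemption";

    Configuration conf = new Configuration(false);
    conf.setBoolean(key, true);

    // The Capacity Scheduler treats an unset value as "inherit from the parent
    // queue"; the default argument here merely stands in for that parent value.
    boolean disabled = conf.getBoolean(key, false);
    System.out.println(key + " = " + disabled);
  }
}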




[16/50] [abbrv] hadoop git commit: YARN-7906. Fix mvn site fails with error: Multiple sources of package comments found for package o.a.h.y.client.api.impl. (Akira Ajisaka via wangda)

Posted by ae...@apache.org.
YARN-7906. Fix mvn site fails with error: Multiple sources of package comments found for package o.a.h.y.client.api.impl. (Akira Ajisaka via wangda)

Change-Id: I20221d97446e97f208d587eacbc60448c11ffd48


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e795833d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e795833d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e795833d

Branch: refs/heads/HDFS-7240
Commit: e795833d8c1981cab85a10b4e516cd0c5423c792
Parents: a08c048
Author: Wangda Tan <wa...@apache.org>
Authored: Mon Feb 12 10:25:22 2018 +0800
Committer: Wangda Tan <wa...@apache.org>
Committed: Mon Feb 12 10:25:22 2018 +0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/yarn/api/resource/package-info.java     | 2 --
 1 file changed, 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e795833d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
index 660dc02..a9388b2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
@@ -18,6 +18,4 @@
 /**
  * API related to resources.
  */
-@InterfaceAudience.Private
 package org.apache.hadoop.yarn.api.resource;
-import org.apache.hadoop.classification.InterfaceAudience;




[24/50] [abbrv] hadoop git commit: MAPREDUCE-7048. Uber AM can crash due to unknown task in statusUpdate. Contributed by Peter Bacsko

Posted by ae...@apache.org.
MAPREDUCE-7048. Uber AM can crash due to unknown task in statusUpdate. Contributed by Peter Bacsko


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87e2570a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87e2570a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87e2570a

Branch: refs/heads/HDFS-7240
Commit: 87e2570a1419d3616de2de3b553108ad1a8af425
Parents: 35c1735
Author: Jason Lowe <jl...@apache.org>
Authored: Mon Feb 12 13:21:09 2018 -0600
Committer: Jason Lowe <jl...@apache.org>
Committed: Mon Feb 12 13:21:09 2018 -0600

----------------------------------------------------------------------
 .../java/org/apache/hadoop/mapred/Task.java     | 16 ++--
 .../java/org/apache/hadoop/mapred/TestTask.java | 89 ++++++++++++++++++++
 2 files changed, 100 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87e2570a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
index 5b98b35..d83a6b0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
@@ -200,6 +200,7 @@ abstract public class Task implements Writable, Configurable {
   protected SecretKey shuffleSecret;
   protected GcTimeUpdater gcUpdater;
   final AtomicBoolean mustPreempt = new AtomicBoolean(false);
+  private boolean uberized = false;
 
   ////////////////////////////////////////////
   // Constructors
@@ -855,9 +856,6 @@ abstract public class Task implements Writable, Configurable {
       long taskProgressInterval = MRJobConfUtil.
           getTaskProgressReportInterval(conf);
 
-      boolean uberized = conf.getBoolean("mapreduce.task.uberized",
-          false);
-
       while (!taskDone.get()) {
         synchronized (lock) {
           done = false;
@@ -1301,11 +1299,17 @@ abstract public class Task implements Writable, Configurable {
   public void statusUpdate(TaskUmbilicalProtocol umbilical) 
   throws IOException {
     int retries = MAX_RETRIES;
+
     while (true) {
       try {
         if (!umbilical.statusUpdate(getTaskID(), taskStatus).getTaskFound()) {
-          LOG.warn("Parent died.  Exiting "+taskId);
-          System.exit(66);
+          if (uberized) {
+            LOG.warn("Task no longer available: " + taskId);
+            break;
+          } else {
+            LOG.warn("Parent died.  Exiting " + taskId);
+            ExitUtil.terminate(66);
+          }
         }
         taskStatus.clearStatus();
         return;
@@ -1518,6 +1522,8 @@ abstract public class Task implements Writable, Configurable {
         NetUtils.addStaticResolution(name, resolvedName);
       }
     }
+
+    uberized = conf.getBoolean("mapreduce.task.uberized", false);
   }
 
   public Configuration getConf() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87e2570a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTask.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTask.java
new file mode 100644
index 0000000..500229c
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTask.java
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.ExitUtil;
+import org.apache.hadoop.util.ExitUtil.ExitException;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.runners.MockitoJUnitRunner;
+
+@RunWith(MockitoJUnitRunner.class)
+public class TestTask {
+  @Mock
+  private TaskUmbilicalProtocol umbilical;
+
+  @Mock
+  private AMFeedback feedback;
+
+  private Task task;
+
+  @Before
+  public void setup() {
+    task = new StubTask();
+    ExitUtil.disableSystemExit();
+  }
+
+  @Test
+  public void testStatusUpdateDoesNotExitInUberMode() throws Exception {
+    setupTest(true);
+
+    task.statusUpdate(umbilical);
+  }
+
+  @Test(expected = ExitException.class)
+  public void testStatusUpdateExitsInNonUberMode() throws Exception {
+    setupTest(false);
+
+    task.statusUpdate(umbilical);
+  }
+
+  private void setupTest(boolean uberized)
+      throws IOException, InterruptedException {
+    Configuration conf = new Configuration(false);
+    conf.setBoolean("mapreduce.task.uberized", uberized);
+    task.setConf(conf);
+    when(umbilical.statusUpdate(any(TaskAttemptID.class),
+        any(TaskStatus.class))).thenReturn(feedback);
+
+    // to avoid possible infinite loop
+    when(feedback.getTaskFound()).thenReturn(false, true);
+  }
+
+  public class StubTask extends Task {
+    @Override
+    public void run(JobConf job, TaskUmbilicalProtocol umbilical)
+        throws IOException, ClassNotFoundException, InterruptedException {
+      // nop
+    }
+
+    @Override
+    public boolean isMapTask() {
+      return false;
+    }
+  }
+}
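
As a side note on the testing technique, the new TestTask relies on org.apache.hadoop.util.ExitUtil: once system exit is disabled, ExitUtil.terminate(66) surfaces as a catchable ExitException instead of killing the JVM. A minimal standalone sketch of that behaviour, not part of the patch and with an invented class name:

import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.ExitUtil.ExitException;

public class ExitUtilSketch {
  public static void main(String[] args) {
    // Disable the real System.exit so terminate() throws instead of exiting.
    ExitUtil.disableSystemExit();
    try {
      ExitUtil.terminate(66);
    } catch (ExitException e) {
      // The status carried by the exception is what a test would assert on.
      System.out.println("terminate() intercepted, status=" + e.status);
    }
  }
}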




[20/50] [abbrv] hadoop git commit: HADOOP-15187. Remove ADL mock test dependency on REST call invoked from Java SDK. Contributed by Vishwajeet Dusane.

Posted by ae...@apache.org.
HADOOP-15187. Remove ADL mock test dependency on REST call invoked from Java SDK.
Contributed by Vishwajeet Dusane.

(cherry picked from commit bd5ab5912564d2d687651b01f552b8e4ca8c145a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8cf88fcd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8cf88fcd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8cf88fcd

Branch: refs/heads/HDFS-7240
Commit: 8cf88fcd1f63d3d4e9736b1b687a4f4e663f6125
Parents: d02e42c
Author: Steve Loughran <st...@apache.org>
Authored: Mon Feb 12 15:13:00 2018 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Mon Feb 12 15:13:00 2018 +0000

----------------------------------------------------------------------
 .../apache/hadoop/fs/adl/AdlMockWebServer.java  | 102 -------
 .../apache/hadoop/fs/adl/TestACLFeatures.java   | 262 ----------------
 .../org/apache/hadoop/fs/adl/TestAdlRead.java   | 196 ------------
 .../adl/TestConcurrentDataReadOperations.java   | 299 -------------------
 .../hadoop/fs/adl/TestCustomTokenProvider.java  | 140 ---------
 .../apache/hadoop/fs/adl/TestGetFileStatus.java | 102 -------
 .../apache/hadoop/fs/adl/TestListStatus.java    | 137 ---------
 .../hadoop/fs/adl/TestableAdlFileSystem.java    |  30 --
 .../hadoop/fs/adl/common/ExpectedResponse.java  |  71 -----
 .../hadoop/fs/adl/common/TestDataForRead.java   | 122 --------
 10 files changed, 1461 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cf88fcd/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/AdlMockWebServer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/AdlMockWebServer.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/AdlMockWebServer.java
deleted file mode 100644
index d843d55..0000000
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/AdlMockWebServer.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.adl;
-
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.net.URL;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.adl.common.CustomMockTokenProvider;
-import org.apache.hadoop.fs.adl.oauth2.AzureADTokenProvider;
-import static org.apache.hadoop.fs.adl.AdlConfKeys
-    .AZURE_AD_TOKEN_PROVIDER_CLASS_KEY;
-import static org.apache.hadoop.fs.adl.AdlConfKeys
-    .AZURE_AD_TOKEN_PROVIDER_TYPE_KEY;
-
-import com.squareup.okhttp.mockwebserver.MockWebServer;
-
-import org.junit.After;
-import org.junit.Before;
-
-/**
- * Mock server to simulate Adls backend calls. This infrastructure is expandable
- * to override expected server response based on the derived test functionality.
- * Common functionality to generate token information before request is send to
- * adls backend is also managed within AdlMockWebServer implementation using
- * {@link org.apache.hadoop.fs.adl.common.CustomMockTokenProvider}.
- */
-public class AdlMockWebServer {
-  // Create a MockWebServer. These are lean enough that you can create a new
-  // instance for every unit test.
-  private MockWebServer server = null;
-  private TestableAdlFileSystem fs = null;
-  private int port = 0;
-  private Configuration conf = new Configuration();
-
-  public MockWebServer getMockServer() {
-    return server;
-  }
-
-  public TestableAdlFileSystem getMockAdlFileSystem() {
-    return fs;
-  }
-
-  public int getPort() {
-    return port;
-  }
-
-  public Configuration getConf() {
-    return conf;
-  }
-
-  public void setConf(Configuration conf) {
-    this.conf = conf;
-  }
-
-  @Before
-  public void preTestSetup() throws IOException, URISyntaxException {
-    server = new MockWebServer();
-
-    // Start the server.
-    server.start();
-
-    // Ask the server for its URL. You'll need this to make HTTP requests.
-    URL baseUrl = server.getUrl("");
-    port = baseUrl.getPort();
-
-    // Exercise your application code, which should make those HTTP requests.
-    // Responses are returned in the same order that they are enqueued.
-    fs = new TestableAdlFileSystem();
-
-    conf.setEnum(AZURE_AD_TOKEN_PROVIDER_TYPE_KEY, TokenProviderType.Custom);
-    conf.setClass(AZURE_AD_TOKEN_PROVIDER_CLASS_KEY,
-        CustomMockTokenProvider.class, AzureADTokenProvider.class);
-
-    URI uri = new URI("adl://localhost:" + port);
-    fs.initialize(uri, conf);
-  }
-
-  @After
-  public void postTestSetup() throws IOException {
-    fs.close();
-    server.shutdown();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cf88fcd/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestACLFeatures.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestACLFeatures.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestACLFeatures.java
deleted file mode 100644
index b420daa..0000000
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestACLFeatures.java
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.fs.adl;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.AclEntryScope;
-import org.apache.hadoop.fs.permission.AclEntryType;
-import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.security.AccessControlException;
-
-import com.squareup.okhttp.mockwebserver.MockResponse;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Stub adl server and test acl data conversion within SDK and Hadoop adl
- * client.
- */
-public class TestACLFeatures extends AdlMockWebServer {
-
-  @Test(expected=AccessControlException.class)
-  public void testModifyAclEntries() throws URISyntaxException, IOException {
-    getMockServer().enqueue(new MockResponse().setResponseCode(200));
-    List<AclEntry> entries = new ArrayList<AclEntry>();
-    AclEntry.Builder aclEntryBuilder = new AclEntry.Builder();
-    aclEntryBuilder.setName("hadoop");
-    aclEntryBuilder.setType(AclEntryType.USER);
-    aclEntryBuilder.setPermission(FsAction.ALL);
-    aclEntryBuilder.setScope(AclEntryScope.ACCESS);
-    entries.add(aclEntryBuilder.build());
-
-    aclEntryBuilder.setName("hdfs");
-    aclEntryBuilder.setType(AclEntryType.GROUP);
-    aclEntryBuilder.setPermission(FsAction.READ_WRITE);
-    aclEntryBuilder.setScope(AclEntryScope.DEFAULT);
-    entries.add(aclEntryBuilder.build());
-
-    getMockAdlFileSystem().modifyAclEntries(new Path("/test1/test2"), entries);
-
-    getMockServer().enqueue(new MockResponse().setResponseCode(403)
-        .setBody(TestADLResponseData.getAccessControlException()));
-
-    getMockAdlFileSystem()
-        .modifyAclEntries(new Path("/test1/test2"), entries);
-  }
-
-  @Test(expected=AccessControlException.class)
-  public void testRemoveAclEntriesWithOnlyUsers()
-      throws URISyntaxException, IOException {
-    getMockServer().enqueue(new MockResponse().setResponseCode(200));
-    List<AclEntry> entries = new ArrayList<AclEntry>();
-    AclEntry.Builder aclEntryBuilder = new AclEntry.Builder();
-    aclEntryBuilder.setName("hadoop");
-    aclEntryBuilder.setType(AclEntryType.USER);
-    entries.add(aclEntryBuilder.build());
-
-    getMockAdlFileSystem().removeAclEntries(new Path("/test1/test2"), entries);
-
-    getMockServer().enqueue(new MockResponse().setResponseCode(403)
-        .setBody(TestADLResponseData.getAccessControlException()));
-
-    getMockAdlFileSystem()
-        .removeAclEntries(new Path("/test1/test2"), entries);
-  }
-
-  @Test(expected=AccessControlException.class)
-  public void testRemoveAclEntries() throws URISyntaxException, IOException {
-    getMockServer().enqueue(new MockResponse().setResponseCode(200));
-    List<AclEntry> entries = new ArrayList<AclEntry>();
-    AclEntry.Builder aclEntryBuilder = new AclEntry.Builder();
-    aclEntryBuilder.setName("hadoop");
-    aclEntryBuilder.setType(AclEntryType.USER);
-    aclEntryBuilder.setPermission(FsAction.ALL);
-    aclEntryBuilder.setScope(AclEntryScope.ACCESS);
-    entries.add(aclEntryBuilder.build());
-
-    aclEntryBuilder.setName("hdfs");
-    aclEntryBuilder.setType(AclEntryType.GROUP);
-    aclEntryBuilder.setPermission(FsAction.READ_WRITE);
-    aclEntryBuilder.setScope(AclEntryScope.DEFAULT);
-    entries.add(aclEntryBuilder.build());
-
-    getMockAdlFileSystem().removeAclEntries(new Path("/test1/test2"), entries);
-
-    getMockServer().enqueue(new MockResponse().setResponseCode(403)
-        .setBody(TestADLResponseData.getAccessControlException()));
-
-    getMockAdlFileSystem()
-        .removeAclEntries(new Path("/test1/test2"), entries);
-  }
-
-  @Test(expected=AccessControlException.class)
-  public void testRemoveDefaultAclEntries()
-      throws URISyntaxException, IOException {
-    getMockServer().enqueue(new MockResponse().setResponseCode(200));
-    getMockAdlFileSystem().removeDefaultAcl(new Path("/test1/test2"));
-
-    getMockServer().enqueue(new MockResponse().setResponseCode(403)
-        .setBody(TestADLResponseData.getAccessControlException()));
-
-    getMockAdlFileSystem().removeDefaultAcl(new Path("/test1/test2"));
-  }
-
-  @Test(expected=AccessControlException.class)
-  public void testRemoveAcl() throws URISyntaxException, IOException {
-    getMockServer().enqueue(new MockResponse().setResponseCode(200));
-    getMockAdlFileSystem().removeAcl(new Path("/test1/test2"));
-
-    getMockServer().enqueue(new MockResponse().setResponseCode(403)
-        .setBody(TestADLResponseData.getAccessControlException()));
-
-    getMockAdlFileSystem().removeAcl(new Path("/test1/test2"));
-  }
-
-  @Test(expected=AccessControlException.class)
-  public void testSetAcl() throws URISyntaxException, IOException {
-    getMockServer().enqueue(new MockResponse().setResponseCode(200));
-    List<AclEntry> entries = new ArrayList<AclEntry>();
-    AclEntry.Builder aclEntryBuilder = new AclEntry.Builder();
-    aclEntryBuilder.setName("hadoop");
-    aclEntryBuilder.setType(AclEntryType.USER);
-    aclEntryBuilder.setPermission(FsAction.ALL);
-    aclEntryBuilder.setScope(AclEntryScope.ACCESS);
-    entries.add(aclEntryBuilder.build());
-
-    aclEntryBuilder.setName("hdfs");
-    aclEntryBuilder.setType(AclEntryType.GROUP);
-    aclEntryBuilder.setPermission(FsAction.READ_WRITE);
-    aclEntryBuilder.setScope(AclEntryScope.DEFAULT);
-    entries.add(aclEntryBuilder.build());
-
-    getMockAdlFileSystem().setAcl(new Path("/test1/test2"), entries);
-
-    getMockServer().enqueue(new MockResponse().setResponseCode(403)
-        .setBody(TestADLResponseData.getAccessControlException()));
-
-    getMockAdlFileSystem().setAcl(new Path("/test1/test2"), entries);
-  }
-
-  @Test(expected=AccessControlException.class)
-  public void testCheckAccess() throws URISyntaxException, IOException {
-    getMockServer().enqueue(new MockResponse().setResponseCode(200));
-    getMockAdlFileSystem().access(new Path("/test1/test2"), FsAction.ALL);
-
-    getMockServer().enqueue(new MockResponse().setResponseCode(200));
-    getMockAdlFileSystem().access(new Path("/test1/test2"), FsAction.EXECUTE);
-
-    getMockServer().enqueue(new MockResponse().setResponseCode(200));
-    getMockAdlFileSystem().access(new Path("/test1/test2"), FsAction.READ);
-
-    getMockServer().enqueue(new MockResponse().setResponseCode(200));
-    getMockAdlFileSystem()
-        .access(new Path("/test1/test2"), FsAction.READ_EXECUTE);
-
-    getMockServer().enqueue(new MockResponse().setResponseCode(200));
-    getMockAdlFileSystem()
-        .access(new Path("/test1/test2"), FsAction.READ_WRITE);
-
-    getMockServer().enqueue(new MockResponse().setResponseCode(200));
-    getMockAdlFileSystem().access(new Path("/test1/test2"), FsAction.NONE);
-
-    getMockServer().enqueue(new MockResponse().setResponseCode(200));
-    getMockAdlFileSystem().access(new Path("/test1/test2"), FsAction.WRITE);
-
-    getMockServer().enqueue(new MockResponse().setResponseCode(200));
-    getMockAdlFileSystem()
-        .access(new Path("/test1/test2"), FsAction.WRITE_EXECUTE);
-
-    getMockServer().enqueue(new MockResponse().setResponseCode(403)
-        .setBody(TestADLResponseData.getAccessControlException()));
-
-    getMockAdlFileSystem()
-        .access(new Path("/test1/test2"), FsAction.WRITE_EXECUTE);
-  }
-
-  @Test(expected=AccessControlException.class)
-  public void testSetPermission() throws URISyntaxException, IOException {
-    getMockServer().enqueue(new MockResponse().setResponseCode(200));
-    getMockAdlFileSystem()
-        .setPermission(new Path("/test1/test2"), FsPermission.getDefault());
-
-    getMockServer().enqueue(new MockResponse().setResponseCode(403)
-        .setBody(TestADLResponseData.getAccessControlException()));
-
-    getMockAdlFileSystem()
-        .setPermission(new Path("/test1/test2"), FsPermission.getDefault());
-  }
-
-  @Test(expected=AccessControlException.class)
-  public void testSetOwner() throws URISyntaxException, IOException {
-    getMockServer().enqueue(new MockResponse().setResponseCode(200));
-    getMockAdlFileSystem().setOwner(new Path("/test1/test2"), "hadoop", "hdfs");
-
-    getMockServer().enqueue(new MockResponse().setResponseCode(403)
-        .setBody(TestADLResponseData.getAccessControlException()));
-
-    getMockAdlFileSystem()
-        .setOwner(new Path("/test1/test2"), "hadoop", "hdfs");
-  }
-
-  @Test
-  public void getAclStatusAsExpected() throws URISyntaxException, IOException {
-    getMockServer().enqueue(new MockResponse().setResponseCode(200)
-        .setBody(TestADLResponseData.getGetAclStatusJSONResponse()));
-    AclStatus aclStatus = getMockAdlFileSystem()
-        .getAclStatus(new Path("/test1/test2"));
-    Assert.assertEquals(aclStatus.getGroup(), "supergroup");
-    Assert.assertEquals(aclStatus.getOwner(), "hadoop");
-    Assert.assertEquals((Short) aclStatus.getPermission().toShort(),
-        Short.valueOf("775", 8));
-
-    for (AclEntry entry : aclStatus.getEntries()) {
-      if (!(entry.toString().equalsIgnoreCase("user:carla:rw-") || entry
-          .toString().equalsIgnoreCase("group::r-x"))) {
-        Assert.fail("Unexpected entry : " + entry.toString());
-      }
-    }
-  }
-
-  @Test(expected=FileNotFoundException.class)
-  public void getAclStatusNotExists() throws URISyntaxException, IOException {
-    getMockServer().enqueue(new MockResponse().setResponseCode(404)
-        .setBody(TestADLResponseData.getFileNotFoundException()));
-
-    getMockAdlFileSystem().getAclStatus(new Path("/test1/test2"));
-  }
-
-  @Test(expected=AccessControlException.class)
-  public void testAclStatusDenied() throws URISyntaxException, IOException {
-    getMockServer().enqueue(new MockResponse().setResponseCode(403)
-        .setBody(TestADLResponseData.getAccessControlException()));
-
-    getMockAdlFileSystem().getAclStatus(new Path("/test1/test2"));
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cf88fcd/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAdlRead.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAdlRead.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAdlRead.java
deleted file mode 100644
index 172663c..0000000
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAdlRead.java
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.adl;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.adl.common.Parallelized;
-import org.apache.hadoop.fs.adl.common.TestDataForRead;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import java.io.ByteArrayInputStream;
-import java.io.EOFException;
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Random;
-
-import static org.apache.hadoop.fs.adl.AdlConfKeys.READ_AHEAD_BUFFER_SIZE_KEY;
-
-/**
- * This class is responsible for stress positional reads vs number of network
- * calls required by to fetch the amount of data. Test does ensure the data
- * integrity and order of the data is maintained.
- */
-@RunWith(Parallelized.class)
-public class TestAdlRead extends AdlMockWebServer {
-
-  private TestDataForRead testData;
-
-  public TestAdlRead(TestDataForRead testData) {
-    Configuration configuration = new Configuration();
-    configuration.setInt(READ_AHEAD_BUFFER_SIZE_KEY, 4 * 1024);
-    setConf(configuration);
-    this.testData = testData;
-  }
-
-  @Parameterized.Parameters(name = "{index}")
-  public static Collection testDataForReadOperation() {
-    return Arrays.asList(new Object[][] {
-
-        //--------------------------
-        // Test Data
-        //--------------------------
-        {new TestDataForRead("Hello World".getBytes(), 2, 1000, true)},
-        {new TestDataForRead(
-            ("the problem you appear to be wrestling with is that this doesn't "
-                + "display very well. ").getBytes(), 2, 1000, true)},
-        {new TestDataForRead(("您的數據是寶貴的資產,以您的組織,並有當前和未來價值。由於這個原因,"
-            + "所有的數據應存儲以供將來分析。今天,這往往是不這樣做," + "因為傳統的分析基礎架構的限制,"
-            + "像模式的預定義,存儲大數據集和不同的數據筒倉的傳播的成本。"
-            + "為了應對這一挑戰,數據湖面概念被引入作為一個企業級存儲庫來存儲所有"
-            + "類型的在一個地方收集到的數據。對於運作和探索性分析的目的,所有類型的" + "數據可以定義需求或模式之前被存儲在數據湖。")
-            .getBytes(), 2, 1000, true)}, {new TestDataForRead(
-        TestADLResponseData.getRandomByteArrayData(4 * 1024), 2, 10, true)},
-        {new TestDataForRead(TestADLResponseData.getRandomByteArrayData(100), 2,
-            1000, true)}, {new TestDataForRead(
-        TestADLResponseData.getRandomByteArrayData(1 * 1024), 2, 50, true)},
-        {new TestDataForRead(
-            TestADLResponseData.getRandomByteArrayData(8 * 1024), 3, 10,
-            false)}, {new TestDataForRead(
-        TestADLResponseData.getRandomByteArrayData(16 * 1024), 5, 10, false)},
-        {new TestDataForRead(
-            TestADLResponseData.getRandomByteArrayData(32 * 1024), 9, 10,
-            false)}, {new TestDataForRead(
-        TestADLResponseData.getRandomByteArrayData(64 * 1024), 17, 10,
-        false)}});
-  }
-
-  @Test
-  public void testEntireBytes() throws IOException, InterruptedException {
-    getMockServer().setDispatcher(testData.getDispatcher());
-    FSDataInputStream in = getMockAdlFileSystem().open(new Path("/test"));
-    byte[] expectedData = new byte[testData.getActualData().length];
-    int n = 0;
-    int len = expectedData.length;
-    int off = 0;
-    while (n < len) {
-      int count = in.read(expectedData, off + n, len - n);
-      if (count < 0) {
-        throw new EOFException();
-      }
-      n += count;
-    }
-
-    Assert.assertEquals(expectedData.length, testData.getActualData().length);
-    Assert.assertArrayEquals(expectedData, testData.getActualData());
-    in.close();
-    if (testData.isCheckOfNoOfCalls()) {
-      Assert.assertEquals(testData.getExpectedNoNetworkCall(),
-          getMockServer().getRequestCount());
-    }
-  }
-
-  @Test
-  public void testSeekOperation() throws IOException, InterruptedException {
-    getMockServer().setDispatcher(testData.getDispatcher());
-    FSDataInputStream in = getMockAdlFileSystem().open(new Path("/test"));
-    Random random = new Random();
-    for (int i = 0; i < 1000; ++i) {
-      int position = random.nextInt(testData.getActualData().length);
-      in.seek(position);
-      Assert.assertEquals(position, in.getPos());
-      Assert.assertEquals(testData.getActualData()[position] & 0xFF, in.read());
-    }
-    in.close();
-    if (testData.isCheckOfNoOfCalls()) {
-      Assert.assertEquals(testData.getExpectedNoNetworkCall(),
-          getMockServer().getRequestCount());
-    }
-  }
-
-  @Test
-  public void testReadServerCalls() throws IOException, InterruptedException {
-    getMockServer().setDispatcher(testData.getDispatcher());
-    FSDataInputStream in = getMockAdlFileSystem().open(new Path("/test"));
-    byte[] expectedData = new byte[testData.getActualData().length];
-    in.readFully(expectedData);
-    Assert.assertArrayEquals(expectedData, testData.getActualData());
-    Assert.assertEquals(testData.getExpectedNoNetworkCall(),
-        getMockServer().getRequestCount());
-    in.close();
-  }
-
-  @Test
-  public void testReadFully() throws IOException, InterruptedException {
-    getMockServer().setDispatcher(testData.getDispatcher());
-    FSDataInputStream in = getMockAdlFileSystem().open(new Path("/test"));
-    byte[] expectedData = new byte[testData.getActualData().length];
-    in.readFully(expectedData);
-    Assert.assertArrayEquals(expectedData, testData.getActualData());
-
-    in.readFully(0, expectedData);
-    Assert.assertArrayEquals(expectedData, testData.getActualData());
-
-    in.readFully(0, expectedData, 0, expectedData.length);
-    Assert.assertArrayEquals(expectedData, testData.getActualData());
-    in.close();
-  }
-
-  @Test
-  public void testRandomPositionalReadUsingReadFully()
-      throws IOException, InterruptedException {
-    getMockServer().setDispatcher(testData.getDispatcher());
-    FSDataInputStream in = getMockAdlFileSystem().open(new Path("/test"));
-    ByteArrayInputStream actualData = new ByteArrayInputStream(
-        testData.getActualData());
-    Random random = new Random();
-    for (int i = 0; i < testData.getIntensityOfTest(); ++i) {
-      int offset = random.nextInt(testData.getActualData().length);
-      int length = testData.getActualData().length - offset;
-      byte[] expectedData = new byte[length];
-      byte[] actualDataSubset = new byte[length];
-      actualData.reset();
-      actualData.skip(offset);
-      actualData.read(actualDataSubset, 0, length);
-
-      in.readFully(offset, expectedData, 0, length);
-      Assert.assertArrayEquals(expectedData, actualDataSubset);
-    }
-
-    for (int i = 0; i < testData.getIntensityOfTest(); ++i) {
-      int offset = random.nextInt(testData.getActualData().length);
-      int length = random.nextInt(testData.getActualData().length - offset);
-      byte[] expectedData = new byte[length];
-      byte[] actualDataSubset = new byte[length];
-      actualData.reset();
-      actualData.skip(offset);
-      actualData.read(actualDataSubset, 0, length);
-
-      in.readFully(offset, expectedData, 0, length);
-      Assert.assertArrayEquals(expectedData, actualDataSubset);
-    }
-
-    in.close();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cf88fcd/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestConcurrentDataReadOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestConcurrentDataReadOperations.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestConcurrentDataReadOperations.java
deleted file mode 100644
index b790562..0000000
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestConcurrentDataReadOperations.java
+++ /dev/null
@@ -1,299 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.adl;
-
-import com.squareup.okhttp.mockwebserver.Dispatcher;
-import com.squareup.okhttp.mockwebserver.MockResponse;
-import com.squareup.okhttp.mockwebserver.RecordedRequest;
-import okio.Buffer;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.Path;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Random;
-import java.util.UUID;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/**
- * This class is responsible for testing multiple threads trying to access same
- * or multiple files from the offset.
- */
-@RunWith(Parameterized.class)
-public class TestConcurrentDataReadOperations extends AdlMockWebServer {
-  private static final Logger LOG = LoggerFactory
-      .getLogger(TestConcurrentDataReadOperations.class);
-  private static final Object LOCK = new Object();
-  private static FSDataInputStream commonHandle = null;
-  private int concurrencyLevel;
-
-  public TestConcurrentDataReadOperations(int concurrencyLevel) {
-    this.concurrencyLevel = concurrencyLevel;
-  }
-
-  @Parameterized.Parameters(name = "{index}")
-  public static Collection<?> testDataNumberOfConcurrentRun() {
-    return Arrays.asList(new Object[][] {{1}, {2}, {3}, {4}, {5}});
-  }
-
-  public static byte[] getRandomByteArrayData(int size) {
-    byte[] b = new byte[size];
-    Random rand = new Random();
-    rand.nextBytes(b);
-    return b;
-  }
-
-  private void setDispatcher(final ArrayList<CreateTestData> testData) {
-    getMockServer().setDispatcher(new Dispatcher() {
-      @Override
-      public MockResponse dispatch(RecordedRequest recordedRequest)
-          throws InterruptedException {
-        CreateTestData currentRequest = null;
-        for (CreateTestData local : testData) {
-          if (recordedRequest.getPath().contains(local.path.toString())) {
-            currentRequest = local;
-            break;
-          }
-        }
-
-        if (currentRequest == null) {
-          new MockResponse().setBody("Request data not found")
-              .setResponseCode(501);
-        }
-
-        if (recordedRequest.getRequestLine().contains("op=GETFILESTATUS")) {
-          return new MockResponse().setResponseCode(200).setBody(
-              TestADLResponseData
-                  .getGetFileStatusJSONResponse(currentRequest.data.length));
-        }
-
-        if (recordedRequest.getRequestLine().contains("op=OPEN")) {
-          String request = recordedRequest.getRequestLine();
-          int offset = 0;
-          int byteCount = 0;
-
-          Pattern pattern = Pattern.compile("offset=([0-9]+)");
-          Matcher matcher = pattern.matcher(request);
-          if (matcher.find()) {
-            LOG.debug(matcher.group(1));
-            offset = Integer.parseInt(matcher.group(1));
-          }
-
-          pattern = Pattern.compile("length=([0-9]+)");
-          matcher = pattern.matcher(request);
-          if (matcher.find()) {
-            LOG.debug(matcher.group(1));
-            byteCount = Integer.parseInt(matcher.group(1));
-          }
-
-          Buffer buf = new Buffer();
-          buf.write(currentRequest.data, offset,
-              Math.min(currentRequest.data.length - offset, byteCount));
-          return new MockResponse().setResponseCode(200)
-              .setChunkedBody(buf, 4 * 1024 * 1024);
-        }
-
-        return new MockResponse().setBody("NOT SUPPORTED").setResponseCode(501);
-      }
-    });
-  }
-
-  @Before
-  public void resetHandle() {
-    commonHandle = null;
-  }
-
-  @Test
-  public void testParallelReadOnDifferentStreams()
-      throws IOException, InterruptedException, ExecutionException {
-
-    ArrayList<CreateTestData> createTestData = new ArrayList<CreateTestData>();
-
-    Random random = new Random();
-
-    for (int i = 0; i < concurrencyLevel; i++) {
-      CreateTestData testData = new CreateTestData();
-      testData
-          .set(new Path("/test/concurrentRead/" + UUID.randomUUID().toString()),
-              getRandomByteArrayData(random.nextInt(1 * 1024 * 1024)));
-      createTestData.add(testData);
-    }
-
-    setDispatcher(createTestData);
-
-    ArrayList<ReadTestData> readTestData = new ArrayList<ReadTestData>();
-    for (CreateTestData local : createTestData) {
-      ReadTestData localReadData = new ReadTestData();
-      localReadData.set(local.path, local.data, 0);
-      readTestData.add(localReadData);
-    }
-
-    runReadTest(readTestData, false);
-  }
-
-  @Test
-  public void testParallelReadOnSameStreams()
-      throws IOException, InterruptedException, ExecutionException {
-    ArrayList<CreateTestData> createTestData = new ArrayList<CreateTestData>();
-
-    Random random = new Random();
-
-    for (int i = 0; i < 1; i++) {
-      CreateTestData testData = new CreateTestData();
-      testData
-          .set(new Path("/test/concurrentRead/" + UUID.randomUUID().toString()),
-              getRandomByteArrayData(1024 * 1024));
-      createTestData.add(testData);
-    }
-
-    setDispatcher(createTestData);
-
-    ArrayList<ReadTestData> readTestData = new ArrayList<ReadTestData>();
-    ByteArrayInputStream buffered = new ByteArrayInputStream(
-        createTestData.get(0).data);
-
-    ReadTestData readInitially = new ReadTestData();
-    byte[] initialData = new byte[1024 * 1024];
-    buffered.read(initialData);
-
-    readInitially.set(createTestData.get(0).path, initialData, 0);
-    readTestData.add(readInitially);
-    runReadTest(readTestData, false);
-
-    readTestData.clear();
-
-    for (int i = 0; i < concurrencyLevel * 5; i++) {
-      ReadTestData localReadData = new ReadTestData();
-      int offset = random.nextInt((1024 * 1024) - 1);
-      int length = 1024 * 1024 - offset;
-      byte[] expectedData = new byte[length];
-      buffered.reset();
-      buffered.skip(offset);
-      buffered.read(expectedData);
-      localReadData.set(createTestData.get(0).path, expectedData, offset);
-      readTestData.add(localReadData);
-    }
-
-    runReadTest(readTestData, true);
-  }
-
-  void runReadTest(ArrayList<ReadTestData> testData, boolean useSameStream)
-      throws InterruptedException, ExecutionException {
-
-    ExecutorService executor = Executors.newFixedThreadPool(testData.size());
-    Future[] subtasks = new Future[testData.size()];
-
-    for (int i = 0; i < testData.size(); i++) {
-      subtasks[i] = executor.submit(
-          new ReadConcurrentRunnable(testData.get(i).data, testData.get(i).path,
-              testData.get(i).offset, useSameStream));
-    }
-
-    executor.shutdown();
-
-    // wait until all tasks are finished
-    executor.awaitTermination(120, TimeUnit.SECONDS);
-
-    for (int i = 0; i < testData.size(); ++i) {
-      Assert.assertTrue((Boolean) subtasks[i].get());
-    }
-  }
-
-  class ReadTestData {
-    private Path path;
-    private byte[] data;
-    private int offset;
-
-    public void set(Path filePath, byte[] dataToBeRead, int fromOffset) {
-      this.path = filePath;
-      this.data = dataToBeRead;
-      this.offset = fromOffset;
-    }
-  }
-
-  class CreateTestData {
-    private Path path;
-    private byte[] data;
-
-    public void set(Path filePath, byte[] dataToBeWritten) {
-      this.path = filePath;
-      this.data = dataToBeWritten;
-    }
-  }
-
-  class ReadConcurrentRunnable implements Callable<Boolean> {
-    private Path path;
-    private int offset;
-    private byte[] expectedData;
-    private boolean useSameStream;
-
-    public ReadConcurrentRunnable(byte[] expectedData, Path path, int offset,
-        boolean useSameStream) {
-      this.path = path;
-      this.offset = offset;
-      this.expectedData = expectedData;
-      this.useSameStream = useSameStream;
-    }
-
-    public Boolean call() throws IOException {
-      try {
-        FSDataInputStream in;
-        if (useSameStream) {
-          synchronized (LOCK) {
-            if (commonHandle == null) {
-              commonHandle = getMockAdlFileSystem().open(path);
-            }
-            in = commonHandle;
-          }
-        } else {
-          in = getMockAdlFileSystem().open(path);
-        }
-
-        byte[] actualData = new byte[expectedData.length];
-        in.readFully(offset, actualData);
-        Assert.assertArrayEquals("Path :" + path.toString() + " did not match.",
-            expectedData, actualData);
-        if (!useSameStream) {
-          in.close();
-        }
-      } catch (IOException e) {
-        e.printStackTrace();
-        return false;
-      }
-      return true;
-    }
-  }
-}
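
For context, the shared-stream case removed above exercises positioned reads, FSDataInputStream.readFully(position, buffer), issued from several threads against one open stream. A minimal standalone sketch of that pattern, assuming an already-initialized Hadoop FileSystem and an existing file (the class and method names here are illustrative only, not part of the removed test):

  import java.io.IOException;
  import java.util.ArrayList;
  import java.util.List;
  import java.util.concurrent.ExecutionException;
  import java.util.concurrent.ExecutorService;
  import java.util.concurrent.Executors;
  import java.util.concurrent.Future;
  import java.util.concurrent.TimeUnit;

  import org.apache.hadoop.fs.FSDataInputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public final class SharedStreamReadSketch {
    // Reads the first `length` bytes of one open stream from several threads.
    // readFully(position, buffer) is a positioned read, so concurrent callers
    // do not move each other's stream offset.
    static void readConcurrently(FileSystem fs, Path path, int length,
        int threads)
        throws IOException, InterruptedException, ExecutionException {
      final FSDataInputStream in = fs.open(path);
      ExecutorService pool = Executors.newFixedThreadPool(threads);
      List<Future<byte[]>> results = new ArrayList<>();
      for (int i = 0; i < threads; i++) {
        results.add(pool.submit(() -> {
          byte[] buf = new byte[length];
          in.readFully(0, buf);   // positioned read; stream offset unchanged
          return buf;
        }));
      }
      pool.shutdown();
      pool.awaitTermination(120, TimeUnit.SECONDS);
      for (Future<byte[]> f : results) {
        f.get();                  // propagates any read failure
      }
      in.close();
    }
  }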

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cf88fcd/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestCustomTokenProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestCustomTokenProvider.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestCustomTokenProvider.java
deleted file mode 100644
index 737534c..0000000
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestCustomTokenProvider.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.adl;
-
-import com.squareup.okhttp.mockwebserver.MockResponse;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.adl.common.CustomMockTokenProvider;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.Arrays;
-import java.util.Collection;
-
-import static org.apache.hadoop.fs.adl.AdlConfKeys.ADL_BLOCK_SIZE;
-import static org.apache.hadoop.fs.adl.AdlConfKeys
-    .AZURE_AD_TOKEN_PROVIDER_CLASS_KEY;
-import static org.apache.hadoop.fs.adl.AdlConfKeys
-    .AZURE_AD_TOKEN_PROVIDER_TYPE_KEY;
-
-/**
- * Test access token provider behaviour with a custom token provider and with
- * the token provider cache enabled.
- */
-@RunWith(Parameterized.class)
-public class TestCustomTokenProvider extends AdlMockWebServer {
-  private static final long TEN_MINUTES_IN_MILIS = 600000;
-  private int backendCallCount;
-  private int expectedCallbackToAccessToken;
-  private TestableAdlFileSystem[] fileSystems;
-  private Class typeOfTokenProviderClass;
-  private long expiryFromNow;
-  private int fsObjectCount;
-
-  public TestCustomTokenProvider(Class typeOfTokenProviderClass,
-      long expiryFromNow, int fsObjectCount, int backendCallCount,
-      int expectedCallbackToAccessToken)
-      throws IllegalAccessException, InstantiationException, URISyntaxException,
-      IOException {
-    this.typeOfTokenProviderClass = typeOfTokenProviderClass;
-    this.expiryFromNow = expiryFromNow;
-    this.fsObjectCount = fsObjectCount;
-    this.backendCallCount = backendCallCount;
-    this.expectedCallbackToAccessToken = expectedCallbackToAccessToken;
-  }
-
-  @Parameterized.Parameters(name = "{index}")
-  public static Collection testDataForTokenProvider() {
-    return Arrays.asList(new Object[][] {
-        // Data set in order
-        // INPUT - CustomTokenProvider class to load
-        // INPUT - expiry time in millis, relative to the current time
-        // INPUT - No. of FileSystem objects
-        // INPUT - No. of backend calls per FileSystem object
-        // EXPECTED - Number of callbacks to get token after the test finishes.
-        {CustomMockTokenProvider.class, 0, 1, 1, 1},
-        {CustomMockTokenProvider.class, TEN_MINUTES_IN_MILIS, 1, 1, 1},
-        {CustomMockTokenProvider.class, TEN_MINUTES_IN_MILIS, 2, 1, 2},
-        {CustomMockTokenProvider.class, TEN_MINUTES_IN_MILIS, 10, 10, 10}});
-  }
-
-  /**
-   * Init is invoked explicitly so that the base class mock server is set up
-   * before test data initialization is done.
-   *
-   * @throws IOException
-   * @throws URISyntaxException
-   */
-  public void init() throws IOException, URISyntaxException {
-    Configuration configuration = new Configuration();
-    configuration.setEnum(AZURE_AD_TOKEN_PROVIDER_TYPE_KEY,
-        TokenProviderType.Custom);
-    configuration.set(AZURE_AD_TOKEN_PROVIDER_CLASS_KEY,
-        typeOfTokenProviderClass.getName());
-    fileSystems = new TestableAdlFileSystem[fsObjectCount];
-    URI uri = new URI("adl://localhost:" + getPort());
-
-    for (int i = 0; i < fsObjectCount; ++i) {
-      fileSystems[i] = new TestableAdlFileSystem();
-      fileSystems[i].initialize(uri, configuration);
-
-      ((CustomMockTokenProvider) fileSystems[i].getAzureTokenProvider())
-          .setExpiryTimeInMillisAfter(expiryFromNow);
-    }
-  }
-
-  @Test
-  public void testCustomTokenManagement()
-      throws IOException, URISyntaxException {
-    int accessTokenCallbackDuringExec = 0;
-    init();
-    for (TestableAdlFileSystem tfs : fileSystems) {
-      for (int i = 0; i < backendCallCount; ++i) {
-        getMockServer().enqueue(new MockResponse().setResponseCode(200)
-            .setBody(TestADLResponseData.getGetFileStatusJSONResponse()));
-        FileStatus fileStatus = tfs.getFileStatus(new Path("/test1/test2"));
-        Assert.assertTrue(fileStatus.isFile());
-        Assert.assertEquals("adl://" + getMockServer().getHostName() + ":" +
-            getMockServer().getPort() + "/test1/test2",
-            fileStatus.getPath().toString());
-        Assert.assertEquals(4194304, fileStatus.getLen());
-        Assert.assertEquals(ADL_BLOCK_SIZE, fileStatus.getBlockSize());
-        Assert.assertEquals(1, fileStatus.getReplication());
-        Assert
-            .assertEquals(new FsPermission("777"), fileStatus.getPermission());
-        Assert.assertEquals("NotSupportYet", fileStatus.getOwner());
-        Assert.assertEquals("NotSupportYet", fileStatus.getGroup());
-      }
-
-      accessTokenCallbackDuringExec += ((CustomMockTokenProvider) tfs
-          .getAzureTokenProvider()).getAccessTokenRequestCount();
-    }
-
-    Assert.assertEquals(expectedCallbackToAccessToken,
-        accessTokenCallbackDuringExec);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cf88fcd/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestGetFileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestGetFileStatus.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestGetFileStatus.java
deleted file mode 100644
index 95c2363..0000000
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestGetFileStatus.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.fs.adl;
-
-import com.squareup.okhttp.mockwebserver.MockResponse;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.net.URISyntaxException;
-
-import static org.apache.hadoop.fs.adl.AdlConfKeys.ADL_BLOCK_SIZE;
-
-/**
- * This class is responsible for testing local getFileStatus implementation
- * to cover correct parsing of successful and error JSON response
- * from the server.
- * Adls GetFileStatus operation is in detail covered in
- * org.apache.hadoop.fs.adl.live testing package.
- */
-public class TestGetFileStatus extends AdlMockWebServer {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestGetFileStatus.class);
-
-  @Test
-  public void getFileStatusReturnsAsExpected()
-      throws URISyntaxException, IOException {
-    getMockServer().enqueue(new MockResponse().setResponseCode(200)
-        .setBody(TestADLResponseData.getGetFileStatusJSONResponse()));
-    long startTime = Time.monotonicNow();
-    Path path = new Path("/test1/test2");
-    FileStatus fileStatus = getMockAdlFileSystem().getFileStatus(path);
-    long endTime = Time.monotonicNow();
-    LOG.debug("Time : " + (endTime - startTime));
-    Assert.assertTrue(fileStatus.isFile());
-    Assert.assertEquals("adl://" + getMockServer().getHostName() + ":" +
-        getMockServer().getPort() + "/test1/test2",
-        fileStatus.getPath().toString());
-    Assert.assertEquals(4194304, fileStatus.getLen());
-    Assert.assertEquals(ADL_BLOCK_SIZE, fileStatus.getBlockSize());
-    Assert.assertEquals(1, fileStatus.getReplication());
-    Assert.assertEquals(new FsPermission("777"), fileStatus.getPermission());
-    Assert.assertEquals("NotSupportYet", fileStatus.getOwner());
-    Assert.assertEquals("NotSupportYet", fileStatus.getGroup());
-    Assert.assertTrue(path + " should have Acl!", fileStatus.hasAcl());
-    Assert.assertFalse(path + " should not be encrypted!",
-        fileStatus.isEncrypted());
-    Assert.assertFalse(path + " should not be erasure coded!",
-        fileStatus.isErasureCoded());
-  }
-
-  @Test
-  public void getFileStatusAclBit() throws URISyntaxException, IOException {
-    // With ACLBIT set to true
-    getMockServer().enqueue(new MockResponse().setResponseCode(200)
-            .setBody(TestADLResponseData.getGetFileStatusJSONResponse(true)));
-    long startTime = Time.monotonicNow();
-    FileStatus fileStatus = getMockAdlFileSystem()
-            .getFileStatus(new Path("/test1/test2"));
-    long endTime = Time.monotonicNow();
-    LOG.debug("Time : " + (endTime - startTime));
-    Assert.assertTrue(fileStatus.isFile());
-    Assert.assertTrue(fileStatus.hasAcl());
-    Assert.assertTrue(fileStatus.getPermission().getAclBit());
-
-    // With ACLBIT set to false
-    getMockServer().enqueue(new MockResponse().setResponseCode(200)
-            .setBody(TestADLResponseData.getGetFileStatusJSONResponse(false)));
-    startTime = Time.monotonicNow();
-    fileStatus = getMockAdlFileSystem()
-            .getFileStatus(new Path("/test1/test2"));
-    endTime = Time.monotonicNow();
-    LOG.debug("Time : " + (endTime - startTime));
-    Assert.assertTrue(fileStatus.isFile());
-    Assert.assertFalse(fileStatus.hasAcl());
-    Assert.assertFalse(fileStatus.getPermission().getAclBit());
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cf88fcd/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestListStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestListStatus.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestListStatus.java
deleted file mode 100644
index db32476..0000000
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestListStatus.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.fs.adl;
-
-import com.squareup.okhttp.mockwebserver.MockResponse;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.net.URISyntaxException;
-
-/**
- * This class tests the local listStatus implementation to cover correct
- * parsing of successful and error JSON responses from the server.
- * The ADLS ListStatus functionality is covered in detail in the
- * org.apache.hadoop.fs.adl.live test package.
- */
-public class TestListStatus extends AdlMockWebServer {
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(TestListStatus.class);
-
-  @Test
-  public void listStatusReturnsAsExpected() throws IOException {
-    getMockServer().enqueue(new MockResponse().setResponseCode(200)
-        .setBody(TestADLResponseData.getListFileStatusJSONResponse(10)));
-    long startTime = Time.monotonicNow();
-    FileStatus[] ls = getMockAdlFileSystem()
-        .listStatus(new Path("/test1/test2"));
-    long endTime = Time.monotonicNow();
-    LOG.debug("Time : " + (endTime - startTime));
-    Assert.assertEquals(10, ls.length);
-
-    getMockServer().enqueue(new MockResponse().setResponseCode(200)
-        .setBody(TestADLResponseData.getListFileStatusJSONResponse(200)));
-    startTime = Time.monotonicNow();
-    ls = getMockAdlFileSystem().listStatus(new Path("/test1/test2"));
-    endTime = Time.monotonicNow();
-    LOG.debug("Time : " + (endTime - startTime));
-    Assert.assertEquals(200, ls.length);
-
-    getMockServer().enqueue(new MockResponse().setResponseCode(200)
-        .setBody(TestADLResponseData.getListFileStatusJSONResponse(2048)));
-    startTime = Time.monotonicNow();
-    ls = getMockAdlFileSystem().listStatus(new Path("/test1/test2"));
-    endTime = Time.monotonicNow();
-    LOG.debug("Time : " + (endTime - startTime));
-    Assert.assertEquals(2048, ls.length);
-  }
-
-  @Test
-  public void listStatusOnFailure() throws IOException {
-    getMockServer().enqueue(new MockResponse().setResponseCode(403).setBody(
-        TestADLResponseData.getErrorIllegalArgumentExceptionJSONResponse()));
-    FileStatus[] ls = null;
-    long startTime = Time.monotonicNow();
-    try {
-      ls = getMockAdlFileSystem().listStatus(new Path("/test1/test2"));
-    } catch (IOException e) {
-      Assert.assertTrue(e.getMessage().contains("Invalid"));
-    }
-    long endTime = Time.monotonicNow();
-    LOG.debug("Time : " + (endTime - startTime));
-
-    // The SDK may retry several times before the error is propagated to the
-    // caller. Enqueue up to 10 error responses to align with the SDK's retries.
-    for (int i = 0; i < 10; ++i) {
-      getMockServer().enqueue(new MockResponse().setResponseCode(500).setBody(
-          TestADLResponseData.getErrorInternalServerExceptionJSONResponse()));
-    }
-
-    startTime = Time.monotonicNow();
-    try {
-      ls = getMockAdlFileSystem().listStatus(new Path("/test1/test2"));
-    } catch (IOException e) {
-      Assert.assertTrue(e.getMessage().contains("Internal Server Error"));
-    }
-    endTime = Time.monotonicNow();
-    LOG.debug("Time : " + (endTime - startTime));
-  }
-
-  @Test
-  public void listStatusAcl()
-          throws URISyntaxException, IOException {
-    // With ACLBIT set to true
-    getMockServer().enqueue(new MockResponse().setResponseCode(200)
-            .setBody(TestADLResponseData.getListFileStatusJSONResponse(true)));
-    FileStatus[] ls = null;
-    long startTime = Time.monotonicNow();
-    ls = getMockAdlFileSystem()
-            .listStatus(new Path("/test1/test2"));
-    long endTime = Time.monotonicNow();
-    LOG.debug("Time : " + (endTime - startTime));
-    for (int i = 0; i < ls.length; i++) {
-      Assert.assertTrue(ls[i].isDirectory());
-      Assert.assertTrue(ls[i].hasAcl());
-      Assert.assertTrue(ls[i].getPermission().getAclBit());
-    }
-
-    // With ACLBIT set to false
-    ls = null;
-    getMockServer().enqueue(new MockResponse().setResponseCode(200)
-            .setBody(TestADLResponseData.getListFileStatusJSONResponse(false)));
-    startTime = Time.monotonicNow();
-    ls = getMockAdlFileSystem()
-            .listStatus(new Path("/test1/test2"));
-    endTime = Time.monotonicNow();
-    LOG.debug("Time : " + (endTime - startTime));
-    for (int i = 0; i < ls.length; i++) {
-      Assert.assertTrue(ls[i].isDirectory());
-      Assert.assertFalse(ls[i].hasAcl());
-      Assert.assertFalse(ls[i].getPermission().getAclBit());
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cf88fcd/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestableAdlFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestableAdlFileSystem.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestableAdlFileSystem.java
deleted file mode 100644
index 4acb39b..0000000
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestableAdlFileSystem.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.fs.adl;
-
-/**
- * AdlFileSystem subclass that points at a local HTTP service so ADL storage
- * can be mocked in tests.
- */
-public class TestableAdlFileSystem extends AdlFileSystem {
-  @Override
-  protected String getTransportScheme() {
-    return "http";
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cf88fcd/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/common/ExpectedResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/common/ExpectedResponse.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/common/ExpectedResponse.java
deleted file mode 100644
index dc8577d..0000000
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/common/ExpectedResponse.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.fs.adl.common;
-
-import com.squareup.okhttp.mockwebserver.MockResponse;
-
-import java.util.ArrayList;
-
-/**
- * Supporting class that holds an expected MockResponse object along with the
- * parameters used for validation in test methods.
- */
-public class ExpectedResponse {
-  private MockResponse response;
-  private ArrayList<String> expectedQueryParameters = new ArrayList<String>();
-  private int expectedBodySize;
-  private String httpRequestType;
-
-  public int getExpectedBodySize() {
-    return expectedBodySize;
-  }
-
-  public String getHttpRequestType() {
-    return httpRequestType;
-  }
-
-  public ArrayList<String> getExpectedQueryParameters() {
-    return expectedQueryParameters;
-  }
-
-  public MockResponse getResponse() {
-    return response;
-  }
-
-  ExpectedResponse set(MockResponse mockResponse) {
-    this.response = mockResponse;
-    return this;
-  }
-
-  ExpectedResponse addExpectedQueryParam(String param) {
-    expectedQueryParameters.add(param);
-    return this;
-  }
-
-  ExpectedResponse addExpectedBodySize(int bodySize) {
-    this.expectedBodySize = bodySize;
-    return this;
-  }
-
-  ExpectedResponse addExpectedHttpRequestType(String expectedHttpRequestType) {
-    this.httpRequestType = expectedHttpRequestType;
-    return this;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cf88fcd/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/common/TestDataForRead.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/common/TestDataForRead.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/common/TestDataForRead.java
deleted file mode 100644
index 509b3f0..0000000
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/common/TestDataForRead.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.fs.adl.common;
-
-import com.squareup.okhttp.mockwebserver.Dispatcher;
-import com.squareup.okhttp.mockwebserver.MockResponse;
-import com.squareup.okhttp.mockwebserver.RecordedRequest;
-import okio.Buffer;
-import org.apache.hadoop.fs.adl.TestADLResponseData;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/**
- * Supporting class for mock tests that validate the ADLS read operation.
- */
-public class TestDataForRead {
-  private static final Logger LOG = LoggerFactory
-      .getLogger(TestDataForRead.class);
-
-  private byte[] actualData;
-  private ArrayList<ExpectedResponse> responses;
-  private Dispatcher dispatcher;
-  private int intensityOfTest;
-  private boolean checkOfNoOfCalls;
-  private int expectedNoNetworkCall;
-
-  public TestDataForRead(final byte[] actualData, int expectedNoNetworkCall,
-      int intensityOfTest, boolean checkOfNoOfCalls) {
-
-    this.checkOfNoOfCalls = checkOfNoOfCalls;
-    this.actualData = actualData;
-    responses = new ArrayList<ExpectedResponse>();
-    this.expectedNoNetworkCall = expectedNoNetworkCall;
-    this.intensityOfTest = intensityOfTest;
-
-    dispatcher = new Dispatcher() {
-      @Override
-      public MockResponse dispatch(RecordedRequest recordedRequest)
-          throws InterruptedException {
-
-        if (recordedRequest.getRequestLine().contains("op=GETFILESTATUS")) {
-          return new MockResponse().setResponseCode(200).setBody(
-              TestADLResponseData
-                  .getGetFileStatusJSONResponse(actualData.length));
-        }
-
-        if (recordedRequest.getRequestLine().contains("op=OPEN")) {
-          String request = recordedRequest.getRequestLine();
-          int offset = 0;
-          int byteCount = 0;
-
-          Pattern pattern = Pattern.compile("offset=([0-9]+)");
-          Matcher matcher = pattern.matcher(request);
-          if (matcher.find()) {
-            LOG.debug(matcher.group(1));
-            offset = Integer.parseInt(matcher.group(1));
-          }
-
-          pattern = Pattern.compile("length=([0-9]+)");
-          matcher = pattern.matcher(request);
-          if (matcher.find()) {
-            LOG.debug(matcher.group(1));
-            byteCount = Integer.parseInt(matcher.group(1));
-          }
-
-          Buffer buf = new Buffer();
-          buf.write(actualData, offset,
-              Math.min(actualData.length - offset, byteCount));
-          return new MockResponse().setResponseCode(200)
-              .setChunkedBody(buf, 4 * 1024 * 1024);
-        }
-
-        return new MockResponse().setBody("NOT SUPPORTED").setResponseCode(501);
-      }
-    };
-  }
-
-  public boolean isCheckOfNoOfCalls() {
-    return checkOfNoOfCalls;
-  }
-
-  public int getExpectedNoNetworkCall() {
-    return expectedNoNetworkCall;
-  }
-
-  public int getIntensityOfTest() {
-    return intensityOfTest;
-  }
-
-  public byte[] getActualData() {
-    return actualData;
-  }
-
-  public ArrayList<ExpectedResponse> getResponses() {
-    return responses;
-  }
-
-  public Dispatcher getDispatcher() {
-    return dispatcher;
-  }
-}



[12/50] [abbrv] hadoop git commit: Preparing for 3.2.0 development

Posted by ae...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index 1fafe77..a0e530a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-ui</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop YARN UI</name>
   <packaging>${packagingType}</packaging>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml
index e4e611b..6110476 100644
--- a/hadoop-yarn-project/hadoop-yarn/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/pom.xml
@@ -16,11 +16,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-yarn</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>pom</packaging>
   <name>Apache Hadoop YARN</name>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/pom.xml b/hadoop-yarn-project/pom.xml
index 3ef9c45..4593441 100644
--- a/hadoop-yarn-project/pom.xml
+++ b/hadoop-yarn-project/pom.xml
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-yarn-project</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>pom</packaging>
   <name>Apache Hadoop YARN Project</name>
   <url>http://hadoop.apache.org/yarn/</url>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index d776678..a51c42a 100644
--- a/pom.xml
+++ b/pom.xml
@@ -18,7 +18,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-main</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Main</description>
   <name>Apache Hadoop Main</name>
   <packaging>pom</packaging>



[07/50] [abbrv] hadoop git commit: YARN-5428. Allow for specifying the docker client configuration directory. Contributed by Shane Kumpf

Posted by ae...@apache.org.
YARN-5428. Allow for specifying the docker client configuration directory. Contributed by Shane Kumpf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eb2449d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eb2449d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eb2449d5

Branch: refs/heads/HDFS-7240
Commit: eb2449d5398e9ac869bc088e10d838a7f13deac0
Parents: 996796f
Author: Jian He <ji...@apache.org>
Authored: Wed Feb 7 10:59:38 2018 -0800
Committer: Jian He <ji...@apache.org>
Committed: Thu Feb 8 11:35:30 2018 -0800

----------------------------------------------------------------------
 .../applications/distributedshell/Client.java   |  38 +++-
 .../DockerCredentialTokenIdentifier.java        | 159 ++++++++++++++++
 .../yarn/util/DockerClientConfigHandler.java    | 183 +++++++++++++++++++
 .../src/main/proto/yarn_security_token.proto    |   5 +
 ...apache.hadoop.security.token.TokenIdentifier |   1 +
 .../security/TestDockerClientConfigHandler.java | 129 +++++++++++++
 .../runtime/DockerLinuxContainerRuntime.java    |  39 ++++
 .../linux/runtime/docker/DockerCommand.java     |  16 ++
 .../runtime/TestDockerContainerRuntime.java     | 109 +++++++++++
 .../runtime/docker/TestDockerRunCommand.java    |   8 +
 .../src/site/markdown/DockerContainers.md       |  13 +-
 11 files changed, 690 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb2449d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
index 2aafa94..0aef83f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
@@ -87,6 +87,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
 import org.apache.hadoop.yarn.exceptions.YARNFeatureNotEnabledException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.util.DockerClientConfigHandler;
 import org.apache.hadoop.yarn.util.UnitsConversionUtil;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -225,6 +226,9 @@ public class Client {
   private String flowVersion = null;
   private long flowRunId = 0L;
 
+  // Docker client configuration
+  private String dockerClientConfig = null;
+
   // Command line options
   private Options opts;
 
@@ -368,6 +372,10 @@ public class Client {
         "If container could retry, it specifies max retires");
     opts.addOption("container_retry_interval", true,
         "Interval between each retry, unit is milliseconds");
+    opts.addOption("docker_client_config", true,
+        "The docker client configuration path. The scheme should be supplied"
+            + " (i.e. file:// or hdfs://)."
+            + " Only used when the Docker runtime is enabled and requested.");
     opts.addOption("placement_spec", true,
         "Placement specification. Please note, if this option is specified,"
             + " The \"num_containers\" option will be ignored. All requested"
@@ -585,6 +593,9 @@ public class Client {
             "Flow run is not a valid long value", e);
       }
     }
+    if (cliParser.hasOption("docker_client_config")) {
+      dockerClientConfig = cliParser.getOptionValue("docker_client_config");
+    }
     return true;
   }
 
@@ -884,9 +895,10 @@ public class Client {
     // amContainer.setServiceData(serviceData);
 
     // Setup security tokens
+    Credentials rmCredentials = null;
     if (UserGroupInformation.isSecurityEnabled()) {
       // Note: Credentials class is marked as LimitedPrivate for HDFS and MapReduce
-      Credentials credentials = new Credentials();
+      rmCredentials = new Credentials();
       String tokenRenewer = YarnClientUtils.getRmPrincipal(conf);
       if (tokenRenewer == null || tokenRenewer.length() == 0) {
         throw new IOException(
@@ -895,16 +907,32 @@ public class Client {
 
       // For now, only getting tokens for the default file-system.
       final Token<?> tokens[] =
-          fs.addDelegationTokens(tokenRenewer, credentials);
+          fs.addDelegationTokens(tokenRenewer, rmCredentials);
       if (tokens != null) {
         for (Token<?> token : tokens) {
           LOG.info("Got dt for " + fs.getUri() + "; " + token);
         }
       }
+    }
+
+    // Add the docker client config credentials if supplied.
+    Credentials dockerCredentials = null;
+    if (dockerClientConfig != null) {
+      dockerCredentials =
+          DockerClientConfigHandler.readCredentialsFromConfigFile(
+              new Path(dockerClientConfig), conf, appId.toString());
+    }
+
+    if (rmCredentials != null || dockerCredentials != null) {
       DataOutputBuffer dob = new DataOutputBuffer();
-      credentials.writeTokenStorageToStream(dob);
-      ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
-      amContainer.setTokens(fsTokens);
+      if (rmCredentials != null) {
+        rmCredentials.writeTokenStorageToStream(dob);
+      }
+      if (dockerCredentials != null) {
+        dockerCredentials.writeTokenStorageToStream(dob);
+      }
+      ByteBuffer tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
+      amContainer.setTokens(tokens);
     }
 
     appContext.setAMContainerSpec(amContainer);
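
The net effect of the change above is that the client can now merge two independent Credentials objects, the HDFS delegation tokens and the Docker registry credentials, into the single token buffer handed to the AM container. A condensed sketch of that merge, assuming fs, conf, appId, tokenRenewer and amContainer are already set up as in the patch (the class and helper names are illustrative):

  import java.io.IOException;
  import java.nio.ByteBuffer;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.io.DataOutputBuffer;
  import org.apache.hadoop.security.Credentials;
  import org.apache.hadoop.security.UserGroupInformation;
  import org.apache.hadoop.yarn.api.records.ApplicationId;
  import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
  import org.apache.hadoop.yarn.util.DockerClientConfigHandler;

  public final class AmTokenSetupSketch {
    // Serialize HDFS delegation tokens (when security is on) and Docker
    // registry credentials (when a client config path was supplied) into the
    // single token buffer carried by the AM's ContainerLaunchContext.
    static void setupAmTokens(FileSystem fs, Configuration conf,
        ApplicationId appId, String tokenRenewer, String dockerClientConfig,
        ContainerLaunchContext amContainer) throws IOException {
      DataOutputBuffer dob = new DataOutputBuffer();
      boolean haveTokens = false;
      if (UserGroupInformation.isSecurityEnabled()) {
        Credentials rmCredentials = new Credentials();
        fs.addDelegationTokens(tokenRenewer, rmCredentials);
        rmCredentials.writeTokenStorageToStream(dob);
        haveTokens = true;
      }
      if (dockerClientConfig != null) {
        Credentials dockerCredentials =
            DockerClientConfigHandler.readCredentialsFromConfigFile(
                new Path(dockerClientConfig), conf, appId.toString());
        dockerCredentials.writeTokenStorageToStream(dob);
        haveTokens = true;
      }
      if (haveTokens) {
        amContainer.setTokens(
            ByteBuffer.wrap(dob.getData(), 0, dob.getLength()));
      }
    }
  }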

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb2449d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/DockerCredentialTokenIdentifier.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/DockerCredentialTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/DockerCredentialTokenIdentifier.java
new file mode 100644
index 0000000..6f4deee
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/DockerCredentialTokenIdentifier.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.security;
+
+import com.google.protobuf.TextFormat;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.DockerCredentialTokenIdentifierProto;
+import org.slf4j.LoggerFactory;
+
+import java.io.DataInput;
+import java.io.DataInputStream;
+import java.io.DataOutput;
+import java.io.IOException;
+
+/**
+ * TokenIdentifier for Docker registry credentials.
+ */
+public class DockerCredentialTokenIdentifier extends TokenIdentifier {
+
+  private static final org.slf4j.Logger LOG =
+      LoggerFactory.getLogger(DockerCredentialTokenIdentifier.class);
+
+  private DockerCredentialTokenIdentifierProto proto;
+  public static final Text KIND = new Text("DOCKER_CLIENT_CREDENTIAL_TOKEN");
+
+  public DockerCredentialTokenIdentifier(String registryUrl,
+      String applicationId) {
+    DockerCredentialTokenIdentifierProto.Builder builder =
+        DockerCredentialTokenIdentifierProto.newBuilder();
+    if (registryUrl != null) {
+      builder.setRegistryUrl(registryUrl);
+    }
+    if (applicationId != null) {
+      builder.setApplicationId(applicationId);
+    }
+    proto = builder.build();
+  }
+
+  /**
+   * Default constructor needed for the Service Loader.
+   */
+  public DockerCredentialTokenIdentifier() {
+  }
+
+  /**
+   * Write the TokenIdentifier to the output stream.
+   *
+   * @param out <code>DataOutput</code> to serialize this object into.
+   * @throws IOException if the write fails.
+   */
+  @Override
+  public void write(DataOutput out) throws IOException {
+    out.write(proto.toByteArray());
+  }
+
+  /**
+   * Populate the Proto object with the input.
+   *
+   * @param in <code>DataInput</code> to deserialize this object from.
+   * @throws IOException if the read fails.
+   */
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    proto = DockerCredentialTokenIdentifierProto.parseFrom((DataInputStream)in);
+  }
+
+  /**
+   * Return the ProtoBuf formatted data.
+   *
+   * @return the ProtoBuf representation of the data.
+   */
+  public DockerCredentialTokenIdentifierProto getProto() {
+    return proto;
+  }
+
+  /**
+   * Return the TokenIdentifier kind.
+   *
+   * @return the TokenIdentifier kind.
+   */
+  @Override
+  public Text getKind() {
+    return KIND;
+  }
+
+  /**
+   * Return a remote user based on the registry URL and Application ID.
+   *
+   * @return a remote user based on the registry URL and Application ID.
+   */
+  @Override
+  public UserGroupInformation getUser() {
+    return UserGroupInformation.createRemoteUser(
+        getRegistryUrl() + "-" + getApplicationId());
+  }
+
+  /**
+   * Get the registry URL.
+   *
+   * @return the registry URL.
+   */
+  public String getRegistryUrl() {
+    String registryUrl = null;
+    if (proto.hasRegistryUrl()) {
+      registryUrl = proto.getRegistryUrl();
+    }
+    return registryUrl;
+  }
+
+  /**
+   * Get the application ID.
+   *
+   * @return the application ID.
+   */
+  public String getApplicationId() {
+    String applicationId = null;
+    if (proto.hasApplicationId()) {
+      applicationId = proto.getApplicationId();
+    }
+    return applicationId;
+  }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null) {
+      return false;
+    }
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return TextFormat.shortDebugString(getProto());
+  }
+}
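
For illustration, the identifier above gets wrapped into a Hadoop Token with the registry's base64 "auth" value as the token password, the same construction DockerClientConfigHandler performs below. A sketch with illustrative values and class name:

  import java.nio.charset.Charset;

  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.security.token.Token;
  import org.apache.hadoop.yarn.security.DockerCredentialTokenIdentifier;

  public final class DockerTokenSketch {
    // The identifier carries the registry URL and application ID; the base64
    // "auth" value from config.json becomes the token password.
    static Token<DockerCredentialTokenIdentifier> toToken(String registryUrl,
        String applicationId, String base64Auth) {
      DockerCredentialTokenIdentifier tokenId =
          new DockerCredentialTokenIdentifier(registryUrl, applicationId);
      return new Token<>(tokenId.getBytes(),
          base64Auth.getBytes(Charset.forName("UTF-8")),
          tokenId.getKind(), new Text(registryUrl));
    }
  }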

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb2449d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java
new file mode 100644
index 0000000..98bdbdd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.util;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.io.DataInputByteBuffer;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.security.DockerCredentialTokenIdentifier;
+import org.codehaus.jackson.JsonFactory;
+import org.codehaus.jackson.JsonNode;
+import org.codehaus.jackson.JsonParser;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.node.ObjectNode;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
+import java.util.Iterator;
+
+/**
+ * Commonly needed actions for handling the Docker client configurations.
+ *
+ * Credentials that are used to access private Docker registries are supplied.
+ * Actions include:
+ * <ul>
+ *   <li>Read the Docker client configuration json file from a
+ *   {@link FileSystem}.</li>
+ *   <li>Extract the authentication information from the configuration into
+ *   {@link Token} and {@link Credentials} objects.</li>
+ *   <li>Tokens are commonly shipped via the
+ *   {@link org.apache.hadoop.yarn.api.records.ContainerLaunchContext} as a
+ *   {@link ByteBuffer}; extract the {@link Credentials} from it.</li>
+ *   <li>Write the Docker client configuration json back to the local filesystem
+ *   to be used by the Docker command line.</li>
+ * </ul>
+ */
+public final class DockerClientConfigHandler {
+  private static final org.slf4j.Logger LOG =
+      LoggerFactory.getLogger(DockerClientConfigHandler.class);
+
+  private static final String CONFIG_AUTHS_KEY = "auths";
+  private static final String CONFIG_AUTH_KEY = "auth";
+
+  private DockerClientConfigHandler() { }
+
+  /**
+   * Read the Docker client configuration and extract the auth tokens into
+   * Credentials.
+   *
+   * @param configFile the Path to the Docker client configuration.
+   * @param conf the Configuration object, needed by the FileSystem.
+   * @param applicationId the application ID to associate the Credentials with.
+   * @return the populated Credential object with the Docker Tokens.
+   * @throws IOException if the file can not be read.
+   */
+  public static Credentials readCredentialsFromConfigFile(Path configFile,
+      Configuration conf, String applicationId) throws IOException {
+    // Read the config file
+    String contents = null;
+    configFile = new Path(configFile.toUri());
+    FileSystem fs = configFile.getFileSystem(conf);
+    if (fs != null) {
+      FSDataInputStream fileHandle = fs.open(configFile);
+      if (fileHandle != null) {
+        contents = IOUtils.toString(fileHandle);
+      }
+    }
+    if (contents == null) {
+      throw new IOException("Failed to read Docker client configuration: "
+          + configFile);
+    }
+
+    // Parse the JSON and create the Tokens/Credentials.
+    ObjectMapper mapper = new ObjectMapper();
+    JsonFactory factory = mapper.getJsonFactory();
+    JsonParser parser = factory.createJsonParser(contents);
+    JsonNode rootNode = mapper.readTree(parser);
+
+    Credentials credentials = new Credentials();
+    if (rootNode.has(CONFIG_AUTHS_KEY)) {
+      Iterator<String> iter = rootNode.get(CONFIG_AUTHS_KEY).getFieldNames();
+      for (; iter.hasNext();) {
+        String registryUrl = iter.next();
+        String registryCred = rootNode.get(CONFIG_AUTHS_KEY)
+            .get(registryUrl)
+            .get(CONFIG_AUTH_KEY)
+            .asText();
+        TokenIdentifier tokenId =
+            new DockerCredentialTokenIdentifier(registryUrl, applicationId);
+        Token<DockerCredentialTokenIdentifier> token =
+            new Token<>(tokenId.getBytes(),
+                registryCred.getBytes(Charset.forName("UTF-8")),
+                tokenId.getKind(), new Text(registryUrl));
+        credentials.addToken(
+            new Text(registryUrl + "-" + applicationId), token);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Added token: " + token.toString());
+        }
+      }
+    }
+    return credentials;
+  }
+
+  /**
+   * Convert the Token ByteBuffer to the appropriate Credentials object.
+   *
+   * @param tokens the Tokens from the ContainerLaunchContext.
+   * @return the Credentials object populated from the Tokens.
+   */
+  public static Credentials getCredentialsFromTokensByteBuffer(
+      ByteBuffer tokens) throws IOException {
+    Credentials credentials = new Credentials();
+    DataInputByteBuffer dibb = new DataInputByteBuffer();
+    tokens.rewind();
+    dibb.reset(tokens);
+    credentials.readTokenStorageStream(dibb);
+    tokens.rewind();
+    if (LOG.isDebugEnabled()) {
+      for (Token token : credentials.getAllTokens()) {
+        LOG.debug("Added token: " + token.toString());
+      }
+    }
+    return credentials;
+  }
+
+  /**
+   * Extract the Docker related tokens from the Credentials and write the Docker
+   * client configuration to the supplied File.
+   *
+   * @param outConfigFile the File to write the Docker client configuration to.
+   * @param credentials the populated Credentials object.
+   * @throws IOException if the write fails.
+   */
+  public static void writeDockerCredentialsToPath(File outConfigFile,
+      Credentials credentials) throws IOException {
+    ObjectMapper mapper = new ObjectMapper();
+    ObjectNode rootNode = mapper.createObjectNode();
+    ObjectNode registryUrlNode = mapper.createObjectNode();
+    if (credentials.numberOfTokens() > 0) {
+      for (Token<? extends TokenIdentifier> tk : credentials.getAllTokens()) {
+        if (tk.getKind().equals(DockerCredentialTokenIdentifier.KIND)) {
+          DockerCredentialTokenIdentifier ti =
+              (DockerCredentialTokenIdentifier) tk.decodeIdentifier();
+          ObjectNode registryCredNode = mapper.createObjectNode();
+          registryUrlNode.put(ti.getRegistryUrl(), registryCredNode);
+          registryCredNode.put(CONFIG_AUTH_KEY,
+              new String(tk.getPassword(), Charset.forName("UTF-8")));
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Prepared token for write: " + tk.toString());
+          }
+        }
+      }
+    }
+    rootNode.put(CONFIG_AUTHS_KEY, registryUrlNode);
+    String json =
+        mapper.writerWithDefaultPrettyPrinter().writeValueAsString(rootNode);
+    FileUtils.writeStringToFile(outConfigFile, json, Charset.defaultCharset());
+  }
+}
\ No newline at end of file
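
Putting the handler's three public methods together, the intended round trip is: read config.json into Credentials on the client, ship them as a token ByteBuffer, then rebuild a config.json for the docker CLI where the container runs. A minimal sketch of the container-side half, assuming the tokens ByteBuffer from the ContainerLaunchContext and a caller-chosen output file (the class name is illustrative):

  import java.io.File;
  import java.io.IOException;
  import java.nio.ByteBuffer;

  import org.apache.hadoop.security.Credentials;
  import org.apache.hadoop.yarn.util.DockerClientConfigHandler;

  public final class DockerConfigRoundTripSketch {
    // Rebuild a config.json for the docker CLI from the tokens shipped with
    // the container launch context.
    static void writeConfigForContainer(ByteBuffer tokens, File outConfig)
        throws IOException {
      Credentials credentials =
          DockerClientConfigHandler.getCredentialsFromTokensByteBuffer(tokens);
      DockerClientConfigHandler.writeDockerCredentialsToPath(outConfig,
          credentials);
    }
  }

Keeping the registry credentials as Hadoop Tokens means they ride the existing ContainerLaunchContext token plumbing rather than a separate side channel.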

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb2449d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/proto/yarn_security_token.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/proto/yarn_security_token.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/proto/yarn_security_token.proto
index 9aabd48..16e11aa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/proto/yarn_security_token.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/proto/yarn_security_token.proto
@@ -72,3 +72,8 @@ message YARNDelegationTokenIdentifierProto {
   optional int32 masterKeyId = 7;
 }
 
+message DockerCredentialTokenIdentifierProto {
+  optional string registryUrl = 1;
+  optional string applicationId = 2;
+}
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb2449d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
index a4ad548..a8eaa52 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
@@ -17,3 +17,4 @@ org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier
 org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier
 org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier
 org.apache.hadoop.yarn.security.NMTokenIdentifier
+org.apache.hadoop.yarn.security.DockerCredentialTokenIdentifier

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb2449d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestDockerClientConfigHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestDockerClientConfigHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestDockerClientConfigHandler.java
new file mode 100644
index 0000000..c4cbe45
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestDockerClientConfigHandler.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.security;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.util.DockerClientConfigHandler;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileWriter;
+import java.nio.ByteBuffer;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test the functionality of the DockerClientConfigHandler.
+ */
+public class TestDockerClientConfigHandler {
+
+  public static final String JSON = "{\"auths\": "
+      + "{\"https://index.docker.io/v1/\": "
+      + "{\"auth\": \"foobarbaz\"},"
+      + "\"registry.example.com\": "
+      + "{\"auth\": \"bazbarfoo\"}}}";
+  private static final String APPLICATION_ID = "application_2313_2131341";
+
+  private File file;
+  private Configuration conf = new Configuration();
+
+  @Before
+  public void setUp() throws Exception {
+    file = File.createTempFile("docker-client-config", "test");
+    file.deleteOnExit();
+    BufferedWriter bw = new BufferedWriter(new FileWriter(file));
+    bw.write(JSON);
+    bw.close();
+  }
+
+  @Test
+  public void testReadCredentialsFromConfigFile() throws Exception {
+    Credentials credentials =
+        DockerClientConfigHandler.readCredentialsFromConfigFile(
+            new Path(file.toURI()), conf, APPLICATION_ID);
+    Token token1 = credentials.getToken(
+        new Text("https://index.docker.io/v1/-" + APPLICATION_ID));
+    assertEquals(DockerCredentialTokenIdentifier.KIND, token1.getKind());
+    assertEquals("foobarbaz", new String(token1.getPassword()));
+    DockerCredentialTokenIdentifier ti1 =
+        (DockerCredentialTokenIdentifier) token1.decodeIdentifier();
+    assertEquals("https://index.docker.io/v1/", ti1.getRegistryUrl());
+    assertEquals(APPLICATION_ID, ti1.getApplicationId());
+
+    Token token2 = credentials.getToken(
+        new Text("registry.example.com-" + APPLICATION_ID));
+    assertEquals(DockerCredentialTokenIdentifier.KIND, token2.getKind());
+    assertEquals("bazbarfoo", new String(token2.getPassword()));
+    DockerCredentialTokenIdentifier ti2 =
+        (DockerCredentialTokenIdentifier) token2.decodeIdentifier();
+    assertEquals("registry.example.com", ti2.getRegistryUrl());
+    assertEquals(APPLICATION_ID, ti2.getApplicationId());
+  }
+
+  @Test
+  public void testGetCredentialsFromTokensByteBuffer() throws Exception {
+    Credentials credentials =
+        DockerClientConfigHandler.readCredentialsFromConfigFile(
+            new Path(file.toURI()), conf, APPLICATION_ID);
+    DataOutputBuffer dob = new DataOutputBuffer();
+    credentials.writeTokenStorageToStream(dob);
+    ByteBuffer tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
+    Credentials credentialsOut =
+        DockerClientConfigHandler.getCredentialsFromTokensByteBuffer(tokens);
+    assertEquals(credentials.numberOfTokens(), credentialsOut.numberOfTokens());
+    for (Token<? extends TokenIdentifier> tkIn : credentials.getAllTokens()) {
+      DockerCredentialTokenIdentifier ti =
+          (DockerCredentialTokenIdentifier) tkIn.decodeIdentifier();
+      Token tkOut = credentialsOut.getToken(
+          new Text(ti.getRegistryUrl() + "-" + ti.getApplicationId()));
+      assertEquals(tkIn.getKind(), tkOut.getKind());
+      assertEquals(new String(tkIn.getIdentifier()),
+          new String(tkOut.getIdentifier()));
+      assertEquals(new String(tkIn.getPassword()),
+          new String(tkOut.getPassword()));
+      assertEquals(tkIn.getService(), tkOut.getService());
+    }
+  }
+
+  @Test
+  public void testWriteDockerCredentialsToPath() throws Exception {
+    File outFile = File.createTempFile("docker-client-config", "out");
+    outFile.deleteOnExit();
+    Credentials credentials =
+        DockerClientConfigHandler.readCredentialsFromConfigFile(
+            new Path(file.toURI()), conf, APPLICATION_ID);
+    DockerClientConfigHandler.writeDockerCredentialsToPath(outFile,
+        credentials);
+    assertTrue(outFile.exists());
+    String fileContents = FileUtils.readFileToString(outFile);
+    assertTrue(fileContents.contains("auths"));
+    assertTrue(fileContents.contains("registry.example.com"));
+    assertTrue(fileContents.contains("https://index.docker.io/v1/"));
+    assertTrue(fileContents.contains("foobarbaz"));
+    assertTrue(fileContents.contains("bazbarfoo"));
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb2449d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index f95642b..401fc4a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -21,6 +21,7 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerCommandExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerKillCommand;
@@ -28,6 +29,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerVolumeCommand;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.DockerCommandPlugin;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.ResourcePlugin;
+import org.apache.hadoop.yarn.util.DockerClientConfigHandler;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -58,8 +60,11 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.Contai
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeConstants;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext;
 
+import java.io.File;
+import java.io.IOException;
 import java.net.InetAddress;
 import java.net.UnknownHostException;
+import java.nio.ByteBuffer;
 import java.nio.file.Files;
 import java.nio.file.Paths;
 import java.util.ArrayList;
@@ -846,6 +851,8 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
       runCommand.setPrivileged();
     }
 
+    addDockerClientConfigToRunCommand(ctx, runCommand);
+
     String resourcesOpts = ctx.getExecutionAttribute(RESOURCES_OPTIONS);
 
     addCGroupParentIfRequired(resourcesOpts, containerIdStr, runCommand);
@@ -1181,4 +1188,36 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
       }
     }
   }
+
+  private void addDockerClientConfigToRunCommand(ContainerRuntimeContext ctx,
+      DockerRunCommand dockerRunCommand) throws ContainerExecutionException {
+    ByteBuffer tokens = ctx.getContainer().getLaunchContext().getTokens();
+    Credentials credentials;
+    if (tokens != null) {
+      tokens.rewind();
+      if (tokens.hasRemaining()) {
+        try {
+          credentials = DockerClientConfigHandler
+              .getCredentialsFromTokensByteBuffer(tokens);
+        } catch (IOException e) {
+          throw new ContainerExecutionException("Unable to read tokens.");
+        }
+        if (credentials.numberOfTokens() > 0) {
+          Path nmPrivateDir =
+              ctx.getExecutionAttribute(NM_PRIVATE_CONTAINER_SCRIPT_PATH)
+                  .getParent();
+          File dockerConfigPath = new File(nmPrivateDir + "/config.json");
+          try {
+            DockerClientConfigHandler
+                .writeDockerCredentialsToPath(dockerConfigPath, credentials);
+          } catch (IOException e) {
+            throw new ContainerExecutionException(
+                "Unable to write Docker client credentials to "
+                    + dockerConfigPath);
+          }
+          dockerRunCommand.setClientConfigDir(dockerConfigPath.getParent());
+        }
+      }
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb2449d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommand.java
index 7802209..0124c83 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommand.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommand.java
@@ -88,4 +88,20 @@ public abstract class DockerCommand {
     }
     return ret.toString();
   }
+
+  /**
+   * Add the client configuration directory to the docker command.
+   *
+   * The client configuration option proceeds any of the docker subcommands
+   * (such as run, load, pull, etc). Ordering will be handled by
+   * container-executor. Docker expects the value to be a directory containing
+   * the file config.json. This file is typically generated via docker login.
+   *
+   * @param clientConfigDir - directory containing the docker client config.
+   */
+  public void setClientConfigDir(String clientConfigDir) {
+    if (clientConfigDir != null) {
+      addCommandArguments("docker-config", clientConfigDir);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb2449d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index 2015ab0..e9cf765 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -24,11 +24,16 @@ import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
+import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.util.DockerClientConfigHandler;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.security.TestDockerClientConfigHandler;
 import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
@@ -56,12 +61,18 @@ import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.BufferedWriter;
 import java.io.File;
 import java.io.FileInputStream;
+import java.io.FileWriter;
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.nio.charset.Charset;
 import java.nio.file.Files;
 import java.nio.file.Paths;
+import java.nio.file.attribute.FileAttribute;
+import java.nio.file.attribute.PosixFilePermission;
+import java.nio.file.attribute.PosixFilePermissions;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -69,6 +80,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
+import java.util.Set;
 
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.APPID;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.APPLICATION_LOCAL_DIRS;
@@ -1700,6 +1712,103 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals("DAC_OVERRIDE", it.next());
   }
 
+  @Test
+  public void testLaunchContainerWithDockerTokens()
+      throws ContainerExecutionException, PrivilegedOperationException,
+      IOException {
+    // Write the JSON to a temp file.
+    File file = File.createTempFile("docker-client-config", "runtime-test");
+    file.deleteOnExit();
+    BufferedWriter bw = new BufferedWriter(new FileWriter(file));
+    bw.write(TestDockerClientConfigHandler.JSON);
+    bw.close();
+
+    // Get the credentials object with the Tokens.
+    Credentials credentials = DockerClientConfigHandler
+        .readCredentialsFromConfigFile(new Path(file.toURI()), conf, appId);
+    DataOutputBuffer dob = new DataOutputBuffer();
+    credentials.writeTokenStorageToStream(dob);
+    ByteBuffer tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
+
+    // Configure the runtime and launch the container
+    when(context.getTokens()).thenReturn(tokens);
+    DockerLinuxContainerRuntime runtime =
+        new DockerLinuxContainerRuntime(mockExecutor, mockCGroupsHandler);
+    runtime.initialize(conf, null);
+
+    Set<PosixFilePermission> perms =
+        PosixFilePermissions.fromString("rwxr-xr--");
+    FileAttribute<Set<PosixFilePermission>> attr =
+        PosixFilePermissions.asFileAttribute(perms);
+    Path outDir = new Path(
+        Files.createTempDirectory("docker-client-config-out", attr).toUri()
+            .getPath() + "/launch_container.sh");
+    builder.setExecutionAttribute(NM_PRIVATE_CONTAINER_SCRIPT_PATH, outDir);
+    runtime.launchContainer(builder.build());
+    PrivilegedOperation op = capturePrivilegedOperation();
+    Assert.assertEquals(
+        PrivilegedOperation.OperationType.LAUNCH_DOCKER_CONTAINER,
+            op.getOperationType());
+
+    List<String> args = op.getArguments();
+
+    int expectedArgs = 13;
+    int argsCounter = 0;
+    Assert.assertEquals(expectedArgs, args.size());
+    Assert.assertEquals(runAsUser, args.get(argsCounter++));
+    Assert.assertEquals(user, args.get(argsCounter++));
+    Assert.assertEquals(Integer.toString(
+        PrivilegedOperation.RunAsUserCommand.LAUNCH_DOCKER_CONTAINER
+            .getValue()), args.get(argsCounter++));
+    Assert.assertEquals(appId, args.get(argsCounter++));
+    Assert.assertEquals(containerId, args.get(argsCounter++));
+    Assert.assertEquals(containerWorkDir.toString(), args.get(argsCounter++));
+    Assert.assertEquals(outDir.toUri().getPath(), args.get(argsCounter++));
+    Assert.assertEquals(nmPrivateTokensPath.toUri().getPath(),
+        args.get(argsCounter++));
+    Assert.assertEquals(pidFilePath.toString(), args.get(argsCounter++));
+    Assert.assertEquals(localDirs.get(0), args.get(argsCounter++));
+    Assert.assertEquals(logDirs.get(0), args.get(argsCounter++));
+    String dockerCommandFile = args.get(argsCounter++);
+    Assert.assertEquals(resourcesOptions, args.get(argsCounter));
+
+    List<String> dockerCommands = Files
+        .readAllLines(Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
+
+    int expected = 15;
+    int counter = 0;
+    Assert.assertEquals(expected, dockerCommands.size());
+    Assert.assertEquals("[docker-command-execution]",
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  cap-add=SYS_CHROOT,NET_BIND_SERVICE",
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
+    Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
+    Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
+    Assert.assertEquals("  docker-config=" + outDir.getParent(),
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  group-add=" + String.join(",", groups),
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  hostname=ctr-id", dockerCommands.get(counter++));
+    Assert.assertEquals("  image=busybox:latest",
+        dockerCommands.get(counter++));
+    Assert.assertEquals(
+        "  launch-command=bash,/test_container_work_dir/launch_container.sh",
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  name=container_id", dockerCommands.get(counter++));
+    Assert.assertEquals("  net=host", dockerCommands.get(counter++));
+    Assert.assertEquals(
+        "  rw-mounts=/test_container_local_dir:/test_container_local_dir,"
+            + "/test_filecache_dir:/test_filecache_dir,"
+            + "/test_container_work_dir:/test_container_work_dir,"
+            + "/test_container_log_dir:/test_container_log_dir,"
+            + "/test_user_local_dir:/test_user_local_dir",
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
+    Assert.assertEquals("  workdir=/test_container_work_dir",
+        dockerCommands.get(counter++));
+  }
+
   class MockRuntime extends DockerLinuxContainerRuntime {
 
     private PrivilegedOperationExecutor privilegedOperationExecutor;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb2449d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerRunCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerRunCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerRunCommand.java
index e51d7ec..19b1544 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerRunCommand.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerRunCommand.java
@@ -36,6 +36,7 @@ public class TestDockerRunCommand {
   private static final String CONTAINER_NAME = "foo";
   private static final String USER_ID = "user_id";
   private static final String IMAGE_NAME = "image_name";
+  private static final String CLIENT_CONFIG_PATH = "/path/to/client.json";
 
   @Before
   public void setUp() throws Exception {
@@ -77,4 +78,11 @@ public class TestDockerRunCommand {
             .get("launch-command")));
     assertEquals(7, dockerRunCommand.getDockerCommandWithArguments().size());
   }
+
+  @Test
+  public void testSetClientConfigDir() {
+    dockerRunCommand.setClientConfigDir(CLIENT_CONFIG_PATH);
+    assertEquals(CLIENT_CONFIG_PATH, StringUtils.join(",",
+        dockerRunCommand.getDockerCommandWithArguments().get("docker-config")));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb2449d5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
index 442ce09..2efba3b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
@@ -380,11 +380,14 @@ For [YARN Service HTTPD example](./yarn-service/Examples.html), container-execut
 Connecting to a Secure Docker Repository
 ----------------------------------------
 
-Until YARN-5428 is complete, the Docker client command will draw its
-configuration from the default location, which is $HOME/.docker/config.json on
-the NodeManager host. The Docker configuration is where secure repository
-credentials are stored, so use of the LCE with secure Docker repos is
-discouraged until YARN-5428 is complete.
+The Docker client command will draw its configuration from the default location,
+which is $HOME/.docker/config.json on the NodeManager host. The Docker
+configuration is where secure repository credentials are stored, so using the
+LCE with secure Docker repos in this way is discouraged.
+
+YARN-5428 added support to Distributed Shell for securely supplying the Docker
+client configuration. See the Distributed Shell help for usage. Support for
+additional frameworks is planned.
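+
+For framework authors, the integration amounts to converting a Docker client
+configuration into credential tokens carried in the container launch context.
+The sketch below is illustrative only (class and method names are examples,
+not a prescribed API); it uses the `DockerClientConfigHandler` utility:
+
+```java
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.util.DockerClientConfigHandler;
+
+public class DockerConfigTokens {
+  /** Turn a docker config.json into the token buffer YARN expects. */
+  public static ByteBuffer tokensFromConfig(String configJson,
+      Configuration conf, String appId) throws Exception {
+    Credentials credentials = DockerClientConfigHandler
+        .readCredentialsFromConfigFile(new Path(configJson), conf, appId);
+    DataOutputBuffer dob = new DataOutputBuffer();
+    credentials.writeTokenStorageToStream(dob);
+    return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
+  }
+
+  /** The NodeManager runtime reads these tokens back at container launch. */
+  public static void attach(ContainerLaunchContext ctx, ByteBuffer tokens) {
+    ctx.setTokens(tokens);
+  }
+}
+```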
 
 As a work-around, you may manually log the Docker daemon on every NodeManager
 host into the secure repo using the Docker login command:


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[41/50] [abbrv] hadoop git commit: HADOOP-15076. Enhance S3A troubleshooting documents and add a performance document. Contributed by Steve Loughran.

Posted by ae...@apache.org.
HADOOP-15076. Enhance S3A troubleshooting documents and add a performance document.
Contributed by Steve Loughran.

(cherry picked from commit c761e658f6594c4e519ed39ef36669de2c5cee15)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b27ab7dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b27ab7dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b27ab7dd

Branch: refs/heads/HDFS-7240
Commit: b27ab7dd81359df0a7594ebb98e656a41cd19250
Parents: c9a373f
Author: Steve Loughran <st...@apache.org>
Authored: Thu Feb 15 14:57:56 2018 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Thu Feb 15 14:57:56 2018 +0000

----------------------------------------------------------------------
 .../markdown/tools/hadoop-aws/encryption.md     |  21 +-
 .../src/site/markdown/tools/hadoop-aws/index.md |  77 +-
 .../markdown/tools/hadoop-aws/performance.md    | 518 +++++++++++++
 .../tools/hadoop-aws/troubleshooting_s3a.md     | 753 ++++++++++++-------
 4 files changed, 1029 insertions(+), 340 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b27ab7dd/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md
index 719c5e5..54398d7 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/encryption.md
@@ -37,6 +37,8 @@ and keys with which the file was encrypted.
 * You can use AWS bucket policies to mandate encryption rules for a bucket.
 * You can use S3A per-bucket configuration to ensure that S3A clients use encryption
 policies consistent with the mandated rules.
+* You can use S3 Default Encryption to encrypt data without needing to
+set anything in the client.
 * Changing the encryption options on the client does not change how existing
 files were encrypted, except when the files are renamed.
 * For all mechanisms other than SSE-C, clients do not need any configuration
@@ -58,9 +60,10 @@ The server-side "SSE" encryption is performed with symmetric AES256 encryption;
 S3 offers different mechanisms for actually defining the key to use.
 
 
-There are thrre key management mechanisms, which in order of simplicity of use,
+There are four key management mechanisms, which in order of simplicity of use,
 are:
 
+* S3 Default Encryption
 * SSE-S3: an AES256 key is generated in S3, and saved alongside the data.
 * SSE-KMS: an AES256 key is generated in S3, and encrypted with a secret key provided
 by Amazon's Key Management Service, a key referenced by name in the uploading client.
@@ -68,6 +71,19 @@ by Amazon's Key Management Service, a key referenced by name in the uploading cl
 to encrypt and decrypt the data.
 
 
+## <a name="sse-s3"></a> S3 Default Encryption
+
+This feature allows the administrators of the AWS account to set the "default"
+encryption policy on a bucket: the encryption to use if the client does
+not explicitly declare an encryption algorithm.
+
+[S3 Default Encryption for S3 Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
+
+This supports SSE-S3 and SSE-KMS.
+
+There is no need to set anything up in the client: do it in the AWS console.
+
+
 ## <a name="sse-s3"></a> SSE-S3 Amazon S3-Managed Encryption Keys
 
 In SSE-S3, all keys and secrets are managed inside S3. This is the simplest encryption mechanism.
@@ -413,7 +429,6 @@ How can you do that from Hadoop? With `rename()`.
 
 The S3A client mimics a real filesystem's rename operation by copying all the
 source files to the destination paths, then deleting the old ones.
-If you do a rename()
 
 Note: this does not work for SSE-C, because you cannot set a different key
 for reading as for writing, and you must supply that key for reading. There
@@ -421,7 +436,7 @@ you need to copy one bucket to a different bucket, one with a different key.
 Use `distCp`for this, with per-bucket encryption policies.
 
 
-## <a name="Troubleshooting"></a> Troubleshooting Encryption
+## <a name="troubleshooting"></a> Troubleshooting Encryption
 
 The [troubleshooting](./troubleshooting_s3a.html) document covers
 stack traces which may surface when working with encrypted data.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b27ab7dd/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 0e03100..edf392d 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -25,6 +25,7 @@ Please use `s3a:` as the connector to data hosted in S3 with Apache Hadoop.**
 See also:
 
 * [Encryption](./encryption.html)
+* [Performance](./performance.html)
 * [S3Guard](./s3guard.html)
 * [Troubleshooting](./troubleshooting_s3a.html)
 * [Committing work to S3 with the "S3A Committers"](./committers.html)
@@ -1580,80 +1581,8 @@ The S3A Filesystem client supports the notion of input policies, similar
 to that of the Posix `fadvise()` API call. This tunes the behavior of the S3A
 client to optimise HTTP GET requests for the different use cases.
 
-*"sequential"*
-
-Read through the file, possibly with some short forward seeks.
-
-The whole document is requested in a single HTTP request; forward seeks
-within the readahead range are supported by skipping over the intermediate
-data.
-
-This is leads to maximum read throughput —but with very expensive
-backward seeks.
-
-
-*"normal" (default)*
-
-The "Normal" policy starts off reading a file  in "sequential" mode,
-but if the caller seeks backwards in the stream, it switches from
-sequential to "random".
-
-This policy effectively recognizes the initial read pattern of columnar
-storage formats (e.g. Apache ORC and Apache Parquet), which seek to the end
-of a file, read in index data and then seek backwards to selectively read
-columns. The first seeks may be be expensive compared to the random policy,
-however the overall process is much less expensive than either sequentially
-reading through a file with the "random" policy, or reading columnar data
-with the "sequential" policy. When the exact format/recommended
-seek policy of data are known in advance, this policy
-
-*"random"*
-
-Optimised for random IO, specifically the Hadoop `PositionedReadable`
-operations —though `seek(offset); read(byte_buffer)` also benefits.
-
-Rather than ask for the whole file, the range of the HTTP request is
-set to that that of the length of data desired in the `read` operation
-(Rounded up to the readahead value set in `setReadahead()` if necessary).
-
-By reducing the cost of closing existing HTTP requests, this is
-highly efficient for file IO accessing a binary file
-through a series of `PositionedReadable.read()` and `PositionedReadable.readFully()`
-calls. Sequential reading of a file is expensive, as now many HTTP requests must
-be made to read through the file.
-
-For operations simply reading through a file: copying, distCp, reading
-Gzipped or other compressed formats, parsing .csv files, etc, the `sequential`
-policy is appropriate. This is the default: S3A does not need to be configured.
-
-For the specific case of high-performance random access IO, the `random` policy
-may be considered. The requirements are:
-
-* Data is read using the `PositionedReadable` API.
-* Long distance (many MB) forward seeks
-* Backward seeks as likely as forward seeks.
-* Little or no use of single character `read()` calls or small `read(buffer)`
-calls.
-* Applications running close to the S3 data store. That is: in EC2 VMs in
-the same datacenter as the S3 instance.
-
-The desired fadvise policy must be set in the configuration option
-`fs.s3a.experimental.input.fadvise` when the filesystem instance is created.
-That is: it can only be set on a per-filesystem basis, not on a per-file-read
-basis.
-
-    <property>
-      <name>fs.s3a.experimental.input.fadvise</name>
-      <value>random</value>
-      <description>Policy for reading files.
-       Values: 'random', 'sequential' or 'normal'
-       </description>
-    </property>
-
-[HDFS-2744](https://issues.apache.org/jira/browse/HDFS-2744),
-*Extend FSDataInputStream to allow fadvise* proposes adding a public API
-to set fadvise policies on input streams. Once implemented,
-this will become the supported mechanism used for configuring the input IO policy.
+See [Improving data input performance through fadvise](./performance.html#fadvise)
+for the details.
 
 ##<a name="metrics"></a>Metrics
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b27ab7dd/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/performance.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/performance.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/performance.md
new file mode 100644
index 0000000..e8f4d20
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/performance.md
@@ -0,0 +1,518 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+# Maximizing Performance when working with the S3A Connector
+
+<!-- MACRO{toc|fromDepth=0|toDepth=3} -->
+
+
+## <a name="introduction"></a> Introduction
+
+S3 is slower to work with than HDFS, even on virtual clusters running on
+Amazon EC2.
+
+That's because it's a very different system, as you can see:
+
+
+| Feature | HDFS | S3 through the S3A connector |
+|---------|------|------------------------------|
+| communication | RPC | HTTP GET/PUT/HEAD/LIST/COPY requests |
+| data locality | local storage | remote S3 servers |
+| replication | multiple datanodes | asynchronous after upload |
+| consistency | consistent data and listings | eventually consistent for listings, deletes and updates |
+| bandwidth | best: local IO, worst: datacenter network | bandwidth between servers and S3 |
+| latency | low | high, especially for "low cost" directory operations |
+| rename | fast, atomic | slow faked rename through COPY & DELETE|
+| delete | fast, atomic | fast for a file, slow & non-atomic for directories |
+| writing| incremental | in blocks; not visible until the writer is closed |
+| reading | seek() is fast | seek() is slow and expensive |
+| IOPs | limited only by hardware | callers are throttled to shards in an s3 bucket |
+| Security | Posix user+group; ACLs | AWS Roles and policies |
+
+From a performance perspective, key points to remember are:
+
+* S3 throttles bucket access across all callers: adding workers can make things worse.
+* EC2 VMs have network IO throttled based on the VM type.
+* Directory rename and copy operations take *much* longer the more objects and data there is.
+The slow performance of `rename()` surfaces during the commit phase of jobs,
+applications like `DistCP`, and elsewhere.
+* seek() calls when reading a file can force new HTTP requests.
+This can make reading columnar Parquet/ORC data expensive.
+
+Overall, although the S3A connector makes S3 look like a file system,
+it isn't, and some attempts to preserve the metaphor are "aggressively suboptimal".
+
+To make most efficient use of S3, care is needed.
+
+## <a name="s3guard"></a> Speeding up directory listing operations through S3Guard
+
+[S3Guard](s3guard.html) provides significant speedups for operations which
+list files a lot. This includes the setup of all queries against data:
+MapReduce, Hive and Spark, as well as DistCP.
+
+
+Experiment with using it to see what speedup it delivers.
+
+
+## <a name="fadvise"></a> Improving data input performance through fadvise
+
+The S3A Filesystem client supports the notion of input policies, similar
+to that of the Posix `fadvise()` API call. This tunes the behavior of the S3A
+client to optimise HTTP GET requests for the different use cases.
+
+### fadvise `sequential`
+
+Read through the file, possibly with some short forward seeks.
+
+The whole document is requested in a single HTTP request; forward seeks
+within the readahead range are supported by skipping over the intermediate
+data.
+
+This delivers maximum sequential throughput —but with very expensive
+backward seeks.
+
+Applications reading a file in bulk (DistCP, any copy operations) should use
+sequential access, as should those reading data from gzipped `.gz` files.
+Because the "normal" fadvise policy starts off in sequential IO mode,
+there is rarely any need to explicitly request this policy.
+
+### fadvise `random`
+
+Optimised for random IO, specifically the Hadoop `PositionedReadable`
+operations —though `seek(offset); read(byte_buffer)` also benefits.
+
+Rather than ask for the whole file, the range of the HTTP request is
+set to the length of the data desired in the `read` operation
+(Rounded up to the readahead value set in `setReadahead()` if necessary).
+
+By reducing the cost of closing existing HTTP requests, this is
+highly efficient for file IO accessing a binary file
+through a series of `PositionedReadable.read()` and `PositionedReadable.readFully()`
+calls. Sequential reading of a file is expensive, as now many HTTP requests must
+be made to read through the file: there's a delay between each GET operation.
+
+
+Random IO is best for IO with seek-heavy characteristics:
+
+* Data is read using the `PositionedReadable` API.
+* Long distance (many MB) forward seeks
+* Backward seeks as likely as forward seeks.
+* Little or no use of single character `read()` calls or small `read(buffer)`
+calls.
+* Applications running close to the S3 data store. That is: in EC2 VMs in
+the same datacenter as the S3 instance.
+
+The desired fadvise policy must be set in the configuration option
+`fs.s3a.experimental.input.fadvise` when the filesystem instance is created.
+That is: it can only be set on a per-filesystem basis, not on a per-file-read
+basis.
+
+```xml
+<property>
+  <name>fs.s3a.experimental.input.fadvise</name>
+  <value>random</value>
+  <description>
+  Policy for reading files.
+  Values: 'random', 'sequential' or 'normal'
+   </description>
+</property>
+```
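+
+One way to do this from application code is to set the option on the
+`Configuration` used to create the filesystem instance; a minimal sketch
+(the bucket name is illustrative):
+
+```java
+import java.net.URI;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+
+public class FadvisePolicyExample {
+  /** Create a filesystem instance whose streams use the random IO policy. */
+  public static FileSystem randomIoFs() throws Exception {
+    Configuration conf = new Configuration();
+    // Fixed for the lifetime of this FileSystem instance.
+    conf.set("fs.s3a.experimental.input.fadvise", "random");
+    // newInstance() bypasses the FileSystem cache, so this configuration
+    // is guaranteed to be the one actually used.
+    return FileSystem.newInstance(new URI("s3a://example-bucket/"), conf);
+  }
+}
+```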
+
+[HDFS-2744](https://issues.apache.org/jira/browse/HDFS-2744),
+*Extend FSDataInputStream to allow fadvise* proposes adding a public API
+to set fadvise policies on input streams. Once implemented,
+this will become the supported mechanism used for configuring the input IO policy.
+
+### fadvise `normal` (default)
+
+The `normal` policy starts off reading a file  in `sequential` mode,
+but if the caller seeks backwards in the stream, it switches from
+sequential to `random`.
+
+This policy essentially recognizes the initial read pattern of columnar
+storage formats (e.g. Apache ORC and Apache Parquet), which seek to the end
+of a file, read in index data and then seek backwards to selectively read
+columns. The first seeks may be expensive compared to the random policy,
+however the overall process is much less expensive than either sequentially
+reading through a file with the `random` policy, or reading columnar data
+with the `sequential` policy.
+
+
+## <a name="commit"></a> Committing Work in MapReduce and Spark
+
+Hadoop MapReduce, Apache Hive and Apache Spark all write their work
+to HDFS and similar filesystems.
+When using S3 as a destination, this is slow because of the way `rename()`
+is mimicked with copy and delete.
+
+If committing output takes a long time, it is because you are using the standard
+`FileOutputCommitter`. If you are doing this on any S3 endpoint which lacks
+list consistency (Amazon S3 without [S3Guard](s3guard.html)), this committer
+is at risk of losing data!
+
+*Your problem may appear to be performance, but that is a symptom
+of the underlying problem: the way S3A fakes rename operations means that
+the rename cannot be safely be used in output-commit algorithms.*
+
+Fix: Use one of the dedicated [S3A Committers](committers.html).
+
+## <a name="tuning"></a> Options to Tune
+
+### <a name="pooling"></a> Thread and connection pool sizes.
+
+Each S3A client interacting with a single bucket, as a single user, has its
+own dedicated pool of open HTTP 1.1 connections alongside a pool of threads used
+for upload and copy operations.
+The default pool sizes are intended to strike a balance between performance
+and memory/thread use.
+
+You can have a larger pool of (reused) HTTP connections and threads
+for parallel IO (especially uploads) by setting the properties
+
+
+| property | meaning | default |
+|----------|---------|---------|
+| `fs.s3a.threads.max`| Threads in the AWS transfer manager| 10 |
+| `fs.s3a.connection.maximum`| Maximum number of HTTP connections | 10|
+
+We recommend using larger values for processes which perform
+a lot of IO: `DistCp`, Spark Workers and similar.
+
+```xml
+<property>
+  <name>fs.s3a.threads.max</name>
+  <value>20</value>
+</property>
+<property>
+  <name>fs.s3a.connection.maximum</name>
+  <value>20</value>
+</property>
+```
+
+Be aware, however, that processes which perform many parallel queries
+may consume large amounts of resources if each query is working with
+a different set of S3 buckets, or is acting on behalf of a different user.
+
+### For large data uploads, tune the block size: `fs.s3a.block.size`
+
+When uploading data, it is uploaded in blocks set by the option
+`fs.s3a.block.size`; default value "32M" for 32 Megabytes.
+
+If a larger value is used, then more data is buffered before the upload
+begins:
+
+```xml
+<property>
+  <name>fs.s3a.block.size</name>
+  <value>128M</value>
+</property>
+```
+
+This means that fewer PUT/POST requests are made of S3 to upload data,
+which reduces the likelihood that S3 will throttle the client(s).
+
+### Maybe: Buffer Write Data in Memory
+
+When large files are being uploaded, blocks are saved to disk and then
+queued for uploading, with multiple threads uploading different blocks
+in parallel.
+
+The blocks can be buffered in memory by setting the option
+`fs.s3a.fast.upload.buffer` to `bytebuffer`, or, for on-heap storage
+`array`.
+
+1. Switching to in-memory IO reduces disk IO, and can be faster if the bandwidth
+to the S3 store is so high that the disk IO becomes the bottleneck.
+This can have a tangible benefit when working with on-premise S3-compatible
+object stores with very high bandwidth to servers.
+
+It is very easy to run out of memory when buffering to it; the option
+`fs.s3a.fast.upload.active.blocks` exists to tune how many active blocks
+a single output stream writing to S3 may have queued at a time.
+
+As the size of each buffered block is determined by the value of `fs.s3a.block.size`,
+the larger the block size, the more likely you will run out of memory.
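+
+As an illustration only, these options can be set on the `Configuration`
+used to create the filesystem; the values below are arbitrary examples,
+not recommendations:
+
+```java
+import org.apache.hadoop.conf.Configuration;
+
+public class UploadBufferExample {
+  public static Configuration inMemoryUploadConf() {
+    Configuration conf = new Configuration();
+    // Buffer upload blocks off-heap; use "array" for on-heap,
+    // or keep the default "disk" buffering.
+    conf.set("fs.s3a.fast.upload.buffer", "bytebuffer");
+    // Cap the number of blocks a single output stream may queue in memory.
+    conf.setInt("fs.s3a.fast.upload.active.blocks", 4);
+    return conf;
+  }
+}
+```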
+
+## <a name="distcp"></a> DistCP
+
+DistCP can be slow, especially if the parameters and options for the operation
+are not tuned for working with S3.
+
+To exacerbate the issue, DistCP invariably puts heavy load against the
+bucket being worked with, which will cause S3 to throttle requests.
+It will throttle: directory operations, uploads of new data, and delete operations,
+amongst other things.
+
+### DistCP: Options to Tune
+
+* `-numListstatusThreads <threads>` : set to something higher than the default (1).
+* `-bandwidth <mb>` : use to limit the upload bandwidth per worker
+* `-m <maps>` : limit the number of mappers, hence the load on the S3 bucket.
+
+Adding more maps with the `-m` option does not guarantee better performance;
+it may just increase the amount of throttling which takes place.
+A smaller number of maps with a higher bandwidth per map can be more efficient.
+
+### DistCP: Options to Avoid.
+
+DistCp's `-atomic` option copies up data into a directory, then renames
+it into place, which is where the copy actually takes place. This is a performance
+killer.
+
+* Do not use the `-atomic` option.
+* The `-append` operation is not supported on S3; avoid.
+* `-p` S3 does not have a POSIX-style permission model; this will fail.
+
+
+### DistCP: Parameters to Tune
+
+1. As discussed [earlier](#pooling), use large values for
+`fs.s3a.threads.max` and `fs.s3a.connection.maximum`.
+
+1. Make sure that the bucket is using `sequential` or `normal` fadvise seek policies,
+that is, `fs.s3a.experimental.input.fadvise` is not set to `random`.
+
+1. Perform listings in parallel by setting `-numListstatusThreads`
+to a higher number. Make sure that `fs.s3a.connection.maximum`
+is equal to or greater than the value used.
+
+1. If using `-delete`, set `fs.trash.interval` to 0 to prevent the deleted
+objects from being copied to a trash directory.
+
+*DO NOT* switch `fs.s3a.fast.upload.buffer` to buffer in memory.
+If one distcp mapper runs out of memory it will fail,
+and that runs the risk of failing the entire job.
+It is safer to keep the default value, `disk`.
+
+What is potentially useful is uploading in bigger blocks; this is more
+efficient in terms of HTTP connection use, and reduces the IOP rate against
+the S3 bucket/shard.
+
+```xml
+<property>
+  <name>fs.s3a.threads.max</name>
+  <value>20</value>
+</property>
+
+<property>
+  <name>fs.s3a.connection.maximum</name>
+  <value>30</value>
+  <description>
+   Make greater than both fs.s3a.threads.max and -numListstatusThreads
+   </description>
+</property>
+
+<property>
+  <name>fs.s3a.experimental.input.fadvise</name>
+  <value>normal</value>
+</property>
+
+<property>
+  <name>fs.s3a.block.size</name>
+  <value>128M</value>
+</property>
+
+<property>
+  <name>fs.s3a.fast.upload.buffer</name>
+  <value>disk</value>
+</property>
+
+<property>
+  <name>fs.trash.interval</name>
+  <value>0</value>
+</property>
+```
+
+## <a name="rm"></a> hadoop shell commands `fs -rm`
+
+The `hadoop fs -rm` command can move the file into the `.Trash` directory
+rather than deleting it. Use `-skipTrash` to eliminate that step.
+
+
+The trash policy is set in the property `fs.trash.interval`; while the default is 0,
+most HDFS deployments have it set to a non-zero value to reduce the risk of
+data loss.
+
+```xml
+<property>
+  <name>fs.trash.interval</name>
+  <value>0</value>
+</property>
+```
+
+
+## <a name="load balancing"></a> Improving S3 load-balancing behavior
+
+Amazon S3 uses a set of front-end servers to provide access to the underlying data.
+The choice of which front-end server to use is handled via load-balancing DNS
+service: when the IP address of an S3 bucket is looked up, the choice of which
+IP address to return to the client is made based on the current load
+of the front-end servers.
+
+Over time, the load across the front-end changes, so those servers considered
+"lightly loaded" will change. If the DNS value is cached for any length of time,
+your application may end up talking to an overloaded server. Or, in the case
+of failures, trying to talk to a server that is no longer there.
+
+And by default, for historical security reasons in the era of applets,
+the DNS TTL of a JVM is "infinity".
+
+To work with AWS better, set the DNS time-to-live of an application which
+works with S3 to something lower.
+See [AWS documentation](http://docs.aws.amazon.com/AWSSdkDocsJava/latest/DeveloperGuide/java-dg-jvm-ttl.html).
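+
+In a JVM application, one way to do this is via the `networkaddress.cache.ttl`
+security property, set before the first S3 hostname lookup; the values below
+are illustrative, not recommendations:
+
+```java
+import java.security.Security;
+
+public class DnsTtl {
+  public static void lowerDnsCacheTtl() {
+    // Cache successful lookups for 60 seconds instead of forever.
+    Security.setProperty("networkaddress.cache.ttl", "60");
+    // Do not cache failed lookups for long either.
+    Security.setProperty("networkaddress.cache.negative.ttl", "10");
+  }
+}
+```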
+
+## <a name="network_performance"></a> Troubleshooting network performance
+
+An example of this is covered in [HADOOP-13871](https://issues.apache.org/jira/browse/HADOOP-13871).
+
+1. For public data, use `curl`:
+
+        curl -O https://landsat-pds.s3.amazonaws.com/scene_list.gz
+1. Use `nettop` to monitor a process's connections.
+
+
+## <a name="throttling"></a> Throttling
+
+When many requests are made of a specific S3 bucket (or shard inside it),
+S3 will respond with a 503 "throttled" response.
+Throttling can be recovered from, provided overall load decreases.
+Furthermore, because the 503 is sent before any changes are made to the object store,
+a throttled request is inherently idempotent. For this reason, the client will always attempt to
+retry throttled requests.
+
+The limit of the number of times a throttled request can be retried,
+and the exponential interval increase between attempts, can be configured
+independently of the other retry limits.
+
+```xml
+<property>
+  <name>fs.s3a.retry.throttle.limit</name>
+  <value>20</value>
+  <description>
+    Number of times to retry any throttled request.
+  </description>
+</property>
+
+<property>
+  <name>fs.s3a.retry.throttle.interval</name>
+  <value>500ms</value>
+  <description>
+    Interval between retry attempts on throttled requests.
+  </description>
+</property>
+```
+
+If a client is failing due to `AWSServiceThrottledException` failures,
+increasing the interval and limit *may* address this. However, it
+is a sign of AWS services being overloaded by the sheer number of clients
+and rate of requests. Spreading data across different buckets, and/or using
+a more balanced directory structure may be beneficial.
+Consult [the AWS documentation](http://docs.aws.amazon.com/AmazonS3/latest/dev/request-rate-perf-considerations.html).
+
+Reading or writing data encrypted with SSE-KMS forces S3 to make calls of
+the AWS KMS Key Management Service, which comes with its own
+[Request Rate Limits](http://docs.aws.amazon.com/kms/latest/developerguide/limits.html).
+These default to 1200/second for an account, across all keys and all uses of
+them, which, for S3, means across all buckets with data encrypted with SSE-KMS.
+
+### <a name="minimizing_throttling"></a> Tips to Keep Throttling down
+
+If you are seeing a lot of throttling responses on a large scale
+operation like a `distcp` copy, *reduce* the number of processes trying
+to work with the bucket (for distcp: reduce the number of mappers with the
+`-m` option).
+
+If you are reading or writing lists of files and can randomize
+the list so that entries are not processed in a simple sorted order, you may
+reduce load on a specific shard of S3 data and so potentially increase throughput.
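+
+A trivial sketch of doing that when the work list is already in memory:
+
+```java
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.fs.Path;
+
+public class ShuffleWorkList {
+  /** Shuffle the work list so requests spread across the bucket's key space. */
+  public static void randomize(List<Path> files) {
+    Collections.shuffle(files);
+  }
+}
+```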
+
+An S3 Bucket is throttled by requests coming from all
+simultaneous clients. Different applications and jobs may interfere with
+each other: consider that when troubleshooting.
+Partitioning data into different buckets may help isolate load here.
+
+If you are using data encrypted with SSE-KMS, then the KMS request rate limits
+will also apply: these are stricter than the S3 numbers.
+If you believe that you are reaching these limits, you may be able to
+get them increased.
+Consult [the KMS Rate Limit documentation](http://docs.aws.amazon.com/kms/latest/developerguide/limits.html).
+
+### <a name="s3guard_throttling"></a> S3Guard and Throttling
+
+
+S3Guard uses DynamoDB for directory and file lookups;
+it is rate limited to the amount of (guaranteed) IO purchased for a
+table.
+
+The `hadoop s3guard bucket-info s3a://bucket` command will print out the
+capacity allocated to the bucket's table.
+
+
+If a significant rate of throttling events is observed here, the pre-allocated
+IOPs can be increased with the `hadoop s3guard set-capacity` command, or
+through the AWS Console. Throttling events in S3Guard are noted in logs, and
+also in the S3A metrics `s3guard_metadatastore_throttle_rate` and
+`s3guard_metadatastore_throttled`.
+
+If you are using DistCP for a large backup to/from a S3Guarded bucket, it is
+actually possible to increase the capacity for the duration of the operation.
+
+
+## <a name="coding"></a> Best Practises for Code
+
+Here are some best practices if you are writing applications to work with
+S3 or any other object store through the Hadoop APIs.
+
+Use `listFiles(path, recursive)` over `listStatus(path)`.
+The recursive `listFiles()` call can enumerate all dependents of a path
+in a single LIST call, irrespective of how deep the path is.
+In contrast, any directory tree-walk implemented in the client is issuing
+multiple HTTP requests to scan each directory, all the way down.
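+
+For example, a sketch of a single recursive listing (error handling omitted):
+
+```java
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+
+public class ListFilesExample {
+  /** Sum the size of every file under a path with one recursive listing. */
+  public static long totalBytes(FileSystem fs, Path root) throws IOException {
+    long bytes = 0;
+    RemoteIterator<LocatedFileStatus> files = fs.listFiles(root, true);
+    while (files.hasNext()) {
+      bytes += files.next().getLen();
+    }
+    return bytes;
+  }
+}
+```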
+
+Cache the outcome of `getFileStatus()`, rather than repeatedly asking for it.
+That includes using `isFile()` and `isDirectory()`, which are simply wrappers
+around `getFileStatus()`.
+
+Don't immediately look for a file with a `getFileStatus()` or listing call
+after creating it, or try to read it immediately.
+This is where eventual consistency problems surface: the data may not yet be visible.
+
+Rely on `FileNotFoundException` being raised if the source of an operation is
+missing, rather than implementing your own probe for the file before
+conditionally calling the operation.
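+
+For instance, a sketch of opening a file without a preceding existence probe:
+
+```java
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+public class OpenWithoutProbe {
+  /** Open directly; a missing file surfaces as FileNotFoundException. */
+  public static FSDataInputStream openIfPresent(FileSystem fs, Path path)
+      throws IOException {
+    try {
+      return fs.open(path);
+    } catch (FileNotFoundException e) {
+      return null;   // caller decides how to handle the absent file
+    }
+  }
+}
+```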
+
+### `rename()`
+
+Avoid any algorithm which uploads data into a temporary file and then uses
+`rename()` to commit it into place with a final path.
+On HDFS this offers a fast commit operation.
+With S3, Wasb and other object stores, you can write straight to the destination,
+knowing that the file isn't visible until you close the write: the write itself
+is atomic.
+
+The `rename()` operation may return `false` if the source is missing; this
+is a weakness in the API. Consider a check before calling rename, and if/when
+a new rename() call is made public, switch to it.
+
+
+### `delete(path, recursive)`
+
+Keep in mind that `delete(path, recursive)` is a no-op if the path does not exist, so
+there's no need to have a check for the path existing before you call it.
+
+`delete()` is often used as a cleanup operation.
+With an object store this is slow, and may cause problems if the caller
+expects an immediate response. For example, a thread may block so long
+that other liveness checks start to fail.
+Consider spawning off an executor thread to do these background cleanup operations.
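+
+A sketch of such a background cleanup; the pool sizing and error handling here
+are illustrative:
+
+```java
+import java.io.IOException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+public class AsyncCleanup {
+  private final ExecutorService cleanupPool = Executors.newSingleThreadExecutor();
+
+  /** Queue a recursive delete so the caller does not block on the object store. */
+  public void deleteInBackground(FileSystem fs, Path tempDir) {
+    cleanupPool.submit(() -> {
+      try {
+        // No existence check needed: delete() is a no-op on a missing path.
+        fs.delete(tempDir, true);
+      } catch (IOException e) {
+        // Log and continue; a failed cleanup should not fail the job.
+      }
+    });
+  }
+}
+```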

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b27ab7dd/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md
index 1f3382c..97f9642 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/troubleshooting_s3a.md
@@ -14,9 +14,9 @@
 
 # Troubleshooting
 
-<!-- MACRO{toc|fromDepth=0|toDepth=5} -->
+<!-- MACRO{toc|fromDepth=0|toDepth=3} -->
 
-##<a name="introduction"></a> Introduction
+## <a name="introduction"></a> Introduction
 
 Common problems working with S3 are
 
@@ -24,28 +24,42 @@ Common problems working with S3 are
 1. Authentication
 1. S3 Inconsistency side-effects
 
-Classpath is usually the first problem. For the S3x filesystem clients,
-you need the Hadoop-specific filesystem clients, third party S3 client libraries
-compatible with the Hadoop code, and any dependent libraries compatible with
+
+Troubleshooting IAM Assumed Roles is covered in its
+[specific documentation](assumed_roles.html#troubleshooting).
+
+## <a name="classpath"></a> Classpath Setup
+
+Classpath is usually the first problem. For the S3A filesystem client,
+you need the Hadoop-specific filesystem clients, the very same AWS SDK library
+which Hadoop was built against, and any dependent libraries compatible with
 Hadoop and the specific JVM.
 
 The classpath must be set up for the process talking to S3: if this is code
 running in the Hadoop cluster, the JARs must be on that classpath. That
 includes `distcp` and the `hadoop fs` command.
 
-<!-- MACRO{toc|fromDepth=0|toDepth=2} -->
+<b>Critical:</b> *Do not attempt to "drop in" a newer version of the AWS
+SDK than that which the Hadoop version was built with.*
+Whatever problem you have, changing the AWS SDK version will not fix things,
+only change the stack traces you see.
 
-Troubleshooting IAM Assumed Roles is covered in its
-[specific documentation](assumed_roles.html#troubeshooting).
+Similarly, don't try to mix a `hadoop-aws` JAR from one Hadoop release
+with that of any other. The JAR must be in sync with `hadoop-common` and
+some other Hadoop JARs.
 
-## <a name="classpath"></a> Classpath Setup
+<i>Randomly changing hadoop- and aws- JARs in the hope of making a problem
+"go away" or to gain access to a feature you want,
+will not lead to the outcome you desire.</i>
+
+Tip: you can use [mvnrepository](http://mvnrepository.com/artifact/org.apache.hadoop/hadoop-aws)
+to determine the dependency version requirements of a specific `hadoop-aws`
+JAR published by the ASF.
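+
+As an illustration only (the actual version numbers depend on your Hadoop release),
+a Maven dependency which keeps `hadoop-aws` in step with the rest of Hadoop, and
+which lets the `hadoop-aws` POM pull in the matching SDK bundle, looks roughly like:
+
+```xml
+<!-- Illustrative sketch: ${hadoop.version} must match the Hadoop version of the cluster. -->
+<dependency>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-aws</artifactId>
+  <version>${hadoop.version}</version>
+</dependency>
+<!-- Do not pin aws-java-sdk-bundle yourself; let hadoop-aws declare the version it was built with. -->
+```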
 
-Note that for security reasons, the S3A client does not provide much detail
-on the authentication process (i.e. the secrets used to authenticate).
 
 ### `ClassNotFoundException: org.apache.hadoop.fs.s3a.S3AFileSystem`
 
-These is Hadoop filesytem client classes, found in the `hadoop-aws` JAR.
+These are Hadoop filesystem client classes, found in the `hadoop-aws` JAR.
 An exception reporting this class as missing means that this JAR is not on
 the classpath.
 
@@ -56,7 +70,7 @@ the classpath.
 This means that the `aws-java-sdk-bundle.jar` JAR is not on the classpath:
 add it.
 
-### Missing method in `com.amazonaws` class
+### `java.lang.NoSuchMethodError` referencing a `com.amazonaws` class
 
 This can be triggered by incompatibilities between the AWS SDK on the classpath
 and the version which Hadoop was compiled with.
@@ -68,6 +82,15 @@ version.
 The sole fix is to use the same version of the AWS SDK with which Hadoop
 was built.
 
+This can also be caused by having more than one version of an AWS SDK
+JAR on the classpath. If the full `aws-java-sdk-bundle` JAR is on the
+classpath, do not add any of the `aws-sdk-` JARs.
+
+
+### `java.lang.NoSuchMethodError` referencing an `org.apache.hadoop` class
+
+This happens if the `hadoop-aws` and `hadoop-common` JARs are out of sync.
+You cannot mix them across releases: the version numbers must match exactly.
 
 ## <a name="authentication"></a> Authentication Failure
 
@@ -115,7 +138,7 @@ mechanism.
 1. If using session authentication, the session may have expired.
 Generate a new session token and secret.
 
-1. If using environement variable-based authentication, make sure that the
+1. If using environment variable-based authentication, make sure that the
 relevant variables are set in the environment in which the process is running.
 
 The standard first step is: try to use the AWS command line tools with the same
@@ -126,7 +149,6 @@ credentials, through a command such as:
 Note the trailing "/" here; without that the shell thinks you are trying to list
 your home directory under the bucket, which will only exist if explicitly created.
 
-
 Attempting to list a bucket using inline credentials is a
 means of verifying that the key and secret can access a bucket;
 
@@ -186,7 +208,9 @@ Requests using the V2 API will be rejected with 400 `Bad Request`
 $ bin/hadoop fs -ls s3a://frankfurt/
 WARN s3a.S3AFileSystem: Client: Amazon S3 error 400: 400 Bad Request; Bad Request (retryable)
 
-com.amazonaws.services.s3.model.AmazonS3Exception: Bad Request (Service: Amazon S3; Status Code: 400; Error Code: 400 Bad Request; Request ID: 923C5D9E75E44C06), S3 Extended Request ID: HDwje6k+ANEeDsM6aJ8+D5gUmNAMguOk2BvZ8PH3g9z0gpH+IuwT7N19oQOnIr5CIx7Vqb/uThE=
+com.amazonaws.services.s3.model.AmazonS3Exception: Bad Request (Service: Amazon S3;
+ Status Code: 400; Error Code: 400 Bad Request; Request ID: 923C5D9E75E44C06),
+  S3 Extended Request ID: HDwje6k+ANEeDsM6aJ8+D5gUmNAMguOk2BvZ8PH3g9z0gpH+IuwT7N19oQOnIr5CIx7Vqb/uThE=
     at com.amazonaws.http.AmazonHttpClient.handleErrorResponse(AmazonHttpClient.java:1182)
     at com.amazonaws.http.AmazonHttpClient.executeOneRequest(AmazonHttpClient.java:770)
     at com.amazonaws.http.AmazonHttpClient.executeHelper(AmazonHttpClient.java:489)
@@ -231,13 +255,129 @@ As an example, the endpoint for S3 Frankfurt is `s3.eu-central-1.amazonaws.com`:
 </property>
 ```
 
+## <a name="access_denied"></a> `AccessDeniedException` "Access Denied"
+
+### <a name="access_denied_unknown-ID"></a> AccessDeniedException "The AWS Access Key Id you provided does not exist in our records."
+
+The value of `fs.s3a.access.key` does not match a known access key ID.
+It may be mistyped, or the access key may have been deleted by one of the account managers.
+
+```
+java.nio.file.AccessDeniedException: bucket: doesBucketExist on bucket:
+    com.amazonaws.services.s3.model.AmazonS3Exception:
+    The AWS Access Key Id you provided does not exist in our records.
+     (Service: Amazon S3; Status Code: 403; Error Code: InvalidAccessKeyId;
+  at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:214)
+  at org.apache.hadoop.fs.s3a.Invoker.once(Invoker.java:111)
+  at org.apache.hadoop.fs.s3a.Invoker.lambda$retry$3(Invoker.java:260)
+  at org.apache.hadoop.fs.s3a.Invoker.retryUntranslated(Invoker.java:314)
+  at org.apache.hadoop.fs.s3a.Invoker.retry(Invoker.java:256)
+  at org.apache.hadoop.fs.s3a.Invoker.retry(Invoker.java:231)
+  at org.apache.hadoop.fs.s3a.S3AFileSystem.verifyBucketExists(S3AFileSystem.java:366)
+  at org.apache.hadoop.fs.s3a.S3AFileSystem.initialize(S3AFileSystem.java:302)
+  at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:3354)
+  at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:124)
+  at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:3403)
+  at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:3371)
+  at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:477)
+  at org.apache.hadoop.fs.contract.AbstractBondedFSContract.init(AbstractBondedFSContract.java:72)
+  at org.apache.hadoop.fs.contract.AbstractFSContractTestBase.setup(AbstractFSContractTestBase.java:177)
+  at org.apache.hadoop.fs.s3a.commit.AbstractCommitITest.setup(AbstractCommitITest.java:163)
+  at org.apache.hadoop.fs.s3a.commit.AbstractITCommitMRJob.setup(AbstractITCommitMRJob.java:129)
+  at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
+  at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
+  at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
+  at java.lang.reflect.Method.invoke(Method.java:498)
+  at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
+  at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
+  at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
+  at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:24)
+  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
+  at org.junit.rules.ExternalResource$1.evaluate(ExternalResource.java:48)
+  at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
+  at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
+Caused by: com.amazonaws.services.s3.model.AmazonS3Exception:
+               The AWS Access Key Id you provided does not exist in our records.
+                (Service: Amazon S3; Status Code: 403; Error Code: InvalidAccessKeyId;
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.handleErrorResponse(AmazonHttpClient.java:1638)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeOneRequest(AmazonHttpClient.java:1303)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeHelper(AmazonHttpClient.java:1055)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.doExecute(AmazonHttpClient.java:743)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeWithTimer(AmazonHttpClient.java:717)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.execute(AmazonHttpClient.java:699)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.access$500(AmazonHttpClient.java:667)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutionBuilderImpl.execute(AmazonHttpClient.java:649)
+  at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:513)
+  at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4229)
+  at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4176)
+  at com.amazonaws.services.s3.AmazonS3Client.getAcl(AmazonS3Client.java:3381)
+  at com.amazonaws.services.s3.AmazonS3Client.getBucketAcl(AmazonS3Client.java:1160)
+  at com.amazonaws.services.s3.AmazonS3Client.getBucketAcl(AmazonS3Client.java:1150)
+  at com.amazonaws.services.s3.AmazonS3Client.doesBucketExist(AmazonS3Client.java:1266)
+  at org.apache.hadoop.fs.s3a.S3AFileSystem.lambda$verifyBucketExists$1(S3AFileSystem.java:367)
+  at org.apache.hadoop.fs.s3a.Invoker.once(Invoker.java:109)
+  ... 27 more
+
+```
+
+###  <a name="access_denied_disabled"></a> `AccessDeniedException` All access to this object has been disabled
 
-### "403 Access denied" when trying to write data
+The caller has no permission to access the bucket at all.
+
+```
+doesBucketExist on fdsd: java.nio.file.AccessDeniedException: fdsd: doesBucketExist on fdsd:
+ com.amazonaws.services.s3.model.AmazonS3Exception: All access to this object has been disabled
+ (Service: Amazon S3; Status Code: 403; Error Code: AllAccessDisabled; Request ID: E6229D7F8134E64F;
+  S3 Extended Request ID: 6SzVz2t4qa8J2Wxo/oc8yBuB13Mgrn9uMKnxVY0hsBd2kU/YdHzW1IaujpJdDXRDCQRX3f1RYn0=),
+  S3 Extended Request ID: 6SzVz2t4qa8J2Wxo/oc8yBuB13Mgrn9uMKnxVY0hsBd2kU/YdHzW1IaujpJdDXRDCQRX3f1RYn0=:AllAccessDisabled
+ All access to this object has been disabled (Service: Amazon S3; Status Code: 403;
+  at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:205)
+  at org.apache.hadoop.fs.s3a.S3ALambda.once(S3ALambda.java:122)
+  at org.apache.hadoop.fs.s3a.S3ALambda.lambda$retry$2(S3ALambda.java:233)
+  at org.apache.hadoop.fs.s3a.S3ALambda.retryUntranslated(S3ALambda.java:288)
+  at org.apache.hadoop.fs.s3a.S3ALambda.retry(S3ALambda.java:228)
+  at org.apache.hadoop.fs.s3a.S3ALambda.retry(S3ALambda.java:203)
+  at org.apache.hadoop.fs.s3a.S3AFileSystem.verifyBucketExists(S3AFileSystem.java:357)
+  at org.apache.hadoop.fs.s3a.S3AFileSystem.initialize(S3AFileSystem.java:293)
+  at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:3288)
+  at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:123)
+  at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:3337)
+  at org.apache.hadoop.fs.FileSystem$Cache.getUnique(FileSystem.java:3311)
+  at org.apache.hadoop.fs.FileSystem.newInstance(FileSystem.java:529)
+  at org.apache.hadoop.fs.s3a.s3guard.S3GuardTool$BucketInfo.run(S3GuardTool.java:997)
+  at org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.run(S3GuardTool.java:309)
+  at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:76)
+  at org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.run(S3GuardTool.java:1218)
+  at org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.main(S3GuardTool.java:1227)
+Caused by: com.amazonaws.services.s3.model.AmazonS3Exception: All access to this object has been disabled
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.handleErrorResponse(AmazonHttpClient.java:1638)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeOneRequest(AmazonHttpClient.java:1303)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeHelper(AmazonHttpClient.java:1055)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.doExecute(AmazonHttpClient.java:743)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeWithTimer(AmazonHttpClient.java:717)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.execute(AmazonHttpClient.java:699)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.access$500(AmazonHttpClient.java:667)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutionBuilderImpl.execute(AmazonHttpClient.java:649)
+  at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:513)
+  at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4229)
+  at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4176)
+  at com.amazonaws.services.s3.AmazonS3Client.getAcl(AmazonS3Client.java:3381)
+  at com.amazonaws.services.s3.AmazonS3Client.getBucketAcl(AmazonS3Client.java:1160)
+  at com.amazonaws.services.s3.AmazonS3Client.getBucketAcl(AmazonS3Client.java:1150)
+  at com.amazonaws.services.s3.AmazonS3Client.doesBucketExist(AmazonS3Client.java:1266)
+  at org.apache.hadoop.fs.s3a.S3AFileSystem.lambda$verifyBucketExists$1(S3AFileSystem.java:360)
+  at org.apache.hadoop.fs.s3a.S3ALambda.once(S3ALambda.java:120)
+```
+
+Check that the bucket name is correct, and validate the permissions of the active user/role.
+
+### <a name="access_denied_writing"></a> `AccessDeniedException` "Access denied" when trying to manipulate data
 
 Data can be read, but attempts to write data or manipulate the store fail with
 403/Access denied.
 
 The bucket may have an access policy which the request does not comply with.
+Alternatively, the caller may not have the right to access the data.
 
 ```
 java.nio.file.AccessDeniedException: test/: PUT 0-byte object  on test/:
@@ -257,14 +397,31 @@ java.nio.file.AccessDeniedException: test/: PUT 0-byte object  on test/:
 ```
 
 In the AWS S3 management console, select the "permissions" tab for the bucket, then "bucket policy".
-If there is no bucket policy, then the error cannot be caused by one.
 
 If there is a bucket access policy, e.g. required encryption headers,
 then the settings of the s3a client must guarantee the relevant headers are set
 (e.g. the encryption options match).
 Note: S3 Default Encryption options are not considered here:
 if the bucket policy requires AES256 as the encryption policy on PUT requests,
-then the encryption option must be set in the s3a client so that the header is set.
+then the encryption option must be set in the Hadoop client so that the header is set.
+
+
+Otherwise, the problem is likely that the user does not have full access to the
+operation. Check what they were trying to do (read vs. write) and then look
+at the permissions of the user/role.
+
+If the client is using [assumed roles](assumed_roles.html), and a policy
+is set in `fs.s3a.assumed.role.policy`, then that policy declares
+_all_ the rights which the caller has.
+
+
+### <a name="kms_access_denied"></a>  `AccessDeniedException` when using SSE-KMS
+
+When trying to write or read SSE-KMS-encrypted data, the client gets a
+`java.nio.file.AccessDeniedException` with the error 403/Forbidden.
+
+The caller does not have the permissions to access
+the key with which the data was encrypted.
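+
+As a sketch only (the key ARN below is made up), the client-side options name the key,
+but the IAM user or role making the request must also hold the relevant KMS permissions
+on that key (typically `kms:Decrypt` for reads and `kms:GenerateDataKey` for writes):
+
+```xml
+<!-- Example only: replace the ARN with the key actually used to encrypt the data. -->
+<property>
+  <name>fs.s3a.server-side-encryption-algorithm</name>
+  <value>SSE-KMS</value>
+</property>
+
+<property>
+  <name>fs.s3a.server-side-encryption.key</name>
+  <value>arn:aws:kms:us-west-2:123456789012:key/11111111-2222-3333-4444-555555555555</value>
+</property>
+```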
 
 ## <a name="connectivity"></a> Connectivity Problems
 
@@ -283,14 +440,14 @@ org.apache.hadoop.fs.s3a.AWSS3IOException: Received permanent redirect response
   addressed using the specified endpoint. Please send all future requests to
   this endpoint. (Service: Amazon S3; Status Code: 301;
   Error Code: PermanentRedirect; Request ID: 7D39EC1021C61B11)
-        at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:132)
-        at org.apache.hadoop.fs.s3a.S3AFileSystem.initMultipartUploads(S3AFileSystem.java:287)
-        at org.apache.hadoop.fs.s3a.S3AFileSystem.initialize(S3AFileSystem.java:203)
-        at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2895)
-        at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:102)
-        at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2932)
-        at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2914)
-        at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:390)
+      at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:132)
+      at org.apache.hadoop.fs.s3a.S3AFileSystem.initMultipartUploads(S3AFileSystem.java:287)
+      at org.apache.hadoop.fs.s3a.S3AFileSystem.initialize(S3AFileSystem.java:203)
+      at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2895)
+      at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:102)
+      at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2932)
+      at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2914)
+      at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:390)
 ```
 
 1. Use the [Specific endpoint of the bucket's S3 service](http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
@@ -308,12 +465,15 @@ Using the explicit endpoint for the region is recommended for speed and
 to use the V4 signing API.
 
 
-### <a name="timeout"></a> "Timeout waiting for connection from pool" when writing data
+### <a name="timeout_from_pool"></a> "Timeout waiting for connection from pool" when writing data
 
 This happens when the output stream's thread pool runs out of capacity.
 
 ```
-[s3a-transfer-shared-pool1-t20] INFO  http.AmazonHttpClient (AmazonHttpClient.java:executeHelper(496)) - Unable to execute HTTP request: Timeout waiting for connection from poolorg.apache.http.conn.ConnectionPoolTimeoutException: Timeout waiting for connection from pool
+[s3a-transfer-shared-pool1-t20] INFO  http.AmazonHttpClient (AmazonHttpClient.java:executeHelper(496))
+ - Unable to execute HTTP request:
+  Timeout waiting for connection from poolorg.apache.http.conn.ConnectionPoolTimeoutException:
+   Timeout waiting for connection from pool
   at org.apache.http.impl.conn.PoolingClientConnectionManager.leaseConnection(PoolingClientConnectionManager.java:230)
   at org.apache.http.impl.conn.PoolingClientConnectionManager$1.getConnection(PoolingClientConnectionManager.java:199)
   at sun.reflect.GeneratedMethodAccessor13.invoke(Unknown Source)
@@ -364,6 +524,46 @@ the maximum number of allocated HTTP connections.
 Set `fs.s3a.connection.maximum` to a larger value (and at least as large as
 `fs.s3a.threads.max`)
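+
+As a sketch, the relevant settings look like this; the values are examples only,
+and the right numbers depend on the workload:
+
+```xml
+<!-- Illustrative values: keep the connection pool at least as large as the thread pool. -->
+<property>
+  <name>fs.s3a.connection.maximum</name>
+  <value>96</value>
+</property>
+
+<property>
+  <name>fs.s3a.threads.max</name>
+  <value>64</value>
+</property>
+```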
 
+
+### `NoHttpResponseException`
+
+The HTTP Server did not respond.
+
+```
+2017-02-07 10:01:07,950 INFO [s3a-transfer-shared-pool1-t7] com.amazonaws.http.AmazonHttpClient:
+  Unable to execute HTTP request: bucket.s3.amazonaws.com:443 failed to respond
+org.apache.http.NoHttpResponseException: bucket.s3.amazonaws.com:443 failed to respond
+  at org.apache.http.impl.conn.DefaultHttpResponseParser.parseHead(DefaultHttpResponseParser.java:143)
+  at org.apache.http.impl.conn.DefaultHttpResponseParser.parseHead(DefaultHttpResponseParser.java:57)
+  at org.apache.http.impl.io.AbstractMessageParser.parse(AbstractMessageParser.java:261)
+  at org.apache.http.impl.AbstractHttpClientConnection.receiveResponseHeader(AbstractHttpClientConnection.java:283)
+  at org.apache.http.impl.conn.DefaultClientConnection.receiveResponseHeader(DefaultClientConnection.java:259)
+  at org.apache.http.impl.conn.ManagedClientConnectionImpl.receiveResponseHeader(ManagedClientConnectionImpl.java:209)
+  at org.apache.http.protocol.HttpRequestExecutor.doReceiveResponse(HttpRequestExecutor.java:272)
+  at com.amazonaws.http.protocol.SdkHttpRequestExecutor.doReceiveResponse(SdkHttpRequestExecutor.java:66)
+  at org.apache.http.protocol.HttpRequestExecutor.execute(HttpRequestExecutor.java:124)
+  at org.apache.http.impl.client.DefaultRequestDirector.tryExecute(DefaultRequestDirector.java:686)
+  at org.apache.http.impl.client.DefaultRequestDirector.execute(DefaultRequestDirector.java:488)
+  at org.apache.http.impl.client.AbstractHttpClient.doExecute(AbstractHttpClient.java:884)
+  at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:82)
+  at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:55)
+  at com.amazonaws.http.AmazonHttpClient.executeOneRequest(AmazonHttpClient.java:728)
+  at com.amazonaws.http.AmazonHttpClient.executeHelper(AmazonHttpClient.java:489)
+  at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:310)
+  at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:3785)
+  at com.amazonaws.services.s3.AmazonS3Client.copyPart(AmazonS3Client.java:1731)
+  at com.amazonaws.services.s3.transfer.internal.CopyPartCallable.call(CopyPartCallable.java:41)
+  at com.amazonaws.services.s3.transfer.internal.CopyPartCallable.call(CopyPartCallable.java:28)
+  at org.apache.hadoop.fs.s3a.SemaphoredDelegatingExecutor$CallableWithPermitRelease.call(SemaphoredDelegatingExecutor.java:222)
+  at java.util.concurrent.FutureTask.run(FutureTask.java:266)
+  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
+  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
+  at java.lang.Thread.run(Thread.java:745)
+```
+
+This is probably a network problem, unless it really is an outage of S3.
+
+
 ### Out of heap memory when writing via Fast Upload
 
 This can happen when using the upload buffering mechanism
@@ -418,7 +618,8 @@ for up to date advice.
 org.apache.hadoop.fs.s3a.AWSClientIOException: getFileStatus on test/testname/streaming/:
   com.amazonaws.AmazonClientException: Failed to sanitize XML document
   destined for handler class com.amazonaws.services.s3.model.transform.XmlResponsesSaxParser$ListBucketHandler:
-  Failed to sanitize XML document destined for handler class com.amazonaws.services.s3.model.transform.XmlResponsesSaxParser$ListBucketHandler
+  Failed to sanitize XML document destined for handler class
+   com.amazonaws.services.s3.model.transform.XmlResponsesSaxParser$ListBucketHandler
     at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:105)
     at org.apache.hadoop.fs.s3a.S3AFileSystem.getFileStatus(S3AFileSystem.java:1462)
     at org.apache.hadoop.fs.s3a.S3AFileSystem.innerListStatus(S3AFileSystem.java:1227)
@@ -444,19 +645,136 @@ Again, we believe this is caused by the connection to S3 being broken.
 It may go away if the operation is retried.
 
 
+## <a name="other"></a> Other Errors
+
+### <a name="integrity"></a> `SdkClientException` Unable to verify integrity of data upload
 
-## Miscellaneous Errors
+Something has happened to the data as it was uploaded.
+
+```
+Caused by: org.apache.hadoop.fs.s3a.AWSClientIOException: saving output on dest/_task_tmp.-ext-10000/_tmp.000000_0:
+    com.amazonaws.AmazonClientException: Unable to verify integrity of data upload.
+    Client calculated content hash (contentMD5: L75PalQk0CIhTp04MStVOA== in base 64)
+    didn't match hash (etag: 37ace01f2c383d6b9b3490933c83bb0f in hex) calculated by Amazon S3.
+    You may need to delete the data stored in Amazon S3.
+    (metadata.contentMD5: L75PalQk0CIhTp04MStVOA==, md5DigestStream: null,
+    bucketName: ext2, key: dest/_task_tmp.-ext-10000/_tmp.000000_0):
+  at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:144)
+  at org.apache.hadoop.fs.s3a.S3AOutputStream.close(S3AOutputStream.java:121)
+  at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:72)
+  at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106)
+  at org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat$1.close(HiveIgnoreKeyTextOutputFormat.java:99)
+  at org.apache.hadoop.hive.ql.exec.FileSinkOperator$FSPaths.closeWriters(FileSinkOperator.java:190)
+  ... 22 more
+Caused by: com.amazonaws.AmazonClientException: Unable to verify integrity of data upload.
+  Client calculated content hash (contentMD5: L75PalQk0CIhTp04MStVOA== in base 64)
+  didn't match hash (etag: 37ace01f2c383d6b9b3490933c83bb0f in hex) calculated by Amazon S3.
+  You may need to delete the data stored in Amazon S3.
+  (metadata.contentMD5: L75PalQk0CIhTp04MStVOA==, md5DigestStream: null,
+  bucketName: ext2, key: dest/_task_tmp.-ext-10000/_tmp.000000_0)
+  at com.amazonaws.services.s3.AmazonS3Client.putObject(AmazonS3Client.java:1492)
+  at com.amazonaws.services.s3.transfer.internal.UploadCallable.uploadInOneChunk(UploadCallable.java:131)
+  at com.amazonaws.services.s3.transfer.internal.UploadCallable.call(UploadCallable.java:123)
+  at com.amazonaws.services.s3.transfer.internal.UploadMonitor.call(UploadMonitor.java:139)
+  at com.amazonaws.services.s3.transfer.internal.UploadMonitor.call(UploadMonitor.java:47)
+  ... 4 more
+```
+
+As it uploads data to S3, the AWS SDK builds up an MD5 checksum of what was
+PUT/POSTed. When S3 returns the checksum of the uploaded data, that is compared
+with the local checksum. If there is a mismatch, this error is reported.
+
+The uploaded data is already on S3 and will stay there, though if this happens
+during a multipart upload, it may not be visible (but still billed: clean up your
+multipart uploads via the `hadoop s3guard uploads` command).
+
+Possible causes for this include:
+
+1. A (possibly transient) network problem, including hardware faults.
+1. A proxy server is doing bad things to the data.
+1. Some signing problem, especially with third-party S3-compatible object stores.
+
+This is a very, very rare occurrence.
+
+If the problem is a signing one, try changing the signature algorithm.
+
+```xml
+<property>
+  <name>fs.s3a.signing-algorithm</name>
+  <value>S3SignerType</value>
+</property>
+```
+
+We cannot make any promises that it will work,
+only that it has been known to make the problem go away "once".
+
+### `AWSS3IOException` The Content-MD5 you specified did not match what we received
+
+Reads work, but writes, even `mkdir`, fail:
+
+```
+org.apache.hadoop.fs.s3a.AWSS3IOException: copyFromLocalFile(file:/tmp/hello.txt, s3a://bucket/hello.txt)
+    on file:/tmp/hello.txt:
+    The Content-MD5 you specified did not match what we received.
+    (Service: Amazon S3; Status Code: 400; Error Code: BadDigest; Request ID: 4018131225),
+    S3 Extended Request ID: null
+  at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:127)
+	at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:69)
+	at org.apache.hadoop.fs.s3a.S3AFileSystem.copyFromLocalFile(S3AFileSystem.java:1494)
+	at org.apache.hadoop.tools.cloudup.Cloudup.uploadOneFile(Cloudup.java:466)
+	at org.apache.hadoop.tools.cloudup.Cloudup.access$000(Cloudup.java:63)
+	at org.apache.hadoop.tools.cloudup.Cloudup$1.call(Cloudup.java:353)
+	at org.apache.hadoop.tools.cloudup.Cloudup$1.call(Cloudup.java:350)
+	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
+	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
+	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
+	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
+	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
+	at java.lang.Thread.run(Thread.java:748)
+Caused by: com.amazonaws.services.s3.model.AmazonS3Exception:
+    The Content-MD5 you specified did not match what we received.
+    (Service: Amazon S3; Status Code: 400; Error Code: BadDigest; Request ID: 4018131225),
+    S3 Extended Request ID: null
+  at com.amazonaws.http.AmazonHttpClient.handleErrorResponse(AmazonHttpClient.java:1307)
+	at com.amazonaws.http.AmazonHttpClient.executeOneRequest(AmazonHttpClient.java:894)
+	at com.amazonaws.http.AmazonHttpClient.executeHelper(AmazonHttpClient.java:597)
+	at com.amazonaws.http.AmazonHttpClient.doExecute(AmazonHttpClient.java:363)
+	at com.amazonaws.http.AmazonHttpClient.executeWithTimer(AmazonHttpClient.java:329)
+	at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:308)
+	at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:3659)
+	at com.amazonaws.services.s3.AmazonS3Client.putObject(AmazonS3Client.java:1422)
+	at com.amazonaws.services.s3.transfer.internal.UploadCallable.uploadInOneChunk(UploadCallable.java:131)
+	at com.amazonaws.services.s3.transfer.internal.UploadCallable.call(UploadCallable.java:123)
+	at com.amazonaws.services.s3.transfer.internal.UploadMonitor.call(UploadMonitor.java:139)
+	at com.amazonaws.services.s3.transfer.internal.UploadMonitor.call(UploadMonitor.java:47)
+	at org.apache.hadoop.fs.s3a.BlockingThreadPoolExecutorService$CallableWithPermitRelease.call(BlockingThreadPoolExecutorService.java:239)
+	... 4 more
+```
+
+This stack trace was seen when interacting with a third-party S3 store whose
+expectations of headers related to the AWS V4 signing mechanism were not
+compatible with those of the specific AWS SDK Hadoop was using.
+
+Workaround: revert to V2 signing.
+
+```xml
+<property>
+  <name>fs.s3a.signing-algorithm</name>
+  <value>S3SignerType</value>
+</property>
+```
 
 ### When writing data: "java.io.FileNotFoundException: Completing multi-part upload"
 
 
 A multipart upload was trying to complete, but failed as there was no upload
 with that ID.
+
 ```
 java.io.FileNotFoundException: Completing multi-part upload on fork-5/test/multipart/1c397ca6-9dfb-4ac1-9cf7-db666673246b:
  com.amazonaws.services.s3.model.AmazonS3Exception: The specified upload does not exist.
-  The upload ID may be invalid, or the upload may have been aborted or completed. (Service: Amazon S3; Status Code: 404;
-   Error Code: NoSuchUpload;
+  The upload ID may be invalid, or the upload may have been aborted or completed.
+   (Service: Amazon S3; Status Code: 404; Error Code: NoSuchUpload;
   at com.amazonaws.http.AmazonHttpClient.handleErrorResponse(AmazonHttpClient.java:1182)
   at com.amazonaws.http.AmazonHttpClient.executeOneRequest(AmazonHttpClient.java:770)
   at com.amazonaws.http.AmazonHttpClient.executeHelper(AmazonHttpClient.java:489)
@@ -482,14 +800,11 @@ for all open writes to complete the write,
 ### Application hangs after reading a number of files
 
 
-
-
-The pool of https client connectons and/or IO threads have been used up,
+The pool of HTTPS client connections and/or IO threads has been used up,
 and none are being freed.
 
 
-1. The pools aren't big enough. Increas `fs.s3a.connection.maximum` for
-the http connections, and `fs.s3a.threads.max` for the thread pool.
+1. The pools aren't big enough. See ["Timeout waiting for connection from pool"](#timeout_from_pool).
 2. Likely root cause: whatever code is reading files isn't calling `close()`
 on the input streams. Make sure your code does this!
 And if it's someone else's: make sure you have a recent version; search their
@@ -497,81 +812,13 @@ issue trackers to see if its a known/fixed problem.
 If not, it's time to work with the developers, or come up with a workaround
 (i.e. closing the input stream yourself).
 
-### "Timeout waiting for connection from pool"
 
-This the same problem as above, exhibiting itself as the http connection
-pool determining that it has run out of capacity.
-
-```
-
-java.io.InterruptedIOException: getFileStatus on s3a://example/fork-0007/test:
- com.amazonaws.SdkClientException: Unable to execute HTTP request: Timeout waiting for connection from pool
-  at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:145)
-  at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:119)
-  at org.apache.hadoop.fs.s3a.S3AFileSystem.s3GetFileStatus(S3AFileSystem.java:2040)
-  at org.apache.hadoop.fs.s3a.S3AFileSystem.checkPathForDirectory(S3AFileSystem.java:1857)
-  at org.apache.hadoop.fs.s3a.S3AFileSystem.innerMkdirs(S3AFileSystem.java:1890)
-  at org.apache.hadoop.fs.s3a.S3AFileSystem.mkdirs(S3AFileSystem.java:1826)
-  at org.apache.hadoop.fs.FileSystem.mkdirs(FileSystem.java:2230)
-  ...
-Caused by: com.amazonaws.SdkClientException: Unable to execute HTTP request: Timeout waiting for connection from pool
-  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.handleRetryableException(AmazonHttpClient.java:1069)
-  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeHelper(AmazonHttpClient.java:1035)
-  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.doExecute(AmazonHttpClient.java:742)
-  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeWithTimer(AmazonHttpClient.java:716)
-  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.execute(AmazonHttpClient.java:699)
-  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.access$500(AmazonHttpClient.java:667)
-  at com.amazonaws.http.AmazonHttpClient$RequestExecutionBuilderImpl.execute(AmazonHttpClient.java:649)
-  at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:513)
-  at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4221)
-  at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4168)
-  at com.amazonaws.services.s3.AmazonS3Client.getObjectMetadata(AmazonS3Client.java:1249)
-  at org.apache.hadoop.fs.s3a.S3AFileSystem.getObjectMetadata(S3AFileSystem.java:1162)
-  at org.apache.hadoop.fs.s3a.S3AFileSystem.s3GetFileStatus(S3AFileSystem.java:2022)
-  at org.apache.hadoop.fs.s3a.S3AFileSystem.checkPathForDirectory(S3AFileSystem.java:1857)
-  at org.apache.hadoop.fs.s3a.S3AFileSystem.innerMkdirs(S3AFileSystem.java:1890)
-  at org.apache.hadoop.fs.s3a.S3AFileSystem.mkdirs(S3AFileSystem.java:1826)
-  at org.apache.hadoop.fs.FileSystem.mkdirs(FileSystem.java:2230)
-...
-Caused by: com.amazonaws.thirdparty.apache.http.conn.ConnectionPoolTimeoutException: Timeout waiting for connection from pool
-  at com.amazonaws.thirdparty.apache.http.impl.conn.PoolingHttpClientConnectionManager.leaseConnection(PoolingHttpClientConnectionManager.java:286)
-  at com.amazonaws.thirdparty.apache.http.impl.conn.PoolingHttpClientConnectionManager$1.get(PoolingHttpClientConnectionManager.java:263)
-  at sun.reflect.GeneratedMethodAccessor10.invoke(Unknown Source)
-  at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
-  at java.lang.reflect.Method.invoke(Method.java:498)
-  at com.amazonaws.http.conn.ClientConnectionRequestFactory$Handler.invoke(ClientConnectionRequestFactory.java:70)
-  at com.amazonaws.http.conn.$Proxy15.get(Unknown Source)
-  at com.amazonaws.thirdparty.apache.http.impl.execchain.MainClientExec.execute(MainClientExec.java:190)
-  at com.amazonaws.thirdparty.apache.http.impl.execchain.ProtocolExec.execute(ProtocolExec.java:184)
-  at com.amazonaws.thirdparty.apache.http.impl.client.InternalHttpClient.doExecute(InternalHttpClient.java:184)
-  at com.amazonaws.thirdparty.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:82)
-  at com.amazonaws.thirdparty.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:55)
-  at com.amazonaws.http.apache.client.impl.SdkHttpClient.execute(SdkHttpClient.java:72)
-  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeOneRequest(AmazonHttpClient.java:1190)
-  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeHelper(AmazonHttpClient.java:1030)
-  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.doExecute(AmazonHttpClient.java:742)
-  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeWithTimer(AmazonHttpClient.java:716)
-  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.execute(AmazonHttpClient.java:699)
-  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.access$500(AmazonHttpClient.java:667)
-  at com.amazonaws.http.AmazonHttpClient$RequestExecutionBuilderImpl.execute(AmazonHttpClient.java:649)
-  at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:513)
-  at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4221)
-  at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4168)
-  at com.amazonaws.services.s3.AmazonS3Client.getObjectMetadata(AmazonS3Client.java:1249)
-  at org.apache.hadoop.fs.s3a.S3AFileSystem.getObjectMetadata(S3AFileSystem.java:1162)
-  at org.apache.hadoop.fs.s3a.S3AFileSystem.s3GetFileStatus(S3AFileSystem.java:2022)
-  at org.apache.hadoop.fs.s3a.S3AFileSystem.checkPathForDirectory(S3AFileSystem.java:1857)
-  at org.apache.hadoop.fs.s3a.S3AFileSystem.innerMkdirs(S3AFileSystem.java:1890)
-  at org.apache.hadoop.fs.s3a.S3AFileSystem.mkdirs(S3AFileSystem.java:1826)
-  at org.apache.hadoop.fs.FileSystem.mkdirs(FileSystem.java:2230)
-```
-
-This is the same problem as the previous one, exhibited differently.
 
 ### Issue: when writing data, HTTP Exceptions logged at info from `AmazonHttpClient`
 
 ```
-[s3a-transfer-shared-pool4-t6] INFO  http.AmazonHttpClient (AmazonHttpClient.java:executeHelper(496)) - Unable to execute HTTP request: hwdev-steve-ireland-new.s3.amazonaws.com:443 failed to respond
+[s3a-transfer-shared-pool4-t6] INFO  http.AmazonHttpClient (AmazonHttpClient.java:executeHelper(496))
+ - Unable to execute HTTP request: hwdev-steve-ireland-new.s3.amazonaws.com:443 failed to respond
 org.apache.http.NoHttpResponseException: bucket.s3.amazonaws.com:443 failed to respond
   at org.apache.http.impl.conn.DefaultHttpResponseParser.parseHead(DefaultHttpResponseParser.java:143)
   at org.apache.http.impl.conn.DefaultHttpResponseParser.parseHead(DefaultHttpResponseParser.java:57)
@@ -606,6 +853,45 @@ will attempt to retry the operation; it may just be a transient event. If there
 are many such exceptions in logs, it may be a symptom of connectivity or network
 problems.
 
+### `AWSBadRequestException` IllegalLocationConstraintException/The unspecified location constraint is incompatible
+
+```
+ Cause: org.apache.hadoop.fs.s3a.AWSBadRequestException: put on :
+  com.amazonaws.services.s3.model.AmazonS3Exception:
+   The unspecified location constraint is incompatible for the region specific
+    endpoint this request was sent to.
+    (Service: Amazon S3; Status Code: 400; Error Code: IllegalLocationConstraintException;
+
+  at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:178)
+  at org.apache.hadoop.fs.s3a.S3ALambda.execute(S3ALambda.java:64)
+  at org.apache.hadoop.fs.s3a.WriteOperationHelper.uploadObject(WriteOperationHelper.java:451)
+  at org.apache.hadoop.fs.s3a.commit.magic.MagicCommitTracker.aboutToComplete(MagicCommitTracker.java:128)
+  at org.apache.hadoop.fs.s3a.S3ABlockOutputStream.close(S3ABlockOutputStream.java:373)
+  at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:72)
+  at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:101)
+  at org.apache.hadoop.hive.ql.io.orc.WriterImpl.close(WriterImpl.java:2429)
+  at org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat$OrcRecordWriter.close(OrcOutputFormat.java:106)
+  at org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat$OrcRecordWriter.close(OrcOutputFormat.java:91)
+  ...
+  Cause: com.amazonaws.services.s3.model.AmazonS3Exception:
+   The unspecified location constraint is incompatible for the region specific endpoint
+   this request was sent to. (Service: Amazon S3; Status Code: 400; Error Code: IllegalLocationConstraintException;
+   Request ID: EEBC5A08BCB3A645)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.handleErrorResponse(AmazonHttpClient.java:1588)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeOneRequest(AmazonHttpClient.java:1258)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeHelper(AmazonHttpClient.java:1030)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.doExecute(AmazonHttpClient.java:742)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeWithTimer(AmazonHttpClient.java:716)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.execute(AmazonHttpClient.java:699)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.access$500(AmazonHttpClient.java:667)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutionBuilderImpl.execute(AmazonHttpClient.java:649)
+  at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:513)
+  at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4221)
+  ...
+```
+
+Something has been trying to write data to "/".
+
 ## File System Semantics
 
 These are the issues where S3 does not appear to behave the way a filesystem
@@ -664,7 +950,7 @@ that it is not there)
 This is a visible sign of updates to the metadata server lagging
 behind the state of the underlying filesystem.
 
-Fix: Use S3Guard
+Fix: Use [S3Guard](s3guard.html).
 
 
 ### File not visible/saved
@@ -686,26 +972,74 @@ and the like. The standard strategy here is to save to HDFS and then copy to S3.
 
 ## <a name="encryption"></a> S3 Server Side Encryption
 
-### Using SSE-KMS "Invalid arn"
+### `AWSS3IOException` `KMS.NotFoundException` "Invalid arn" when using SSE-KMS
 
 When performing file operations, the user may run into an issue where the KMS
 key arn is invalid.
+
 ```
-com.amazonaws.services.s3.model.AmazonS3Exception:
-Invalid arn (Service: Amazon S3; Status Code: 400; Error Code: KMS.NotFoundException; Request ID: 708284CF60EE233F),
-S3 Extended Request ID: iHUUtXUSiNz4kv3Bdk/hf9F+wjPt8GIVvBHx/HEfCBYkn7W6zmpvbA3XT7Y5nTzcZtfuhcqDunw=:
-Invalid arn (Service: Amazon S3; Status Code: 400; Error Code: KMS.NotFoundException; Request ID: 708284CF60EE233F)
+org.apache.hadoop.fs.s3a.AWSS3IOException: innerMkdirs on /test:
+ com.amazonaws.services.s3.model.AmazonS3Exception:
+  Invalid arn (Service: Amazon S3; Status Code: 400; Error Code: KMS.NotFoundException;
+   Request ID: CA89F276B3394565),
+   S3 Extended Request ID: ncz0LWn8zor1cUO2fQ7gc5eyqOk3YfyQLDn2OQNoe5Zj/GqDLggUYz9QY7JhdZHdBaDTh+TL5ZQ=:
+   Invalid arn (Service: Amazon S3; Status Code: 400; Error Code: KMS.NotFoundException; Request ID: CA89F276B3394565)
+  at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:194)
+  at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:117)
+  at org.apache.hadoop.fs.s3a.S3AFileSystem.mkdirs(S3AFileSystem.java:1541)
+  at org.apache.hadoop.fs.FileSystem.mkdirs(FileSystem.java:2230)
+  at org.apache.hadoop.fs.contract.AbstractFSContractTestBase.mkdirs(AbstractFSContractTestBase.java:338)
+  at org.apache.hadoop.fs.contract.AbstractFSContractTestBase.setup(AbstractFSContractTestBase.java:193)
+  at org.apache.hadoop.fs.s3a.scale.S3AScaleTestBase.setup(S3AScaleTestBase.java:90)
+  at org.apache.hadoop.fs.s3a.scale.AbstractSTestS3AHugeFiles.setup(AbstractSTestS3AHugeFiles.java:77)
+  at sun.reflect.GeneratedMethodAccessor12.invoke(Unknown Source)
+  at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
+  at java.lang.reflect.Method.invoke(Method.java:498)
+  at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
+  at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
+  at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
+  at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:24)
+  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
+  at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
+  at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
+Caused by: com.amazonaws.services.s3.model.AmazonS3Exception:
+ Invalid arn (Service: Amazon S3; Status Code: 400; Error Code: KMS.NotFoundException; Request ID: CA89F276B3394565)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.handleErrorResponse(AmazonHttpClient.java:1588)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeOneRequest(AmazonHttpClient.java:1258)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeHelper(AmazonHttpClient.java:1030)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.doExecute(AmazonHttpClient.java:742)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeWithTimer(AmazonHttpClient.java:716)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.execute(AmazonHttpClient.java:699)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.access$500(AmazonHttpClient.java:667)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutionBuilderImpl.execute(AmazonHttpClient.java:649)
+  at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:513)
+  at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4221)
+  at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4168)
+  at com.amazonaws.services.s3.AmazonS3Client.putObject(AmazonS3Client.java:1718)
+  at com.amazonaws.services.s3.transfer.internal.UploadCallable.uploadInOneChunk(UploadCallable.java:133)
+  at com.amazonaws.services.s3.transfer.internal.UploadCallable.call(UploadCallable.java:125)
+  at com.amazonaws.services.s3.transfer.internal.UploadMonitor.call(UploadMonitor.java:143)
+  at com.amazonaws.services.s3.transfer.internal.UploadMonitor.call(UploadMonitor.java:48)
+  at java.util.concurrent.FutureTask.run(FutureTask.java:266)
+  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
+  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
+  at java.lang.Thread.run(Thread.java:745)
 ```
 
-This is due to either, the KMS key id is entered incorrectly, or the KMS key id
-is in a different region than the S3 bucket being used.
+Possible causes:
+
+* the KMS key ARN is entered incorrectly, or
+* the KMS key referenced by the ARN is in a different region than the S3 bucket
+being used.
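+
+The region is part of the key ARN itself, so a mismatch is usually visible by
+inspection. A made-up example for a bucket hosted in `eu-west-1`:
+
+```xml
+<!-- Hypothetical ARN: the region field (here eu-west-1) must match the region of the S3 bucket. -->
+<property>
+  <name>fs.s3a.server-side-encryption.key</name>
+  <value>arn:aws:kms:eu-west-1:123456789012:key/aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee</value>
+</property>
+```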
+
 
 ### Using SSE-C "Bad Request"
 
 When performing file operations the user may run into an unexpected 400/403
 error such as
 ```
-org.apache.hadoop.fs.s3a.AWSS3IOException: getFileStatus on fork-4/: com.amazonaws.services.s3.model.AmazonS3Exception:
+org.apache.hadoop.fs.s3a.AWSS3IOException: getFileStatus on fork-4/:
+ com.amazonaws.services.s3.model.AmazonS3Exception:
 Bad Request (Service: Amazon S3; Status Code: 400;
 Error Code: 400 Bad Request; Request ID: 42F9A1987CB49A99),
 S3 Extended Request ID: jU2kcwaXnWj5APB14Cgb1IKkc449gu2+dhIsW/+7x9J4D+VUkKvu78mBo03oh9jnOT2eoTLdECU=:
@@ -719,83 +1053,49 @@ is used, no encryption is specified, or the SSE-C specified is incorrect.
 2. A directory is encrypted with a SSE-C keyA and the user is trying to move a
 file using configured SSE-C keyB into that structure.
 
-## <a name="performance"></a> Performance
-
-S3 is slower to read data than HDFS, even on virtual clusters running on
-Amazon EC2.
-
-* HDFS replicates data for faster query performance.
-* HDFS stores the data on the local hard disks, avoiding network traffic
- if the code can be executed on that host. As EC2 hosts often have their
- network bandwidth throttled, this can make a tangible difference.
-* HDFS is significantly faster for many "metadata" operations: listing
-the contents of a directory, calling `getFileStatus()` on path,
-creating or deleting directories. (S3Guard reduces but does not eliminate
-the speed gap).
-* On HDFS, Directory renames and deletes are `O(1)` operations. On
-S3 renaming is a very expensive `O(data)` operation which may fail partway through
-in which case the final state depends on where the copy+ delete sequence was when it failed.
-All the objects are copied, then the original set of objects are deleted, so
-a failure should not lose data —it may result in duplicate datasets.
-* Unless fast upload enabled, the write only begins on a `close()` operation.
-This can take so long that some applications can actually time out.
-* File IO involving many seek calls/positioned read calls will encounter
-performance problems due to the size of the HTTP requests made. Enable the
-"random" fadvise policy to alleviate this at the
-expense of sequential read performance and bandwidth.
-
-The slow performance of `rename()` surfaces during the commit phase of work,
-including
-
-* The MapReduce `FileOutputCommitter`. This also used by Apache Spark.
-* DistCp's rename-after-copy operation.
-* The `hdfs fs -rm` command renaming the file under `.Trash` rather than
-deleting it. Use `-skipTrash` to eliminate that step.
-
-These operations can be significantly slower when S3 is the destination
-compared to HDFS or other "real" filesystem.
+## <a name="not_all_bytes_were_read"></a> Message appears in logs "Not all bytes were read from the S3ObjectInputStream"
 
-*Improving S3 load-balancing behavior*
 
-Amazon S3 uses a set of front-end servers to provide access to the underlying data.
-The choice of which front-end server to use is handled via load-balancing DNS
-service: when the IP address of an S3 bucket is looked up, the choice of which
-IP address to return to the client is made based on the the current load
-of the front-end servers.
+This is a message which can be generated by the Amazon SDK when the client application
+calls `abort()` on the HTTP input stream, rather than reading to the end of
+the file/stream and then calling `close()`. The S3A client does call `abort()` when
+seeking around large files, [hence the message](https://github.com/aws/aws-sdk-java/issues/1211).
 
-Over time, the load across the front-end changes, so those servers considered
-"lightly loaded" will change. If the DNS value is cached for any length of time,
-your application may end up talking to an overloaded server. Or, in the case
-of failures, trying to talk to a server that is no longer there.
+No ASF Hadoop releases have shipped with an SDK which prints this message
+when used by the S3A client. However, third-party and private builds of Hadoop
+may cause the message to be logged.
 
-And by default, for historical security reasons in the era of applets,
-the DNS TTL of a JVM is "infinity".
+Ignore it. The S3A client does call `abort()`, but that's because our benchmarking
+shows that it is generally more efficient to abort the TCP connection and initiate
+a new one than read to the end of a large file.
 
-To work with AWS better, set the DNS time-to-live of an application which
-works with S3 to something lower. See [AWS documentation](http://docs.aws.amazon.com/AWSSdkDocsJava/latest/DeveloperGuide/java-dg-jvm-ttl.html).
+Note: the threshold when data is read rather than the stream aborted can be tuned
+by `fs.s3a.readahead.range`; the seek policy is set in `fs.s3a.experimental.input.fadvise`.
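+
+A sketch of those options; the values shown are examples, not recommendations:
+
+```xml
+<!-- Example values only: a larger readahead range means more data is drained on a seek
+     before the client falls back to aborting the HTTP connection. -->
+<property>
+  <name>fs.s3a.readahead.range</name>
+  <value>256K</value>
+</property>
+
+<property>
+  <name>fs.s3a.experimental.input.fadvise</name>
+  <value>random</value>
+</property>
+```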
 
-## <a name="network_performance"></a>Troubleshooting network performance
+### <a name="no_such_bucket"></a> `FileNotFoundException` Bucket does not exist.
 
-An example of this is covered in [HADOOP-13871](https://issues.apache.org/jira/browse/HADOOP-13871).
+The bucket does not exist.
 
-1. For public data, use `curl`:
-
-        curl -O https://landsat-pds.s3.amazonaws.com/scene_list.gz
-1. Use `nettop` to monitor a processes connections.
-
-Consider reducing the connection timeout of the s3a connection.
-
-```xml
-<property>
-  <name>fs.s3a.connection.timeout</name>
-  <value>15000</value>
-</property>
 ```
-This *may* cause the client to react faster to network pauses, so display
-stack traces fast. At the same time, it may be less resilient to
-connectivity problems.
+java.io.FileNotFoundException: Bucket stevel45r56666 does not exist
+  at org.apache.hadoop.fs.s3a.S3AFileSystem.verifyBucketExists(S3AFileSystem.java:361)
+  at org.apache.hadoop.fs.s3a.S3AFileSystem.initialize(S3AFileSystem.java:293)
+  at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:3288)
+  at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:123)
+  at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:3337)
+  at org.apache.hadoop.fs.FileSystem$Cache.getUnique(FileSystem.java:3311)
+  at org.apache.hadoop.fs.FileSystem.newInstance(FileSystem.java:529)
+  at org.apache.hadoop.fs.s3a.s3guard.S3GuardTool$BucketInfo.run(S3GuardTool.java:997)
+  at org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.run(S3GuardTool.java:309)
+  at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:76)
+  at org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.run(S3GuardTool.java:1218)
+  at org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.main(S3GuardTool.java:1227)
+```
 
 
+Check the URI. If using a third-party store, verify that you've configured
+the client to talk to the specific server in `fs.s3a.endpoint`.
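+
+A sketch, with a placeholder hostname standing in for the address of the third-party store:
+
+```xml
+<!-- Placeholder endpoint: substitute the address of your S3-compatible store. -->
+<property>
+  <name>fs.s3a.endpoint</name>
+  <value>objectstore.example.org:9000</value>
+</property>
+```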
+
 ## Other Issues
 
 ### <a name="logging"></a> Enabling low-level logging
@@ -816,7 +1116,7 @@ log4j.logger.org.apache.http=DEBUG
 ```
 
 
-This produces a log such as this, wich is for a V4-authenticated PUT of a 0-byte file used
+This produces a log such as this, which is for a V4-authenticated PUT of a 0-byte file used
 as an empty directory marker
 
 ```
@@ -866,9 +1166,9 @@ execchain.MainClientExec (MainClientExec.java:execute(284)) - Connection can be
 
 ## <a name="retries"></a>  Reducing failures by configuring retry policy
 
-The S3A client can ba configured to rety those operations which are considered
-retriable. That can be because they are idempotent, or
-because there failure happened before the request was processed by S3.
+The S3A client can be configured to retry those operations which are considered
+retryable. That can be because they are idempotent, or
+because the failure happened before the request was processed by S3.
 
 The number of retries and interval between each retry can be configured:
 
@@ -893,8 +1193,8 @@ Not all failures are retried. Specifically excluded are those considered
 unrecoverable:
 
 * Low-level networking: `UnknownHostException`, `NoRouteToHostException`.
-* 302 redirects
-* Missing resources, 404/`FileNotFoundException`
+* 302 redirects.
+* Missing resources, 404/`FileNotFoundException`.
 * HTTP 416 response/`EOFException`. This can surface if the length of a file changes
   while another client is reading it.
 * Failures during execution or result processing of non-idempotent operations where
@@ -910,79 +1210,6 @@ be idempotent, and will retry them on failure. These are only really idempotent
 if no other client is attempting to manipulate the same objects, such as:
 renaming() the directory tree or uploading files to the same location.
 Please don't do that. Given that the emulated directory rename and delete operations
-aren't atomic, even without retries, multiple S3 clients working with the same
+are not atomic, even without retries, multiple S3 clients working with the same
 paths can interfere with each other
 
-#### <a name="retries"></a> Throttling
-
-When many requests are made of a specific S3 bucket (or shard inside it),
-S3 will respond with a 503 "throttled" response.
-Throttling can be recovered from, provided overall load decreases.
-Furthermore, because it is sent before any changes are made to the object store,
-is inherently idempotent. For this reason, the client will always attempt to
-retry throttled requests.
-
-The limit of the number of times a throttled request can be retried,
-and the exponential interval increase between attempts, can be configured
-independently of the other retry limits.
-
-```xml
-<property>
-  <name>fs.s3a.retry.throttle.limit</name>
-  <value>20</value>
-  <description>
-    Number of times to retry any throttled request.
-  </description>
-</property>
-
-<property>
-  <name>fs.s3a.retry.throttle.interval</name>
-  <value>500ms</value>
-  <description>
-    Interval between retry attempts on throttled requests.
-  </description>
-</property>
-```
-
-If a client is failing due to `AWSServiceThrottledException` failures,
-increasing the interval and limit *may* address this. However, it
-it is a sign of AWS services being overloaded by the sheer number of clients
-and rate of requests. Spreading data across different buckets, and/or using
-a more balanced directory structure may be beneficial.
-Consult [the AWS documentation](http://docs.aws.amazon.com/AmazonS3/latest/dev/request-rate-perf-considerations.html).
-
-Reading or writing data encrypted with SSE-KMS forces S3 to make calls of
-the AWS KMS Key Management Service, which comes with its own
-[Request Rate Limits](http://docs.aws.amazon.com/kms/latest/developerguide/limits.html).
-These default to 1200/second for an account, across all keys and all uses of
-them, which, for S3 means: across all buckets with data encrypted with SSE-KMS.
-
-###### Tips to Keep Throttling down
-
-* If you are seeing a lot of throttling responses on a large scale
-operation like a `distcp` copy, *reduce* the number of processes trying
-to work with the bucket (for distcp: reduce the number of mappers with the
-`-m` option).
-
-* If you are reading or writing lists of files, if you can randomize
-the list so they are not processed in a simple sorted order, you may
-reduce load on a specific shard of S3 data, so potentially increase throughput.
-
-* An S3 Bucket is throttled by requests coming from all
-simultaneous clients. Different applications and jobs may interfere with
-each other: consider that when troubleshooting.
-Partitioning data into different buckets may help isolate load here.
-
-* If you are using data encrypted with SSE-KMS, then the
-will also apply: these are stricter than the S3 numbers.
-If you believe that you are reaching these limits, you may be able to
-get them increased.
-Consult [the KMS Rate Limit documentation](http://docs.aws.amazon.com/kms/latest/developerguide/limits.html).
-
-* S3Guard uses DynamoDB for directory and file lookups;
-it is rate limited to the amount of (guaranteed) IO purchased for a
-table. If significant throttling events/rate is observed here, the preallocated
-IOPs can be increased with the `s3guard set-capacity` command, or
-through the AWS Console. Throttling events in S3Guard are noted in logs, and
-also in the S3A metrics `s3guard_metadatastore_throttle_rate` and
-`s3guard_metadatastore_throttled`.




[14/50] [abbrv] hadoop git commit: HDFS-13130. Log object instance get incorrectly in SlowDiskTracker. Contributed by Jianfei Jiang.

Posted by ae...@apache.org.
HDFS-13130. Log object instance get incorrectly in SlowDiskTracker. Contributed by Jianfei Jiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25fbec67
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25fbec67
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25fbec67

Branch: refs/heads/HDFS-7240
Commit: 25fbec67d1c01cc3531b51d9e2ec03e5c3591a7e
Parents: 60f9e60
Author: Yiqun Lin <yq...@apache.org>
Authored: Sun Feb 11 12:02:10 2018 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Sun Feb 11 12:02:10 2018 +0800

----------------------------------------------------------------------
 .../apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25fbec67/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java
index 051121e..d0d1ee4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java
@@ -53,7 +53,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 @InterfaceStability.Unstable
 public class SlowDiskTracker {
   public static final Logger LOG =
-      LoggerFactory.getLogger(SlowPeerTracker.class);
+      LoggerFactory.getLogger(SlowDiskTracker.class);
 
   /**
    * Time duration after which a report is considered stale. This is
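
As an aside on the one-line fix above: the usual SLF4J idiom is to name a logger
after its enclosing class, which is what the patch restores. A minimal sketch
with an invented class and message, not code from the Hadoop tree:

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class SlowDiskTrackerSketch {
  // Naming the logger after the enclosing class means log output and
  // per-logger level settings are attributed to this class, not to a
  // neighbouring class pasted in by mistake.
  private static final Logger LOG =
      LoggerFactory.getLogger(SlowDiskTrackerSketch.class);

  void reportSlowDisk(String disk) {
    LOG.info("Slow disk reported: {}", disk);
  }
}
```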




[47/50] [abbrv] hadoop git commit: HDFS-13112. Token expiration edits may cause log corruption or deadlock. Contributed by Daryn Sharp.

Posted by ae...@apache.org.
HDFS-13112. Token expiration edits may cause log corruption or deadlock. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47473952
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47473952
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47473952

Branch: refs/heads/HDFS-7240
Commit: 47473952e56b0380147d42f4110ad03c2276c961
Parents: a53d62a
Author: Kihwal Lee <ki...@apache.org>
Authored: Thu Feb 15 15:32:42 2018 -0600
Committer: Kihwal Lee <ki...@apache.org>
Committed: Thu Feb 15 15:32:42 2018 -0600

----------------------------------------------------------------------
 .../DelegationTokenSecretManager.java           | 53 ++++++++++++++------
 .../hdfs/server/namenode/FSNamesystem.java      | 17 ++++---
 .../hdfs/server/namenode/FSNamesystemLock.java  |  7 +++
 .../org/apache/hadoop/hdfs/util/RwLock.java     |  5 +-
 .../namenode/TestSecurityTokenEditLog.java      | 24 ++++++++-
 5 files changed, 83 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47473952/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
index b7f89a8..3547c96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hdfs.security.token.delegation;
 import java.io.DataInput;
 import java.io.DataOutputStream;
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Iterator;
@@ -366,34 +365,58 @@ public class DelegationTokenSecretManager
   @Override //AbstractDelegationTokenManager
   protected void logUpdateMasterKey(DelegationKey key)
       throws IOException {
-    synchronized (noInterruptsLock) {
+    try {
       // The edit logging code will fail catastrophically if it
       // is interrupted during a logSync, since the interrupt
       // closes the edit log files. Doing this inside the
-      // above lock and then checking interruption status
-      // prevents this bug.
-      if (Thread.interrupted()) {
-        throw new InterruptedIOException(
-            "Interrupted before updating master key");
+      // fsn lock will prevent being interrupted when stopping
+      // the secret manager.
+      namesystem.readLockInterruptibly();
+      try {
+        // this monitor isn't necessary if stopped while holding write lock
+        // but for safety, guard against a stop with read lock.
+        synchronized (noInterruptsLock) {
+          if (Thread.currentThread().isInterrupted()) {
+            return; // leave flag set so secret monitor exits.
+          }
+          namesystem.logUpdateMasterKey(key);
+        }
+      } finally {
+        namesystem.readUnlock();
       }
-      namesystem.logUpdateMasterKey(key);
+    } catch (InterruptedException ie) {
+      // AbstractDelegationTokenManager may crash if an exception is thrown.
+      // The interrupt flag will be detected when it attempts to sleep.
+      Thread.currentThread().interrupt();
     }
   }
   
   @Override //AbstractDelegationTokenManager
   protected void logExpireToken(final DelegationTokenIdentifier dtId)
       throws IOException {
-    synchronized (noInterruptsLock) {
+    try {
       // The edit logging code will fail catastrophically if it
       // is interrupted during a logSync, since the interrupt
       // closes the edit log files. Doing this inside the
-      // above lock and then checking interruption status
-      // prevents this bug.
-      if (Thread.interrupted()) {
-        throw new InterruptedIOException(
-            "Interrupted before expiring delegation token");
+      // fsn lock will prevent being interrupted when stopping
+      // the secret manager.
+      namesystem.readLockInterruptibly();
+      try {
+        // this monitor isn't necessary if stopped while holding write lock
+        // but for safety, guard against a stop with read lock.
+        synchronized (noInterruptsLock) {
+          if (Thread.currentThread().isInterrupted()) {
+            return; // leave flag set so secret monitor exits.
+          }
+          namesystem.logExpireDelegationToken(dtId);
+        }
+      } finally {
+        namesystem.readUnlock();
       }
-      namesystem.logExpireDelegationToken(dtId);
+    } catch (InterruptedException ie) {
+      // AbstractDelegationTokenManager may crash if an exception is thrown.
+      // The interrupt flag will be detected when it attempts to sleep.
+      Thread.currentThread().interrupt();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47473952/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 6c27d7e..b0973a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1580,6 +1580,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     this.fsLock.readLock();
   }
   @Override
+  public void readLockInterruptibly() throws InterruptedException {
+    this.fsLock.readLockInterruptibly();
+  }
+  @Override
   public void readUnlock() {
     this.fsLock.readUnlock();
   }
@@ -5675,9 +5679,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     assert !isInSafeMode() :
       "this should never be called while in safemode, since we stop " +
       "the DT manager before entering safemode!";
-    // No need to hold FSN lock since we don't access any internal
-    // structures, and this is stopped before the FSN shuts itself
-    // down, etc.
+    // edit log rolling is not thread-safe and must be protected by the
+    // fsn lock.  not updating namespace so read lock is sufficient.
+    assert hasReadLock();
     getEditLog().logUpdateMasterKey(key);
     getEditLog().logSync();
   }
@@ -5691,9 +5695,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     assert !isInSafeMode() :
       "this should never be called while in safemode, since we stop " +
       "the DT manager before entering safemode!";
-    // No need to hold FSN lock since we don't access any internal
-    // structures, and this is stopped before the FSN shuts itself
-    // down, etc.
+    // edit log rolling is not thread-safe and must be protected by the
+    // fsn lock.  not updating namespace so read lock is sufficient.
+    assert hasReadLock();
+    // do not logSync so expiration edits are batched
     getEditLog().logCancelDelegationToken(id);
   }  
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47473952/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
index 32c7efa..900f8a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
@@ -145,6 +145,13 @@ class FSNamesystemLock {
     }
   }
 
+  public void readLockInterruptibly() throws InterruptedException {
+    coarseLock.readLock().lockInterruptibly();
+    if (coarseLock.getReadHoldCount() == 1) {
+      readLockHeldTimeStampNanos.set(timer.monotonicNowNanos());
+    }
+  }
+
   public void readUnlock() {
     readUnlock(OP_NAME_OTHER);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47473952/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/RwLock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/RwLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/RwLock.java
index e36f0f7..deaeaa4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/RwLock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/RwLock.java
@@ -21,7 +21,10 @@ package org.apache.hadoop.hdfs.util;
 public interface RwLock {
   /** Acquire read lock. */
   public void readLock();
-  
+
+  /** Acquire read lock, unless interrupted while waiting  */
+  void readLockInterruptibly() throws InterruptedException;
+
   /** Release read lock. */
   public void readUnlock();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47473952/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
index 5aa19bb..c43c909 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
@@ -24,6 +24,7 @@ import java.io.File;
 import java.io.IOException;
 import java.net.URI;
 import java.util.Iterator;
+import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -37,7 +38,11 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
+import org.junit.Assert;
 import org.junit.Test;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
 import static org.mockito.Mockito.*;
 
 /**
@@ -180,8 +185,25 @@ public class TestSecurityTokenEditLog {
     Text renewer = new Text(UserGroupInformation.getCurrentUser().getUserName());
     FSImage fsImage = mock(FSImage.class);
     FSEditLog log = mock(FSEditLog.class);
-    doReturn(log).when(fsImage).getEditLog();   
+    doReturn(log).when(fsImage).getEditLog();
+    // verify that the namesystem read lock is held while logging token
+    // expirations.  the namesystem is not updated, so write lock is not
+    // necessary, but the lock is required because edit log rolling is not
+    // thread-safe.
+    final AtomicReference<FSNamesystem> fsnRef = new AtomicReference<>();
+    doAnswer(
+      new Answer<Void>() {
+        @Override
+        public Void answer(InvocationOnMock invocation) throws Throwable {
+          // fsn claims read lock if either read or write locked.
+          Assert.assertTrue(fsnRef.get().hasReadLock());
+          Assert.assertFalse(fsnRef.get().hasWriteLock());
+          return null;
+        }
+      }
+    ).when(log).logCancelDelegationToken(any(DelegationTokenIdentifier.class));
     FSNamesystem fsn = new FSNamesystem(conf, fsImage);
+    fsnRef.set(fsn);
     
     DelegationTokenSecretManager dtsm = fsn.getDelegationTokenSecretManager();
     try {
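
To make the pattern in the `DelegationTokenSecretManager` hunks easier to
follow: edit-log writes now happen under an interruptible read lock, and an
interrupt while waiting is re-flagged rather than propagated. Below is a
stripped-down sketch that uses a plain `ReentrantReadWriteLock` in place of
`FSNamesystemLock`; the class and method names are invented for illustration.

```java
import java.util.concurrent.locks.ReentrantReadWriteLock;

class InterruptibleEditLoggingSketch {
  private final ReentrantReadWriteLock fsnLock = new ReentrantReadWriteLock();
  private final Object noInterruptsLock = new Object();

  void logExpiration(Runnable editLogWrite) {
    try {
      // Wait for the read lock, but give up cleanly if the thread is
      // interrupted (for example while the secret manager is stopping).
      fsnLock.readLock().lockInterruptibly();
      try {
        synchronized (noInterruptsLock) {
          if (Thread.currentThread().isInterrupted()) {
            return; // leave the flag set so the caller's monitor loop exits
          }
          editLogWrite.run(); // the edit-log call is guarded by the read lock
        }
      } finally {
        fsnLock.readLock().unlock();
      }
    } catch (InterruptedException ie) {
      // Restore the interrupt flag instead of throwing, so the background
      // thread can notice it and shut down.
      Thread.currentThread().interrupt();
    }
  }
}
```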




[33/50] [abbrv] hadoop git commit: HADOOP-10571. Use Log.*(Object, Throwable) overload to log exceptions. Contributed by Andras Bokor.

Posted by ae...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index b78fc9c..d0ded89 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -148,10 +148,8 @@ class DataXceiver extends Receiver implements Runnable {
         (colonIdx < 0) ? remoteAddress : remoteAddress.substring(0, colonIdx);
     localAddress = peer.getLocalAddressString();
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Number of active connections is: "
-          + datanode.getXceiverCount());
-    }
+    LOG.debug("Number of active connections is: {}",
+        datanode.getXceiverCount());
   }
 
   /**
@@ -187,7 +185,7 @@ class DataXceiver extends Receiver implements Runnable {
     // This doesn't need to be in a critical section. Although the client
     // can reuse the connection to issue a different request, trying to send
     // an OOB through the recently closed block receiver is harmless.
-    LOG.info("Sending OOB to peer: " + peer);
+    LOG.info("Sending OOB to peer: {}", peer);
     br.sendOOB();
   }
 
@@ -199,7 +197,7 @@ class DataXceiver extends Receiver implements Runnable {
       }
       xceiver.interrupt();
     }
-    LOG.info("Stopped the writer: " + peer);
+    LOG.info("Stopped the writer: {}", peer);
   }
 
   /**
@@ -239,14 +237,15 @@ class DataXceiver extends Receiver implements Runnable {
       } catch (InvalidMagicNumberException imne) {
         if (imne.isHandshake4Encryption()) {
           LOG.info("Failed to read expected encryption handshake from client " +
-              "at " + peer.getRemoteAddressString() + ". Perhaps the client " +
+              "at {}. Perhaps the client " +
               "is running an older version of Hadoop which does not support " +
-              "encryption", imne);
+              "encryption", peer.getRemoteAddressString(), imne);
         } else {
           LOG.info("Failed to read expected SASL data transfer protection " +
-              "handshake from client at " + peer.getRemoteAddressString() + 
+              "handshake from client at {}" +
               ". Perhaps the client is running an older version of Hadoop " +
-              "which does not support SASL data transfer protection", imne);
+              "which does not support SASL data transfer protection",
+              peer.getRemoteAddressString(), imne);
         }
         return;
       }
@@ -302,7 +301,7 @@ class DataXceiver extends Receiver implements Runnable {
         if (LOG.isTraceEnabled()) {
           LOG.trace(s, t);
         } else {
-          LOG.info(s + "; " + t);
+          LOG.info("{}; {}", s, t.toString());
         }
       } else if (op == Op.READ_BLOCK && t instanceof SocketTimeoutException) {
         String s1 =
@@ -311,23 +310,19 @@ class DataXceiver extends Receiver implements Runnable {
         if (LOG.isTraceEnabled()) {
           LOG.trace(s1, t);
         } else {
-          LOG.info(s1 + "; " + t);          
+          LOG.info("{}; {}", s1, t.toString());
         }
       } else if (t instanceof InvalidToken) {
         // The InvalidToken exception has already been logged in
         // checkAccess() method and this is not a server error.
-        if (LOG.isTraceEnabled()) {
-          LOG.trace(s, t);
-        }
+        LOG.trace(s, t);
       } else {
         LOG.error(s, t);
       }
     } finally {
       collectThreadLocalStates();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(datanode.getDisplayName() + ":Number of active connections is: "
-            + datanode.getXceiverCount());
-      }
+      LOG.debug("{}:Number of active connections is: {}",
+          datanode.getDisplayName(), datanode.getXceiverCount());
       updateCurrentThreadName("Cleaning up");
       if (peer != null) {
         dataXceiverServer.closePeer(peer);
@@ -405,21 +400,22 @@ class DataXceiver extends Receiver implements Runnable {
         DomainSocket sock = peer.getDomainSocket();
         sock.sendFileDescriptors(fds, buf, 0, buf.length);
         if (supportsReceiptVerification) {
-          LOG.trace("Reading receipt verification byte for " + slotId);
+          LOG.trace("Reading receipt verification byte for {}", slotId);
           int val = sock.getInputStream().read();
           if (val < 0) {
             throw new EOFException();
           }
         } else {
-          LOG.trace("Receipt verification is not enabled on the DataNode.  " +
-                    "Not verifying " + slotId);
+          LOG.trace("Receipt verification is not enabled on the DataNode. " +
+                    "Not verifying {}", slotId);
         }
         success = true;
       }
     } finally {
       if ((!success) && (registeredSlotId != null)) {
-        LOG.info("Unregistering " + registeredSlotId + " because the " +
-            "requestShortCircuitFdsForRead operation failed.");
+        LOG.info("Unregistering {} because the " +
+            "requestShortCircuitFdsForRead operation failed.",
+            registeredSlotId);
         datanode.shortCircuitRegistry.unregisterSlot(registeredSlotId);
       }
       if (ClientTraceLog.isInfoEnabled()) {
@@ -547,8 +543,8 @@ class DataXceiver extends Receiver implements Runnable {
         // We don't want to close the socket here, since that might lead to
         // bad behavior inside the poll() call.  See HADOOP-11802 for details.
         try {
-          LOG.warn("Failed to send success response back to the client.  " +
-              "Shutting down socket for " + shmInfo.getShmId() + ".");
+          LOG.warn("Failed to send success response back to the client. " +
+              "Shutting down socket for {}", shmInfo.getShmId());
           sock.shutdown();
         } catch (IOException e) {
           LOG.warn("Failed to shut down socket in error handler", e);
@@ -616,9 +612,9 @@ class DataXceiver extends Receiver implements Runnable {
           ClientReadStatusProto stat = ClientReadStatusProto.parseFrom(
               PBHelperClient.vintPrefixed(in));
           if (!stat.hasStatus()) {
-            LOG.warn("Client " + peer.getRemoteAddressString() +
-                " did not send a valid status code after reading. " +
-                "Will close connection.");
+            LOG.warn("Client {} did not send a valid status code " +
+                "after reading. Will close connection.",
+                peer.getRemoteAddressString());
             IOUtils.closeStream(out);
           }
         } catch (IOException ioe) {
@@ -633,10 +629,8 @@ class DataXceiver extends Receiver implements Runnable {
       datanode.metrics.incrBlocksRead();
       datanode.metrics.incrTotalReadTime(duration);
     } catch ( SocketException ignored ) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace(dnR + ":Ignoring exception while serving " + block + " to " +
-            remoteAddress, ignored);
-      }
+      LOG.trace("{}:Ignoring exception while serving {} to {}",
+          dnR, block, remoteAddress, ignored);
       // Its ok for remote side to close the connection anytime.
       datanode.metrics.incrBlocksRead();
       IOUtils.closeStream(out);
@@ -645,8 +639,8 @@ class DataXceiver extends Receiver implements Runnable {
        * Earlier version shutdown() datanode if there is disk error.
        */
       if (!(ioe instanceof SocketTimeoutException)) {
-        LOG.warn(dnR + ":Got exception while serving " + block + " to "
-          + remoteAddress, ioe);
+        LOG.warn("{}:Got exception while serving {} to {}",
+            dnR, block, remoteAddress, ioe);
         incrDatanodeNetworkErrors();
       }
       throw ioe;
@@ -719,19 +713,18 @@ class DataXceiver extends Receiver implements Runnable {
       throw new IOException(stage + " does not support multiple targets "
           + Arrays.asList(targets));
     }
-    
+
     if (LOG.isDebugEnabled()) {
-      LOG.debug("opWriteBlock: stage=" + stage + ", clientname=" + clientname 
-      		+ "\n  block  =" + block + ", newGs=" + latestGenerationStamp
-      		+ ", bytesRcvd=[" + minBytesRcvd + ", " + maxBytesRcvd + "]"
-          + "\n  targets=" + Arrays.asList(targets)
-          + "; pipelineSize=" + pipelineSize + ", srcDataNode=" + srcDataNode
-          + ", pinning=" + pinning);
-      LOG.debug("isDatanode=" + isDatanode
-          + ", isClient=" + isClient
-          + ", isTransfer=" + isTransfer);
-      LOG.debug("writeBlock receive buf size " + peer.getReceiveBufferSize() +
-                " tcp no delay " + peer.getTcpNoDelay());
+      LOG.debug("opWriteBlock: stage={}, clientname={}\n  " +
+              "block  ={}, newGs={}, bytesRcvd=[{}, {}]\n  " +
+              "targets={}; pipelineSize={}, srcDataNode={}, pinning={}",
+          stage, clientname, block, latestGenerationStamp, minBytesRcvd,
+          maxBytesRcvd, Arrays.asList(targets), pipelineSize, srcDataNode,
+          pinning);
+      LOG.debug("isDatanode={}, isClient={}, isTransfer={}",
+          isDatanode, isClient, isTransfer);
+      LOG.debug("writeBlock receive buf size {} tcp no delay {}",
+          peer.getReceiveBufferSize(), peer.getTcpNoDelay());
     }
 
     // We later mutate block's generation stamp and length, but we need to
@@ -741,8 +734,8 @@ class DataXceiver extends Receiver implements Runnable {
     if (block.getNumBytes() == 0) {
       block.setNumBytes(dataXceiverServer.estimateBlockSize);
     }
-    LOG.info("Receiving " + block + " src: " + remoteAddress + " dest: "
-        + localAddress);
+    LOG.info("Receiving {} src: {} dest: {}",
+        block, remoteAddress, localAddress);
 
     DataOutputStream mirrorOut = null;  // stream to next target
     DataInputStream mirrorIn = null;    // reply from next target
@@ -778,9 +771,7 @@ class DataXceiver extends Receiver implements Runnable {
         InetSocketAddress mirrorTarget = null;
         // Connect to backup machine
         mirrorNode = targets[0].getXferAddr(connectToDnViaHostname);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Connecting to datanode " + mirrorNode);
-        }
+        LOG.debug("Connecting to datanode {}", mirrorNode);
         mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
         mirrorSock = datanode.newSocket();
         try {
@@ -844,11 +835,10 @@ class DataXceiver extends Receiver implements Runnable {
               BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(mirrorIn));
             mirrorInStatus = connectAck.getStatus();
             firstBadLink = connectAck.getFirstBadLink();
-            if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) {
-              LOG.debug("Datanode " + targets.length +
-                       " got response for connect ack " +
-                       " from downstream datanode with firstbadlink as " +
-                       firstBadLink);
+            if (mirrorInStatus != SUCCESS) {
+              LOG.debug("Datanode {} got response for connect" +
+                  "ack  from downstream datanode with firstbadlink as {}",
+                  targets.length, firstBadLink);
             }
           }
 
@@ -869,13 +859,12 @@ class DataXceiver extends Receiver implements Runnable {
           IOUtils.closeSocket(mirrorSock);
           mirrorSock = null;
           if (isClient) {
-            LOG.error(datanode + ":Exception transfering block " +
-                      block + " to mirror " + mirrorNode + ": " + e);
+            LOG.error("{}:Exception transfering block {} to mirror {}",
+                datanode, block, mirrorNode, e);
             throw e;
           } else {
-            LOG.info(datanode + ":Exception transfering " +
-                     block + " to mirror " + mirrorNode +
-                     "- continuing without the mirror", e);
+            LOG.info("{}:Exception transfering {} to mirror {}- continuing " +
+                "without the mirror", datanode, block, mirrorNode, e);
             incrDatanodeNetworkErrors();
           }
         }
@@ -883,10 +872,9 @@ class DataXceiver extends Receiver implements Runnable {
 
       // send connect-ack to source for clients and not transfer-RBW/Finalized
       if (isClient && !isTransfer) {
-        if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) {
-          LOG.debug("Datanode " + targets.length +
-                   " forwarding connect ack to upstream firstbadlink is " +
-                   firstBadLink);
+        if (mirrorInStatus != SUCCESS) {
+          LOG.debug("Datanode {} forwarding connect ack to upstream " +
+              "firstbadlink is {}", targets.length, firstBadLink);
         }
         BlockOpResponseProto.newBuilder()
           .setStatus(mirrorInStatus)
@@ -904,9 +892,7 @@ class DataXceiver extends Receiver implements Runnable {
 
         // send close-ack for transfer-RBW/Finalized 
         if (isTransfer) {
-          if (LOG.isTraceEnabled()) {
-            LOG.trace("TRANSFER: send close-ack");
-          }
+          LOG.trace("TRANSFER: send close-ack");
           writeResponse(SUCCESS, null, replyOut);
         }
       }
@@ -924,15 +910,16 @@ class DataXceiver extends Receiver implements Runnable {
       if (isDatanode ||
           stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
         datanode.closeBlock(block, null, storageUuid, isOnTransientStorage);
-        LOG.info("Received " + block + " src: " + remoteAddress + " dest: "
-            + localAddress + " of size " + block.getNumBytes());
+        LOG.info("Received {} src: {} dest: {} of size {}",
+            block, remoteAddress, localAddress, block.getNumBytes());
       }
 
       if(isClient) {
         size = block.getNumBytes();
       }
     } catch (IOException ioe) {
-      LOG.info("opWriteBlock " + block + " received exception " + ioe);
+      LOG.info("opWriteBlock {} received exception {}",
+          block, ioe.toString());
       incrDatanodeNetworkErrors();
       throw ioe;
     } finally {
@@ -970,7 +957,8 @@ class DataXceiver extends Receiver implements Runnable {
           targetStorageTypes, targetStorageIds, clientName);
       writeResponse(Status.SUCCESS, null, out);
     } catch (IOException ioe) {
-      LOG.info("transferBlock " + blk + " received exception " + ioe);
+      LOG.info("transferBlock {} received exception {}",
+          blk, ioe.toString());
       incrDatanodeNetworkErrors();
       throw ioe;
     } finally {
@@ -1005,7 +993,8 @@ class DataXceiver extends Receiver implements Runnable {
           .writeDelimitedTo(out);
       out.flush();
     } catch (IOException ioe) {
-      LOG.info("blockChecksum " + block + " received exception " + ioe);
+      LOG.info("blockChecksum {} received exception {}",
+          block, ioe.toString());
       incrDatanodeNetworkErrors();
       throw ioe;
     } finally {
@@ -1046,8 +1035,8 @@ class DataXceiver extends Receiver implements Runnable {
           .writeDelimitedTo(out);
       out.flush();
     } catch (IOException ioe) {
-      LOG.info("blockChecksum " + stripedBlockInfo.getBlock() +
-          " received exception " + ioe);
+      LOG.info("blockChecksum {} received exception {}",
+          stripedBlockInfo.getBlock(), ioe.toString());
       incrDatanodeNetworkErrors();
       throw ioe;
     } finally {
@@ -1105,10 +1094,10 @@ class DataXceiver extends Receiver implements Runnable {
       datanode.metrics.incrBlocksRead();
       datanode.metrics.incrTotalReadTime(duration);
       
-      LOG.info("Copied " + block + " to " + peer.getRemoteAddressString());
+      LOG.info("Copied {} to {}", block, peer.getRemoteAddressString());
     } catch (IOException ioe) {
       isOpSuccess = false;
-      LOG.info("opCopyBlock " + block + " received exception " + ioe);
+      LOG.info("opCopyBlock {} received exception {}", block, ioe.toString());
       incrDatanodeNetworkErrors();
       throw ioe;
     } finally {
@@ -1163,16 +1152,14 @@ class DataXceiver extends Receiver implements Runnable {
         ReplicaInfo oldReplica = datanode.data.moveBlockAcrossStorage(block,
             storageType, storageId);
         if (oldReplica != null) {
-          LOG.info("Moved " + block + " from StorageType "
-              + oldReplica.getVolume().getStorageType() + " to " + storageType);
+          LOG.info("Moved {} from StorageType {} to {}",
+              block, oldReplica.getVolume().getStorageType(), storageType);
         }
       } else {
         block.setNumBytes(dataXceiverServer.estimateBlockSize);
         // get the output stream to the proxy
         final String dnAddr = proxySource.getXferAddr(connectToDnViaHostname);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Connecting to datanode " + dnAddr);
-        }
+        LOG.debug("Connecting to datanode {}", dnAddr);
         InetSocketAddress proxyAddr = NetUtils.createSocketAddr(dnAddr);
         proxySock = datanode.newSocket();
         NetUtils.connect(proxySock, proxyAddr, dnConf.socketTimeout);
@@ -1229,8 +1216,8 @@ class DataXceiver extends Receiver implements Runnable {
         datanode.notifyNamenodeReceivedBlock(
             block, delHint, r.getStorageUuid(), r.isOnTransientStorage());
         
-        LOG.info("Moved " + block + " from " + peer.getRemoteAddressString()
-            + ", delHint=" + delHint);
+        LOG.info("Moved {} from {}, delHint={}",
+            block, peer.getRemoteAddressString(), delHint);
       }
     } catch (IOException ioe) {
       opStatus = ERROR;
@@ -1260,7 +1247,8 @@ class DataXceiver extends Receiver implements Runnable {
       try {
         sendResponse(opStatus, errMsg);
       } catch (IOException ioe) {
-        LOG.warn("Error writing reply back to " + peer.getRemoteAddressString());
+        LOG.warn("Error writing reply back to {}",
+            peer.getRemoteAddressString());
         incrDatanodeNetworkErrors();
       }
       IOUtils.closeStream(proxyOut);
@@ -1408,10 +1396,8 @@ class DataXceiver extends Receiver implements Runnable {
       final String[] storageIds) throws IOException {
     checkAndWaitForBP(blk);
     if (datanode.isBlockTokenEnabled) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Checking block access token for block '" + blk.getBlockId()
-            + "' with mode '" + mode + "'");
-      }
+      LOG.debug("Checking block access token for block '{}' with mode '{}'",
+          blk.getBlockId(), mode);
       try {
         datanode.blockPoolTokenSecretManager.checkAccess(t, null, blk, mode,
             storageTypes, storageIds);
@@ -1429,9 +1415,9 @@ class DataXceiver extends Receiver implements Runnable {
             resp.build().writeDelimitedTo(out);
             out.flush();
           }
-          LOG.warn("Block token verification failed: op=" + op
-              + ", remoteAddress=" + remoteAddress
-              + ", message=" + e.getLocalizedMessage());
+          LOG.warn("Block token verification failed: op={}, " +
+                  "remoteAddress={}, message={}",
+              op, remoteAddress, e.getLocalizedMessage());
           throw e;
         } finally {
           IOUtils.closeStream(out);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 0c9b875..6c27d7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -6185,7 +6185,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         LOG.debug("Get corrupt file blocks returned error: " + e.getMessage());
       }
     } catch (IOException e) {
-      LOG.warn("Get corrupt file blocks returned error: " + e.getMessage());
+      LOG.warn("Get corrupt file blocks returned error", e);
     }
     return JSON.toString(list);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
index 9fc954c..6f8ce91 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
@@ -28,8 +28,6 @@ import java.util.List;
 import java.util.concurrent.*;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.ServiceFailedException;
@@ -51,6 +49,8 @@ import org.apache.hadoop.security.UserGroupInformation;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Thread which runs inside the NN when it's in Standby state,
@@ -60,7 +60,8 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  */
 @InterfaceAudience.Private
 public class StandbyCheckpointer {
-  private static final Log LOG = LogFactory.getLog(StandbyCheckpointer.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(StandbyCheckpointer.class);
   private static final long PREVENT_AFTER_CANCEL_MS = 2*60*1000L;
   private final CheckpointConf checkpointConf;
   private final Configuration conf;
@@ -136,8 +137,8 @@ public class StandbyCheckpointer {
 
   public void start() {
     LOG.info("Starting standby checkpoint thread...\n" +
-        "Checkpointing active NN to possible NNs: " + activeNNAddresses + "\n" +
-        "Serving checkpoints at " + myNNAddress);
+        "Checkpointing active NN to possible NNs: {}\n" +
+        "Serving checkpoints at {}", activeNNAddresses, myNNAddress);
     thread.start();
   }
   
@@ -177,8 +178,8 @@ public class StandbyCheckpointer {
       assert thisCheckpointTxId >= prevCheckpointTxId;
       if (thisCheckpointTxId == prevCheckpointTxId) {
         LOG.info("A checkpoint was triggered but the Standby Node has not " +
-            "received any transactions since the last checkpoint at txid " +
-            thisCheckpointTxId + ". Skipping...");
+            "received any transactions since the last checkpoint at txid {}. " +
+            "Skipping...", thisCheckpointTxId);
         return;
       }
 
@@ -253,8 +254,7 @@ public class StandbyCheckpointer {
         }
 
       } catch (ExecutionException e) {
-        ioe = new IOException("Exception during image upload: " + e.getMessage(),
-            e.getCause());
+        ioe = new IOException("Exception during image upload", e);
         break;
       } catch (InterruptedException e) {
         ie = e;
@@ -401,15 +401,15 @@ public class StandbyCheckpointer {
           if (needCheckpoint) {
             LOG.info("Triggering a rollback fsimage for rolling upgrade.");
           } else if (uncheckpointed >= checkpointConf.getTxnCount()) {
-            LOG.info("Triggering checkpoint because there have been " + 
-                uncheckpointed + " txns since the last checkpoint, which " +
-                "exceeds the configured threshold " +
-                checkpointConf.getTxnCount());
+            LOG.info("Triggering checkpoint because there have been {} txns " +
+                "since the last checkpoint, " +
+                "which exceeds the configured threshold {}",
+                uncheckpointed, checkpointConf.getTxnCount());
             needCheckpoint = true;
           } else if (secsSinceLast >= checkpointConf.getPeriod()) {
-            LOG.info("Triggering checkpoint because it has been " +
-                secsSinceLast + " seconds since the last checkpoint, which " +
-                "exceeds the configured interval " + checkpointConf.getPeriod());
+            LOG.info("Triggering checkpoint because it has been {} seconds " +
+                "since the last checkpoint, which exceeds the configured " +
+                "interval {}", secsSinceLast, checkpointConf.getPeriod());
             needCheckpoint = true;
           }
 
@@ -442,7 +442,7 @@ public class StandbyCheckpointer {
             LOG.info("Checkpoint finished successfully.");
           }
         } catch (SaveNamespaceCancelledException ce) {
-          LOG.info("Checkpoint was cancelled: " + ce.getMessage());
+          LOG.info("Checkpoint was cancelled: {}", ce.getMessage());
           canceledCount++;
         } catch (InterruptedException ie) {
           LOG.info("Interrupted during checkpointing", ie);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 107decf..45c02f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -1332,7 +1332,7 @@ public class MiniDFSCluster implements AutoCloseable {
     try {
       uri = new URI("hdfs://" + hostPort);
     } catch (URISyntaxException e) {
-      NameNode.LOG.warn("unexpected URISyntaxException: " + e );
+      NameNode.LOG.warn("unexpected URISyntaxException", e);
     }
     return uri;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
index 1373891..1d06616 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
@@ -32,13 +32,13 @@ import org.apache.commons.cli.HelpFormatter;
 import org.apache.commons.cli.OptionBuilder;
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.eclipse.jetty.util.ajax.JSON;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class drives the creation of a mini-cluster on the local machine. By
@@ -58,8 +58,8 @@ import org.eclipse.jetty.util.ajax.JSON;
  * $HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/share/hadoop/hdfs/hadoop-hdfs-0.24.0-SNAPSHOT-tests.jar org.apache.hadoop.test.MiniDFSClusterManager -options...
  */
 public class MiniDFSClusterManager {
-  private static final Log LOG =
-    LogFactory.getLog(MiniDFSClusterManager.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(MiniDFSClusterManager.class);
 
   private MiniDFSCluster dfs;
   private String writeDetails;
@@ -146,8 +146,8 @@ public class MiniDFSClusterManager {
                                           .build();
     dfs.waitActive();
     
-    LOG.info("Started MiniDFSCluster -- namenode on port "
-        + dfs.getNameNodePort());
+    LOG.info("Started MiniDFSCluster -- namenode on port {}",
+        dfs.getNameNodePort());
 
     if (writeConfig != null) {
       FileOutputStream fos = new FileOutputStream(new File(writeConfig));
@@ -180,7 +180,7 @@ public class MiniDFSClusterManager {
       CommandLineParser parser = new GnuParser();
       cli = parser.parse(options, args);
     } catch(ParseException e) {
-      LOG.warn("options parsing failed:  "+e.getMessage());
+      LOG.warn("options parsing failed", e);
       new HelpFormatter().printHelp("...", options);
       return false;
     }
@@ -192,7 +192,7 @@ public class MiniDFSClusterManager {
     
     if (cli.getArgs().length > 0) {
       for (String arg : cli.getArgs()) {
-        LOG.error("Unrecognized option: " + arg);
+        LOG.error("Unrecognized option: {}", arg);
         new HelpFormatter().printHelp("...", options);
         return false;
       }
@@ -236,12 +236,12 @@ public class MiniDFSClusterManager {
           conf2.set(keyval[0], keyval[1]);
           num_confs_updated++;
         } else {
-          LOG.warn("Ignoring -D option " + prop);
+          LOG.warn("Ignoring -D option {}", prop);
         }
       }
     }
-    LOG.info("Updated " + num_confs_updated +
-        " configuration settings from command line.");
+    LOG.info("Updated {} configuration settings from command line.",
+        num_confs_updated);
   }
 
   /**
@@ -254,8 +254,8 @@ public class MiniDFSClusterManager {
         return Integer.parseInt(o);
       } 
     } catch (NumberFormatException ex) {
-      LOG.error("Couldn't parse value (" + o + ") for option " 
-          + argName + ". Using default: " + defaultValue);
+      LOG.error("Couldn't parse value ({}) for option {}. " +
+          "Using default: {}", o, argName, defaultValue);
     }
     
     return defaultValue;    

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java
index 3507b7f..e476223 100644
--- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java
+++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java
@@ -45,8 +45,8 @@ import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.tools.rumen.JobStoryProducer;
 import org.apache.hadoop.tools.rumen.ZombieJobProducer;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Driver class for the Gridmix3 benchmark. Gridmix accepts a timestamped
@@ -58,7 +58,7 @@ import org.apache.commons.logging.LogFactory;
  */
 public class Gridmix extends Configured implements Tool {
 
-  public static final Log LOG = LogFactory.getLog(Gridmix.class);
+  public static final Logger LOG = LoggerFactory.getLogger(Gridmix.class);
 
   /**
    * Output (scratch) directory for submitted jobs. Relative paths are
@@ -184,8 +184,8 @@ public class Gridmix extends Configured implements Tool {
       final Configuration conf = getConf();
 
       if (inputDir.getFileSystem(conf).exists(inputDir)) {
-        LOG.error("Gridmix input data directory " + inputDir
-                  + " already exists when -generate option is used.\n");
+        LOG.error("Gridmix input data directory {} already exists " +
+            "when -generate option is used.", inputDir);
         return STARTUP_FAILED_ERROR;
       }
 
@@ -193,13 +193,13 @@ public class Gridmix extends Configured implements Tool {
       CompressionEmulationUtil.setupDataGeneratorConfig(conf);
     
       final GenerateData genData = new GenerateData(conf, inputDir, genbytes);
-      LOG.info("Generating " + StringUtils.humanReadableInt(genbytes) +
-               " of test data...");
+      LOG.info("Generating {} of test data...",
+          StringUtils.TraditionalBinaryPrefix.long2String(genbytes, "", 1));
       launchGridmixJob(genData);
     
       FsShell shell = new FsShell(conf);
       try {
-        LOG.info("Changing the permissions for inputPath " + inputDir.toString());
+        LOG.info("Changing the permissions for inputPath {}", inputDir);
         shell.run(new String[] {"-chmod","-R","777", inputDir.toString()});
       } catch (Exception e) {
         LOG.error("Couldnt change the file permissions " , e);
@@ -528,9 +528,7 @@ public class Gridmix extends Configured implements Tool {
         statistics.start();
       } catch (Throwable e) {
         LOG.error("Startup failed. " + e.toString() + "\n");
-        if (LOG.isDebugEnabled()) {
-          e.printStackTrace();
-        }
+        LOG.debug("Startup failed", e);
         if (factory != null) factory.abort(); // abort pipeline
         exitCode = STARTUP_FAILED_ERROR;
       } finally {
@@ -561,7 +559,7 @@ public class Gridmix extends Configured implements Tool {
         summarizer.finalize(factory, traceIn, genbytes, userResolver, stats, 
                             conf);
       }
-      IOUtils.cleanup(LOG, trace);
+      IOUtils.cleanupWithLogger(LOG, trace);
     }
     return exitCode;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/HttpInputStreamWithRelease.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/HttpInputStreamWithRelease.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/HttpInputStreamWithRelease.java
index aa52f83..bd025ac 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/HttpInputStreamWithRelease.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/http/HttpInputStreamWithRelease.java
@@ -18,12 +18,12 @@
 
 package org.apache.hadoop.fs.swift.http;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.swift.exceptions.SwiftConnectionClosedException;
 import org.apache.hadoop.fs.swift.util.SwiftUtils;
 import org.apache.http.HttpResponse;
 import org.apache.http.client.methods.HttpRequestBase;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.ByteArrayInputStream;
 import java.io.EOFException;
@@ -44,8 +44,8 @@ import java.net.URI;
  */
 public class HttpInputStreamWithRelease extends InputStream {
 
-  private static final Log LOG =
-    LogFactory.getLog(HttpInputStreamWithRelease.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(HttpInputStreamWithRelease.class);
   private final URI uri;
   private HttpRequestBase req;
   private HttpResponse resp;
@@ -100,9 +100,7 @@ public class HttpInputStreamWithRelease extends InputStream {
     if (!released) {
       reasonClosed = reason;
       try {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Releasing connection to " + uri + ":  " + reason, ex);
-        }
+        LOG.debug("Releasing connection to {}:  {}", uri, reason, ex);
         if (req != null) {
           if (!dataConsumed) {
             req.abort();
@@ -137,7 +135,7 @@ public class HttpInputStreamWithRelease extends InputStream {
     try {
       release(operation, ex);
     } catch (IOException ioe) {
-      LOG.debug("Exception during release: " + operation + " - " + ioe, ioe);
+      LOG.debug("Exception during release: {}", operation, ioe);
       //make this the exception if there was none before
       if (ex == null) {
         ex = ioe;
@@ -173,9 +171,7 @@ public class HttpInputStreamWithRelease extends InputStream {
     try {
       read = inStream.read();
     } catch (EOFException e) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("EOF exception " + e, e);
-      }
+      LOG.debug("EOF exception", e);
       read = -1;
     } catch (IOException e) {
       throw releaseAndRethrow("read()", e);
@@ -200,9 +196,7 @@ public class HttpInputStreamWithRelease extends InputStream {
     try {
       read = inStream.read(b, off, len);
     } catch (EOFException e) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("EOF exception " + e, e);
-      }
+      LOG.debug("EOF exception", e);
       read = -1;
     } catch (IOException e) {
       throw releaseAndRethrow("read(b, off, " + len + ")", e);
@@ -222,13 +216,12 @@ public class HttpInputStreamWithRelease extends InputStream {
   protected void finalize() {
     try {
       if (release("finalize()", constructionStack)) {
-        LOG.warn("input stream of " + uri
-                 + " not closed properly -cleaned up in finalize()");
+        LOG.warn("input stream of {}" +
+                 " not closed properly -cleaned up in finalize()", uri);
       }
     } catch (Exception e) {
       //swallow anything that failed here
-      LOG.warn("Exception while releasing " + uri + "in finalizer",
-               e);
+      LOG.warn("Exception while releasing {} in finalizer", uri, e);
     }
   }
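
The bulk of this change replaces string concatenation with SLF4J parameterized
logging and the `(format, args..., Throwable)` overloads. A minimal sketch of
the before/after idiom, with invented class, message, and variable names:

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class LoggingIdiomSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(LoggingIdiomSketch.class);

  void demo(String peer, Exception e) {
    // Old style: the message string is built even when the level is disabled,
    // so callers wrapped it in isDebugEnabled()/isInfoEnabled() guards.
    // LOG.info("Sending OOB to peer: " + peer);

    // Parameterized style: the {} placeholders are only rendered when the
    // level is enabled, so the explicit guard can usually be dropped.
    LOG.info("Sending OOB to peer: {}", peer);

    // Passing the Throwable as the final argument logs its stack trace;
    // appending e.getMessage() to the string would silently discard it.
    LOG.warn("Failed to shut down socket for {}", peer, e);
  }
}
```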
 




[31/50] [abbrv] hadoop git commit: HADOOP-15040 Upgrade AWS SDK to 1.11.271: NPE bug spams logs w/ Yarn Log Aggregation

Posted by ae...@apache.org.
HADOOP-15040 Upgrade AWS SDK to 1.11.271: NPE bug spams logs w/ Yarn Log Aggregation


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60971b81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60971b81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60971b81

Branch: refs/heads/HDFS-7240
Commit: 60971b8195c954c109e83cdbd1c94c700da4a271
Parents: 332269d
Author: Aaron Fabbri <fa...@apache.org>
Authored: Tue Feb 13 18:38:22 2018 -0800
Committer: Aaron Fabbri <fa...@apache.org>
Committed: Tue Feb 13 18:51:22 2018 -0800

----------------------------------------------------------------------
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/60971b81/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index dd8465a..c27596c 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -137,7 +137,7 @@
     <make-maven-plugin.version>1.0-beta-1</make-maven-plugin.version>
     <native-maven-plugin.version>1.0-alpha-8</native-maven-plugin.version>
     <surefire.fork.timeout>900</surefire.fork.timeout>
-    <aws-java-sdk.version>1.11.199</aws-java-sdk.version>
+    <aws-java-sdk.version>1.11.271</aws-java-sdk.version>
     <hsqldb.version>2.3.4</hsqldb.version>
     <frontend-maven-plugin.version>1.5</frontend-maven-plugin.version>
     <!-- the version of Hadoop declared in the version resources; can be overridden




[34/50] [abbrv] hadoop git commit: HADOOP-10571. Use Log.*(Object, Throwable) overload to log exceptions. Contributed by Andras Bokor.

Posted by ae...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index 0db633f..6e63543 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -28,8 +28,6 @@ import java.nio.ByteBuffer;
 import java.nio.charset.Charset;
 import java.util.EnumSet;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;
@@ -137,6 +135,8 @@ import org.jboss.netty.channel.Channel;
 import org.jboss.netty.channel.ChannelHandlerContext;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * RPC program corresponding to nfs daemon. See {@link Nfs3}.
@@ -146,7 +146,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
   public static final FsPermission umask = new FsPermission(
       (short) DEFAULT_UMASK);
 
-  static final Log LOG = LogFactory.getLog(RpcProgramNfs3.class);
+  static final Logger LOG = LoggerFactory.getLogger(RpcProgramNfs3.class);
 
   private final NfsConfiguration config;
   private final WriteManager writeManager;
@@ -204,7 +204,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
     superuser = config.get(NfsConfigKeys.NFS_SUPERUSER_KEY,
         NfsConfigKeys.NFS_SUPERUSER_DEFAULT);
-    LOG.info("Configured HDFS superuser is " + superuser);
+    LOG.info("Configured HDFS superuser is {}", superuser);
 
     if (!enableDump) {
       writeDumpDir = null;
@@ -230,13 +230,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
   private void clearDirectory(String writeDumpDir) throws IOException {
     File dumpDir = new File(writeDumpDir);
     if (dumpDir.exists()) {
-      LOG.info("Delete current dump directory " + writeDumpDir);
+      LOG.info("Delete current dump directory {}", writeDumpDir);
       if (!(FileUtil.fullyDelete(dumpDir))) {
         throw new IOException("Cannot remove current dump directory: "
             + dumpDir);
       }
     }
-    LOG.info("Create new dump directory " + writeDumpDir);
+    LOG.info("Create new dump directory {}", writeDumpDir);
     if (!dumpDir.mkdirs()) {
       throw new IOException("Cannot create dump directory " + dumpDir);
     }
@@ -298,9 +298,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
   @Override
   public NFS3Response nullProcedure() {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS NULL");
-    }
+    LOG.debug("NFS NULL");
     return new NFS3Response(Nfs3Status.NFS3_OK);
   }
 
@@ -331,10 +329,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     FileHandle handle = request.getHandle();
     int namenodeId = handle.getNamenodeId();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("GETATTR for fileHandle: " + handle.dumpFileHandle()
-          + " client: " + remoteAddress);
+      LOG.debug("GETATTR for fileHandle: {} client: {}",
+          handle.dumpFileHandle(), remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -346,7 +343,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       attrs = writeManager.getFileAttr(dfsClient, handle, iug);
     } catch (RemoteException r) {
-      LOG.warn("Exception ", r);
+      LOG.warn("Exception", r);
       IOException io = r.unwrapRemoteException();
       /**
        * AuthorizationException can be thrown if the user can't be proxy'ed.
@@ -357,13 +354,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         return new GETATTR3Response(Nfs3Status.NFS3ERR_IO);
       }
     } catch (IOException e) {
-      LOG.info("Can't get file attribute, fileId=" + handle.getFileId(), e);
+      LOG.info("Can't get file attribute, fileId={}", handle.getFileId(), e);
       int status = mapErrorStatus(e);
       response.setStatus(status);
       return response;
     }
     if (attrs == null) {
-      LOG.error("Can't get path for fileId: " + handle.getFileId());
+      LOG.error("Can't get path for fileId: {}", handle.getFileId());
       response.setStatus(Nfs3Status.NFS3ERR_STALE);
       return response;
     }
@@ -378,9 +375,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     EnumSet<SetAttrField> updateFields = newAttr.getUpdateFields();
 
     if (setMode && updateFields.contains(SetAttrField.MODE)) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("set new mode: " + newAttr.getMode());
-      }
+      LOG.debug("set new mode: {}", newAttr.getMode());
       dfsClient.setPermission(fileIdPath,
           new FsPermission((short) (newAttr.getMode())));
     }
@@ -398,9 +393,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     long mtime = updateFields.contains(SetAttrField.MTIME) ? newAttr.getMtime()
         .getMilliSeconds() : -1;
     if (atime != -1 || mtime != -1) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("set atime: " + +atime + " mtime: " + mtime);
-      }
+      LOG.debug("set atime: {} mtime: {}", atime, mtime);
       dfsClient.setTimes(fileIdPath, mtime, atime);
     }
   }
@@ -427,10 +420,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     FileHandle handle = request.getHandle();
     int namenodeId = handle.getNamenodeId();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS SETATTR fileHandle: " + handle.dumpFileHandle()
-          + " client: " + remoteAddress);
+      LOG.debug("NFS SETATTR fileHandle: {} client: {}",
+          handle.dumpFileHandle(), remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -439,8 +431,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     }
 
     if (request.getAttr().getUpdateFields().contains(SetAttrField.SIZE)) {
-      LOG.error("Setting file size is not supported when setattr, fileId: "
-          + handle.getFileId());
+      LOG.error("Setting file size is not supported when setattr, fileId: {}",
+          handle.getFileId());
       response.setStatus(Nfs3Status.NFS3ERR_INVAL);
       return response;
     }
@@ -450,7 +442,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       preOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
       if (preOpAttr == null) {
-        LOG.info("Can't get path for fileId: " + handle.getFileId());
+        LOG.info("Can't get path for fileId: {}", handle.getFileId());
         response.setStatus(Nfs3Status.NFS3ERR_STALE);
         return response;
       }
@@ -474,13 +466,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       WccData wccData = new WccData(preOpWcc, postOpAttr);
       return new SETATTR3Response(Nfs3Status.NFS3_OK, wccData);
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       WccData wccData = null;
       try {
         wccData = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpAttr),
             dfsClient, fileIdPath, iug);
       } catch (IOException e1) {
-        LOG.info("Can't get postOpAttr for fileIdPath: " + fileIdPath, e1);
+        LOG.info("Can't get postOpAttr for fileIdPath: {}", fileIdPath, e1);
       }
 
       int status = mapErrorStatus(e);
@@ -515,10 +507,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     String fileName = request.getName();
     int namenodeId = dirHandle.getNamenodeId();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS LOOKUP dir fileHandle: " + dirHandle.dumpFileHandle()
-          + " name: " + fileName + " client: " + remoteAddress);
+      LOG.debug("NFS LOOKUP dir fileHandle: {} name: {} client: {}",
+          dirHandle.dumpFileHandle(), fileName, remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -530,10 +521,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       Nfs3FileAttributes postOpObjAttr = writeManager.getFileAttr(dfsClient,
           dirHandle, fileName, namenodeId);
       if (postOpObjAttr == null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("NFS LOOKUP fileId: " + dirHandle.getFileId() + " name: "
-              + fileName + " does not exist");
-        }
+        LOG.debug("NFS LOOKUP fileId: {} name: {} does not exist",
+            dirHandle.getFileId(), fileName);
         Nfs3FileAttributes postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient,
             dirFileIdPath, iug);
         return new LOOKUP3Response(Nfs3Status.NFS3ERR_NOENT, null, null,
@@ -543,7 +532,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       Nfs3FileAttributes postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient,
           dirFileIdPath, iug);
       if (postOpDirAttr == null) {
-        LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
+        LOG.info("Can't get path for dir fileId: {}", dirHandle.getFileId());
         return new LOOKUP3Response(Nfs3Status.NFS3ERR_STALE);
       }
       FileHandle fileHandle =
@@ -552,7 +541,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           postOpDirAttr);
 
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       int status = mapErrorStatus(e);
       return new LOOKUP3Response(status);
     }
@@ -592,16 +581,15 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     }
 
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS ACCESS fileHandle: " + handle.dumpFileHandle()
-          + " client: " + remoteAddress);
+      LOG.debug("NFS ACCESS fileHandle: {} client: {}",
+          handle.dumpFileHandle(), remoteAddress);
     }
-
     Nfs3FileAttributes attrs;
     try {
       attrs = writeManager.getFileAttr(dfsClient, handle, iug);
 
       if (attrs == null) {
-        LOG.error("Can't get path for fileId: " + handle.getFileId());
+        LOG.error("Can't get path for fileId: {}", handle.getFileId());
         return new ACCESS3Response(Nfs3Status.NFS3ERR_STALE);
       }
       if(iug.getUserName(securityHandler.getUid(), "unknown").equals(superuser)) {
@@ -616,7 +604,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
       return new ACCESS3Response(Nfs3Status.NFS3_OK, attrs, access);
     } catch (RemoteException r) {
-      LOG.warn("Exception ", r);
+      LOG.warn("Exception", r);
       IOException io = r.unwrapRemoteException();
       /**
        * AuthorizationException can be thrown if the user can't be proxy'ed.
@@ -627,7 +615,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         return new ACCESS3Response(Nfs3Status.NFS3ERR_IO);
       }
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       int status = mapErrorStatus(e);
       return new ACCESS3Response(status);
     }
@@ -660,10 +648,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     FileHandle handle = request.getHandle();
     int namenodeId = handle.getNamenodeId();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS READLINK fileHandle: " + handle.dumpFileHandle()
-          + " client: " + remoteAddress);
+      LOG.debug("NFS READLINK fileHandle: {} client: {}",
+          handle.dumpFileHandle(), remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -678,24 +665,23 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       Nfs3FileAttributes postOpAttr = Nfs3Utils.getFileAttr(dfsClient,
           fileIdPath, iug);
       if (postOpAttr == null) {
-        LOG.info("Can't get path for fileId: " + handle.getFileId());
+        LOG.info("Can't get path for fileId: {}", handle.getFileId());
         return new READLINK3Response(Nfs3Status.NFS3ERR_STALE);
       }
       if (postOpAttr.getType() != NfsFileType.NFSLNK.toValue()) {
-        LOG.error("Not a symlink, fileId: " + handle.getFileId());
+        LOG.error("Not a symlink, fileId: {}", handle.getFileId());
         return new READLINK3Response(Nfs3Status.NFS3ERR_INVAL);
       }
       if (target == null) {
-        LOG.error("Symlink target should not be null, fileId: "
-            + handle.getFileId());
+        LOG.error("Symlink target should not be null, fileId: {}",
+            handle.getFileId());
         return new READLINK3Response(Nfs3Status.NFS3ERR_SERVERFAULT);
       }
       int rtmax = config.getInt(NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY,
           NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_DEFAULT);
       if (rtmax < target.getBytes(Charset.forName("UTF-8")).length) {
-        LOG.error("Link size: "
-            + target.getBytes(Charset.forName("UTF-8")).length
-            + " is larger than max transfer size: " + rtmax);
+        LOG.error("Link size: {} is larger than max transfer size: {}",
+            target.getBytes(Charset.forName("UTF-8")).length, rtmax);
         return new READLINK3Response(Nfs3Status.NFS3ERR_IO, postOpAttr,
             new byte[0]);
       }
@@ -704,7 +690,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           target.getBytes(Charset.forName("UTF-8")));
 
     } catch (IOException e) {
-      LOG.warn("Readlink error: " + e.getClass(), e);
+      LOG.warn("Readlink error", e);
       int status = mapErrorStatus(e);
       return new READLINK3Response(status);
     }
@@ -741,10 +727,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     FileHandle handle = request.getHandle();
     int namenodeId = handle.getNamenodeId();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS READ fileHandle: " + handle.dumpFileHandle()+ " offset: "
-          + offset + " count: " + count + " client: " + remoteAddress);
+      LOG.debug("NFS READ fileHandle: {} offset: {} count: {} client: {}",
+          handle.dumpFileHandle(), offset, count, remoteAddress);
     }
-
     DFSClient dfsClient = clientCache.getDfsClient(userName, namenodeId);
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
@@ -760,15 +745,12 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         attrs = Nfs3Utils.getFileAttr(dfsClient,
             Nfs3Utils.getFileIdPath(handle), iug);
       } catch (IOException e) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Get error accessing file, fileId: " + handle.getFileId(), e);
-        }
+        LOG.debug("Get error accessing file, fileId: {}",
+            handle.getFileId(), e);
         return new READ3Response(Nfs3Status.NFS3ERR_IO);
       }
       if (attrs == null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Can't get path for fileId: " + handle.getFileId());
-        }
+        LOG.debug("Can't get path for fileId: {}", handle.getFileId());
         return new READ3Response(Nfs3Status.NFS3ERR_NOENT);
       }
       int access = Nfs3Utils.getAccessRightsForUserGroup(
@@ -787,8 +769,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     // optimized later by reading from the cache.
     int ret = writeManager.commitBeforeRead(dfsClient, handle, offset + count);
     if (ret != Nfs3Status.NFS3_OK) {
-      LOG.warn("commitBeforeRead didn't succeed with ret=" + ret
-          + ". Read may not get most recent data.");
+      LOG.warn("commitBeforeRead didn't succeed with ret={}. " +
+          "Read may not get most recent data.", ret);
     }
 
     try {
@@ -828,9 +810,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle),
           iug);
       if (readCount < count) {
-        LOG.info("Partial read. Asked offset: " + offset + " count: " + count
-            + " and read back: " + readCount + " file size: "
-            + attrs.getSize());
+        LOG.info("Partial read. Asked offset: {} count: {} and read back: {} " +
+                "file size: {}", offset, count, readCount, attrs.getSize());
       }
       // HDFS returns -1 for read beyond file size.
       if (readCount < 0) {
@@ -841,8 +822,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           ByteBuffer.wrap(readbuffer));
 
     } catch (IOException e) {
-      LOG.warn("Read error: " + e.getClass() + " offset: " + offset
-          + " count: " + count, e);
+      LOG.warn("Read error. Offset: {} count: {}", offset, count, e);
       int status = mapErrorStatus(e);
       return new READ3Response(status);
     }
@@ -884,11 +864,11 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     FileHandle handle = request.getHandle();
     int namenodeId = handle.getNamenodeId();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS WRITE fileHandle: " + handle.dumpFileHandle() + " offset: "
-          + offset + " length: " + count + " stableHow: " + stableHow.getValue()
-          + " xid: " + xid + " client: " + remoteAddress);
+      LOG.debug("NFS WRITE fileHandle: {} offset: {} length: {} " +
+              "stableHow: {} xid: {} client: {}",
+          handle.dumpFileHandle(), offset, count, stableHow.getValue(), xid,
+          remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -900,7 +880,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       preOpAttr = writeManager.getFileAttr(dfsClient, handle, iug);
       if (preOpAttr == null) {
-        LOG.error("Can't get path for fileId: " + handle.getFileId());
+        LOG.error("Can't get path for fileId: {}", handle.getFileId());
         return new WRITE3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -910,22 +890,20 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
             Nfs3Constant.WRITE_COMMIT_VERF);
       }
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("requested offset=" + offset + " and current filesize="
-            + preOpAttr.getSize());
-      }
+      LOG.debug("requested offset={} and current filesize={}",
+          offset, preOpAttr.getSize());
 
       writeManager.handleWrite(dfsClient, request, channel, xid, preOpAttr);
 
     } catch (IOException e) {
-      LOG.info("Error writing to fileId " + handle.getFileId() + " at offset "
-          + offset + " and length " + data.length, e);
+      LOG.info("Error writing to fileId {} at offset {} and length {}",
+          handle.getFileId(), offset, data.length, e);
       // Try to return WccData
       Nfs3FileAttributes postOpAttr = null;
       try {
         postOpAttr = writeManager.getFileAttr(dfsClient, handle, iug);
       } catch (IOException e1) {
-        LOG.info("Can't get postOpAttr for fileId: " + handle.getFileId(), e1);
+        LOG.info("Can't get postOpAttr for fileId: {}", e1);
       }
       WccAttr attr = preOpAttr == null ? null : Nfs3Utils.getWccAttr(preOpAttr);
       WccData fileWcc = new WccData(attr, postOpAttr);
@@ -961,10 +939,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     String fileName = request.getName();
     int namenodeId = dirHandle.getNamenodeId();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS CREATE dir fileHandle: " + dirHandle.dumpFileHandle()
-          + " filename: " + fileName + " client: " + remoteAddress);
+      LOG.debug("NFS CREATE dir fileHandle: {} filename: {} client: {}",
+          dirHandle.dumpFileHandle(), fileName, remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -976,8 +953,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     if ((createMode != Nfs3Constant.CREATE_EXCLUSIVE)
         && request.getObjAttr().getUpdateFields().contains(SetAttrField.SIZE)
         && request.getObjAttr().getSize() != 0) {
-      LOG.error("Setting file size is not supported when creating file: "
-          + fileName + " dir fileId: " + dirHandle.getFileId());
+      LOG.error("Setting file size is not supported when creating file: {} " +
+          "dir fileId: {}", fileName, dirHandle.getFileId());
       return new CREATE3Response(Nfs3Status.NFS3ERR_INVAL);
     }
 
@@ -990,7 +967,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
       if (preOpDirAttr == null) {
-        LOG.error("Can't get path for dirHandle: " + dirHandle);
+        LOG.error("Can't get path for dirHandle: {}", dirHandle);
         return new CREATE3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -1041,10 +1018,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         fos.close();
         fos = null;
       } else {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Opened stream for file: " + fileName + ", fileId: "
-              + fileHandle.getFileId());
-        }
+        LOG.debug("Opened stream for file: {}, fileId: {}",
+            fileName, fileHandle.getFileId());
       }
 
     } catch (IOException e) {
@@ -1053,8 +1028,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         try {
           fos.close();
         } catch (IOException e1) {
-          LOG.error("Can't close stream for dirFileId: " + dirHandle.getFileId()
-              + " filename: " + fileName, e1);
+          LOG.error("Can't close stream for dirFileId: {} filename: {}",
+              dirHandle.getFileId(), fileName, e1);
         }
       }
       if (dirWcc == null) {
@@ -1062,8 +1037,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr),
               dfsClient, dirFileIdPath, iug);
         } catch (IOException e1) {
-          LOG.error("Can't get postOpDirAttr for dirFileId: "
-              + dirHandle.getFileId(), e1);
+          LOG.error("Can't get postOpDirAttr for dirFileId: {}",
+              dirHandle.getFileId(), e1);
         }
       }
 
@@ -1105,13 +1080,12 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     }
 
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS MKDIR dirHandle: " + dirHandle.dumpFileHandle()
-          + " filename: " + fileName + " client: " + remoteAddress);
+      LOG.debug("NFS MKDIR dirHandle: {} filename: {} client: {}",
+          dirHandle.dumpFileHandle(), fileName, remoteAddress);
     }
-
     if (request.getObjAttr().getUpdateFields().contains(SetAttrField.SIZE)) {
-      LOG.error("Setting file size is not supported when mkdir: " + fileName
-          + " in dirHandle" + dirHandle);
+      LOG.error("Setting file size is not supported when mkdir: " +
+          "{} in dirHandle {}", fileName, dirHandle);
       return new MKDIR3Response(Nfs3Status.NFS3ERR_INVAL);
     }
 
@@ -1123,7 +1097,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
       if (preOpDirAttr == null) {
-        LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
+        LOG.info("Can't get path for dir fileId: {}", dirHandle.getFileId());
         return new MKDIR3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -1158,13 +1132,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       return new MKDIR3Response(Nfs3Status.NFS3_OK, new FileHandle(
           postOpObjAttr.getFileId(), namenodeId), postOpObjAttr, dirWcc);
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       // Try to return correct WccData
       if (postOpDirAttr == null) {
         try {
           postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
         } catch (IOException e1) {
-          LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e);
+          LOG.info("Can't get postOpDirAttr for {}", dirFileIdPath, e);
         }
       }
 
@@ -1202,10 +1176,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     String fileName = request.getName();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS REMOVE dir fileHandle: " + dirHandle.dumpFileHandle()
-          + " fileName: " + fileName + " client: " + remoteAddress);
+      LOG.debug("NFS REMOVE dir fileHandle: {} fileName: {} client: {}",
+          dirHandle.dumpFileHandle(), fileName, remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -1219,7 +1192,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       preOpDirAttr =  Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
       if (preOpDirAttr == null) {
-        LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
+        LOG.info("Can't get path for dir fileId: {}", dirHandle.getFileId());
         return new REMOVE3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -1247,13 +1220,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       }
       return new REMOVE3Response(Nfs3Status.NFS3_OK, dirWcc);
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       // Try to return correct WccData
       if (postOpDirAttr == null) {
         try {
           postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
         } catch (IOException e1) {
-          LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e1);
+          LOG.info("Can't get postOpDirAttr for {}", dirFileIdPath, e1);
         }
       }
 
@@ -1285,10 +1258,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     String fileName = request.getName();
     int namenodeId = dirHandle.getNamenodeId();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS RMDIR dir fileHandle: " + dirHandle.dumpFileHandle()
-          + " fileName: " + fileName + " client: " + remoteAddress);
+      LOG.debug("NFS RMDIR dir fileHandle: {} fileName: {} client: {}",
+          dirHandle.dumpFileHandle(), fileName, remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -1302,7 +1274,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
       if (preOpDirAttr == null) {
-        LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
+        LOG.info("Can't get path for dir fileId: {}", dirHandle.getFileId());
         return new RMDIR3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -1334,13 +1306,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
       return new RMDIR3Response(Nfs3Status.NFS3_OK, dirWcc);
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       // Try to return correct WccData
       if (postOpDirAttr == null) {
         try {
           postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
         } catch (IOException e1) {
-          LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e1);
+          LOG.info("Can't get postOpDirAttr for {}", dirFileIdPath, e1);
         }
       }
 
@@ -1376,11 +1348,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     int toNamenodeId = toHandle.getNamenodeId();
     String toName = request.getToName();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS RENAME from: " + fromHandle.dumpFileHandle()
-          + "/" + fromName + " to: " + toHandle.dumpFileHandle()
-          + "/" + toName + " client: " + remoteAddress);
+      LOG.debug("NFS RENAME from: {}/{} to: {}/{} client: {}",
+          fromHandle.dumpFileHandle(), fromName, toHandle.dumpFileHandle(),
+          toName, remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), fromNamenodeId);
     if (dfsClient == null) {
@@ -1403,14 +1374,15 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       fromPreOpAttr = Nfs3Utils.getFileAttr(dfsClient, fromDirFileIdPath, iug);
       if (fromPreOpAttr == null) {
-        LOG.info("Can't get path for fromHandle fileId: "
-            + fromHandle.getFileId());
+        LOG.info("Can't get path for fromHandle fileId: {}",
+            fromHandle.getFileId());
         return new RENAME3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
       toPreOpAttr = Nfs3Utils.getFileAttr(dfsClient, toDirFileIdPath, iug);
       if (toPreOpAttr == null) {
-        LOG.info("Can't get path for toHandle fileId: " + toHandle.getFileId());
+        LOG.info("Can't get path for toHandle fileId: {}",
+            toHandle.getFileId());
         return new RENAME3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -1434,7 +1406,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           dfsClient, toDirFileIdPath, iug);
       return new RENAME3Response(Nfs3Status.NFS3_OK, fromDirWcc, toDirWcc);
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       // Try to return correct WccData
       try {
         fromDirWcc = Nfs3Utils.createWccData(
@@ -1443,8 +1415,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         toDirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(toPreOpAttr),
             dfsClient, toDirFileIdPath, iug);
       } catch (IOException e1) {
-        LOG.info("Can't get postOpDirAttr for " + fromDirFileIdPath + " or"
-            + toDirFileIdPath, e1);
+        LOG.info("Can't get postOpDirAttr for {} or {}",
+            fromDirFileIdPath, toDirFileIdPath, e1);
       }
 
       int status = mapErrorStatus(e);
@@ -1484,10 +1456,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     // Don't do any name check to source path, just leave it to HDFS
     String linkIdPath = linkDirIdPath + "/" + name;
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS SYMLINK, target: " + symData + " link: " + linkIdPath
-          + " namenodeId: " + namenodeId + " client: " + remoteAddress);
-    }
+    LOG.debug("NFS SYMLINK, target: {} link: {} namenodeId: {} client: {}",
+        symData, linkIdPath, namenodeId, remoteAddress);
 
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
@@ -1515,7 +1485,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           objAttr.getFileId(), namenodeId), objAttr, dirWcc);
 
     } catch (IOException e) {
-      LOG.warn("Exception: " + e);
+      LOG.warn("Exception", e);
       int status = mapErrorStatus(e);
       response.setStatus(status);
       return response;
@@ -1542,9 +1512,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         throw io;
       }
       // This happens when startAfter was just deleted
-      LOG.info("Cookie couldn't be found: "
-          + new String(startAfter, Charset.forName("UTF-8"))
-          + ", do listing from beginning");
+      LOG.info("Cookie couldn't be found: {}, do listing from beginning",
+          new String(startAfter, Charset.forName("UTF-8")));
       dlisting = dfsClient
           .listPaths(dirFileIdPath, HdfsFileStatus.EMPTY_NAME);
     }
@@ -1577,21 +1546,19 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     long cookie = request.getCookie();
     if (cookie < 0) {
-      LOG.error("Invalid READDIR request, with negative cookie: " + cookie);
+      LOG.error("Invalid READDIR request, with negative cookie: {}", cookie);
       return new READDIR3Response(Nfs3Status.NFS3ERR_INVAL);
     }
     long count = request.getCount();
     if (count <= 0) {
-      LOG.info("Nonpositive count in invalid READDIR request: " + count);
+      LOG.info("Nonpositive count in invalid READDIR request: {}", count);
       return new READDIR3Response(Nfs3Status.NFS3_OK);
     }
 
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS READDIR fileHandle: " + handle.dumpFileHandle()
-          + " cookie: " + cookie + " count: " + count + " client: "
-          + remoteAddress);
+      LOG.debug("NFS READDIR fileHandle: {} cookie: {} count: {} client: {}",
+          handle.dumpFileHandle(), cookie, count, remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -1607,12 +1574,12 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
       dirStatus = dfsClient.getFileInfo(dirFileIdPath);
       if (dirStatus == null) {
-        LOG.info("Can't get path for fileId: " + handle.getFileId());
+        LOG.info("Can't get path for fileId: {}", handle.getFileId());
         return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
       }
       if (!dirStatus.isDirectory()) {
-        LOG.error("Can't readdir for regular file, fileId: "
-            + handle.getFileId());
+        LOG.error("Can't readdir for regular file, fileId: {}",
+            handle.getFileId());
         return new READDIR3Response(Nfs3Status.NFS3ERR_NOTDIR);
       }
       long cookieVerf = request.getCookieVerf();
@@ -1631,8 +1598,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           LOG.warn("AIX compatibility mode enabled, ignoring cookieverf " +
               "mismatches.");
         } else {
-          LOG.error("CookieVerf mismatch. request cookieVerf: " + cookieVerf
-              + " dir cookieVerf: " + dirStatus.getModificationTime());
+          LOG.error("CookieVerf mismatch. request cookieVerf: {} " +
+              "dir cookieVerf: {}",
+              cookieVerf, dirStatus.getModificationTime());
           return new READDIR3Response(
               Nfs3Status.NFS3ERR_BAD_COOKIE,
               Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug));
@@ -1664,11 +1632,11 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
       postOpAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
       if (postOpAttr == null) {
-        LOG.error("Can't get path for fileId: " + handle.getFileId());
+        LOG.error("Can't get path for fileId: {}", handle.getFileId());
         return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
       }
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       int status = mapErrorStatus(e);
       return new READDIR3Response(status);
     }
@@ -1742,26 +1710,28 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     int namenodeId = handle.getNamenodeId();
     long cookie = request.getCookie();
     if (cookie < 0) {
-      LOG.error("Invalid READDIRPLUS request, with negative cookie: " + cookie);
+      LOG.error("Invalid READDIRPLUS request, with negative cookie: {}",
+          cookie);
       return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
     }
     long dirCount = request.getDirCount();
     if (dirCount <= 0) {
-      LOG.info("Nonpositive dircount in invalid READDIRPLUS request: " + dirCount);
+      LOG.info("Nonpositive dircount in invalid READDIRPLUS request: {}",
+          dirCount);
       return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
     }
     int maxCount = request.getMaxCount();
     if (maxCount <= 0) {
-      LOG.info("Nonpositive maxcount in invalid READDIRPLUS request: " + maxCount);
+      LOG.info("Nonpositive maxcount in invalid READDIRPLUS request: {}",
+          maxCount);
       return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
     }
 
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS READDIRPLUS fileHandle: " + handle.dumpFileHandle()
-          + " cookie: " + cookie + " dirCount: " + dirCount + " maxCount: "
-          + maxCount + " client: " + remoteAddress);
+      LOG.debug("NFS READDIRPLUS fileHandle: {} cookie: {} dirCount: {} " +
+              "maxCount: {} client: {}",
+          handle.dumpFileHandle(), cookie, dirCount, maxCount, remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -1777,12 +1747,12 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
       dirStatus = dfsClient.getFileInfo(dirFileIdPath);
       if (dirStatus == null) {
-        LOG.info("Can't get path for fileId: " + handle.getFileId());
+        LOG.info("Can't get path for fileId: {}", handle.getFileId());
         return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
       }
       if (!dirStatus.isDirectory()) {
-        LOG.error("Can't readdirplus for regular file, fileId: "
-            + handle.getFileId());
+        LOG.error("Can't readdirplus for regular file, fileId: {}",
+            handle.getFileId());
         return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_NOTDIR);
       }
       long cookieVerf = request.getCookieVerf();
@@ -1799,8 +1769,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           LOG.warn("AIX compatibility mode enabled, ignoring cookieverf " +
               "mismatches.");
         } else {
-          LOG.error("cookieverf mismatch. request cookieverf: " + cookieVerf
-              + " dir cookieverf: " + dirStatus.getModificationTime());
+          LOG.error("cookieverf mismatch. request cookieverf: {} " +
+                  "dir cookieverf: {}",
+              cookieVerf, dirStatus.getModificationTime());
           return new READDIRPLUS3Response(
               Nfs3Status.NFS3ERR_BAD_COOKIE,
               Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug),
@@ -1833,11 +1804,11 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
       postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
       if (postOpDirAttr == null) {
-        LOG.info("Can't get path for fileId: " + handle.getFileId());
+        LOG.info("Can't get path for fileId: {}", handle.getFileId());
         return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
       }
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       int status = mapErrorStatus(e);
       return new READDIRPLUS3Response(status);
     }
@@ -1865,7 +1836,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         try {
           attr = writeManager.getFileAttr(dfsClient, childHandle, iug);
         } catch (IOException e) {
-          LOG.error("Can't get file attributes for fileId: " + fileId, e);
+          LOG.error("Can't get file attributes for fileId: {}", fileId, e);
           continue;
         }
         entries[i] = new READDIRPLUS3Response.EntryPlus3(fileId,
@@ -1882,7 +1853,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         try {
           attr = writeManager.getFileAttr(dfsClient, childHandle, iug);
         } catch (IOException e) {
-          LOG.error("Can't get file attributes for fileId: " + fileId, e);
+          LOG.error("Can't get file attributes for fileId: {}", fileId, e);
           continue;
         }
         entries[i] = new READDIRPLUS3Response.EntryPlus3(fileId,
@@ -1923,10 +1894,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     FileHandle handle = request.getHandle();
     int namenodeId = handle.getNamenodeId();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS FSSTAT fileHandle: " + handle.dumpFileHandle()
-          + " client: " + remoteAddress);
+      LOG.debug("NFS FSSTAT fileHandle: {} client: {}",
+          handle.dumpFileHandle(), remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -1942,7 +1912,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       Nfs3FileAttributes attrs = writeManager.getFileAttr(dfsClient, handle,
           iug);
       if (attrs == null) {
-        LOG.info("Can't get path for fileId: " + handle.getFileId());
+        LOG.info("Can't get path for fileId: {}", handle.getFileId());
         return new FSSTAT3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -1957,7 +1927,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       return new FSSTAT3Response(Nfs3Status.NFS3_OK, attrs, totalBytes,
           freeBytes, freeBytes, maxFsObjects, maxFsObjects, maxFsObjects, 0);
     } catch (RemoteException r) {
-      LOG.warn("Exception ", r);
+      LOG.warn("Exception", r);
       IOException io = r.unwrapRemoteException();
       /**
        * AuthorizationException can be thrown if the user can't be proxy'ed.
@@ -1968,7 +1938,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         return new FSSTAT3Response(Nfs3Status.NFS3ERR_IO);
       }
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       int status = mapErrorStatus(e);
       return new FSSTAT3Response(status);
     }
@@ -2000,10 +1970,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     FileHandle handle = request.getHandle();
     int namenodeId = handle.getNamenodeId();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS FSINFO fileHandle: " + handle.dumpFileHandle()
-          +" client: " + remoteAddress);
+      LOG.debug("NFS FSINFO fileHandle: {} client: {}", remoteAddress,
+          handle.dumpFileHandle(), remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -2025,7 +1994,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       Nfs3FileAttributes attrs = Nfs3Utils.getFileAttr(dfsClient,
           Nfs3Utils.getFileIdPath(handle), iug);
       if (attrs == null) {
-        LOG.info("Can't get path for fileId: " + handle.getFileId());
+        LOG.info("Can't get path for fileId: {}", handle.getFileId());
         return new FSINFO3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -2035,7 +2004,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       return new FSINFO3Response(Nfs3Status.NFS3_OK, attrs, rtmax, rtmax, 1,
           wtmax, wtmax, 1, dtperf, Long.MAX_VALUE, new NfsTime(1), fsProperty);
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       int status = mapErrorStatus(e);
       return new FSINFO3Response(status);
     }
@@ -2069,10 +2038,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     int namenodeId = handle.getNamenodeId();
 
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS PATHCONF fileHandle: " + handle.dumpFileHandle()
-          + " client: " + remoteAddress);
+      LOG.debug("NFS PATHCONF fileHandle: {} client: {}",
+          handle.dumpFileHandle(), remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -2084,14 +2052,14 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle),
           iug);
       if (attrs == null) {
-        LOG.info("Can't get path for fileId: " + handle.getFileId());
+        LOG.info("Can't get path for fileId: {}", handle.getFileId());
         return new PATHCONF3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
       return new PATHCONF3Response(Nfs3Status.NFS3_OK, attrs, 0,
           HdfsServerConstants.MAX_PATH_LENGTH, true, false, false, true);
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       int status = mapErrorStatus(e);
       return new PATHCONF3Response(status);
     }
@@ -2123,11 +2091,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     FileHandle handle = request.getHandle();
     int namenodeId = handle.getNamenodeId();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS COMMIT fileHandle: " + handle.dumpFileHandle() + " offset="
-          + request.getOffset() + " count=" + request.getCount() + " client: "
-          + remoteAddress);
+      LOG.debug("NFS COMMIT fileHandle: {} offset={} count={} client: {}",
+          handle.dumpFileHandle(), request.getOffset(), request.getCount(),
+          remoteAddress);
     }
-
     DFSClient dfsClient =
         clientCache.getDfsClient(securityHandler.getUser(), namenodeId);
     if (dfsClient == null) {
@@ -2140,7 +2107,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     try {
       preOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
       if (preOpAttr == null) {
-        LOG.info("Can't get path for fileId: " + handle.getFileId());
+        LOG.info("Can't get path for fileId: {}", handle.getFileId());
         return new COMMIT3Response(Nfs3Status.NFS3ERR_STALE);
       }
 
@@ -2158,12 +2125,12 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           preOpAttr, namenodeId);
       return null;
     } catch (IOException e) {
-      LOG.warn("Exception ", e);
+      LOG.warn("Exception", e);
       Nfs3FileAttributes postOpAttr = null;
       try {
         postOpAttr = writeManager.getFileAttr(dfsClient, handle, iug);
       } catch (IOException e1) {
-        LOG.info("Can't get postOpAttr for fileId: " + handle.getFileId(), e1);
+        LOG.info("Can't get postOpAttr for fileId: {}", handle.getFileId(), e1);
       }
 
       WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr);
@@ -2205,8 +2172,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     if (nfsproc3 != NFSPROC3.NULL) {
       if (credentials.getFlavor() != AuthFlavor.AUTH_SYS
           && credentials.getFlavor() != AuthFlavor.RPCSEC_GSS) {
-        LOG.info("Wrong RPC AUTH flavor, " + credentials.getFlavor()
-            + " is not AUTH_SYS or RPCSEC_GSS.");
+        LOG.info("Wrong RPC AUTH flavor, {} is not AUTH_SYS or RPCSEC_GSS.",
+            credentials.getFlavor());
         XDR reply = new XDR();
         RpcDeniedReply rdr = new RpcDeniedReply(xid,
             RpcReply.ReplyState.MSG_ACCEPTED,
@@ -2226,12 +2193,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           xid);
       if (entry != null) { // in cache
         if (entry.isCompleted()) {
-          LOG.info("Sending the cached reply to retransmitted request " + xid);
+          LOG.info("Sending the cached reply to retransmitted request {}",
+              xid);
           RpcUtil.sendRpcResponse(ctx, entry.getResponse());
           return;
         } else { // else request is in progress
-          LOG.info("Retransmitted request, transaction still in progress "
-              + xid);
+          LOG.info("Retransmitted request, transaction still in progress {}",
+              xid);
           // Ignore the request and do nothing
           return;
         }
@@ -2261,18 +2229,12 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       response = readlink(xdr, info);
       metrics.addReadlink(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.READ) {
-      if (LOG.isDebugEnabled()) {
-          LOG.debug(Nfs3Utils.READ_RPC_START + xid);
-      }
+      LOG.debug("{}{}", Nfs3Utils.READ_RPC_START, xid);
       response = read(xdr, info);
-      if (LOG.isDebugEnabled() && (nfsproc3 == NFSPROC3.READ)) {
-        LOG.debug(Nfs3Utils.READ_RPC_END + xid);
-      }
+      LOG.debug("{}{}", Nfs3Utils.READ_RPC_END, xid);
       metrics.addRead(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.WRITE) {
-      if (LOG.isDebugEnabled()) {
-          LOG.debug(Nfs3Utils.WRITE_RPC_START + xid);
-      }
+      LOG.debug("{}{}", Nfs3Utils.WRITE_RPC_START, xid);
       response = write(xdr, info);
       // Write end debug trace is in Nfs3Utils.writeChannel
     } else if (nfsproc3 == NFSPROC3.CREATE) {
@@ -2323,10 +2285,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           out);
     }
     if (response == null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("No sync response, expect an async response for request XID="
-            + rpcCall.getXid());
-      }
+      LOG.debug("No sync response, expect an async response for request XID={}",
+          rpcCall.getXid());
       return;
     }
     // TODO: currently we just return VerifierNone

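The hunks above drop if (LOG.isDebugEnabled()) guards where the message
arguments are cheap, but keep the guard where an argument is expensive to
compute (for example handle.dumpFileHandle()), because parameterized logging
still evaluates its arguments eagerly. A minimal sketch of that trade-off
follows; the class and the dumpState() helper are hypothetical stand-ins, and
slf4j-api is assumed on the classpath.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class GuardedDebugSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(GuardedDebugSketch.class);

      // Hypothetical stand-in for an expensive formatter such as
      // FileHandle#dumpFileHandle(): it builds its string unconditionally.
      private static String dumpState() {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < 1024; i++) {
          sb.append(i).append(',');
        }
        return sb.toString();
      }

      static void demo(String client) {
        // Cheap argument: the parameterized call alone is enough.
        LOG.debug("NFS NULL, client: {}", client);

        // Expensive argument: keep the guard so dumpState() only runs when
        // DEBUG logging is actually enabled.
        if (LOG.isDebugEnabled()) {
          LOG.debug("request state: {} client: {}", dumpState(), client);
        }
      }

      public static void main(String[] args) {
        demo("127.0.0.1");
      }
    }
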
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 6163d93..30f75ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -477,7 +477,7 @@ public class DataNode extends ReconfigurableBase
               HdfsClientConfigKeys.Read.ShortCircuit.DEFAULT)) {
       String reason = DomainSocket.getLoadingFailureReason();
       if (reason != null) {
-        LOG.warn("File descriptor passing is disabled because " + reason);
+        LOG.warn("File descriptor passing is disabled because {}", reason);
         this.fileDescriptorPassingDisabledReason = reason;
       } else {
         LOG.info("File descriptor passing is enabled.");
@@ -493,7 +493,7 @@ public class DataNode extends ReconfigurableBase
 
     try {
       hostName = getHostName(conf);
-      LOG.info("Configured hostname is " + hostName);
+      LOG.info("Configured hostname is {}", hostName);
       startDataNode(dataDirs, resources);
     } catch (IOException ie) {
       shutdown();
@@ -533,7 +533,7 @@ public class DataNode extends ReconfigurableBase
       case DFS_DATANODE_DATA_DIR_KEY: {
         IOException rootException = null;
         try {
-          LOG.info("Reconfiguring " + property + " to " + newVal);
+          LOG.info("Reconfiguring {} to {}", property, newVal);
           this.refreshVolumes(newVal);
           return getConf().get(DFS_DATANODE_DATA_DIR_KEY);
         } catch (IOException e) {
@@ -545,7 +545,7 @@ public class DataNode extends ReconfigurableBase
                 new BlockReportOptions.Factory().setIncremental(false).build());
           } catch (IOException e) {
             LOG.warn("Exception while sending the block report after refreshing"
-                + " volumes " + property + " to " + newVal, e);
+                + " volumes {} to {}", property, newVal, e);
             if (rootException == null) {
               rootException = e;
             }
@@ -561,7 +561,7 @@ public class DataNode extends ReconfigurableBase
       case DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY: {
         ReconfigurationException rootException = null;
         try {
-          LOG.info("Reconfiguring " + property + " to " + newVal);
+          LOG.info("Reconfiguring {} to {}", property, newVal);
           int movers;
           if (newVal == null) {
             // set to default
@@ -696,8 +696,8 @@ public class DataNode extends ReconfigurableBase
       // New conf doesn't have the storage location which available in
       // the current storage locations. Add to the deactivateLocations list.
       if (!found) {
-        LOG.info("Deactivation request received for active volume: "
-            + dir.getRoot().toString());
+        LOG.info("Deactivation request received for active volume: {}",
+            dir.getRoot());
         results.deactivateLocations.add(
             StorageLocation.parse(dir.getRoot().toString()));
       }
@@ -724,8 +724,8 @@ public class DataNode extends ReconfigurableBase
         // New conf doesn't have this failed storage location.
         // Add to the deactivate locations list.
         if (!found) {
-          LOG.info("Deactivation request received for failed volume: "
-              + failedStorageLocation);
+          LOG.info("Deactivation request received for failed volume: {}",
+              failedStorageLocation);
           results.deactivateLocations.add(StorageLocation.parse(
               failedStorageLocation));
         }
@@ -760,7 +760,7 @@ public class DataNode extends ReconfigurableBase
         throw new IOException("Attempt to remove all volumes.");
       }
       if (!changedVolumes.newLocations.isEmpty()) {
-        LOG.info("Adding new volumes: " +
+        LOG.info("Adding new volumes: {}",
             Joiner.on(",").join(changedVolumes.newLocations));
 
         // Add volumes for each Namespace
@@ -794,16 +794,16 @@ public class DataNode extends ReconfigurableBase
               errorMessageBuilder.append(
                   String.format("FAILED TO ADD: %s: %s%n",
                   volume, ioe.getMessage()));
-              LOG.error("Failed to add volume: " + volume, ioe);
+              LOG.error("Failed to add volume: {}", volume, ioe);
             } else {
               effectiveVolumes.add(volume.toString());
-              LOG.info("Successfully added volume: " + volume);
+              LOG.info("Successfully added volume: {}", volume);
             }
           } catch (Exception e) {
             errorMessageBuilder.append(
                 String.format("FAILED to ADD: %s: %s%n", volume,
                               e.toString()));
-            LOG.error("Failed to add volume: " + volume, e);
+            LOG.error("Failed to add volume: {}", volume, e);
           }
         }
       }
@@ -812,7 +812,7 @@ public class DataNode extends ReconfigurableBase
         removeVolumes(changedVolumes.deactivateLocations);
       } catch (IOException e) {
         errorMessageBuilder.append(e.getMessage());
-        LOG.error("Failed to remove volume: " + e.getMessage(), e);
+        LOG.error("Failed to remove volume", e);
       }
 
       if (errorMessageBuilder.length() > 0) {
@@ -967,16 +967,17 @@ public class DataNode extends ReconfigurableBase
           ServicePlugin.class);
     } catch (RuntimeException e) {
       String pluginsValue = conf.get(DFS_DATANODE_PLUGINS_KEY);
-      LOG.error("Unable to load DataNode plugins. Specified list of plugins: " +
+      LOG.error("Unable to load DataNode plugins. " +
+              "Specified list of plugins: {}",
           pluginsValue, e);
       throw e;
     }
     for (ServicePlugin p: plugins) {
       try {
         p.start(this);
-        LOG.info("Started plug-in " + p);
+        LOG.info("Started plug-in {}", p);
       } catch (Throwable t) {
-        LOG.warn("ServicePlugin " + p + " could not be started", t);
+        LOG.warn("ServicePlugin {} could not be started", p, t);
       }
     }
   }
@@ -1026,7 +1027,7 @@ public class DataNode extends ReconfigurableBase
         traceAdminService,
         ipcServer);
 
-    LOG.info("Opened IPC server at " + ipcServer.getListenerAddress());
+    LOG.info("Opened IPC server at {}", ipcServer.getListenerAddress());
 
     // set service-level authorization security policy
     if (getConf().getBoolean(
@@ -1085,8 +1086,9 @@ public class DataNode extends ReconfigurableBase
       directoryScanner = new DirectoryScanner(this, data, conf);
       directoryScanner.start();
     } else {
-      LOG.info("Periodic Directory Tree Verification scan is disabled because " +
-                   reason);
+      LOG.info("Periodic Directory Tree Verification scan " +
+              "is disabled because {}",
+          reason);
     }
   }
   
@@ -1139,7 +1141,7 @@ public class DataNode extends ReconfigurableBase
           dnConf.getTransferSocketRecvBufferSize());
     }
     streamingAddr = tcpPeerServer.getStreamingAddr();
-    LOG.info("Opened streaming server at " + streamingAddr);
+    LOG.info("Opened streaming server at {}", streamingAddr);
     this.threadGroup = new ThreadGroup("dataXceiverServer");
     xserver = new DataXceiverServer(tcpPeerServer, getConf(), this);
     this.dataXceiverServer = new Daemon(threadGroup, xserver);
@@ -1157,7 +1159,7 @@ public class DataNode extends ReconfigurableBase
       if (domainPeerServer != null) {
         this.localDataXceiverServer = new Daemon(threadGroup,
             new DataXceiverServer(domainPeerServer, getConf(), this));
-        LOG.info("Listening on UNIX domain socket: " +
+        LOG.info("Listening on UNIX domain socket: {}",
             domainPeerServer.getBindPath());
       }
     }
@@ -1175,7 +1177,7 @@ public class DataNode extends ReconfigurableBase
          (!conf.getBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
           HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT))) {
         LOG.warn("Although short-circuit local reads are configured, " +
-            "they are disabled because you didn't configure " +
+            "they are disabled because you didn't configure {}",
             DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY);
       }
       return null;
@@ -1205,8 +1207,8 @@ public class DataNode extends ReconfigurableBase
       bpos.notifyNamenodeReceivedBlock(block, delHint, storageUuid,
           isOnTransientStorage);
     } else {
-      LOG.error("Cannot find BPOfferService for reporting block received for bpid="
-          + block.getBlockPoolId());
+      LOG.error("Cannot find BPOfferService for reporting block received " +
+              "for bpid={}", block.getBlockPoolId());
     }
   }
   
@@ -1217,8 +1219,8 @@ public class DataNode extends ReconfigurableBase
     if(bpos != null) {
       bpos.notifyNamenodeReceivingBlock(block, storageUuid);
     } else {
-      LOG.error("Cannot find BPOfferService for reporting block receiving for bpid="
-          + block.getBlockPoolId());
+      LOG.error("Cannot find BPOfferService for reporting block receiving " +
+          "for bpid={}", block.getBlockPoolId());
     }
   }
   
@@ -1239,7 +1241,7 @@ public class DataNode extends ReconfigurableBase
   public void reportBadBlocks(ExtendedBlock block) throws IOException{
     FsVolumeSpi volume = getFSDataset().getVolume(block);
     if (volume == null) {
-      LOG.warn("Cannot find FsVolumeSpi to report bad block: " + block);
+      LOG.warn("Cannot find FsVolumeSpi to report bad block: {}", block);
       return;
     }
     reportBadBlocks(block, volume);
@@ -1382,7 +1384,7 @@ public class DataNode extends ReconfigurableBase
         }
       }
     }
-    LOG.info("Starting DataNode with maxLockedMemory = " +
+    LOG.info("Starting DataNode with maxLockedMemory = {}",
         dnConf.maxLockedMemory);
 
     int volFailuresTolerated = dnConf.getVolFailuresTolerated();
@@ -1409,8 +1411,8 @@ public class DataNode extends ReconfigurableBase
 
     // Login is done by now. Set the DN user name.
     dnUserName = UserGroupInformation.getCurrentUser().getUserName();
-    LOG.info("dnUserName = " + dnUserName);
-    LOG.info("supergroup = " + supergroup);
+    LOG.info("dnUserName = {}", dnUserName);
+    LOG.info("supergroup = {}", supergroup);
     initIpcServer();
 
     metrics = DataNodeMetrics.create(getConf(), getDisplayName());
@@ -1514,8 +1516,8 @@ public class DataNode extends ReconfigurableBase
     if (storage.getDatanodeUuid() == null) {
       storage.setDatanodeUuid(generateUuid());
       storage.writeAll();
-      LOG.info("Generated and persisted new Datanode UUID " +
-               storage.getDatanodeUuid());
+      LOG.info("Generated and persisted new Datanode UUID {}",
+          storage.getDatanodeUuid());
     }
   }
 
@@ -1583,11 +1585,11 @@ public class DataNode extends ReconfigurableBase
     if (!blockPoolTokenSecretManager.isBlockPoolRegistered(blockPoolId)) {
       long blockKeyUpdateInterval = keys.getKeyUpdateInterval();
       long blockTokenLifetime = keys.getTokenLifetime();
-      LOG.info("Block token params received from NN: for block pool " +
-          blockPoolId + " keyUpdateInterval="
-          + blockKeyUpdateInterval / (60 * 1000)
-          + " min(s), tokenLifetime=" + blockTokenLifetime / (60 * 1000)
-          + " min(s)");
+      LOG.info("Block token params received from NN: " +
+          "for block pool {} keyUpdateInterval={} min(s), " +
+          "tokenLifetime={} min(s)",
+          blockPoolId, blockKeyUpdateInterval / (60 * 1000),
+          blockTokenLifetime / (60 * 1000));
       final boolean enableProtobuf = getConf().getBoolean(
           DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE,
           DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE_DEFAULT);
@@ -1690,9 +1692,10 @@ public class DataNode extends ReconfigurableBase
         storage.recoverTransitionRead(this, nsInfo, dataDirs, startOpt);
       }
       final StorageInfo bpStorage = storage.getBPStorage(bpid);
-      LOG.info("Setting up storage: nsid=" + bpStorage.getNamespaceID()
-          + ";bpid=" + bpid + ";lv=" + storage.getLayoutVersion()
-          + ";nsInfo=" + nsInfo + ";dnuuid=" + storage.getDatanodeUuid());
+      LOG.info("Setting up storage: nsid={};bpid={};lv={};" +
+              "nsInfo={};dnuuid={}",
+          bpStorage.getNamespaceID(), bpid, storage.getLayoutVersion(),
+          nsInfo, storage.getDatanodeUuid());
     }
 
     // If this is a newly formatted DataNode then assign a new DatanodeUuid.
@@ -1802,9 +1805,8 @@ public class DataNode extends ReconfigurableBase
       final boolean connectToDnViaHostname) throws IOException {
     final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
     final InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
-    }
+    LOG.debug("Connecting to datanode {} addr={}",
+        dnAddr, addr);
     final UserGroupInformation loginUgi = UserGroupInformation.getLoginUser();
     try {
       return loginUgi
@@ -1868,20 +1870,15 @@ public class DataNode extends ReconfigurableBase
     checkBlockToken(block, token, BlockTokenIdentifier.AccessMode.READ);
     Preconditions.checkNotNull(data, "Storage not yet initialized");
     BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
-    if (LOG.isDebugEnabled()) {
-      if (info != null) {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("getBlockLocalPathInfo successful block=" + block
-              + " blockfile " + info.getBlockPath() + " metafile "
-              + info.getMetaPath());
-        }
-      } else {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("getBlockLocalPathInfo for block=" + block
-              + " returning null");
-        }
-      }
+    if (info != null) {
+      LOG.trace("getBlockLocalPathInfo successful " +
+          "block={} blockfile {} metafile {}",
+          block, info.getBlockPath(), info.getMetaPath());
+    } else {
+      LOG.trace("getBlockLocalPathInfo for block={} " +
+          "returning null", block);
     }
+
     metrics.incrBlocksGetLocalPathInfo();
     return info;
   }
@@ -1939,9 +1936,7 @@ public class DataNode extends ReconfigurableBase
       ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
       DataInputStream in = new DataInputStream(buf);
       id.readFields(in);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Got: " + id.toString());
-      }
+      LOG.debug("Got: {}", id);
       blockPoolTokenSecretManager.checkAccess(id, null, block, accessMode,
           null, null);
     }
@@ -1959,9 +1954,9 @@ public class DataNode extends ReconfigurableBase
       for (ServicePlugin p : plugins) {
         try {
           p.stop();
-          LOG.info("Stopped plug-in " + p);
+          LOG.info("Stopped plug-in {}", p);
         } catch (Throwable t) {
-          LOG.warn("ServicePlugin " + p + " could not be stopped", t);
+          LOG.warn("ServicePlugin {} could not be stopped", p, t);
         }
       }
     }
@@ -1984,7 +1979,7 @@ public class DataNode extends ReconfigurableBase
         this.dataXceiverServer.interrupt();
       } catch (Exception e) {
         // Ignore, since the out of band messaging is advisory.
-        LOG.trace("Exception interrupting DataXceiverServer: ", e);
+        LOG.trace("Exception interrupting DataXceiverServer", e);
       }
     }
 
@@ -2038,7 +2033,7 @@ public class DataNode extends ReconfigurableBase
           this.threadGroup.interrupt();
           break;
         }
-        LOG.info("Waiting for threadgroup to exit, active threads is " +
+        LOG.info("Waiting for threadgroup to exit, active threads is {}",
                  this.threadGroup.activeCount());
         if (this.threadGroup.activeCount() == 0) {
           break;
@@ -2085,7 +2080,7 @@ public class DataNode extends ReconfigurableBase
       try {
         this.blockPoolManager.shutDownAll(bposArray);
       } catch (InterruptedException ie) {
-        LOG.warn("Received exception in BlockPoolManager#shutDownAll: ", ie);
+        LOG.warn("Received exception in BlockPoolManager#shutDownAll", ie);
       }
     }
     
@@ -2093,7 +2088,7 @@ public class DataNode extends ReconfigurableBase
       try {
         this.storage.unlockAll();
       } catch (IOException ie) {
-        LOG.warn("Exception when unlocking storage: " + ie, ie);
+        LOG.warn("Exception when unlocking storage", ie);
       }
     }
     if (data != null) {
@@ -2140,8 +2135,8 @@ public class DataNode extends ReconfigurableBase
 
   private void handleDiskError(String failedVolumes) {
     final boolean hasEnoughResources = data.hasEnoughResource();
-    LOG.warn("DataNode.handleDiskError on : [" + failedVolumes +
-        "] Keep Running: " + hasEnoughResources);
+    LOG.warn("DataNode.handleDiskError on: " +
+        "[{}] Keep Running: {}", failedVolumes, hasEnoughResources);
     
     // If we have enough active valid volumes then we do not want to 
     // shutdown the DN completely.
@@ -2438,15 +2433,13 @@ public class DataNode extends ReconfigurableBase
         String[] targetStorageIds, ExtendedBlock b,
         BlockConstructionStage stage, final String clientname) {
       if (DataTransferProtocol.LOG.isDebugEnabled()) {
-        DataTransferProtocol.LOG.debug(getClass().getSimpleName() + ": "
-            + b + " (numBytes=" + b.getNumBytes() + ")"
-            + ", stage=" + stage
-            + ", clientname=" + clientname
-            + ", targets=" + Arrays.asList(targets)
-            + ", target storage types=" + (targetStorageTypes == null ? "[]" :
-            Arrays.asList(targetStorageTypes))
-            + ", target storage IDs=" + (targetStorageIds == null ? "[]" :
-            Arrays.asList(targetStorageIds)));
+        DataTransferProtocol.LOG.debug("{}: {} (numBytes={}), stage={}, " +
+                "clientname={}, targets={}, target storage types={}, " +
+                "target storage IDs={}", getClass().getSimpleName(), b,
+            b.getNumBytes(), stage, clientname, Arrays.asList(targets),
+            targetStorageTypes == null ? "[]" :
+                Arrays.asList(targetStorageTypes),
+            targetStorageIds == null ? "[]" : Arrays.asList(targetStorageIds));
       }
       this.targets = targets;
       this.targetStorageTypes = targetStorageTypes;
@@ -2475,9 +2468,7 @@ public class DataNode extends ReconfigurableBase
       try {
         final String dnAddr = targets[0].getXferAddr(connectToDnViaHostname);
         InetSocketAddress curTarget = NetUtils.createSocketAddr(dnAddr);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Connecting to datanode " + dnAddr);
-        }
+        LOG.debug("Connecting to datanode {}", dnAddr);
         sock = newSocket();
         NetUtils.connect(sock, curTarget, dnConf.socketTimeout);
         sock.setTcpNoDelay(dnConf.getDataTransferServerTcpNoDelay());
@@ -2521,17 +2512,15 @@ public class DataNode extends ReconfigurableBase
         blockSender.sendBlock(out, unbufOut, null);
 
         // no response necessary
-        LOG.info(getClass().getSimpleName() + ", at "
-            + DataNode.this.getDisplayName() + ": Transmitted " + b
-            + " (numBytes=" + b.getNumBytes() + ") to " + curTarget);
+        LOG.info("{}, at {}: Transmitted {} (numBytes={}) to {}",
+            getClass().getSimpleName(), DataNode.this.getDisplayName(),
+            b, b.getNumBytes(), curTarget);
 
         // read ack
         if (isClient) {
           DNTransferAckProto closeAck = DNTransferAckProto.parseFrom(
               PBHelperClient.vintPrefixed(in));
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(getClass().getSimpleName() + ": close-ack=" + closeAck);
-          }
+          LOG.debug("{}: close-ack={}", getClass().getSimpleName(), closeAck);
           if (closeAck.getStatus() != Status.SUCCESS) {
             if (closeAck.getStatus() == Status.ERROR_ACCESS_TOKEN) {
               throw new InvalidBlockTokenException(
@@ -2550,17 +2539,11 @@ public class DataNode extends ReconfigurableBase
           // Add the block to the front of the scanning queue if metadata file
           // is corrupt. We already add the block to front of scanner if the
           // peer disconnects.
-          LOG.info("Adding block: " + b + " for scanning");
+          LOG.info("Adding block: {} for scanning", b);
           blockScanner.markSuspectBlock(data.getVolume(b).getStorageID(), b);
         }
-        LOG.warn(bpReg + ":Failed to transfer " + b + " to " +
-            targets[0] + " got ", ie);
-        // disk check moved to FileIoProvider
-        IOException cause = DatanodeUtil.getCauseIfDiskError(ie);
-        if (cause != null) { // possible disk error
-          LOG.warn("IOException in DataTransfer#run() "+ ie.getMessage() +". "
-                  + "Cause is ", cause);
-        }
+        LOG.warn("{}:Failed to transfer {} to {} got",
+            bpReg, b, targets[0], ie);
       } finally {
         decrementXmitsInProgress();
         IOUtils.closeStream(blockSender);
@@ -2691,14 +2674,9 @@ public class DataNode extends ReconfigurableBase
       final StorageLocation location;
       try {
         location = StorageLocation.parse(locationString);
-      } catch (IOException ioe) {
-        LOG.error("Failed to initialize storage directory " + locationString
-            + ". Exception details: " + ioe);
-        // Ignore the exception.
-        continue;
-      } catch (SecurityException se) {
-        LOG.error("Failed to initialize storage directory " + locationString
-                     + ". Exception details: " + se);
+      } catch (IOException | SecurityException ioe) {
+        LOG.error("Failed to initialize storage directory {}." +
+            "Exception details: {}", locationString, ioe.toString());
         // Ignore the exception.
         continue;
       }
@@ -2745,7 +2723,7 @@ public class DataNode extends ReconfigurableBase
           wait(2000);
         }
       } catch (InterruptedException ex) {
-        LOG.warn("Received exception in Datanode#join: " + ex);
+        LOG.warn("Received exception in Datanode#join: {}", ex.toString());
       }
     }
   }
@@ -2950,9 +2928,7 @@ public class DataNode extends ReconfigurableBase
       }
       for (TokenIdentifier tokenId : tokenIds) {
         BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Got: " + id.toString());
-        }
+        LOG.debug("Got: {}", id);
         blockPoolTokenSecretManager.checkAccess(id, null, block,
             BlockTokenIdentifier.AccessMode.READ, null, null);
       }
@@ -3165,11 +3141,11 @@ public class DataNode extends ReconfigurableBase
   public void deleteBlockPool(String blockPoolId, boolean force)
       throws IOException {
     checkSuperuserPrivilege();
-    LOG.info("deleteBlockPool command received for block pool " + blockPoolId
-        + ", force=" + force);
+    LOG.info("deleteBlockPool command received for block pool {}, " +
+        "force={}", blockPoolId, force);
     if (blockPoolManager.get(blockPoolId) != null) {
-      LOG.warn("The block pool "+blockPoolId+
-          " is still running, cannot be deleted.");
+      LOG.warn("The block pool {} is still running, cannot be deleted.",
+          blockPoolId);
       throw new IOException(
           "The block pool is still running. First do a refreshNamenodes to " +
           "shutdown the block pool service");
@@ -3181,8 +3157,8 @@ public class DataNode extends ReconfigurableBase
   @Override // ClientDatanodeProtocol
   public synchronized void shutdownDatanode(boolean forUpgrade) throws IOException {
     checkSuperuserPrivilege();
-    LOG.info("shutdownDatanode command received (upgrade=" + forUpgrade +
-        "). Shutting down Datanode...");
+    LOG.info("shutdownDatanode command received (upgrade={}). " +
+        "Shutting down Datanode...", forUpgrade);
 
     // Shutdown can be called only once.
     if (shutdownInProgress) {
@@ -3381,12 +3357,9 @@ public class DataNode extends ReconfigurableBase
       // Remove all unhealthy volumes from DataNode.
       removeVolumes(unhealthyLocations, false);
     } catch (IOException e) {
-      LOG.warn("Error occurred when removing unhealthy storage dirs: "
-          + e.getMessage(), e);
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(sb.toString());
+      LOG.warn("Error occurred when removing unhealthy storage dirs", e);
     }
+    LOG.debug("{}", sb);
       // send blockreport regarding volume failure
     handleDiskError(sb.toString());
   }
@@ -3568,7 +3541,7 @@ public class DataNode extends ReconfigurableBase
     case DiskBalancerConstants.DISKBALANCER_BANDWIDTH :
       return Long.toString(this.diskBalancer.getBandwidth());
     default:
-      LOG.error("Disk Balancer - Unknown key in get balancer setting. Key: " +
+      LOG.error("Disk Balancer - Unknown key in get balancer setting. Key: {}",
           key);
       throw new DiskBalancerException("Unknown key",
           DiskBalancerException.Result.UNKNOWN_KEY);


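For reference, the hunks above move DataNode logging to the SLF4J parameterized style. A minimal sketch of the idiom, using an illustrative class name that is not part of the patch:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative class; only the logging pattern mirrors the patch above.
public class ParameterizedLoggingSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(ParameterizedLoggingSketch.class);

  void demo(String volume, Exception e) {
    // The message is only formatted if the level is enabled, so no
    // isDebugEnabled() guard is needed when the arguments are cheap.
    LOG.debug("Adding volume {}", volume);

    // A trailing Throwable with no matching placeholder is still logged
    // with its stack trace.
    LOG.warn("Failed to add volume: {}", volume, e);
  }
}

That trailing-Throwable convention is what lets calls such as LOG.warn("ServicePlugin {} could not be started", p, t) keep the stack trace without adding a third placeholder.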


[50/50] [abbrv] hadoop git commit: adding missing file

Posted by ae...@apache.org.
adding missing file


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2ffd9ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2ffd9ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2ffd9ce

Branch: refs/heads/HDFS-7240
Commit: a2ffd9ceaf0240dfd811a10b987f259c7ea1d93c
Parents: 4791978
Author: Anu Engineer <ae...@apache.org>
Authored: Thu Feb 15 15:37:57 2018 -0800
Committer: Anu Engineer <ae...@apache.org>
Committed: Thu Feb 15 15:37:57 2018 -0800

----------------------------------------------------------------------
 .../hadoop/ozone/container/common/impl/ContainerManagerImpl.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2ffd9ce/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index 65b8726..f701900 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -747,7 +747,7 @@ public class ContainerManagerImpl implements ContainerManager {
 
   }
 
-  @Override
+    @Override
   public void readLockInterruptibly() throws InterruptedException {
     this.lock.readLock().lockInterruptibly();
   }




[39/50] [abbrv] hadoop git commit: HDFS-13142. Define and Implement a DiffList Interface to store and manage SnapshotDiffs. Contributed by Shashikant Banerjee

Posted by ae...@apache.org.
HDFS-13142. Define and Implement a DiffList Interface to store and manage SnapshotDiffs. Contributed by Shashikant Banerjee


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ea7d78c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ea7d78c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ea7d78c

Branch: refs/heads/HDFS-7240
Commit: 6ea7d78ccb0d1c4af9bcac02a4cff89bdffff252
Parents: 8f66aff
Author: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Authored: Thu Feb 15 19:33:44 2018 +0800
Committer: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Committed: Thu Feb 15 19:33:44 2018 +0800

----------------------------------------------------------------------
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   3 +-
 .../snapshot/AbstractINodeDiffList.java         |  27 ++--
 .../hdfs/server/namenode/snapshot/DiffList.java | 140 +++++++++++++++++++
 .../namenode/snapshot/DiffListByArrayList.java  |  80 +++++++++++
 .../snapshot/DirectoryWithSnapshotFeature.java  |  10 +-
 .../snapshot/FSImageFormatPBSnapshot.java       |   4 +-
 .../server/namenode/snapshot/FileDiffList.java  |  11 +-
 .../snapshot/FileWithSnapshotFeature.java       |   2 +-
 .../snapshot/SnapshotFSImageFormat.java         |   4 +-
 .../namenode/TestFSImageWithSnapshot.java       |   3 +-
 .../snapshot/TestRenameWithSnapshots.java       |  40 +++---
 .../snapshot/TestSetQuotaWithSnapshot.java      |   3 +-
 .../namenode/snapshot/TestSnapshotRename.java   |   3 +-
 13 files changed, 276 insertions(+), 54 deletions(-)
----------------------------------------------------------------------
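The patch below replaces the raw List<D> used for snapshot diffs with a DiffList abstraction, currently backed by DiffListByArrayList. A minimal usage sketch against the new interface; the element type is a hypothetical stand-in for FileDiff/DirectoryDiff (which already compare against snapshot IDs), and the sketch assumes it lives alongside the new classes in the snapshot package:

// Hypothetical stand-in for FileDiff/DirectoryDiff: the only requirement the
// interface imposes is that an element compares itself against a snapshot ID.
class StubDiff implements Comparable<Integer> {
  private final int snapshotId;
  StubDiff(int snapshotId) { this.snapshotId = snapshotId; }
  @Override
  public int compareTo(Integer id) { return Integer.compare(snapshotId, id); }
}

class DiffListUsageSketch {
  public static void main(String[] args) {
    DiffList<StubDiff> diffs = new DiffListByArrayList<>(4);
    diffs.addLast(new StubDiff(1));            // kept in chronological order
    diffs.addLast(new StubDiff(3));
    diffs.addLast(new StubDiff(7));

    // Same contract as Collections.binarySearch: >= 0 is an exact match,
    // a miss returns (-insertionPoint - 1).
    int exact = diffs.binarySearch(3);         // 1
    int miss = diffs.binarySearch(5);          // -3

    // Read-only view, as returned by the asList() accessors in the patch.
    DiffList<StubDiff> view = DiffList.unmodifiableList(diffs);
    System.out.println(exact + " " + miss + " " + view.size());
  }
}

Keeping the comparison keyed on snapshot ID is what allows DiffList.binarySearch to replace Collections.binarySearch throughout the callers changed below.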


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ea7d78c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 90659f3..6693297 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DiffList;
 import org.apache.hadoop.hdfs.util.LongBitFormat;
 import org.apache.hadoop.util.StringUtils;
 import static org.apache.hadoop.io.erasurecode.ErasureCodeConstants.REPLICATION_POLICY_ID;
@@ -988,7 +989,7 @@ public class INodeFile extends INodeWithAdditionalFields
     } else {
       // Collect all distinct blocks
       Set<BlockInfo> allBlocks = new HashSet<>(Arrays.asList(getBlocks()));
-      List<FileDiff> diffs = sf.getDiffs().asList();
+      DiffList<FileDiff> diffs = sf.getDiffs().asList();
       for(FileDiff diff : diffs) {
         BlockInfo[] diffBlocks = diff.getBlocks();
         if (diffBlocks != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ea7d78c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
index 98d8c53..8f2465a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
@@ -36,14 +35,15 @@ abstract class AbstractINodeDiffList<N extends INode,
                                      A extends INodeAttributes,
                                      D extends AbstractINodeDiff<N, A, D>> 
     implements Iterable<D> {
+
   /** Diff list sorted by snapshot IDs, i.e. in chronological order.
     * Created lazily to avoid wasting memory by empty lists. */
-  private List<D> diffs;
+  private DiffList<D> diffs;
 
   /** @return this list as a unmodifiable {@link List}. */
-  public final List<D> asList() {
+  public final DiffList<D> asList() {
     return diffs != null ?
-        Collections.unmodifiableList(diffs) : Collections.emptyList();
+        DiffList.unmodifiableList(diffs) : DiffList.emptyList();
   }
   
   /** Clear the list. */
@@ -72,7 +72,7 @@ abstract class AbstractINodeDiffList<N extends INode,
     if (diffs == null) {
       return;
     }
-    int snapshotIndex = Collections.binarySearch(diffs, snapshot);
+    int snapshotIndex = diffs.binarySearch(snapshot);
 
     D removed;
     if (snapshotIndex == 0) {
@@ -114,7 +114,7 @@ abstract class AbstractINodeDiffList<N extends INode,
   private D addLast(D diff) {
     createDiffsIfNeeded();
     final D last = getLast();
-    diffs.add(diff);
+    diffs.addLast(diff);
     if (last != null) {
       last.setPosterior(diff);
     }
@@ -125,7 +125,7 @@ abstract class AbstractINodeDiffList<N extends INode,
   final void addFirst(D diff) {
     createDiffsIfNeeded();
     final D first = diffs.isEmpty()? null : diffs.get(0);
-    diffs.add(0, diff);
+    diffs.addFirst(diff);
     diff.setPosterior(first);
   }
 
@@ -140,7 +140,8 @@ abstract class AbstractINodeDiffList<N extends INode,
 
   private void createDiffsIfNeeded() {
     if (diffs == null) {
-      diffs = new ArrayList<>(INodeDirectory.DEFAULT_FILES_PER_DIRECTORY);
+      diffs =
+          new DiffListByArrayList<>(INodeDirectory.DEFAULT_FILES_PER_DIRECTORY);
     }
   }
 
@@ -169,7 +170,7 @@ abstract class AbstractINodeDiffList<N extends INode,
       }
       return last;
     }
-    final int i = Collections.binarySearch(diffs, anchorId);
+    final int i = diffs.binarySearch(anchorId);
     if (exclusive) { // must be the one before
       if (i == -1 || i == 0) {
         return Snapshot.NO_SNAPSHOT_ID;
@@ -208,7 +209,7 @@ abstract class AbstractINodeDiffList<N extends INode,
     if (snapshotId == Snapshot.CURRENT_STATE_ID || diffs == null) {
       return null;
     }
-    final int i = Collections.binarySearch(diffs, snapshotId);
+    final int i = diffs.binarySearch(snapshotId);
     if (i >= 0) {
       // exact match
       return diffs.get(i);
@@ -242,9 +243,9 @@ abstract class AbstractINodeDiffList<N extends INode,
     }
 
     final int size = diffs.size();
-    int earlierDiffIndex = Collections.binarySearch(diffs, earlier.getId());
-    int laterDiffIndex = later == null ? size : Collections
-        .binarySearch(diffs, later.getId());
+    int earlierDiffIndex = diffs.binarySearch(earlier.getId());
+    int laterDiffIndex = later == null ? size
+        : diffs.binarySearch(later.getId());
     if (-earlierDiffIndex - 1 == size) {
       // if the earlierSnapshot is after the latest SnapshotDiff stored in
       // diffs, no modification happened after the earlierSnapshot

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ea7d78c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffList.java
new file mode 100644
index 0000000..82fd3f9
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffList.java
@@ -0,0 +1,140 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import java.util.Collections;
+import java.util.Iterator;
+
+/**
+ * This interface defines the methods used to store and manage InodeDiffs.
+ * @param <T> Type of the object in this list.
+ */
+public interface DiffList<T extends Comparable<Integer>> extends Iterable<T> {
+  DiffList EMPTY_LIST = new DiffListByArrayList(Collections.emptyList());
+
+  /**
+   * Returns an empty DiffList.
+   */
+  static <T extends Comparable<Integer>> DiffList<T> emptyList() {
+    return EMPTY_LIST;
+  }
+
+  /**
+   * Returns an unmodifiable diffList.
+   * @param diffs DiffList
+ * @param <T> Type of the object in the diffList
+   * @return Unmodifiable diffList
+   */
+  static <T extends Comparable<Integer>> DiffList<T> unmodifiableList(
+      DiffList<T> diffs) {
+    return new DiffList<T>() {
+      @Override
+      public T get(int i) {
+        return diffs.get(i);
+      }
+
+      @Override
+      public boolean isEmpty() {
+        return diffs.isEmpty();
+      }
+
+      @Override
+      public int size() {
+        return diffs.size();
+      }
+
+      @Override
+      public T remove(int i) {
+        throw new UnsupportedOperationException("This list is unmodifiable.");
+      }
+
+      @Override
+      public boolean addLast(T t) {
+        throw new UnsupportedOperationException("This list is unmodifiable.");
+      }
+
+      @Override
+      public void addFirst(T t) {
+        throw new UnsupportedOperationException("This list is unmodifiable.");
+      }
+
+      @Override
+      public int binarySearch(int i) {
+        return diffs.binarySearch(i);
+      }
+
+      @Override
+      public Iterator<T> iterator() {
+        return diffs.iterator();
+      }
+    };
+  }
+
+  /**
+   * Returns the element at the specified position in this list.
+   *
+   * @param index index of the element to return
+   * @return the element at the specified position in this list
+   * @throws IndexOutOfBoundsException if the index is out of range
+   *         (<tt>index &lt; 0 || index &gt;= size()</tt>)
+   */
+  T get(int index);
+
+  /**
+   * Returns true if this list contains no elements.
+   *
+   * @return true if this list contains no elements
+   */
+  boolean isEmpty();
+
+  /**
+   * Returns the number of elements in this list.
+   * @return the number of elements in this list.
+   */
+  int size();
+
+  /**
+   * Removes the element at the specified position in this list.
+   * @param index the index of the element to be removed
+   * @return the element previously at the specified position
+   */
+  T remove(int index);
+
+  /**
+   * Adds an element at the end of the list.
+   * @param t element to be appended to this list
+   * @return true, if insertion is successful
+   */
+  boolean addLast(T t);
+
+  /**
+   * Adds an element at the beginning of the list.
+   * @param t element to be added to this list
+   */
+  void addFirst(T t);
+
+  /**
+   * Searches the list for the specified object using the binary
+   * search algorithm.
+   * @param key key to be searched for
+   * @return the index of the search key, if it is contained in the list;
+   *         otherwise, (-(insertion point) - 1).
+   */
+  int binarySearch(int key);
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ea7d78c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListByArrayList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListByArrayList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListByArrayList.java
new file mode 100644
index 0000000..03aa5c2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListByArrayList.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * Resizable-array implementation of the DiffList interface.
+ * @param <T> Type of the object in the list
+ */
+public class DiffListByArrayList<T extends Comparable<Integer>>
+    implements DiffList<T> {
+  private final List<T> list;
+
+  DiffListByArrayList(List<T> list) {
+    this.list = list;
+  }
+
+  public DiffListByArrayList(int initialCapacity) {
+    this(new ArrayList<>(initialCapacity));
+  }
+
+  @Override
+  public T get(int i) {
+    return list.get(i);
+  }
+
+  @Override
+  public boolean isEmpty() {
+    return list.isEmpty();
+  }
+
+  @Override
+  public int size() {
+    return list.size();
+  }
+
+  @Override
+  public T remove(int i) {
+    return list.remove(i);
+  }
+
+  @Override
+  public boolean addLast(T t) {
+    return list.add(t);
+  }
+
+  @Override
+  public void addFirst(T t) {
+    list.add(0, t);
+  }
+
+  @Override
+  public int binarySearch(int i) {
+    return Collections.binarySearch(list, i);
+  }
+
+  @Override
+  public Iterator<T> iterator() {
+    return list.iterator();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ea7d78c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 7535879..8ed9c7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -225,7 +225,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
         private List<INode> initChildren() {
           if (children == null) {
             final ChildrenDiff combined = new ChildrenDiff();
-            for (DirectoryDiff d = DirectoryDiff.this; d != null; 
+            for (DirectoryDiff d = DirectoryDiff.this; d != null;
                 d = d.getPosterior()) {
               combined.combinePosterior(d.diff, null);
             }
@@ -334,7 +334,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
     /** Replace the given child in the created/deleted list, if there is any. */
     public boolean replaceChild(final ListType type, final INode oldChild,
         final INode newChild) {
-      final List<DirectoryDiff> diffList = asList();
+      final DiffList<DirectoryDiff> diffList = asList();
       for(int i = diffList.size() - 1; i >= 0; i--) {
         final ChildrenDiff diff = diffList.get(i).diff;
         if (diff.replace(type, oldChild, newChild)) {
@@ -346,7 +346,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
 
     /** Remove the given child in the created/deleted list, if there is any. */
     public boolean removeChild(final ListType type, final INode child) {
-      final List<DirectoryDiff> diffList = asList();
+      final DiffList<DirectoryDiff> diffList = asList();
       for(int i = diffList.size() - 1; i >= 0; i--) {
         final ChildrenDiff diff = diffList.get(i).diff;
         if (diff.removeChild(type, child)) {
@@ -363,7 +363,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
      * given inode is not in any of the snapshot.
      */
     public int findSnapshotDeleted(final INode child) {
-      final List<DirectoryDiff> diffList = asList();
+      final DiffList<DirectoryDiff> diffList = asList();
       for(int i = diffList.size() - 1; i >= 0; i--) {
         final ChildrenDiff diff = diffList.get(i).diff;
         final int d = diff.searchIndex(ListType.DELETED,
@@ -669,7 +669,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
 
     boolean dirMetadataChanged = false;
     INodeDirectoryAttributes dirCopy = null;
-    List<DirectoryDiff> difflist = diffs.asList();
+    DiffList<DirectoryDiff> difflist = diffs.asList();
     for (int i = earlierDiffIndex; i < laterDiffIndex; i++) {
       DirectoryDiff sdiff = difflist.get(i);
       diff.combinePosterior(sdiff.diff, null);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ea7d78c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
index f31743a..4b619a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
@@ -523,7 +523,7 @@ public class FSImageFormatPBSnapshot {
         throws IOException {
       FileWithSnapshotFeature sf = file.getFileWithSnapshotFeature();
       if (sf != null) {
-        List<FileDiff> diffList = sf.getDiffs().asList();
+        DiffList<FileDiff> diffList = sf.getDiffs().asList();
         SnapshotDiffSection.DiffEntry entry = SnapshotDiffSection.DiffEntry
             .newBuilder().setInodeId(file.getId()).setType(Type.FILEDIFF)
             .setNumOfDiff(diffList.size()).build();
@@ -563,7 +563,7 @@ public class FSImageFormatPBSnapshot {
         throws IOException {
       DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
       if (sf != null) {
-        List<DirectoryDiff> diffList = sf.getDiffs().asList();
+        DiffList<DirectoryDiff> diffList = sf.getDiffs().asList();
         SnapshotDiffSection.DiffEntry entry = SnapshotDiffSection.DiffEntry
             .newBuilder().setInodeId(dir.getId()).setType(Type.DIRECTORYDIFF)
             .setNumOfDiff(diffList.size()).build();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ea7d78c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
index 9dcd4d8..2c04a49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import java.util.Collections;
-import java.util.List;
-
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
@@ -66,8 +63,8 @@ public class FileDiffList extends
     if (snapshotId == Snapshot.CURRENT_STATE_ID) {
       return null;
     }
-    List<FileDiff> diffs = this.asList();
-    int i = Collections.binarySearch(diffs, snapshotId);
+    DiffList<FileDiff> diffs = this.asList();
+    int i = diffs.binarySearch(snapshotId);
     BlockInfo[] blocks = null;
     for(i = i >= 0 ? i : -i-2; i >= 0; i--) {
       blocks = diffs.get(i).getBlocks();
@@ -83,8 +80,8 @@ public class FileDiffList extends
     if (snapshotId == Snapshot.CURRENT_STATE_ID) {
       return null;
     }
-    List<FileDiff> diffs = this.asList();
-    int i = Collections.binarySearch(diffs, snapshotId);
+    DiffList<FileDiff> diffs = this.asList();
+    int i = diffs.binarySearch(snapshotId);
     BlockInfo[] blocks = null;
     for (i = i >= 0 ? i+1 : -i-1; i < diffs.size(); i++) {
       blocks = diffs.get(i).getBlocks();

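The loop bounds above (-i - 2 and -i - 1 when the snapshot ID is not found) follow directly from the Collections.binarySearch contract. A small worked illustration, with plain Integers standing in for the diff list:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

class BinarySearchBoundsSketch {
  public static void main(String[] args) {
    // Diffs recorded at snapshots 2, 5 and 9; look up snapshot 6, which misses.
    List<Integer> snapshotIds = Arrays.asList(2, 5, 9);
    int i = Collections.binarySearch(snapshotIds, 6);   // -3 (insertion point 2)
    int lastAtOrBefore = i >= 0 ? i : -i - 2;           // 1 -> the diff at snapshot 5
    int firstAfter = i >= 0 ? i + 1 : -i - 1;           // 2 -> the diff at snapshot 9
    System.out.println(lastAtOrBefore + " " + firstAfter);
  }
}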
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ea7d78c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
index b52e8d6..80061c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
@@ -86,7 +86,7 @@ public class FileWithSnapshotFeature implements INode.Feature {
     int earlierDiffIndex = diffIndexPair[0];
     int laterDiffIndex = diffIndexPair[1];
 
-    final List<FileDiff> diffList = diffs.asList();
+    final DiffList<FileDiff> diffList = diffs.asList();
     final long earlierLength = diffList.get(earlierDiffIndex).getFileSize();
     final long laterLength = laterDiffIndex == diffList.size() ? file
         .computeFileSize(true, false) : diffList.get(laterDiffIndex)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ea7d78c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
index fcab53a..d1ae293 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
@@ -82,7 +82,7 @@ public class SnapshotFSImageFormat {
     if (diffs == null) {
       out.writeInt(-1); // no diffs
     } else {
-      final List<D> list = diffs.asList();
+      final DiffList<D> list = diffs.asList();
       final int size = list.size();
       out.writeInt(size);
       for (int i = size - 1; i >= 0; i--) {
@@ -306,7 +306,7 @@ public class SnapshotFSImageFormat {
     List<INode> deletedList = loadDeletedList(parent, createdList, in, loader);
     
     // 6. Compose the SnapshotDiff
-    List<DirectoryDiff> diffs = parent.getDiffs().asList();
+    DiffList<DirectoryDiff> diffs = parent.getDiffs().asList();
     DirectoryDiff sdiff = new DirectoryDiff(snapshot.getId(), snapshotINode,
         diffs.isEmpty() ? null : diffs.get(0), childrenSize, createdList,
         deletedList, snapshotINode == snapshot.getRoot());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ea7d78c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
index 82f5cfb..58ecc8a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
@@ -199,7 +200,7 @@ public class TestFSImageWithSnapshot {
     assertTrue("The children list of root should be empty", 
         rootNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
     // one snapshot on root: s1
-    List<DirectoryDiff> diffList = rootNode.getDiffs().asList();
+    DiffList<DirectoryDiff> diffList = rootNode.getDiffs().asList();
     assertEquals(1, diffList.size());
     Snapshot s1 = rootNode.getSnapshot(DFSUtil.string2Bytes("s1"));
     assertEquals(s1.getId(), diffList.get(0).getSnapshotId());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ea7d78c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index 91eec78..770651e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -983,7 +983,7 @@ public class TestRenameWithSnapshots {
     // 5 references: s1, s22, s333, s2222, current tree of sdir1
     assertEquals(5, fooWithCount.getReferenceCount());
     INodeDirectory foo = fooWithCount.asDirectory();
-    List<DirectoryDiff> fooDiffs = foo.getDiffs().asList();
+    DiffList<DirectoryDiff> fooDiffs = foo.getDiffs().asList();
     assertEquals(4, fooDiffs.size());
     
     Snapshot s2222 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2222"));
@@ -996,7 +996,7 @@ public class TestRenameWithSnapshots {
     assertEquals(s22.getId(), fooDiffs.get(1).getSnapshotId());
     assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
     INodeFile bar1 = fsdir.getINode4Write(bar1_dir1.toString()).asFile();
-    List<FileDiff> bar1Diffs = bar1.getDiffs().asList();
+    DiffList<FileDiff> bar1Diffs = bar1.getDiffs().asList();
     assertEquals(3, bar1Diffs.size());
     assertEquals(s333.getId(), bar1Diffs.get(2).getSnapshotId());
     assertEquals(s22.getId(), bar1Diffs.get(1).getSnapshotId());
@@ -1008,7 +1008,7 @@ public class TestRenameWithSnapshots {
     // 5 references: s1, s22, s333, s2222, current tree of sdir1
     assertEquals(5, barWithCount.getReferenceCount());
     INodeFile bar = barWithCount.asFile();
-    List<FileDiff> barDiffs = bar.getDiffs().asList();
+    DiffList<FileDiff> barDiffs = bar.getDiffs().asList();
     assertEquals(4, barDiffs.size());
     assertEquals(s2222.getId(), barDiffs.get(3).getSnapshotId());
     assertEquals(s333.getId(), barDiffs.get(2).getSnapshotId());
@@ -1188,7 +1188,7 @@ public class TestRenameWithSnapshots {
     INodeReference.WithCount fooWC = (WithCount) fooRef.getReferredINode();
     assertEquals(1, fooWC.getReferenceCount());
     INodeDirectory fooDir = fooWC.getReferredINode().asDirectory();
-    List<DirectoryDiff> diffs = fooDir.getDiffs().asList();
+    DiffList<DirectoryDiff> diffs = fooDir.getDiffs().asList();
     assertEquals(1, diffs.size());
     assertEquals(s2.getId(), diffs.get(0).getSnapshotId());
     
@@ -1294,7 +1294,7 @@ public class TestRenameWithSnapshots {
         .getChildrenList(Snapshot.CURRENT_STATE_ID);
     assertEquals(1, dir1Children.size());
     assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
-    List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
+    DiffList<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
     assertEquals(1, dir1Diffs.size());
     assertEquals(s1.getId(), dir1Diffs.get(0).getSnapshotId());
     
@@ -1306,7 +1306,8 @@ public class TestRenameWithSnapshots {
     
     INode fooNode = fsdir.getINode4Write(foo.toString());
     assertTrue(fooNode.isDirectory() && fooNode.asDirectory().isWithSnapshot());
-    List<DirectoryDiff> fooDiffs = fooNode.asDirectory().getDiffs().asList();
+    DiffList<DirectoryDiff> fooDiffs =
+        fooNode.asDirectory().getDiffs().asList();
     assertEquals(1, fooDiffs.size());
     assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
     
@@ -1364,7 +1365,7 @@ public class TestRenameWithSnapshots {
         .getChildrenList(Snapshot.CURRENT_STATE_ID);
     assertEquals(1, dir1Children.size());
     assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
-    List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
+    DiffList<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
     assertEquals(1, dir1Diffs.size());
     assertEquals(s1.getId(), dir1Diffs.get(0).getSnapshotId());
     
@@ -1433,7 +1434,7 @@ public class TestRenameWithSnapshots {
     ReadOnlyList<INode> dir2Children = dir2Node
         .getChildrenList(Snapshot.CURRENT_STATE_ID);
     assertEquals(1, dir2Children.size());
-    List<DirectoryDiff> dir2Diffs = dir2Node.getDiffs().asList();
+    DiffList<DirectoryDiff> dir2Diffs = dir2Node.getDiffs().asList();
     assertEquals(1, dir2Diffs.size());
     assertEquals(s2.getId(), dir2Diffs.get(0).getSnapshotId());
     ChildrenDiff childrenDiff = dir2Diffs.get(0).getChildrenDiff();
@@ -1445,7 +1446,8 @@ public class TestRenameWithSnapshots {
     INode fooNode = fsdir.getINode4Write(foo_dir2.toString());
     assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
     assertTrue(fooNode instanceof INodeReference.DstReference);
-    List<DirectoryDiff> fooDiffs = fooNode.asDirectory().getDiffs().asList();
+    DiffList<DirectoryDiff> fooDiffs =
+        fooNode.asDirectory().getDiffs().asList();
     assertEquals(1, fooDiffs.size());
     assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
     
@@ -1594,7 +1596,7 @@ public class TestRenameWithSnapshots {
     INode barNode = fsdir2.getINode4Write(bar.toString());
     assertTrue(barNode.getClass() == INodeFile.class);
     assertSame(fooNode, barNode.getParent());
-    List<DirectoryDiff> diffList = dir1Node
+    DiffList<DirectoryDiff> diffList = dir1Node
         .getDiffs().asList();
     assertEquals(1, diffList.size());
     DirectoryDiff diff = diffList.get(0);
@@ -1668,7 +1670,7 @@ public class TestRenameWithSnapshots {
     INode fooNode = childrenList.get(0);
     assertTrue(fooNode.asDirectory().isWithSnapshot());
     assertSame(dir1Node, fooNode.getParent());
-    List<DirectoryDiff> diffList = dir1Node
+    DiffList<DirectoryDiff> diffList = dir1Node
         .getDiffs().asList();
     assertEquals(1, diffList.size());
     DirectoryDiff diff = diffList.get(0);
@@ -1728,7 +1730,7 @@ public class TestRenameWithSnapshots {
     ReadOnlyList<INode> children = fooNode
         .getChildrenList(Snapshot.CURRENT_STATE_ID);
     assertEquals(1, children.size());
-    List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
+    DiffList<DirectoryDiff> diffList = fooNode.getDiffs().asList();
     assertEquals(1, diffList.size());
     DirectoryDiff diff = diffList.get(0);
     // this diff is generated while renaming
@@ -1742,7 +1744,7 @@ public class TestRenameWithSnapshots {
     INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
     assertSame(barNode, children.get(0));
     assertSame(fooNode, barNode.getParent());
-    List<FileDiff> barDiffList = barNode.getDiffs().asList();
+    DiffList<FileDiff> barDiffList = barNode.getDiffs().asList();
     assertEquals(1, barDiffList.size());
     FileDiff barDiff = barDiffList.get(0);
     assertEquals(s1.getId(), barDiff.getSnapshotId());
@@ -1982,7 +1984,7 @@ public class TestRenameWithSnapshots {
         .getChildrenList(Snapshot.CURRENT_STATE_ID);
     assertEquals(1, children.size());
     assertEquals(bar.getName(), children.get(0).getLocalName());
-    List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
+    DiffList<DirectoryDiff> diffList = fooNode.getDiffs().asList();
     assertEquals(1, diffList.size());
     Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
     assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
@@ -2054,7 +2056,7 @@ public class TestRenameWithSnapshots {
     assertEquals(bar.getName(), children.get(0).getLocalName());
     assertEquals(bar2.getName(), children.get(1).getLocalName());
     assertEquals(bar3.getName(), children.get(2).getLocalName());
-    List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
+    DiffList<DirectoryDiff> diffList = fooNode.getDiffs().asList();
     assertEquals(1, diffList.size());
     Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
     assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
@@ -2231,7 +2233,7 @@ public class TestRenameWithSnapshots {
     // check dir1: foo should be in the created list of s0
     INodeDirectory dir1Node = fsdir.getINode4Write(dir1.toString())
         .asDirectory();
-    List<DirectoryDiff> dir1DiffList = dir1Node.getDiffs().asList();
+    DiffList<DirectoryDiff> dir1DiffList = dir1Node.getDiffs().asList();
     assertEquals(1, dir1DiffList.size());
     List<INode> dList = dir1DiffList.get(0).getChildrenDiff()
         .getList(ListType.DELETED);
@@ -2249,7 +2251,7 @@ public class TestRenameWithSnapshots {
         .asDirectory();
     assertSame(fooNode.asDirectory(), barNode.getParent());
     // bar should only have a snapshot diff for s0
-    List<DirectoryDiff> barDiffList = barNode.getDiffs().asList();
+    DiffList<DirectoryDiff> barDiffList = barNode.getDiffs().asList();
     assertEquals(1, barDiffList.size());
     DirectoryDiff diff = barDiffList.get(0);
     INodeDirectory testNode = fsdir.getINode4Write(test.toString())
@@ -2264,7 +2266,7 @@ public class TestRenameWithSnapshots {
     // of the snapshot diff for s2
     INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString())
         .asDirectory();
-    List<DirectoryDiff> dir2DiffList = dir2Node.getDiffs().asList();
+    DiffList<DirectoryDiff> dir2DiffList = dir2Node.getDiffs().asList();
     // dir2Node should contain 1 snapshot diffs for s2
     assertEquals(1, dir2DiffList.size());
     dList = dir2DiffList.get(0).getChildrenDiff().getList(ListType.DELETED);
@@ -2318,7 +2320,7 @@ public class TestRenameWithSnapshots {
         "foo/bar");
     INodeDirectory barNode = fsdir.getINode(barInS0.toString()).asDirectory();
     assertEquals(0, barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).size());
-    List<DirectoryDiff> diffList = barNode.getDiffs().asList();
+    DiffList<DirectoryDiff> diffList = barNode.getDiffs().asList();
     assertEquals(1, diffList.size());
     DirectoryDiff diff = diffList.get(0);
     assertEquals(0, diff.getChildrenDiff().getList(ListType.DELETED).size());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ea7d78c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSetQuotaWithSnapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSetQuotaWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSetQuotaWithSnapshot.java
index c5ac26e..2fecbb1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSetQuotaWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSetQuotaWithSnapshot.java
@@ -149,7 +149,8 @@ public class TestSetQuotaWithSnapshot {
     hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
     INode subNode = fsdir.getINode4Write(subDir.toString());
     assertTrue(subNode.asDirectory().isWithSnapshot());
-    List<DirectoryDiff> diffList = subNode.asDirectory().getDiffs().asList();
+    DiffList<DirectoryDiff> diffList =
+        subNode.asDirectory().getDiffs().asList();
     assertEquals(1, diffList.size());
     Snapshot s2 = dirNode.getSnapshot(DFSUtil.string2Bytes("s2"));
     assertEquals(s2.getId(), diffList.get(0).getSnapshotId());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ea7d78c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java
index 8c8fca7..01157e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java
@@ -24,7 +24,6 @@ import static org.junit.Assert.fail;
 
 import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
-import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -101,7 +100,7 @@ public class TestSnapshotRename {
     for (int i = 0; i < listByName.size(); i++) {
       assertEquals(sortedNames[i], listByName.get(i).getRoot().getLocalName());
     }
-    List<DirectoryDiff> listByTime = srcRoot.getDiffs().asList();
+    DiffList<DirectoryDiff> listByTime = srcRoot.getDiffs().asList();
     assertEquals(names.length, listByTime.size());
     for (int i = 0; i < listByTime.size(); i++) {
       Snapshot s = srcRoot.getDirectorySnapshottableFeature().getSnapshotById(


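The hunks in this patch only change the declared type of the diff lists from List<DirectoryDiff> (or List<FileDiff>) to the corresponding DiffList type; the size()/get() access pattern in the tests is untouched. As a rough, standalone illustration of why such call sites compile unchanged after the swap, here is a toy DiffList-style interface in plain Java (names mimic the HDFS types, but this is not the HDFS code):

// Toy sketch only: a read-only diff-list abstraction exposing the two accessors
// the test hunks above rely on. Not HDFS code; file name DiffListSketch.java.
import java.util.ArrayList;
import java.util.List;

interface DiffList<T> {
  int size();
  T get(int index);
}

final class ArrayDiffList<T> implements DiffList<T> {
  private final List<T> elements = new ArrayList<>();
  void add(T element) { elements.add(element); }
  @Override public int size() { return elements.size(); }
  @Override public T get(int index) { return elements.get(index); }
}

public class DiffListSketch {
  public static void main(String[] args) {
    ArrayDiffList<String> diffs = new ArrayDiffList<>();
    diffs.add("s1");
    // Call sites written against size()/get() need only the declared type changed.
    System.out.println(diffs.size() + " diff(s), first: " + diffs.get(0));
  }
}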


[11/50] [abbrv] hadoop git commit: YARN-7909. Add charset to YARN Service REST API. (Contributed by Eric Yang)

Posted by ae...@apache.org.
YARN-7909. Add charset to YARN Service REST API. (Contributed by Eric Yang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c97d5bce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c97d5bce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c97d5bce

Branch: refs/heads/HDFS-7240
Commit: c97d5bceb2305e02f9e8b6c2c10a2aba7fdc652b
Parents: 543f3ab
Author: Eric Yang <ey...@apache.org>
Authored: Fri Feb 9 20:01:25 2018 -0500
Committer: Eric Yang <ey...@apache.org>
Committed: Fri Feb 9 20:01:25 2018 -0500

----------------------------------------------------------------------
 .../apache/hadoop/yarn/service/webapp/ApiServer.java   | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c97d5bce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
index 16f8513..e58938e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
@@ -94,7 +94,7 @@ public class ApiServer {
   @GET
   @Path(VERSION)
   @Consumes({ MediaType.APPLICATION_JSON })
-  @Produces({ MediaType.APPLICATION_JSON })
+  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8" })
   public Response getVersion() {
     String version = VersionInfo.getBuildVersion();
     LOG.info(version);
@@ -104,7 +104,7 @@ public class ApiServer {
   @POST
   @Path(SERVICE_ROOT_PATH)
   @Consumes({ MediaType.APPLICATION_JSON })
-  @Produces({ MediaType.APPLICATION_JSON })
+  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8" })
   public Response createService(@Context HttpServletRequest request,
       Service service) {
     ServiceStatus serviceStatus = new ServiceStatus();
@@ -167,7 +167,7 @@ public class ApiServer {
   @GET
   @Path(SERVICE_PATH)
   @Consumes({ MediaType.APPLICATION_JSON })
-  @Produces({ MediaType.APPLICATION_JSON })
+  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8" })
   public Response getService(@Context HttpServletRequest request,
       @PathParam(SERVICE_NAME) String appName) {
     ServiceStatus serviceStatus = new ServiceStatus();
@@ -210,7 +210,7 @@ public class ApiServer {
   @DELETE
   @Path(SERVICE_PATH)
   @Consumes({ MediaType.APPLICATION_JSON })
-  @Produces({ MediaType.APPLICATION_JSON })
+  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8" })
   public Response deleteService(@Context HttpServletRequest request,
       @PathParam(SERVICE_NAME) String appName) {
     try {
@@ -273,7 +273,8 @@ public class ApiServer {
   @PUT
   @Path(COMPONENT_PATH)
   @Consumes({ MediaType.APPLICATION_JSON })
-  @Produces({ MediaType.APPLICATION_JSON, MediaType.TEXT_PLAIN  })
+  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8",
+              MediaType.TEXT_PLAIN  })
   public Response updateComponent(@Context HttpServletRequest request,
       @PathParam(SERVICE_NAME) String appName,
       @PathParam(COMPONENT_NAME) String componentName, Component component) {
@@ -322,7 +323,7 @@ public class ApiServer {
   @PUT
   @Path(SERVICE_PATH)
   @Consumes({ MediaType.APPLICATION_JSON })
-  @Produces({ MediaType.APPLICATION_JSON })
+  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8" })
   public Response updateService(@Context HttpServletRequest request,
       @PathParam(SERVICE_NAME) String appName,
       Service updateServiceData) {


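Each @Produces change above simply appends an explicit charset parameter to the JSON media type so clients decode the response body as UTF-8. A minimal JAX-RS sketch of the same declaration follows; the resource path and payload are placeholders, not a real YARN Service API endpoint, and a JAX-RS runtime such as Jersey is needed to actually serve it:

// Hedged sketch of the @Produces pattern applied in ApiServer; path and payload
// below are hypothetical and do not correspond to a real YARN endpoint.
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;

@Path("/example/version")
public class VersionResource {
  @GET
  @Produces({ MediaType.APPLICATION_JSON + ";charset=utf-8" })
  public Response getVersion() {
    // The charset travels in the Content-Type header: application/json;charset=utf-8
    return Response.ok("{\"version\":\"example\"}").build();
  }
}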


[03/50] [abbrv] hadoop git commit: HDFS-11701. NPE from Unresolved Host causes permanent DFSInputStream failures. Contributed by Lokesh Jain.

Posted by ae...@apache.org.
HDFS-11701. NPE from Unresolved Host causes permanent DFSInputStream failures. Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b061215e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b061215e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b061215e

Branch: refs/heads/HDFS-7240
Commit: b061215ecfebe476bf58f70788113d1af816f553
Parents: 456705a
Author: Jitendra Pandey <ji...@apache.org>
Authored: Wed Feb 7 11:21:41 2018 -0800
Committer: Jitendra Pandey <ji...@apache.org>
Committed: Wed Feb 7 11:22:36 2018 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/ClientContext.java   |  3 +-
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |  6 ++-
 .../hdfs/client/impl/BlockReaderFactory.java    | 40 +++++++++++---------
 .../hdfs/shortcircuit/DomainSocketFactory.java  |  3 +-
 .../client/impl/TestBlockReaderFactory.java     | 33 ++++++++++++++++
 5 files changed, 64 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b061215e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
index a31945c..ad1b359 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED_DEFAULT;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -238,7 +239,7 @@ public class ClientContext {
     return byteArrayManager;
   }
 
-  public int getNetworkDistance(DatanodeInfo datanodeInfo) {
+  public int getNetworkDistance(DatanodeInfo datanodeInfo) throws IOException {
     // If applications disable the feature or the client machine can't
     // resolve its network location, clientNode will be set to null.
     if (clientNode == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b061215e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 32e5d0f..2edd755 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -550,7 +550,11 @@ public class DFSUtilClient {
   private static final Map<String, Boolean> localAddrMap = Collections
       .synchronizedMap(new HashMap<String, Boolean>());
 
-  public static boolean isLocalAddress(InetSocketAddress targetAddr) {
+  public static boolean isLocalAddress(InetSocketAddress targetAddr)
+      throws IOException {
+    if (targetAddr.isUnresolved()) {
+      throw new IOException("Unresolved host: " + targetAddr);
+    }
     InetAddress addr = targetAddr.getAddress();
     Boolean cached = localAddrMap.get(addr.getHostAddress());
     if (cached != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b061215e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
index 60dde82..e83c8ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
@@ -357,28 +357,32 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
       return reader;
     }
     final ShortCircuitConf scConf = conf.getShortCircuitConf();
-    if (scConf.isShortCircuitLocalReads() && allowShortCircuitLocalReads) {
-      if (clientContext.getUseLegacyBlockReaderLocal()) {
-        reader = getLegacyBlockReaderLocal();
-        if (reader != null) {
-          LOG.trace("{}: returning new legacy block reader local.", this);
-          return reader;
+    try {
+      if (scConf.isShortCircuitLocalReads() && allowShortCircuitLocalReads) {
+        if (clientContext.getUseLegacyBlockReaderLocal()) {
+          reader = getLegacyBlockReaderLocal();
+          if (reader != null) {
+            LOG.trace("{}: returning new legacy block reader local.", this);
+            return reader;
+          }
+        } else {
+          reader = getBlockReaderLocal();
+          if (reader != null) {
+            LOG.trace("{}: returning new block reader local.", this);
+            return reader;
+          }
         }
-      } else {
-        reader = getBlockReaderLocal();
+      }
+      if (scConf.isDomainSocketDataTraffic()) {
+        reader = getRemoteBlockReaderFromDomain();
         if (reader != null) {
-          LOG.trace("{}: returning new block reader local.", this);
+          LOG.trace("{}: returning new remote block reader using UNIX domain "
+              + "socket on {}", this, pathInfo.getPath());
           return reader;
         }
       }
-    }
-    if (scConf.isDomainSocketDataTraffic()) {
-      reader = getRemoteBlockReaderFromDomain();
-      if (reader != null) {
-        LOG.trace("{}: returning new remote block reader using UNIX domain "
-            + "socket on {}", this, pathInfo.getPath());
-        return reader;
-      }
+    } catch (IOException e) {
+      LOG.debug("Block read failed. Getting remote block reader using TCP", e);
     }
     Preconditions.checkState(!DFSInputStream.tcpReadsDisabledForTesting,
         "TCP reads were disabled for testing, but we failed to " +
@@ -469,7 +473,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
     return null;
   }
 
-  private BlockReader getBlockReaderLocal() throws InvalidToken {
+  private BlockReader getBlockReaderLocal() throws IOException {
     LOG.trace("{}: trying to construct a BlockReaderLocal for short-circuit "
         + " reads.", this);
     if (pathInfo == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b061215e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
index 25d80fa..760e920 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
@@ -133,7 +133,8 @@ public class DomainSocketFactory {
    *
    * @return             Information about the socket path.
    */
-  public PathInfo getPathInfo(InetSocketAddress addr, ShortCircuitConf conf) {
+  public PathInfo getPathInfo(InetSocketAddress addr, ShortCircuitConf conf)
+      throws IOException {
     // If there is no domain socket path configured, we can't use domain
     // sockets.
     if (conf.getDomainSocketPath().isEmpty()) return PathInfo.NOT_CONFIGURED;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b061215e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java
index 42a7310..6b04b14 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java
@@ -28,6 +28,7 @@ import static org.hamcrest.CoreMatchers.equalTo;
 
 import java.io.File;
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.nio.channels.ClosedByInterruptException;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -53,6 +54,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.PerDatanodeVisitorInfo;
 import org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.Visitor;
+import org.apache.hadoop.hdfs.shortcircuit.DomainSocketFactory;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplicaInfo;
 import org.apache.hadoop.io.IOUtils;
@@ -68,6 +70,7 @@ import org.junit.Rule;
 import org.junit.Test;
 
 import com.google.common.util.concurrent.Uninterruptibles;
+import org.junit.rules.ExpectedException;
 import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -79,6 +82,9 @@ public class TestBlockReaderFactory {
   @Rule
   public final Timeout globalTimeout = new Timeout(180000);
 
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
   @Before
   public void init() {
     DomainSocket.disableBindPathValidation();
@@ -145,6 +151,33 @@ public class TestBlockReaderFactory {
   }
 
   /**
+   * Test the case where address passed to DomainSocketFactory#getPathInfo is
+   * unresolved. In such a case an exception should be thrown.
+   */
+  @Test(timeout=60000)
+  public void testGetPathInfoWithUnresolvedHost() throws Exception {
+    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
+
+    Configuration conf =
+        createShortCircuitConf("testGetPathInfoWithUnresolvedHost", sockDir);
+    conf.set(DFS_CLIENT_CONTEXT,
+        "testGetPathInfoWithUnresolvedHost_Context");
+    conf.setBoolean(DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, true);
+
+    DfsClientConf.ShortCircuitConf shortCircuitConf =
+        new DfsClientConf.ShortCircuitConf(conf);
+    DomainSocketFactory domainSocketFactory =
+        new DomainSocketFactory(shortCircuitConf);
+    InetSocketAddress targetAddr =
+        InetSocketAddress.createUnresolved("random", 32456);
+
+    thrown.expect(IOException.class);
+    thrown.expectMessage("Unresolved host: " + targetAddr);
+    domainSocketFactory.getPathInfo(targetAddr, shortCircuitConf);
+    sockDir.close();
+  }
+
+  /**
    * Test the case where we have multiple threads waiting on the
    * ShortCircuitCache delivering a certain ShortCircuitReplica.
    *


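The heart of the fix is the new guard in DFSUtilClient#isLocalAddress: an unresolved InetSocketAddress returns null from getAddress(), which previously surfaced later as a NullPointerException, so the patch rejects it up front with an IOException that the BlockReaderFactory change can catch and fall back to a TCP reader. A standalone sketch of that guard in plain Java (not the HDFS class itself):

// Minimal sketch of the fail-fast check added in this patch, outside HDFS.
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;

public class UnresolvedHostGuard {
  // Reject unresolved addresses before getAddress() can return null and NPE later.
  static InetAddress requireResolved(InetSocketAddress targetAddr) throws IOException {
    if (targetAddr.isUnresolved()) {
      throw new IOException("Unresolved host: " + targetAddr);
    }
    return targetAddr.getAddress();
  }

  public static void main(String[] args) {
    InetSocketAddress bad = InetSocketAddress.createUnresolved("no-such-host", 32456);
    try {
      requireResolved(bad);
    } catch (IOException expected) {
      System.out.println("Rejected early: " + expected.getMessage());
    }
  }
}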


[23/50] [abbrv] hadoop git commit: HDFS-8693. Addendum patch to execute the command using UGI. Contributed by Brahma Reddy Battula.

Posted by ae...@apache.org.
HDFS-8693. Addendum patch to execute the command using UGI. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35c17351
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35c17351
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35c17351

Branch: refs/heads/HDFS-7240
Commit: 35c17351cab645dcc72e0d2ae1608507aa787ffb
Parents: 3414fd1
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Mon Feb 12 22:14:34 2018 +0530
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Mon Feb 12 22:14:34 2018 +0530

----------------------------------------------------------------------
 .../hdfs/server/datanode/BlockPoolManager.java       | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/35c17351/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
index f6a11c2..141550e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
@@ -253,7 +253,20 @@ class BlockPoolManager {
           lifelineAddrs.add(nnIdToLifelineAddr != null ?
               nnIdToLifelineAddr.get(nnId) : null);
         }
-        bpos.refreshNNList(addrs, lifelineAddrs);
+        try {
+          UserGroupInformation.getLoginUser()
+              .doAs(new PrivilegedExceptionAction<Object>() {
+                @Override
+                public Object run() throws Exception {
+                  bpos.refreshNNList(addrs, lifelineAddrs);
+                  return null;
+                }
+              });
+        } catch (InterruptedException ex) {
+          IOException ioe = new IOException();
+          ioe.initCause(ex.getCause());
+          throw ioe;
+        }
       }
     }
   }


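The addendum wraps the refreshNNList call in a doAs on the login user so the NameNode proxies are created with the DataNode's own credentials. A hedged sketch of that pattern against the Hadoop UserGroupInformation API, with a placeholder Refreshable standing in for the BPOfferService call (which needs a running DataNode to invoke):

// Sketch of the UGI doAs wrapping used in the patch; Refreshable is a placeholder
// for bpos.refreshNNList(addrs, lifelineAddrs).
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

public class RefreshAsLoginUser {
  interface Refreshable { void refresh() throws IOException; }

  static void refreshAsLoginUser(Refreshable target) throws IOException {
    try {
      UserGroupInformation.getLoginUser()
          .doAs((PrivilegedExceptionAction<Void>) () -> {
            target.refresh();
            return null;
          });
    } catch (InterruptedException ex) {
      // Preserve the underlying cause, as the patch does.
      IOException ioe = new IOException();
      ioe.initCause(ex.getCause());
      throw ioe;
    }
  }
}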


[18/50] [abbrv] hadoop git commit: YARN-7697. NM goes down with OOM due to leak in log-aggregation. (Xuan Gong via wangda)

Posted by ae...@apache.org.
YARN-7697. NM goes down with OOM due to leak in log-aggregation. (Xuan Gong via wangda)

Change-Id: Ie4fc7979d834e25f37a033c314f3efceeb8f4a9e


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4c98579
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4c98579
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4c98579

Branch: refs/heads/HDFS-7240
Commit: d4c98579e36df7eeb788352d7b76cd2c7448c511
Parents: 789a185
Author: Wangda Tan <wa...@apache.org>
Authored: Mon Feb 12 10:28:35 2018 +0800
Committer: Wangda Tan <wa...@apache.org>
Committed: Mon Feb 12 10:28:35 2018 +0800

----------------------------------------------------------------------
 .../LogAggregationFileController.java           |  7 +-
 .../ifile/IndexedFileAggregatedLogsBlock.java   |  2 +-
 .../LogAggregationIndexedFileController.java    | 69 +++++++++++++-------
 .../tfile/LogAggregationTFileController.java    |  5 +-
 ...TestLogAggregationFileControllerFactory.java |  5 +-
 .../TestLogAggregationIndexFileController.java  | 21 ++++++
 6 files changed, 79 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4c98579/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
index 0590535..aeef574 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
@@ -226,10 +226,12 @@ public abstract class LogAggregationFileController {
    * Returns the owner of the application.
    *
    * @param aggregatedLogPath the aggregatedLog path
+   * @param appId the ApplicationId
    * @return the application owner
    * @throws IOException if we can not get the application owner
    */
-  public abstract String getApplicationOwner(Path aggregatedLogPath)
+  public abstract String getApplicationOwner(Path aggregatedLogPath,
+      ApplicationId appId)
       throws IOException;
 
   /**
@@ -237,11 +239,12 @@ public abstract class LogAggregationFileController {
    * found.
    *
    * @param aggregatedLogPath the aggregatedLog path.
+   * @param appId the ApplicationId
    * @return a map of the Application ACLs.
    * @throws IOException if we can not get the application acls
    */
   public abstract Map<ApplicationAccessType, String> getApplicationAcls(
-      Path aggregatedLogPath) throws IOException;
+      Path aggregatedLogPath, ApplicationId appId) throws IOException;
 
   /**
    * Verify and create the remote log directory.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4c98579/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
index 6d48d7a..c53ffcc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
@@ -135,7 +135,7 @@ public class IndexedFileAggregatedLogsBlock extends LogAggregationHtmlBlock {
         IndexedLogsMeta indexedLogsMeta = null;
         try {
           indexedLogsMeta = fileController.loadIndexedLogsMeta(
-              thisNodeFile.getPath(), endIndex);
+              thisNodeFile.getPath(), endIndex, appId);
         } catch (Exception ex) {
           // DO NOTHING
           LOG.warn("Can not load log meta from the log file:"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4c98579/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
index 800c0a2..56bae26 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
@@ -284,16 +284,8 @@ public class LogAggregationIndexedFileController
                 currentRemoteLogFile.getName())) {
               overwriteCheckSum = false;
               long endIndex = checksumFileInputStream.readLong();
-              IndexedLogsMeta recoveredLogsMeta = null;
-              try {
-                truncateFileWithRetries(fc, currentRemoteLogFile,
-                    endIndex);
-                recoveredLogsMeta = loadIndexedLogsMeta(
-                    currentRemoteLogFile);
-              } catch (Exception ex) {
-                recoveredLogsMeta = loadIndexedLogsMeta(
-                    currentRemoteLogFile, endIndex);
-              }
+              IndexedLogsMeta recoveredLogsMeta = loadIndexedLogsMeta(
+                  currentRemoteLogFile, endIndex, appId);
               if (recoveredLogsMeta != null) {
                 indexedLogsMeta = recoveredLogsMeta;
               }
@@ -524,11 +516,11 @@ public class LogAggregationIndexedFileController
       IndexedLogsMeta indexedLogsMeta = null;
       try {
         indexedLogsMeta = loadIndexedLogsMeta(thisNodeFile.getPath(),
-            endIndex);
+            endIndex, appId);
       } catch (Exception ex) {
         // DO NOTHING
         LOG.warn("Can not load log meta from the log file:"
-            + thisNodeFile.getPath());
+            + thisNodeFile.getPath() + "\n" + ex.getMessage());
         continue;
       }
       if (indexedLogsMeta == null) {
@@ -636,14 +628,14 @@ public class LogAggregationIndexedFileController
           endIndex = checkSumIndex.longValue();
         }
         IndexedLogsMeta current = loadIndexedLogsMeta(
-            thisNodeFile.getPath(), endIndex);
+            thisNodeFile.getPath(), endIndex, appId);
         if (current != null) {
           listOfLogsMeta.add(current);
         }
       } catch (IOException ex) {
         // DO NOTHING
         LOG.warn("Can not get log meta from the log file:"
-            + thisNodeFile.getPath());
+            + thisNodeFile.getPath() + "\n" + ex.getMessage());
       }
     }
     for (IndexedLogsMeta indexedLogMeta : listOfLogsMeta) {
@@ -721,6 +713,7 @@ public class LogAggregationIndexedFileController
           checkSumFiles.put(nodeName, Long.valueOf(index));
         }
       } catch (IOException ex) {
+        LOG.warn(ex.getMessage());
         continue;
       } finally {
         IOUtils.cleanupWithLogger(LOG, checksumFileInputStream);
@@ -773,25 +766,26 @@ public class LogAggregationIndexedFileController
   }
 
   @Override
-  public String getApplicationOwner(Path aggregatedLogPath)
+  public String getApplicationOwner(Path aggregatedLogPath,
+      ApplicationId appId)
       throws IOException {
     if (this.cachedIndexedLogsMeta == null
         || !this.cachedIndexedLogsMeta.getRemoteLogPath()
             .equals(aggregatedLogPath)) {
       this.cachedIndexedLogsMeta = new CachedIndexedLogsMeta(
-          loadIndexedLogsMeta(aggregatedLogPath), aggregatedLogPath);
+          loadIndexedLogsMeta(aggregatedLogPath, appId), aggregatedLogPath);
     }
     return this.cachedIndexedLogsMeta.getCachedIndexedLogsMeta().getUser();
   }
 
   @Override
   public Map<ApplicationAccessType, String> getApplicationAcls(
-      Path aggregatedLogPath) throws IOException {
+      Path aggregatedLogPath, ApplicationId appId) throws IOException {
     if (this.cachedIndexedLogsMeta == null
         || !this.cachedIndexedLogsMeta.getRemoteLogPath()
             .equals(aggregatedLogPath)) {
       this.cachedIndexedLogsMeta = new CachedIndexedLogsMeta(
-          loadIndexedLogsMeta(aggregatedLogPath), aggregatedLogPath);
+          loadIndexedLogsMeta(aggregatedLogPath, appId), aggregatedLogPath);
     }
     return this.cachedIndexedLogsMeta.getCachedIndexedLogsMeta().getAcls();
   }
@@ -804,8 +798,8 @@ public class LogAggregationIndexedFileController
   }
 
   @Private
-  public IndexedLogsMeta loadIndexedLogsMeta(Path remoteLogPath, long end)
-      throws IOException {
+  public IndexedLogsMeta loadIndexedLogsMeta(Path remoteLogPath, long end,
+      ApplicationId appId) throws IOException {
     FileContext fileContext =
         FileContext.getFileContext(remoteLogPath.toUri(), conf);
     FSDataInputStream fsDataIStream = null;
@@ -816,8 +810,36 @@ public class LogAggregationIndexedFileController
       }
       long fileLength = end < 0 ? fileContext.getFileStatus(
           remoteLogPath).getLen() : end;
+
       fsDataIStream.seek(fileLength - Integer.SIZE/ Byte.SIZE - UUID_LENGTH);
       int offset = fsDataIStream.readInt();
+      // If the offset/log meta size is larger than 64M,
+      // output a warn message for better debug.
+      if (offset > 64 * 1024 * 1024) {
+        LOG.warn("The log meta size read from " + remoteLogPath
+            + " is " + offset);
+      }
+
+      // Load UUID and make sure the UUID is correct.
+      byte[] uuidRead = new byte[UUID_LENGTH];
+      int uuidReadLen = fsDataIStream.read(uuidRead);
+      if (this.uuid == null) {
+        this.uuid = createUUID(appId);
+      }
+      if (uuidReadLen != UUID_LENGTH || !Arrays.equals(this.uuid, uuidRead)) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("the length of loaded UUID:" + uuidReadLen);
+          LOG.debug("the loaded UUID:" + new String(uuidRead,
+              Charset.forName("UTF-8")));
+          LOG.debug("the expected UUID:" + new String(this.uuid,
+              Charset.forName("UTF-8")));
+        }
+        throw new IOException("The UUID from "
+            + remoteLogPath + " is not correct. The offset of loaded UUID is "
+            + (fileLength - UUID_LENGTH));
+      }
+
+      // Load Log Meta
       byte[] array = new byte[offset];
       fsDataIStream.seek(
           fileLength - offset - Integer.SIZE/ Byte.SIZE - UUID_LENGTH);
@@ -833,9 +855,9 @@ public class LogAggregationIndexedFileController
     }
   }
 
-  private IndexedLogsMeta loadIndexedLogsMeta(Path remoteLogPath)
-      throws IOException {
-    return loadIndexedLogsMeta(remoteLogPath, -1);
+  private IndexedLogsMeta loadIndexedLogsMeta(Path remoteLogPath,
+      ApplicationId appId) throws IOException {
+    return loadIndexedLogsMeta(remoteLogPath, -1, appId);
   }
 
   /**
@@ -1040,6 +1062,7 @@ public class LogAggregationIndexedFileController
         this.out = compressAlgo.createCompressionStream(
             fsBufferedOutput, compressor, 0);
       } catch (IOException e) {
+        LOG.warn(e.getMessage());
         compressAlgo.returnCompressor(compressor);
         throw e;
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4c98579/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java
index 5064e26..a4f50d2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java
@@ -335,14 +335,15 @@ public class LogAggregationTFileController
   }
 
   @Override
-  public String getApplicationOwner(Path aggregatedLog) throws IOException {
+  public String getApplicationOwner(Path aggregatedLog, ApplicationId appId)
+      throws IOException {
     createTFileLogReader(aggregatedLog);
     return this.tfReader.getLogReader().getApplicationOwner();
   }
 
   @Override
   public Map<ApplicationAccessType, String> getApplicationAcls(
-      Path aggregatedLog) throws IOException {
+      Path aggregatedLog, ApplicationId appId) throws IOException {
     createTFileLogReader(aggregatedLog);
     return this.tfReader.getLogReader().getApplicationAcls();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4c98579/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/TestLogAggregationFileControllerFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/TestLogAggregationFileControllerFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/TestLogAggregationFileControllerFactory.java
index 2d0864a..99aca1b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/TestLogAggregationFileControllerFactory.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/TestLogAggregationFileControllerFactory.java
@@ -194,14 +194,15 @@ public class TestLogAggregationFileControllerFactory {
     }
 
     @Override
-    public String getApplicationOwner(Path aggregatedLogPath)
+    public String getApplicationOwner(Path aggregatedLogPath,
+        ApplicationId appId)
         throws IOException {
       return null;
     }
 
     @Override
     public Map<ApplicationAccessType, String> getApplicationAcls(
-        Path aggregatedLogPath) throws IOException {
+        Path aggregatedLogPath, ApplicationId appId) throws IOException {
       return null;
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4c98579/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/TestLogAggregationIndexFileController.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/TestLogAggregationIndexFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/TestLogAggregationIndexFileController.java
index 7d0205b..9c02c1b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/TestLogAggregationIndexFileController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/TestLogAggregationIndexFileController.java
@@ -55,7 +55,9 @@ import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils;
 import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
 import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogValue;
 import org.apache.hadoop.yarn.logaggregation.ContainerLogFileInfo;
+import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController;
 import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerContext;
+import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerFactory;
 import org.apache.hadoop.yarn.util.Clock;
 import org.apache.hadoop.yarn.util.ControlledClock;
 import org.junit.After;
@@ -219,6 +221,25 @@ public class TestLogAggregationIndexFileController {
     }
     sysOutStream.reset();
 
+    Configuration factoryConf = new Configuration(conf);
+    factoryConf.set("yarn.log-aggregation.file-formats", "Indexed");
+    factoryConf.set("yarn.log-aggregation.file-controller.Indexed.class",
+        "org.apache.hadoop.yarn.logaggregation.filecontroller.ifile"
+        + ".LogAggregationIndexedFileController");
+    LogAggregationFileControllerFactory factory =
+        new LogAggregationFileControllerFactory(factoryConf);
+    LogAggregationFileController fileController = factory
+        .getFileControllerForRead(appId, USER_UGI.getShortUserName());
+    Assert.assertTrue(fileController instanceof
+        LogAggregationIndexedFileController);
+    foundLogs = fileController.readAggregatedLogs(logRequest, System.out);
+    Assert.assertTrue(foundLogs);
+    for (String logType : logTypes) {
+      Assert.assertTrue(sysOutStream.toString().contains(logMessage(
+          containerId, logType)));
+    }
+    sysOutStream.reset();
+
     // create a checksum file
     Path checksumFile = new Path(fileFormat.getRemoteAppLogDir(
         appId, USER_UGI.getShortUserName()),


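The important part of the change to loadIndexedLogsMeta is that it now reads the UUID stored at the tail of the aggregated log file and compares it with the UUID derived from the application id before trusting the preceding meta offset, so a corrupt or foreign file becomes a clean IOException instead of an oversized allocation. Below is a simplified, standalone trailer check in plain Java; the 32-byte trailer layout is an illustrative assumption, not the exact ifile format:

// Simplified trailer validation: read a fixed-length UUID from the end of a file
// and compare it to the expected bytes before trusting any preceding metadata.
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class TrailerUuidCheck {
  static final int UUID_LENGTH = 32; // illustrative length, not the ifile constant

  static void verifyTrailer(RandomAccessFile file, byte[] expectedUuid) throws IOException {
    long fileLength = file.length();
    file.seek(fileLength - UUID_LENGTH);
    byte[] uuidRead = new byte[UUID_LENGTH];
    int uuidReadLen = file.read(uuidRead);
    if (uuidReadLen != UUID_LENGTH || !Arrays.equals(expectedUuid, uuidRead)) {
      throw new IOException("Trailer UUID mismatch at offset " + (fileLength - UUID_LENGTH));
    }
  }

  public static void main(String[] args) throws IOException {
    byte[] uuid = Arrays.copyOf("example-app-uuid".getBytes(StandardCharsets.UTF_8), UUID_LENGTH);
    File tmp = File.createTempFile("trailer", ".log");
    try (RandomAccessFile f = new RandomAccessFile(tmp, "rw")) {
      f.write("log payload".getBytes(StandardCharsets.UTF_8));
      f.write(uuid);           // trailer written last, after the log payload
      verifyTrailer(f, uuid);  // passes; a mismatched UUID would throw IOException
    } finally {
      tmp.delete();
    }
  }
}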


[26/50] [abbrv] hadoop git commit: HADOOP-15195. With SELinux enabled, directories mounted with start-build-env.sh may not be accessible. Contributed by Grigori Rybkine

Posted by ae...@apache.org.
HADOOP-15195. With SELinux enabled, directories mounted with start-build-env.sh may not be accessible. Contributed by Grigori Rybkine


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b88cb33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b88cb33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b88cb33

Branch: refs/heads/HDFS-7240
Commit: 5b88cb339898f82519223bcd07e1caedff02d051
Parents: 5a1db60
Author: Chris Douglas <cd...@apache.org>
Authored: Mon Feb 12 21:00:47 2018 -0800
Committer: Chris Douglas <cd...@apache.org>
Committed: Mon Feb 12 21:00:47 2018 -0800

----------------------------------------------------------------------
 .../src/test/scripts/start-build-env.bats       | 102 +++++++++++++++++++
 start-build-env.sh                              |  32 +++++-
 2 files changed, 131 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b88cb33/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats b/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
new file mode 100644
index 0000000..0c32bcf
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
@@ -0,0 +1,102 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+# Mock docker command
+docker () {
+  if [ "$1" = "-v" ]; then
+    shift
+    echo Docker version ${DCKR_MOCK_VER:?}
+  elif [ "$1" = run ]; then
+    shift
+    until [ $# -eq 0 ]; do
+      if [ "$1" = -v ]; then
+        shift
+        echo "$1"|awk -F':' '{if (NF == 3 && $3 == "z")
+                  printf "Mounted %s with %s option.\n", $1, $3
+                              else if (NF == 2)
+                  printf "Mounted %s without %s option.\n", $1, "z"}'
+      fi
+      shift
+    done
+  fi
+}
+export -f docker
+export DCKR_MOCK_VER
+
+# Mock a SELinux enabled system
+enable_selinux () {
+  mkdir -p "${TMP}/bin"
+  echo true >"${TMP}/bin"/selinuxenabled
+  chmod a+x "${TMP}/bin"/selinuxenabled
+  if [ "${PATH#${TMP}/bin}" = "${PATH}" ]; then
+    PATH="${TMP}/bin":"$PATH"
+  fi
+}
+
+setup_user () {
+  if [ -z "$(printenv USER)" ]; then
+    if [ -z "$USER" ]; then
+      USER=${HOME##*/}
+    fi
+    export USER
+  fi
+}
+
+# Mock stat command as used in start-build-env.sh
+stat () {
+  if [ "$1" = --printf='%C' -a $# -eq 2 ]; then
+    printf 'mock_u:mock_r:mock_t:s0'
+  else
+    command stat "$@"
+  fi
+}
+export -f stat
+
+# Verify that host directories get mounted without z option
+# and INFO messages get printed out
+@test "start-build-env.sh (Docker without z mount option)" {
+  if [ "$(uname -s)" != "Linux" ]; then
+    skip "Not on Linux platform"
+  fi
+  enable_selinux
+  setup_user
+  DCKR_MOCK_VER=1.4
+  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
+  [ "$status" -eq 0 ]
+  [[ ${lines[0]} == "INFO: SELinux policy is enforced." ]]
+  [[ ${lines[1]} =~ \
+     "Mounted ".*" may not be accessible to the container." ]]
+  [[ ${lines[2]} == \
+     "INFO: If so, on the host, run the following command:" ]]
+  [[ ${lines[3]} =~ "# chcon -Rt svirt_sandbox_file_t " ]]
+  [[ ${lines[-2]} =~ "Mounted ".*" without z option." ]]
+  [[ ${lines[-1]} =~ "Mounted ".*" without z option." ]]
+}
+
+# Verify that host directories get mounted with z option
+@test "start-build-env.sh (Docker with z mount option)" {
+  if [ "$(uname -s)" != "Linux" ]; then
+    skip "Not on Linux platform"
+  fi
+  enable_selinux
+  setup_user
+  DCKR_MOCK_VER=1.7
+  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
+  [ "$status" -eq 0 ]
+  [[ ${lines[-2]} =~ "Mounted ".*" with z option." ]]
+  [[ ${lines[-1]} =~ "Mounted ".*" with z option." ]]
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b88cb33/start-build-env.sh
----------------------------------------------------------------------
diff --git a/start-build-env.sh b/start-build-env.sh
index 5a18151..60efea5 100755
--- a/start-build-env.sh
+++ b/start-build-env.sh
@@ -21,10 +21,36 @@ cd "$(dirname "$0")" # connect to root
 
 docker build -t hadoop-build dev-support/docker
 
-if [ "$(uname -s)" == "Linux" ]; then
+if [ "$(uname -s)" = "Linux" ]; then
   USER_NAME=${SUDO_USER:=$USER}
   USER_ID=$(id -u "${USER_NAME}")
   GROUP_ID=$(id -g "${USER_NAME}")
+  # man docker-run
+  # When using SELinux, mounted directories may not be accessible
+  # to the container. To work around this, with Docker prior to 1.7
+  # one needs to run the "chcon -Rt svirt_sandbox_file_t" command on
+  # the directories. With Docker 1.7 and later the z mount option
+  # does this automatically.
+  if command -v selinuxenabled >/dev/null && selinuxenabled; then
+    DCKR_VER=$(docker -v|awk '$1 == "Docker" && $2 == "version"\
+                         {split($3,ver,".");print ver[1]"."ver[2]}')
+    DCKR_MAJ=${DCKR_VER%.*}
+    DCKR_MIN=${DCKR_VER#*.}
+    if [ "${DCKR_MAJ}" -eq 1 ] && [ "${DCKR_MIN}" -ge 7 ] ||
+        [ "${DCKR_MAJ}" -gt 1 ]; then
+      V_OPTS=:z
+    else
+      for d in "${PWD}" "${HOME}/.m2"; do
+        ctx=$(stat --printf='%C' "$d"|cut -d':' -f3)
+        if [ "$ctx" != svirt_sandbox_file_t ] && [ "$ctx" != container_file_t ]; then
+          printf 'INFO: SELinux policy is enforced.\n'
+          printf '\tMounted %s may not be accessible to the container.\n' "$d"
+          printf 'INFO: If so, on the host, run the following command:\n'
+          printf '\t# chcon -Rt svirt_sandbox_file_t %s\n' "$d"
+        fi
+      done
+    fi
+  fi
 else # boot2docker uid and gid
   USER_NAME=$USER
   USER_ID=1000
@@ -45,8 +71,8 @@ UserSpecificDocker
 # system.  And this also is a significant speedup in subsequent
 # builds because the dependencies are downloaded only once.
 docker run --rm=true -t -i \
-  -v "${PWD}:/home/${USER_NAME}/hadoop" \
+  -v "${PWD}:/home/${USER_NAME}/hadoop${V_OPTS:-}" \
   -w "/home/${USER_NAME}/hadoop" \
-  -v "${HOME}/.m2:/home/${USER_NAME}/.m2" \
+  -v "${HOME}/.m2:/home/${USER_NAME}/.m2${V_OPTS:-}" \
   -u "${USER_NAME}" \
   "hadoop-build-${USER_ID}"




[25/50] [abbrv] hadoop git commit: YARN-7914. Fix exit code handling for short lived Docker containers. Contributed by Shane Kumpf

Posted by ae...@apache.org.
YARN-7914. Fix exit code handling for short lived Docker containers. Contributed by Shane Kumpf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a1db60a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a1db60a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a1db60a

Branch: refs/heads/HDFS-7240
Commit: 5a1db60ab1e8b28cd73367c69970513de88cf4dd
Parents: 87e2570
Author: Jason Lowe <jl...@apache.org>
Authored: Mon Feb 12 15:50:10 2018 -0600
Committer: Jason Lowe <jl...@apache.org>
Committed: Mon Feb 12 15:50:10 2018 -0600

----------------------------------------------------------------------
 .../impl/container-executor.c                   | 92 ++++++++++----------
 1 file changed, 46 insertions(+), 46 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a1db60a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 5ce6a00..035c694 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -1577,58 +1577,58 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
       sleep(1);
     }
 #endif
+  }
 
-    sprintf(docker_inspect_exitcode_command,
-      "%s inspect --format {{.State.ExitCode}} %s",
-    docker_binary, container_id);
-    fprintf(LOGFILE, "Obtaining the exit code...\n");
-    fprintf(LOGFILE, "Docker inspect command: %s\n", docker_inspect_exitcode_command);
-    FILE* inspect_exitcode_docker = popen(docker_inspect_exitcode_command, "r");
-    if(inspect_exitcode_docker == NULL) {
-      fprintf(ERRORFILE, "Done with inspect_exitcode, inspect_exitcode_docker is null\n");
-      fflush(ERRORFILE);
-      exit_code = -1;
-      goto cleanup;
-    }
-    res = fscanf (inspect_exitcode_docker, "%d", &exit_code);
-    if (pclose (inspect_exitcode_docker) != 0 || res <= 0) {
-    fprintf (ERRORFILE,
-     "Could not inspect docker to get exitcode:  %s.\n", docker_inspect_exitcode_command);
-      fflush(ERRORFILE);
-      exit_code = -1;
-      goto cleanup;
-    }
-    fprintf(LOGFILE, "Exit code from docker inspect: %d\n", exit_code);
-    if(exit_code != 0) {
-      fprintf(ERRORFILE, "Docker container exit code was not zero: %d\n",
-      exit_code);
-      snprintf(docker_logs_command, command_size, "%s logs --tail=250 %s",
-        docker_binary, container_id);
-      FILE* logs = popen(docker_logs_command, "r");
-      if(logs != NULL) {
-        clearerr(logs);
-        res = fread(buffer, BUFFER_SIZE, 1, logs);
-        if(res < 1) {
-          fprintf(ERRORFILE, "%s %d %d\n",
-            "Unable to read from docker logs(ferror, feof):", ferror(logs), feof(logs));
-          fflush(ERRORFILE);
-        }
-        else {
-          fprintf(ERRORFILE, "%s\n", buffer);
-          fflush(ERRORFILE);
-        }
-      }
-      else {
-        fprintf(ERRORFILE, "%s\n", "Failed to get output of docker logs");
-        fprintf(ERRORFILE, "Command was '%s'\n", docker_logs_command);
-        fprintf(ERRORFILE, "%s\n", strerror(errno));
+  sprintf(docker_inspect_exitcode_command,
+    "%s inspect --format {{.State.ExitCode}} %s",
+  docker_binary, container_id);
+  fprintf(LOGFILE, "Obtaining the exit code...\n");
+  fprintf(LOGFILE, "Docker inspect command: %s\n", docker_inspect_exitcode_command);
+  FILE* inspect_exitcode_docker = popen(docker_inspect_exitcode_command, "r");
+  if(inspect_exitcode_docker == NULL) {
+    fprintf(ERRORFILE, "Done with inspect_exitcode, inspect_exitcode_docker is null\n");
+    fflush(ERRORFILE);
+    exit_code = -1;
+    goto cleanup;
+  }
+  res = fscanf (inspect_exitcode_docker, "%d", &exit_code);
+  if (pclose (inspect_exitcode_docker) != 0 || res <= 0) {
+  fprintf (ERRORFILE,
+   "Could not inspect docker to get exitcode:  %s.\n", docker_inspect_exitcode_command);
+    fflush(ERRORFILE);
+    exit_code = -1;
+    goto cleanup;
+  }
+  fprintf(LOGFILE, "Exit code from docker inspect: %d\n", exit_code);
+  if(exit_code != 0) {
+    fprintf(ERRORFILE, "Docker container exit code was not zero: %d\n",
+    exit_code);
+    snprintf(docker_logs_command, command_size, "%s logs --tail=250 %s",
+      docker_binary, container_id);
+    FILE* logs = popen(docker_logs_command, "r");
+    if(logs != NULL) {
+      clearerr(logs);
+      res = fread(buffer, BUFFER_SIZE, 1, logs);
+      if(res < 1) {
+        fprintf(ERRORFILE, "%s %d %d\n",
+          "Unable to read from docker logs(ferror, feof):", ferror(logs), feof(logs));
         fflush(ERRORFILE);
       }
-      if(pclose(logs) != 0) {
-        fprintf(ERRORFILE, "%s\n", "Failed to fetch docker logs");
+      else {
+        fprintf(ERRORFILE, "%s\n", buffer);
         fflush(ERRORFILE);
       }
     }
+    else {
+      fprintf(ERRORFILE, "%s\n", "Failed to get output of docker logs");
+      fprintf(ERRORFILE, "Command was '%s'\n", docker_logs_command);
+      fprintf(ERRORFILE, "%s\n", strerror(errno));
+      fflush(ERRORFILE);
+    }
+    if(pclose(logs) != 0) {
+      fprintf(ERRORFILE, "%s\n", "Failed to fetch docker logs");
+      fflush(ERRORFILE);
+    }
   }
 
 cleanup:


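Functionally this patch only dedents the exit-code block so it runs for every container rather than only the detached branch; the docker inspect --format {{.State.ExitCode}} query itself is unchanged. For illustration, the same query expressed in Java with ProcessBuilder instead of the native popen-based code (assumes a docker CLI on the PATH; the container id is supplied by the caller):

// Illustrative Java analogue of the exit-code retrieval in container-executor.c:
// run "docker inspect --format {{.State.ExitCode}} <id>" and parse the output.
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

public class DockerExitCode {
  static int inspectExitCode(String containerId) throws IOException, InterruptedException {
    Process p = new ProcessBuilder(
        "docker", "inspect", "--format", "{{.State.ExitCode}}", containerId)
        .redirectErrorStream(true)
        .start();
    try (BufferedReader reader = new BufferedReader(
        new InputStreamReader(p.getInputStream(), StandardCharsets.UTF_8))) {
      String line = reader.readLine();
      if (p.waitFor() != 0 || line == null) {
        throw new IOException("Could not inspect container " + containerId);
      }
      return Integer.parseInt(line.trim());
    }
  }
}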


[28/50] [abbrv] hadoop git commit: HADOOP-15195. With SELinux enabled, directories mounted with start-build-env.sh may not be accessible. Contributed by Grigori Rybkine

Posted by ae...@apache.org.
HADOOP-15195. With SELinux enabled, directories mounted with start-build-env.sh may not be accessible. Contributed by Grigori Rybkine


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0c5d7d71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0c5d7d71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0c5d7d71

Branch: refs/heads/HDFS-7240
Commit: 0c5d7d71a80bccd4ad7eab269d0727b999606a7e
Parents: 9cc6d1d
Author: Chris Douglas <cd...@apache.org>
Authored: Mon Feb 12 21:07:15 2018 -0800
Committer: Chris Douglas <cd...@apache.org>
Committed: Mon Feb 12 21:07:15 2018 -0800

----------------------------------------------------------------------
 .../src/test/scripts/start-build-env.bats       | 102 +++++++++++++++++++
 start-build-env.sh                              |  32 +++++-
 2 files changed, 131 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c5d7d71/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats b/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
new file mode 100644
index 0000000..dbb14ad
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
@@ -0,0 +1,102 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+# Mock docker command
+docker () {
+  if [ "$1" = "-v" ]; then
+    shift
+    echo Docker version ${DCKR_MOCK_VER:?}
+  elif [ "$1" = run ]; then
+    shift
+    until [ $# -eq 0 ]; do
+      if [ "$1" = -v ]; then
+        shift
+        echo "$1"|awk -F':' '{if (NF == 3 && $3 == "z")
+                  printf "Mounted %s with z option.\n", $1
+                              else if (NF == 2)
+                  printf "Mounted %s without z option.\n", $1}'
+      fi
+      shift
+    done
+  fi
+}
+export -f docker
+export DCKR_MOCK_VER
+
+# Mock a SELinux enabled system
+enable_selinux () {
+  mkdir -p "${TMP}/bin"
+  echo true >"${TMP}/bin"/selinuxenabled
+  chmod a+x "${TMP}/bin"/selinuxenabled
+  if [ "${PATH#${TMP}/bin}" = "${PATH}" ]; then
+    PATH="${TMP}/bin":"$PATH"
+  fi
+}
+
+setup_user () {
+  if [ -z "$(printenv USER)" ]; then
+    if [ -z "$USER" ]; then
+      USER=${HOME##*/}
+    fi
+    export USER
+  fi
+}
+
+# Mock stat command as used in start-build-env.sh
+stat () {
+  if [ "$1" = --printf='%C' -a $# -eq 2 ]; then
+    printf 'mock_u:mock_r:mock_t:s0'
+  else
+    command stat "$@"
+  fi
+}
+export -f stat
+
+# Verify that host directories get mounted without z option
+# and INFO messages get printed out
+@test "start-build-env.sh (Docker without z mount option)" {
+  if [ "$(uname -s)" != "Linux" ]; then
+    skip "Not on Linux platform"
+  fi
+  enable_selinux
+  setup_user
+  DCKR_MOCK_VER=1.4
+  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
+  [ "$status" -eq 0 ]
+  [[ ${lines[0]} == "INFO: SELinux is enabled." ]]
+  [[ ${lines[1]} =~ \
+     "Mounted ".*" may not be accessible to the container." ]]
+  [[ ${lines[2]} == \
+     "INFO: If so, on the host, run the following command:" ]]
+  [[ ${lines[3]} =~ "# chcon -Rt svirt_sandbox_file_t " ]]
+  [[ ${lines[-2]} =~ "Mounted ".*" without z option." ]]
+  [[ ${lines[-1]} =~ "Mounted ".*" without z option." ]]
+}
+
+# Verify that host directories get mounted with z option
+@test "start-build-env.sh (Docker with z mount option)" {
+  if [ "$(uname -s)" != "Linux" ]; then
+    skip "Not on Linux platform"
+  fi
+  enable_selinux
+  setup_user
+  DCKR_MOCK_VER=1.7
+  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
+  [ "$status" -eq 0 ]
+  [[ ${lines[-2]} =~ "Mounted ".*" with z option." ]]
+  [[ ${lines[-1]} =~ "Mounted ".*" with z option." ]]
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c5d7d71/start-build-env.sh
----------------------------------------------------------------------
diff --git a/start-build-env.sh b/start-build-env.sh
index 5a18151..4da55af 100755
--- a/start-build-env.sh
+++ b/start-build-env.sh
@@ -21,10 +21,36 @@ cd "$(dirname "$0")" # connect to root
 
 docker build -t hadoop-build dev-support/docker
 
-if [ "$(uname -s)" == "Linux" ]; then
+if [ "$(uname -s)" = "Linux" ]; then
   USER_NAME=${SUDO_USER:=$USER}
   USER_ID=$(id -u "${USER_NAME}")
   GROUP_ID=$(id -g "${USER_NAME}")
+  # man docker-run
+  # When using SELinux, mounted directories may not be accessible
+  # to the container. To work around this, with Docker prior to 1.7
+  # one needs to run the "chcon -Rt svirt_sandbox_file_t" command on
+  # the directories. With Docker 1.7 and later the z mount option
+  # does this automatically.
+  if command -v selinuxenabled >/dev/null && selinuxenabled; then
+    DCKR_VER=$(docker -v|
+    awk '$1 == "Docker" && $2 == "version" {split($3,ver,".");print ver[1]"."ver[2]}')
+    DCKR_MAJ=${DCKR_VER%.*}
+    DCKR_MIN=${DCKR_VER#*.}
+    if [ "${DCKR_MAJ}" -eq 1 ] && [ "${DCKR_MIN}" -ge 7 ] ||
+        [ "${DCKR_MAJ}" -gt 1 ]; then
+      V_OPTS=:z
+    else
+      for d in "${PWD}" "${HOME}/.m2"; do
+        ctx=$(stat --printf='%C' "$d"|cut -d':' -f3)
+        if [ "$ctx" != svirt_sandbox_file_t ] && [ "$ctx" != container_file_t ]; then
+          printf 'INFO: SELinux is enabled.\n'
+          printf '\tMounted %s may not be accessible to the container.\n' "$d"
+          printf 'INFO: If so, on the host, run the following command:\n'
+          printf '\t# chcon -Rt svirt_sandbox_file_t %s\n' "$d"
+        fi
+      done
+    fi
+  fi
 else # boot2docker uid and gid
   USER_NAME=$USER
   USER_ID=1000
@@ -45,8 +71,8 @@ UserSpecificDocker
 # system.  And this also is a significant speedup in subsequent
 # builds because the dependencies are downloaded only once.
 docker run --rm=true -t -i \
-  -v "${PWD}:/home/${USER_NAME}/hadoop" \
+  -v "${PWD}:/home/${USER_NAME}/hadoop${V_OPTS:-}" \
   -w "/home/${USER_NAME}/hadoop" \
-  -v "${HOME}/.m2:/home/${USER_NAME}/.m2" \
+  -v "${HOME}/.m2:/home/${USER_NAME}/.m2${V_OPTS:-}" \
   -u "${USER_NAME}" \
   "hadoop-build-${USER_ID}"




[04/50] [abbrv] hadoop git commit: HDFS-13115. In getNumUnderConstructionBlocks(), ignore the inodeIds for which the inodes have been deleted. Contributed by Yongjun Zhang.

Posted by ae...@apache.org.
HDFS-13115. In getNumUnderConstructionBlocks(), ignore the inodeIds for which the inodes have been deleted. Contributed by Yongjun Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f491f717
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f491f717
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f491f717

Branch: refs/heads/HDFS-7240
Commit: f491f717e9ee6b75ad5cfca48da9c6297e94a8f7
Parents: b061215
Author: Yongjun Zhang <yz...@cloudera.com>
Authored: Wed Feb 7 12:58:09 2018 -0800
Committer: Yongjun Zhang <yz...@cloudera.com>
Committed: Wed Feb 7 12:58:09 2018 -0800

----------------------------------------------------------------------
 .../hadoop/hdfs/server/namenode/LeaseManager.java      | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f491f717/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index 1e7a174..31fb2bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -144,7 +144,15 @@ public class LeaseManager {
       + "acquired before counting under construction blocks";
     long numUCBlocks = 0;
     for (Long id : getINodeIdWithLeases()) {
-      final INodeFile cons = fsnamesystem.getFSDirectory().getInode(id).asFile();
+      INode inode = fsnamesystem.getFSDirectory().getInode(id);
+      if (inode == null) {
+        // The inode could have been deleted after getINodeIdWithLeases() is
+        // called, check here, and ignore it if so
+        LOG.warn("Failed to find inode {} in getNumUnderConstructionBlocks().",
+            id);
+        continue;
+      }
+      final INodeFile cons = inode.asFile();
       if (!cons.isUnderConstruction()) {
         LOG.warn("The file {} is not under construction but has lease.",
             cons.getFullPathName());
@@ -155,10 +163,11 @@ public class LeaseManager {
         continue;
       }
       for(BlockInfo b : blocks) {
-        if(!b.isComplete())
+        if(!b.isComplete()) {
           numUCBlocks++;
         }
       }
+    }
     LOG.info("Number of blocks under construction: {}", numUCBlocks);
     return numUCBlocks;
   }
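
The null check introduced above guards a small race: the inode id set is snapshotted by getINodeIdWithLeases(), and a file can be deleted before each id is resolved again, so getInode(id) may legitimately return null and is now skipped with a warning instead of triggering a NullPointerException on asFile(). A minimal sketch of that defensive pattern, with hypothetical names rather than the FSDirectory API:

import java.util.List;
import java.util.Map;

public class SnapshotScan {
  // Counts entries that still resolve; ids whose objects were deleted after the
  // snapshot was taken are skipped rather than dereferenced.
  static int countLive(List<Long> snapshotIds, Map<Long, Object> store) {
    int live = 0;
    for (Long id : snapshotIds) {
      Object item = store.get(id);   // may be null: deleted since the snapshot
      if (item == null) {
        continue;                    // log-and-skip, as the patch does
      }
      live++;
    }
    return live;
  }
}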




[35/50] [abbrv] hadoop git commit: HADOOP-10571. Use Log.*(Object, Throwable) overload to log exceptions. Contributed by Andras Bokor.

Posted by ae...@apache.org.
HADOOP-10571. Use Log.*(Object, Throwable) overload to log exceptions.
Contributed by Andras Bokor.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f20dc0d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f20dc0d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f20dc0d5

Branch: refs/heads/HDFS-7240
Commit: f20dc0d5770d3876954faf0a6e8dcce6539ffc23
Parents: 042ef2f
Author: Steve Loughran <st...@apache.org>
Authored: Wed Feb 14 16:20:14 2018 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Wed Feb 14 16:20:14 2018 +0000

----------------------------------------------------------------------
 .../org/apache/hadoop/fs/LocalFileSystem.java   |   2 +-
 .../apache/hadoop/ha/ActiveStandbyElector.java  |  30 +-
 .../apache/hadoop/ha/FailoverController.java    |  20 +-
 .../org/apache/hadoop/ha/HealthMonitor.java     |   9 +-
 .../org/apache/hadoop/io/retry/RetryUtils.java  |  11 +-
 .../main/java/org/apache/hadoop/net/DNS.java    |  39 +-
 .../apache/hadoop/service/AbstractService.java  |  27 +-
 .../hadoop/service/ServiceOperations.java       |   6 +-
 .../hadoop/service/TestServiceOperations.java   |   3 +-
 .../hadoop/hdfs/nfs/nfs3/DFSClientCache.java    |  25 +-
 .../hadoop/hdfs/nfs/nfs3/OpenFileCtx.java       | 314 +++++++---------
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java    | 370 +++++++++----------
 .../hadoop/hdfs/server/datanode/DataNode.java   | 211 +++++------
 .../hdfs/server/datanode/DataXceiver.java       | 172 ++++-----
 .../hdfs/server/namenode/FSNamesystem.java      |   2 +-
 .../server/namenode/ha/StandbyCheckpointer.java |  34 +-
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |   2 +-
 .../hadoop/test/MiniDFSClusterManager.java      |  26 +-
 .../apache/hadoop/mapred/gridmix/Gridmix.java   |  22 +-
 .../swift/http/HttpInputStreamWithRelease.java  |  29 +-
 20 files changed, 587 insertions(+), 767 deletions(-)
----------------------------------------------------------------------
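
The change adopts the SLF4J parameterized overloads throughout: '{}' placeholders are filled from the arguments, and when the final argument is a Throwable it is logged with its full stack trace instead of being flattened into the message by string concatenation; explicit isDebugEnabled() guards also become unnecessary because the message is only formatted when the level is enabled. A small stand-alone sketch of the before/after (a hypothetical class, reusing a message from the LocalFileSystem hunk below):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingStyle {
  private static final Logger LOG = LoggerFactory.getLogger(LoggingStyle.class);

  void demo(String path, Exception e) {
    // Old style: the exception is concatenated into the message, so only
    // e.toString() is recorded and the stack trace is lost.
    LOG.warn("Error moving bad file " + path + ": " + e);

    // New style: the placeholder is filled from 'path', and because the last
    // argument is a Throwable, SLF4J appends the full stack trace.
    LOG.warn("Error moving bad file {}", path, e);

    // No isDebugEnabled() guard needed: the string is built only if DEBUG is on.
    LOG.debug("Processed {}", path);
  }
}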


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
index 91b2315..538ccdf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
@@ -139,7 +139,7 @@ public class LocalFileSystem extends ChecksumFileSystem {
           LOG.warn("Ignoring failure of renameTo");
         }
     } catch (IOException e) {
-      LOG.warn("Error moving bad file " + p + ": " + e);
+      LOG.warn("Error moving bad file " + p, e);
     }
     return false;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
index 93fd2cf..a23fb71 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
@@ -888,9 +888,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
       Stat oldBreadcrumbStat = fenceOldActive();
       writeBreadCrumbNode(oldBreadcrumbStat);
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Becoming active for " + this);
-      }
+      LOG.debug("Becoming active for {}", this);
+
       appClient.becomeActive();
       state = State.ACTIVE;
       return true;
@@ -910,8 +909,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
       throws KeeperException, InterruptedException {
     Preconditions.checkState(appData != null, "no appdata");
     
-    LOG.info("Writing znode " + zkBreadCrumbPath +
-        " to indicate that the local node is the most recent active...");
+    LOG.info("Writing znode {} to indicate that the local " +
+        "node is the most recent active...", zkBreadCrumbPath);
     if (oldBreadcrumbStat == null) {
       // No previous active, just create the node
       createWithRetries(zkBreadCrumbPath, appData, zkAcl,
@@ -948,9 +947,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
       
       deleteWithRetries(zkBreadCrumbPath, stat.getVersion());
     } catch (Exception e) {
-      LOG.warn("Unable to delete our own bread-crumb of being active at " +
-          zkBreadCrumbPath + ": " + e.getLocalizedMessage() + ". " +
-          "Expecting to be fenced by the next active.");
+      LOG.warn("Unable to delete our own bread-crumb of being active at {}." +
+          ". Expecting to be fenced by the next active.", zkBreadCrumbPath, e);
     }
   }
 
@@ -984,7 +982,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
       throw ke;
     }
 
-    LOG.info("Old node exists: " + StringUtils.byteToHexString(data));
+    LOG.info("Old node exists: {}", StringUtils.byteToHexString(data));
     if (Arrays.equals(data, appData)) {
       LOG.info("But old node has our own data, so don't need to fence it.");
     } else {
@@ -995,9 +993,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
 
   private void becomeStandby() {
     if (state != State.STANDBY) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Becoming standby for " + this);
-      }
+      LOG.debug("Becoming standby for {}", this);
       state = State.STANDBY;
       appClient.becomeStandby();
     }
@@ -1005,9 +1001,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
 
   private void enterNeutralMode() {
     if (state != State.NEUTRAL) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Entering neutral mode for " + this);
-      }
+      LOG.debug("Entering neutral mode for {}", this);
       state = State.NEUTRAL;
       appClient.enterNeutralMode();
     }
@@ -1124,7 +1118,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
   private synchronized boolean isStaleClient(Object ctx) {
     Preconditions.checkNotNull(ctx);
     if (zkClient != (ZooKeeper)ctx) {
-      LOG.warn("Ignoring stale result from old client with sessionId " +
+      LOG.warn("Ignoring stale result from old client with sessionId {}",
           String.format("0x%08x", ((ZooKeeper)ctx).getSessionId()));
       return true;
     }
@@ -1162,8 +1156,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
         throws KeeperException, IOException {
       try {
         if (!hasReceivedEvent.await(connectionTimeoutMs, TimeUnit.MILLISECONDS)) {
-          LOG.error("Connection timed out: couldn't connect to ZooKeeper in "
-              + connectionTimeoutMs + " milliseconds");
+          LOG.error("Connection timed out: couldn't connect to ZooKeeper in " +
+              "{} milliseconds", connectionTimeoutMs);
           zk.close();
           throw KeeperException.create(Code.CONNECTIONLOSS);
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
index 3c05a25..b86ae29 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
@@ -123,7 +123,7 @@ public class FailoverController {
       toSvcStatus = toSvc.getServiceStatus();
     } catch (IOException e) {
       String msg = "Unable to get service state for " + target;
-      LOG.error(msg + ": " + e.getLocalizedMessage());
+      LOG.error(msg, e);
       throw new FailoverFailedException(msg, e);
     }
 
@@ -139,7 +139,7 @@ public class FailoverController {
             target + " is not ready to become active: " +
             notReadyReason);
       } else {
-        LOG.warn("Service is not ready to become active, but forcing: " +
+        LOG.warn("Service is not ready to become active, but forcing: {}",
             notReadyReason);
       }
     }
@@ -172,11 +172,11 @@ public class FailoverController {
       proxy.transitionToStandby(createReqInfo());
       return true;
     } catch (ServiceFailedException sfe) {
-      LOG.warn("Unable to gracefully make " + svc + " standby (" +
-          sfe.getMessage() + ")");
+      LOG.warn("Unable to gracefully make {} standby ({})",
+          svc, sfe.getMessage());
     } catch (IOException ioe) {
-      LOG.warn("Unable to gracefully make " + svc +
-          " standby (unable to connect)", ioe);
+      LOG.warn("Unable to gracefully make {} standby (unable to connect)",
+          svc, ioe);
     } finally {
       if (proxy != null) {
         RPC.stopProxy(proxy);
@@ -227,13 +227,13 @@ public class FailoverController {
           toSvc.getProxy(conf, rpcTimeoutToNewActive),
           createReqInfo());
     } catch (ServiceFailedException sfe) {
-      LOG.error("Unable to make " + toSvc + " active (" +
-          sfe.getMessage() + "). Failing back.");
+      LOG.error("Unable to make {} active ({}). Failing back.",
+          toSvc, sfe.getMessage());
       failed = true;
       cause = sfe;
     } catch (IOException ioe) {
-      LOG.error("Unable to make " + toSvc +
-          " active (unable to connect). Failing back.", ioe);
+      LOG.error("Unable to make {} active (unable to connect). Failing back.",
+          toSvc, ioe);
       failed = true;
       cause = ioe;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
index a93df75..d1a858f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
@@ -204,12 +204,11 @@ public class HealthMonitor {
         healthy = true;
       } catch (Throwable t) {
         if (isHealthCheckFailedException(t)) {
-          LOG.warn("Service health check failed for " + targetToMonitor
-              + ": " + t.getMessage());
+          LOG.warn("Service health check failed for {}", targetToMonitor, t);
           enterState(State.SERVICE_UNHEALTHY);
         } else {
-          LOG.warn("Transport-level exception trying to monitor health of " +
-              targetToMonitor + ": " + t.getCause() + " " + t.getLocalizedMessage());
+          LOG.warn("Transport-level exception trying to monitor health of {}",
+              targetToMonitor, t);
           RPC.stopProxy(proxy);
           proxy = null;
           enterState(State.SERVICE_NOT_RESPONDING);
@@ -246,7 +245,7 @@ public class HealthMonitor {
 
   private synchronized void enterState(State newState) {
     if (newState != state) {
-      LOG.info("Entering state " + newState);
+      LOG.info("Entering state {}", newState);
       state = newState;
       synchronized (callbacks) {
         for (Callback cb : callbacks) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
index 1f5acfe..7e43974 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
@@ -72,9 +72,7 @@ public class RetryUtils {
             retryPolicySpecKey, defaultRetryPolicySpec
             );
     
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("multipleLinearRandomRetry = " + multipleLinearRandomRetry);
-    }
+    LOG.debug("multipleLinearRandomRetry = {}", multipleLinearRandomRetry);
 
     if (multipleLinearRandomRetry == null) {
       //no retry
@@ -124,10 +122,9 @@ public class RetryUtils {
         p = RetryPolicies.TRY_ONCE_THEN_FAIL;
       }
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("RETRY " + retries + ") policy="
-            + p.getClass().getSimpleName() + ", exception=" + e);
-      }
+      LOG.debug("RETRY {}) policy={}", retries,
+            p.getClass().getSimpleName(), e);
+
       return p.shouldRetry(e, retries, failovers, isMethodIdempotent);
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java
index 81041c1..2fb4d3e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java
@@ -179,8 +179,7 @@ public class DNS {
         netIf = getSubinterface(strInterface);
       }
     } catch (SocketException e) {
-      LOG.warn("I/O error finding interface " + strInterface +
-          ": " + e.getMessage());
+      LOG.warn("I/O error finding interface {}", strInterface, e);
       return new String[] { cachedHostAddress };
     }
     if (netIf == null) {
@@ -265,7 +264,7 @@ public class DNS {
     }
 
     if (hosts.isEmpty()) {
-      LOG.warn("Unable to determine hostname for interface " +
+      LOG.warn("Unable to determine hostname for interface {}",
           strInterface);
       hosts.add(cachedHostname);
     }
@@ -283,8 +282,8 @@ public class DNS {
     try {
       localhost = InetAddress.getLocalHost().getCanonicalHostName();
     } catch (UnknownHostException e) {
-      LOG.warn("Unable to determine local hostname "
-          + "-falling back to \"" + LOCALHOST + "\"", e);
+      LOG.warn("Unable to determine local hostname -falling back to '{}'",
+          LOCALHOST, e);
       localhost = LOCALHOST;
     }
     return localhost;
@@ -303,21 +302,21 @@ public class DNS {
    */
   private static String resolveLocalHostIPAddress() {
     String address;
+    try {
+      address = InetAddress.getLocalHost().getHostAddress();
+    } catch (UnknownHostException e) {
+      LOG.warn("Unable to determine address of the host " +
+          "-falling back to '{}' address", LOCALHOST, e);
       try {
-        address = InetAddress.getLocalHost().getHostAddress();
-      } catch (UnknownHostException e) {
-        LOG.warn("Unable to determine address of the host"
-                + "-falling back to \"" + LOCALHOST + "\" address", e);
-        try {
-          address = InetAddress.getByName(LOCALHOST).getHostAddress();
-        } catch (UnknownHostException noLocalHostAddressException) {
-          //at this point, deep trouble
-          LOG.error("Unable to determine local loopback address "
-                  + "of \"" + LOCALHOST + "\" " +
-                  "-this system's network configuration is unsupported", e);
-          address = null;
-        }
+        address = InetAddress.getByName(LOCALHOST).getHostAddress();
+      } catch (UnknownHostException noLocalHostAddressException) {
+        //at this point, deep trouble
+        LOG.error("Unable to determine local loopback address of '{}' " +
+            "-this system's network configuration is unsupported",
+            LOCALHOST, e);
+        address = null;
       }
+    }
     return address;
   }
 
@@ -431,8 +430,8 @@ public class DNS {
         netIf = getSubinterface(strInterface);
       }
     } catch (SocketException e) {
-      LOG.warn("I/O error finding interface " + strInterface +
-          ": " + e.getMessage());
+      LOG.warn("I/O error finding interface {}: {}",
+          strInterface, e.getMessage());
       return Arrays.asList(InetAddress.getByName(cachedHostAddress));
     }
     if (netIf == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java
index 2a1140f..70de647 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java
@@ -194,9 +194,7 @@ public abstract class AbstractService implements Service {
           serviceStart();
           if (isInState(STATE.STARTED)) {
             //if the service started (and isn't now in a later state), notify
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Service " + getName() + " is started");
-            }
+            LOG.debug("Service {} is started", getName());
             notifyListeners();
           }
         } catch (Exception e) {
@@ -235,9 +233,7 @@ public abstract class AbstractService implements Service {
         }
       } else {
         //already stopped: note it
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Ignoring re-entrant call to stop()");
-        }
+        LOG.debug("Ignoring re-entrant call to stop()");
       }
     }
   }
@@ -258,9 +254,7 @@ public abstract class AbstractService implements Service {
    * @param exception the exception
    */
   protected final void noteFailure(Exception exception) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("noteFailure " + exception, (Throwable) null);
-    }
+    LOG.debug("noteFailure {}" + exception);
     if (exception == null) {
       //make sure failure logic doesn't itself cause problems
       return;
@@ -270,10 +264,8 @@ public abstract class AbstractService implements Service {
       if (failureCause == null) {
         failureCause = exception;
         failureState = getServiceState();
-        LOG.info("Service " + getName()
-                 + " failed in state " + failureState
-                 + "; cause: " + exception,
-                 exception);
+        LOG.info("Service {} failed in state {}",
+            getName(), failureState, exception);
       }
     }
   }
@@ -418,8 +410,7 @@ public abstract class AbstractService implements Service {
       listeners.notifyListeners(this);
       globalListeners.notifyListeners(this);
     } catch (Throwable e) {
-      LOG.warn("Exception while notifying listeners of " + this + ": " + e,
-               e);
+      LOG.warn("Exception while notifying listeners of {}", this, e);
     }
   }
 
@@ -449,10 +440,8 @@ public abstract class AbstractService implements Service {
     assert stateModel != null : "null state in " + name + " " + this.getClass();
     STATE oldState = stateModel.enterState(newState);
     if (oldState != newState) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-          "Service: " + getName() + " entered state " + getServiceState());
-      }
+      LOG.debug("Service: {} entered state {}", getName(), getServiceState());
+
       recordLifecycleEvent();
     }
     return oldState;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
index e7683a2..d064ef9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
@@ -81,9 +81,7 @@ public final class ServiceOperations {
     try {
       stop(service);
     } catch (Exception e) {
-      log.warn("When stopping the service " + service.getName()
-               + " : " + e,
-               e);
+      log.warn("When stopping the service " + service.getName(), e);
       return e;
     }
     return null;
@@ -103,7 +101,7 @@ public final class ServiceOperations {
     try {
       stop(service);
     } catch (Exception e) {
-      log.warn("When stopping the service {} : {}", service.getName(), e, e);
+      log.warn("When stopping the service {}", service.getName(), e);
       return e;
     }
     return null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
index 5df973d..cc1cfbf 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
@@ -57,8 +57,7 @@ public class TestServiceOperations {
     ServiceOperations.stopQuietly(logger, service);
 
     assertThat(logCapturer.getOutput(),
-        containsString("When stopping the service " + service.getName()
-            + " : " + e));
+        containsString("When stopping the service " + service.getName()));
     verify(e, times(1)).printStackTrace(Mockito.any(PrintWriter.class));
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
index 9a9366f..e0fb302 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
-import org.apache.commons.logging.LogFactory;
-
 import java.io.IOException;
 import java.net.URI;
 import java.nio.file.FileSystemException;
@@ -32,7 +30,6 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 
 import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSClient;
@@ -50,12 +47,15 @@ import com.google.common.cache.CacheLoader;
 import com.google.common.cache.LoadingCache;
 import com.google.common.cache.RemovalListener;
 import com.google.common.cache.RemovalNotification;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A cache saves DFSClient objects for different users.
  */
 class DFSClientCache {
-  private static final Log LOG = LogFactory.getLog(DFSClientCache.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DFSClientCache.class);
   /**
    * Cache that maps User id to the corresponding DFSClient.
    */
@@ -169,8 +169,8 @@ class DFSClientCache {
       URI value = namenodeUriMap.get(namenodeId);
       // if a unique nnid, add it to the map
       if (value == null) {
-        LOG.info("Added export:" + exportPath + " FileSystem URI:" + exportURI
-              + " with namenodeId:" + namenodeId);
+        LOG.info("Added export: {} FileSystem URI: {} with namenodeId: {}",
+            exportPath, exportURI, namenodeId);
         namenodeUriMap.put(namenodeId, exportURI);
       } else {
         // if the nnid already exists, it better be the for the same namenode
@@ -194,7 +194,7 @@ class DFSClientCache {
       try {
         closeAll(true);
       } catch (IOException e) {
-        LOG.info("DFSClientCache.closeAll() threw an exception:\n", e);
+        LOG.info("DFSClientCache.closeAll() threw an exception", e);
       }
     }
   }
@@ -269,10 +269,7 @@ class DFSClientCache {
 
     UserGroupInformation ugi =
             UserGroupInformation.createProxyUser(effectiveUser, realUser);
-    if (LOG.isDebugEnabled()){
-      LOG.debug(String.format("Created ugi:" +
-              " %s for username: %s", ugi, effectiveUser));
-    }
+    LOG.debug("Created ugi: {} for username: {}", ugi, effectiveUser);
     return ugi;
   }
 
@@ -329,8 +326,7 @@ class DFSClientCache {
     try {
       client = clientCache.get(new DfsClientKey(userName, namenodeId));
     } catch (ExecutionException e) {
-      LOG.error("Failed to create DFSClient for user:" + userName + " Cause:"
-          + e);
+      LOG.error("Failed to create DFSClient for user: {}", userName, e);
     }
     return client;
   }
@@ -343,8 +339,7 @@ class DFSClientCache {
     try {
       s = inputstreamCache.get(k);
     } catch (ExecutionException e) {
-      LOG.warn("Failed to create DFSInputStream for user:" + userName
-          + " Cause:" + e);
+      LOG.warn("Failed to create DFSInputStream for user: {}", userName, e);
     }
     return s;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f20dc0d5/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
index 5b7dc14..6067a5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
@@ -31,8 +31,6 @@ import java.util.concurrent.ConcurrentNavigableMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.atomic.AtomicLong;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
@@ -61,13 +59,15 @@ import org.jboss.netty.channel.Channel;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * OpenFileCtx saves the context of one HDFS file output stream. Access to it is
  * synchronized by its member lock.
  */
 class OpenFileCtx {
-  public static final Log LOG = LogFactory.getLog(OpenFileCtx.class);
+  public static final Logger LOG = LoggerFactory.getLogger(OpenFileCtx.class);
   
   // Pending writes water mark for dump, 1MB
   private static long DUMP_WRITE_WATER_MARK = 1024 * 1024;
@@ -210,10 +210,8 @@ class OpenFileCtx {
   /** Increase or decrease the memory occupation of non-sequential writes */
   private long updateNonSequentialWriteInMemory(long count) {
     long newValue = nonSequentialWriteInMemory.addAndGet(count);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Update nonSequentialWriteInMemory by " + count + " new value: "
-          + newValue);
-    }
+    LOG.debug("Update nonSequentialWriteInMemory by {} new value: {}",
+        count, newValue);
 
     Preconditions.checkState(newValue >= 0,
         "nonSequentialWriteInMemory is negative " + newValue
@@ -273,9 +271,7 @@ class OpenFileCtx {
   // Check if need to dump the new writes
   private void waitForDump() {
     if (!enabledDump) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Do nothing, dump is disabled.");
-      }
+      LOG.debug("Do nothing, dump is disabled.");
       return;
     }
 
@@ -286,9 +282,7 @@ class OpenFileCtx {
     // wake up the dumper thread to dump the data
     synchronized (this) {
       if (nonSequentialWriteInMemory.get() >= DUMP_WRITE_WATER_MARK) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Asking dumper to dump...");
-        }
+        LOG.debug("Asking dumper to dump...");
         if (dumpThread == null) {
           dumpThread = new Daemon(new Dumper());
           dumpThread.start();
@@ -312,7 +306,7 @@ class OpenFileCtx {
     private void dump() {
       // Create dump outputstream for the first time
       if (dumpOut == null) {
-        LOG.info("Create dump file: " + dumpFilePath);
+        LOG.info("Create dump file: {}", dumpFilePath);
         File dumpFile = new File(dumpFilePath);
         try {
           synchronized (this) {
@@ -322,13 +316,14 @@ class OpenFileCtx {
             dumpOut = new FileOutputStream(dumpFile);
           }
         } catch (IOException e) {
-          LOG.error("Got failure when creating dump stream " + dumpFilePath, e);
+          LOG.error("Got failure when creating dump stream {}",
+              dumpFilePath, e);
           enabledDump = false;
           if (dumpOut != null) {
             try {
               dumpOut.close();
             } catch (IOException e1) {
-              LOG.error("Can't close dump stream " + dumpFilePath, e);
+              LOG.error("Can't close dump stream {}", dumpFilePath, e);
             }
           }
           return;
@@ -340,17 +335,15 @@ class OpenFileCtx {
         try {
           raf = new RandomAccessFile(dumpFilePath, "r");
         } catch (FileNotFoundException e) {
-          LOG.error("Can't get random access to file " + dumpFilePath);
+          LOG.error("Can't get random access to file {}", dumpFilePath);
           // Disable dump
           enabledDump = false;
           return;
         }
       }
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Start dump. Before dump, nonSequentialWriteInMemory == "
-            + nonSequentialWriteInMemory.get());
-      }
+      LOG.debug("Start dump. Before dump, nonSequentialWriteInMemory == {}",
+            nonSequentialWriteInMemory.get());
 
       Iterator<OffsetRange> it = pendingWrites.keySet().iterator();
       while (activeState && it.hasNext()
@@ -367,18 +360,16 @@ class OpenFileCtx {
             updateNonSequentialWriteInMemory(-dumpedDataSize);
           }
         } catch (IOException e) {
-          LOG.error("Dump data failed: " + writeCtx + " with error: " + e
-              + " OpenFileCtx state: " + activeState);
+          LOG.error("Dump data failed: {} OpenFileCtx state: {}",
+              writeCtx, activeState, e);
           // Disable dump
           enabledDump = false;
           return;
         }
       }
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("After dump, nonSequentialWriteInMemory == "
-            + nonSequentialWriteInMemory.get());
-      }
+      LOG.debug("After dump, nonSequentialWriteInMemory == {}",
+          nonSequentialWriteInMemory.get());
     }
 
     @Override
@@ -393,26 +384,22 @@ class OpenFileCtx {
               OpenFileCtx.this.notifyAll();
               try {
                 OpenFileCtx.this.wait();
-                if (LOG.isDebugEnabled()) {
-                  LOG.debug("Dumper woke up");
-                }
+                LOG.debug("Dumper woke up");
               } catch (InterruptedException e) {
-                LOG.info("Dumper is interrupted, dumpFilePath= "
-                    + OpenFileCtx.this.dumpFilePath);
+                LOG.info("Dumper is interrupted, dumpFilePath = {}",
+                    OpenFileCtx.this.dumpFilePath);
               }
             }
           }
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Dumper checking OpenFileCtx activeState: " + activeState
-                + " enabledDump: " + enabledDump);
-          }
+          LOG.debug("Dumper checking OpenFileCtx activeState: {} " +
+              "enabledDump: {}", activeState, enabledDump);
         } catch (Throwable t) {
           // unblock threads with new request
           synchronized (OpenFileCtx.this) {
             OpenFileCtx.this.notifyAll();
           }
-          LOG.info("Dumper get Throwable: " + t + ". dumpFilePath: "
-              + OpenFileCtx.this.dumpFilePath, t);
+          LOG.info("Dumper got Throwable. dumpFilePath: {}",
+              OpenFileCtx.this.dumpFilePath, t);
           activeState = false;
         }
       }
@@ -428,8 +415,8 @@ class OpenFileCtx {
       return null;
     } else {
       if (xid != writeCtx.getXid()) {
-        LOG.warn("Got a repeated request, same range, with a different xid: "
-            + xid + " xid in old request: " + writeCtx.getXid());
+        LOG.warn("Got a repeated request, same range, with a different xid: " +
+            "{} xid in old request: {}", xid, writeCtx.getXid());
         //TODO: better handling.
       }
       return writeCtx;  
@@ -441,8 +428,8 @@ class OpenFileCtx {
       IdMappingServiceProvider iug) {
     
     if (!activeState) {
-      LOG.info("OpenFileCtx is inactive, fileId: "
-          + request.getHandle().dumpFileHandle());
+      LOG.info("OpenFileCtx is inactive, fileId: {}",
+          request.getHandle().dumpFileHandle());
       WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
       WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
           fileWcc, 0, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
@@ -460,15 +447,11 @@ class OpenFileCtx {
           xid);
       if (existantWriteCtx != null) {
         if (!existantWriteCtx.getReplied()) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Repeated write request which hasn't been served: xid="
-                + xid + ", drop it.");
-          }
+          LOG.debug("Repeated write request which hasn't been served: " +
+                    "xid={}, drop it.", xid);
         } else {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Repeated write request which is already served: xid="
-                + xid + ", resend response.");
-          }
+          LOG.debug("Repeated write request which is already served: xid={}" +
+              ", resend response.", xid);
           WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
           WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
               fileWcc, request.getCount(), request.getStableHow(),
@@ -489,13 +472,11 @@ class OpenFileCtx {
     long offset = request.getOffset();
     int count = request.getCount();
     long smallerCount = offset + count - cachedOffset;
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(String.format("Got overwrite with appended data [%d-%d),"
-          + " current offset %d," + " drop the overlapped section [%d-%d)"
-          + " and append new data [%d-%d).", offset, (offset + count),
-          cachedOffset, offset, cachedOffset, cachedOffset, (offset
-              + count)));
-    }
+    LOG.debug("Got overwrite with appended data [{}-{}),"
+            + " current offset {}," + " drop the overlapped section [{}-{})"
+            + " and append new data [{}-{}).", offset, (offset + count),
+            cachedOffset, offset, cachedOffset, cachedOffset,
+        (offset + count));
     
     ByteBuffer data = request.getData();
     Preconditions.checkState(data.position() == 0,
@@ -538,10 +519,8 @@ class OpenFileCtx {
     long cachedOffset = nextOffset.get();
     int originalCount = WriteCtx.INVALID_ORIGINAL_COUNT;
     
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("requested offset=" + offset + " and current offset="
-          + cachedOffset);
-    }
+    LOG.debug("requested offset={} and current offset={}",
+        offset, cachedOffset);
 
     // Ignore write request with range below the current offset
     if (offset + count <= cachedOffset) {
@@ -576,8 +555,8 @@ class OpenFileCtx {
     
     // Fail non-append call
     if (offset < cachedOffset) {
-      LOG.warn("(offset,count,nextOffset): " + "(" + offset + "," + count + ","
-          + nextOffset + ")");
+      LOG.warn("(offset,count,nextOffset): ({},{},{})",
+          offset, count, nextOffset);
       return null;
     } else {
       DataState dataState = offset == cachedOffset ? WriteCtx.DataState.NO_DUMP
@@ -586,10 +565,8 @@ class OpenFileCtx {
           request.getOffset(), request.getCount(), originalCount,
           request.getStableHow(), request.getData(), channel, xid, false,
           dataState);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Add new write to the list with nextOffset " + cachedOffset
-            + " and requested offset=" + offset);
-      }
+      LOG.debug("Add new write to the list with nextOffset {}" +
+          " and requested offset={}", cachedOffset, offset);
       if (writeCtx.getDataState() == WriteCtx.DataState.ALLOW_DUMP) {
         // update the memory size
         updateNonSequentialWriteInMemory(count);
@@ -598,14 +575,12 @@ class OpenFileCtx {
       WriteCtx oldWriteCtx = checkRepeatedWriteRequest(request, channel, xid);
       if (oldWriteCtx == null) {
         pendingWrites.put(new OffsetRange(offset, offset + count), writeCtx);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("New write buffered with xid " + xid + " nextOffset "
-              + cachedOffset + " req offset=" + offset + " mapsize="
-              + pendingWrites.size());
-        }
+        LOG.debug("New write buffered with xid {} nextOffset {}" +
+            "req offset={} mapsize={}",
+            xid, cachedOffset, offset, pendingWrites.size());
       } else {
-        LOG.warn("Got a repeated request, same range, with xid: " + xid
-            + " nextOffset " + +cachedOffset + " req offset=" + offset);
+        LOG.warn("Got a repeated request, same range, with xid: " +
+            "{} nextOffset {} req offset={}", xid, cachedOffset, offset);
       }
       return writeCtx;
     }
@@ -625,9 +600,7 @@ class OpenFileCtx {
       response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL, wccData, 0,
           WriteStableHow.UNSTABLE, Nfs3Constant.WRITE_COMMIT_VERF);
     } else {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Process perfectOverWrite");
-      }
+      LOG.debug("Process perfectOverWrite");
       // TODO: let executor handle perfect overwrite
       response = processPerfectOverWrite(dfsClient, offset, count, stableHow,
           request.getData().array(),
@@ -652,17 +625,13 @@ class OpenFileCtx {
     
     if (writeCtx.getOffset() == nextOffset.get()) {
       if (!asyncStatus) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Trigger the write back task. Current nextOffset: "
-              + nextOffset.get());
-        }
+        LOG.debug("Trigger the write back task. Current nextOffset: {}",
+            nextOffset.get());
         asyncStatus = true;
         asyncWriteBackStartOffset = writeCtx.getOffset();
         asyncDataService.execute(new AsyncDataService.WriteBackTask(this));
       } else {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("The write back thread is working.");
-        }
+        LOG.debug("The write back thread is working.");
       }
       return true;
     } else {
@@ -694,15 +663,13 @@ class OpenFileCtx {
         // responses of the previous batch. So here send response immediately
         // for unstable non-sequential write
         if (stableHow != WriteStableHow.UNSTABLE) {
-          LOG.info("Have to change stable write to unstable write: "
-              + request.getStableHow());
+          LOG.info("Have to change stable write to unstable write: {}",
+              request.getStableHow());
           stableHow = WriteStableHow.UNSTABLE;
         }
 
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("UNSTABLE write request, send response for offset: "
-              + writeCtx.getOffset());
-        }
+        LOG.debug("UNSTABLE write request, send response for offset: {}",
+            writeCtx.getOffset());
         WccData fileWcc = new WccData(preOpAttr, latestAttr);
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
             fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
@@ -738,8 +705,8 @@ class OpenFileCtx {
       LOG.info("The FSDataOutputStream has been closed. "
           + "Continue processing the perfect overwrite.");
     } catch (IOException e) {
-      LOG.info("hsync failed when processing possible perfect overwrite, path="
-          + path + " error: " + e);
+      LOG.info("hsync failed when processing possible perfect overwrite, " +
+              "path={} error: {}", path, e.toString());
       return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
           Nfs3Constant.WRITE_COMMIT_VERF);
     }
@@ -748,18 +715,18 @@ class OpenFileCtx {
       fis = dfsClient.createWrappedInputStream(dfsClient.open(path));
       readCount = fis.read(offset, readbuffer, 0, count);
       if (readCount < count) {
-        LOG.error("Can't read back " + count + " bytes, partial read size: "
-            + readCount);
+        LOG.error("Can't read back {} bytes, partial read size: {}",
+            count, readCount);
         return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
             Nfs3Constant.WRITE_COMMIT_VERF);
       }
     } catch (IOException e) {
-      LOG.info("Read failed when processing possible perfect overwrite, path="
-          + path, e);
+      LOG.info("Read failed when processing possible perfect overwrite, " +
+              "path={}", path, e);
       return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
           Nfs3Constant.WRITE_COMMIT_VERF);
     } finally {
-      IOUtils.cleanup(LOG, fis);
+      IOUtils.cleanupWithLogger(LOG, fis);
     }
 
     // Compare with the request
@@ -776,8 +743,8 @@ class OpenFileCtx {
         dfsClient.setTimes(path, Time.monotonicNow(), -1);
         postOpAttr = Nfs3Utils.getFileAttr(dfsClient, path, iug);
       } catch (IOException e) {
-        LOG.info("Got error when processing perfect overwrite, path=" + path
-            + " error: " + e);
+        LOG.info("Got error when processing perfect overwrite, path={} " +
+            "error: {}", path, e.toString());
         return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
             Nfs3Constant.WRITE_COMMIT_VERF);
       }
@@ -810,9 +777,7 @@ class OpenFileCtx {
 
     COMMIT_STATUS ret = checkCommitInternal(commitOffset, channel, xid,
         preOpAttr, fromRead);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Got commit status: " + ret.name());
-    }
+    LOG.debug("Got commit status: {}", ret.name());
     // Do the sync outside the lock
     if (ret == COMMIT_STATUS.COMMIT_DO_SYNC
         || ret == COMMIT_STATUS.COMMIT_FINISHED) {
@@ -828,7 +793,7 @@ class OpenFileCtx {
           ret = COMMIT_STATUS.COMMIT_ERROR;
         }
       } catch (IOException e) {
-        LOG.error("Got stream error during data sync: " + e);
+        LOG.error("Got stream error during data sync", e);
         // Do nothing. Stream will be closed eventually by StreamMonitor.
         // status = Nfs3Status.NFS3ERR_IO;
         ret = COMMIT_STATUS.COMMIT_ERROR;
@@ -867,9 +832,7 @@ class OpenFileCtx {
       CommitCtx commitCtx = new CommitCtx(commitOffset, channel, xid, preOpAttr);
       pendingCommits.put(commitOffset, commitCtx);
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("return COMMIT_SPECIAL_WAIT");
-    }
+    LOG.debug("return COMMIT_SPECIAL_WAIT");
     return COMMIT_STATUS.COMMIT_SPECIAL_WAIT;
   }
   
@@ -886,10 +849,8 @@ class OpenFileCtx {
     }
     
     long flushed = getFlushedOffset();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("getFlushedOffset=" + flushed + " commitOffset=" + commitOffset
-          + "nextOffset=" + nextOffset.get());
-    }
+    LOG.debug("getFlushedOffset={} commitOffset={} nextOffset={}",
+        flushed, commitOffset, nextOffset.get());
     
     if (pendingWrites.isEmpty()) {
       if (aixCompatMode) {
@@ -898,10 +859,8 @@ class OpenFileCtx {
         return COMMIT_STATUS.COMMIT_FINISHED;
       } else {
         if (flushed < nextOffset.get()) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("get commit while still writing to the requested offset,"
-                + " with empty queue");
-          }
+          LOG.debug("get commit while still writing to the requested offset,"
+              + " with empty queue");
           return handleSpecialWait(fromRead, nextOffset.get(), channel, xid,
               preOpAttr);
         } else {
@@ -920,18 +879,14 @@ class OpenFileCtx {
       if (co <= flushed) {
         return COMMIT_STATUS.COMMIT_DO_SYNC;
       } else if (co < nextOffset.get()) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("get commit while still writing to the requested offset");
-        }
+        LOG.debug("get commit while still writing to the requested offset");
         return handleSpecialWait(fromRead, co, channel, xid, preOpAttr);
       } else {
         // co >= nextOffset
         if (checkSequential(co, nextOffset.get())) {
           return handleSpecialWait(fromRead, co, channel, xid, preOpAttr);
         } else {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("return COMMIT_SPECIAL_SUCCESS");
-          }
+          LOG.debug("return COMMIT_SPECIAL_SUCCESS");
           return COMMIT_STATUS.COMMIT_SPECIAL_SUCCESS;
         }
       }
@@ -993,8 +948,8 @@ class OpenFileCtx {
     // Check the stream timeout
     if (checkStreamTimeout(streamTimeout)) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("stream can be closed for fileId: "
-            + handle.dumpFileHandle());
+        LOG.debug("stream can be closed for fileId: {}",
+            handle.dumpFileHandle());
       }
       flag = true;
     }
@@ -1009,10 +964,8 @@ class OpenFileCtx {
    */
   private synchronized WriteCtx offerNextToWrite() {
     if (pendingWrites.isEmpty()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("The async write task has no pending writes, fileId: "
-            + latestAttr.getFileId());
-      }
+      LOG.debug("The async write task has no pending writes, fileId: {}",
+          latestAttr.getFileId());
       // process pending commit again to handle this race: a commit is added
       // to pendingCommits map just after the last doSingleWrite returns.
       // There is no pending write and the commit should be handled by the
@@ -1029,49 +982,35 @@ class OpenFileCtx {
     OffsetRange range = lastEntry.getKey();
     WriteCtx toWrite = lastEntry.getValue();
 
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("range.getMin()=" + range.getMin() + " nextOffset="
-          + nextOffset);
-    }
+    LOG.trace("range.getMin()={} nextOffset={}",
+        range.getMin(), nextOffset);
 
     long offset = nextOffset.get();
     if (range.getMin() > offset) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("The next sequential write has not arrived yet");
-      }
+      LOG.debug("The next sequential write has not arrived yet");
       processCommits(nextOffset.get()); // handle race
       this.asyncStatus = false;
     } else if (range.getMax() <= offset) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Remove write " + range.toString()
-            + " which is already written from the list");
-      }
+      LOG.debug("Remove write {} which is already written from the list",
+          range);
       // remove the WriteCtx from cache
       pendingWrites.remove(range);
     } else if (range.getMin() < offset && range.getMax() > offset) {
-      LOG.warn("Got an overlapping write " + range.toString()
-          + ", nextOffset=" + offset
-          + ". Remove and trim it");
+      LOG.warn("Got an overlapping write {}, nextOffset={}. " +
+          "Remove and trim it", range, offset);
       pendingWrites.remove(range);
       trimWriteRequest(toWrite, offset);
       // update nextOffset
       nextOffset.addAndGet(toWrite.getCount());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Change nextOffset (after trim) to " + nextOffset.get());
-      }
+      LOG.debug("Change nextOffset (after trim) to {}", nextOffset.get());
       return toWrite;
     } else {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Remove write " + range.toString()
-            + " from the list");
-      }
+      LOG.debug("Remove write {} from the list", range);
       // after writing, remove the WriteCtx from cache
       pendingWrites.remove(range);
       // update nextOffset
       nextOffset.addAndGet(toWrite.getCount());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Change nextOffset to " + nextOffset.get());
-      }
+      LOG.debug("Change nextOffset to {}", nextOffset.get());
       return toWrite;
     }
     return null;
@@ -1095,9 +1034,9 @@ class OpenFileCtx {
         }
       }
       
-      if (!activeState && LOG.isDebugEnabled()) {
-        LOG.debug("The openFileCtx is not active anymore, fileId: "
-            + latestAttr.getFileId());
+      if (!activeState) {
+        LOG.debug("The openFileCtx is not active anymore, fileId: {}",
+            latestAttr.getFileId());
       }
     } finally {
       // Make sure to reset asyncStatus to false unless a race happens
@@ -1105,11 +1044,12 @@ class OpenFileCtx {
         if (startOffset == asyncWriteBackStartOffset) {
           asyncStatus = false;
         } else {
-          LOG.info("Another async task is already started before this one"
-              + " is finalized. fileId: " + latestAttr.getFileId()
-              + " asyncStatus: " + asyncStatus + " original startOffset: "
-              + startOffset + " new startOffset: " + asyncWriteBackStartOffset
-              + ". Won't change asyncStatus here.");
+          LOG.info("Another async task is already started before this one " +
+                  "is finalized. fileId: {} asyncStatus: {} " +
+                  "original startOffset: {} " +
+                  "new startOffset: {}. Won't change asyncStatus here.",
+              latestAttr.getFileId(), asyncStatus,
+              startOffset, asyncWriteBackStartOffset);
         }
       }
     }
@@ -1132,8 +1072,8 @@ class OpenFileCtx {
       status = Nfs3Status.NFS3_OK;
     } catch (ClosedChannelException cce) {
       if (!pendingWrites.isEmpty()) {
-        LOG.error("Can't sync for fileId: " + latestAttr.getFileId()
-            + ". Channel closed with writes pending.", cce);
+        LOG.error("Can't sync for fileId: {}. " +
+            "Channel closed with writes pending", latestAttr.getFileId(), cce);
       }
       status = Nfs3Status.NFS3ERR_IO;
     } catch (IOException e) {
@@ -1152,8 +1092,8 @@ class OpenFileCtx {
     }
 
     if (latestAttr.getSize() != offset) {
-      LOG.error("After sync, the expect file size: " + offset
-          + ", however actual file size is: " + latestAttr.getSize());
+      LOG.error("After sync, the expect file size: {}, " +
+          "however actual file size is: {}", offset, latestAttr.getSize());
       status = Nfs3Status.NFS3ERR_IO;
     }
     WccData wccData = new WccData(Nfs3Utils.getWccAttr(latestAttr), latestAttr);
@@ -1170,11 +1110,11 @@ class OpenFileCtx {
       Nfs3Utils.writeChannelCommit(commit.getChannel(), response
           .serialize(new XDR(), commit.getXid(),
               new VerifierNone()), commit.getXid());
-      
+
       if (LOG.isDebugEnabled()) {
-        LOG.debug("FileId: " + latestAttr.getFileId() + " Service time: "
-            + Nfs3Utils.getElapsedTime(commit.startTime)
-            + "ns. Sent response for commit: " + commit);
+        LOG.debug("FileId: {} Service time: {}ns. " +
+                "Sent response for commit: {}", latestAttr.getFileId(),
+            Nfs3Utils.getElapsedTime(commit.startTime), commit);
       }
       entry = pendingCommits.firstEntry();
     }
@@ -1190,8 +1130,8 @@ class OpenFileCtx {
     
     FileHandle handle = writeCtx.getHandle();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("do write, fileHandle " + handle.dumpFileHandle() + " offset: "
-          + offset + " length: " + count + " stableHow: " + stableHow.name());
+      LOG.debug("do write, fileHandle {} offset: {} length: {} stableHow: {}",
+          handle.dumpFileHandle(), offset, count, stableHow.name());
     }
 
     try {
@@ -1215,10 +1155,10 @@ class OpenFileCtx {
             writeCtx.setDataState(WriteCtx.DataState.NO_DUMP);
             updateNonSequentialWriteInMemory(-count);
             if (LOG.isDebugEnabled()) {
-              LOG.debug("After writing " + handle.dumpFileHandle()
-                  + " at offset " + offset
-                  + ", updated the memory count, new value: "
-                  + nonSequentialWriteInMemory.get());
+              LOG.debug("After writing {} at offset {}, " +
+                      "updated the memory count, new value: {}",
+                  handle.dumpFileHandle(), offset,
+                  nonSequentialWriteInMemory.get());
             }
           }
         }
@@ -1226,7 +1166,7 @@ class OpenFileCtx {
       
       if (!writeCtx.getReplied()) {
         if (stableHow != WriteStableHow.UNSTABLE) {
-          LOG.info("Do sync for stable write: " + writeCtx);
+          LOG.info("Do sync for stable write: {}", writeCtx);
           try {
             if (stableHow == WriteStableHow.DATA_SYNC) {
               fos.hsync();
@@ -1237,7 +1177,7 @@ class OpenFileCtx {
               fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
             }
           } catch (IOException e) {
-            LOG.error("hsync failed with writeCtx: " + writeCtx, e);
+            LOG.error("hsync failed with writeCtx: {}", writeCtx, e);
             throw e;
           }
         }
@@ -1245,8 +1185,8 @@ class OpenFileCtx {
         WccAttr preOpAttr = latestAttr.getWccAttr();
         WccData fileWcc = new WccData(preOpAttr, latestAttr);
         if (writeCtx.getOriginalCount() != WriteCtx.INVALID_ORIGINAL_COUNT) {
-          LOG.warn("Return original count: " + writeCtx.getOriginalCount()
-              + " instead of real data count: " + count);
+          LOG.warn("Return original count: {} instead of real data count: {}",
+              writeCtx.getOriginalCount(), count);
           count = writeCtx.getOriginalCount();
         }
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
@@ -1260,8 +1200,8 @@ class OpenFileCtx {
       processCommits(writeCtx.getOffset() + writeCtx.getCount());
      
     } catch (IOException e) {
-      LOG.error("Error writing to fileHandle " + handle.dumpFileHandle()
-          + " at offset " + offset + " and length " + count, e);
+      LOG.error("Error writing to fileHandle {} at offset {} and length {}",
+          handle.dumpFileHandle(), offset, count, e);
       if (!writeCtx.getReplied()) {
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO);
         Nfs3Utils.writeChannel(channel, response.serialize(
@@ -1269,8 +1209,8 @@ class OpenFileCtx {
         // Keep stream open. Either client retries or SteamMonitor closes it.
       }
 
-      LOG.info("Clean up open file context for fileId: "
-          + latestAttr.getFileId());
+      LOG.info("Clean up open file context for fileId: {}",
+          latestAttr.getFileId());
       cleanup();
     }
   }
@@ -1297,17 +1237,16 @@ class OpenFileCtx {
         fos.close();
       }
     } catch (IOException e) {
-      LOG.info("Can't close stream for fileId: " + latestAttr.getFileId()
-          + ", error: " + e);
+      LOG.info("Can't close stream for fileId: {}, error: {}",
+          latestAttr.getFileId(), e.toString());
     }
     
     // Reply error for pending writes
-    LOG.info("There are " + pendingWrites.size() + " pending writes.");
+    LOG.info("There are {} pending writes.", pendingWrites.size());
     WccAttr preOpAttr = latestAttr.getWccAttr();
     while (!pendingWrites.isEmpty()) {
       OffsetRange key = pendingWrites.firstKey();
-      LOG.info("Fail pending write: " + key.toString()
-          + ", nextOffset=" + nextOffset.get());
+      LOG.info("Fail pending write: {}, nextOffset={}", key, nextOffset.get());
       
       WriteCtx writeCtx = pendingWrites.remove(key);
       if (!writeCtx.getReplied()) {
@@ -1325,11 +1264,12 @@ class OpenFileCtx {
       try {
         dumpOut.close();
       } catch (IOException e) {
-        LOG.error("Failed to close outputstream of dump file" + dumpFilePath, e);
+        LOG.error("Failed to close outputstream of dump file {}",
+            dumpFilePath, e);
       }
       File dumpFile = new File(dumpFilePath);
       if (dumpFile.exists() && !dumpFile.delete()) {
-        LOG.error("Failed to delete dumpfile: " + dumpFile);
+        LOG.error("Failed to delete dumpfile: {}", dumpFile);
       }
     }
     if (raf != null) {


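The hunks above all apply the same SLF4J conversion: '{}' placeholders instead of string concatenation, the exception passed as a trailing argument so its stack trace is kept, and LOG.isDebugEnabled() guards retained only where building an argument (handle.dumpFileHandle(), Nfs3Utils.getElapsedTime()) is itself costly. A minimal, self-contained sketch of that pattern, not part of this patch, is below; the class name and sample values are illustrative, and it assumes slf4j-api plus a binding on the classpath.

import java.io.IOException;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ParameterizedLoggingSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(ParameterizedLoggingSketch.class);

  public static void main(String[] args) {
    long fileId = 42L;
    long offset = 1024L;

    // Placeholder substitution is deferred until the level check passes, so
    // the explicit isDebugEnabled() guard can be dropped for cheap arguments.
    LOG.debug("do write, fileId: {} offset: {}", fileId, offset);

    try {
      throw new IOException("simulated stream error");
    } catch (IOException e) {
      // A trailing Throwable after the placeholder arguments is logged with
      // its full stack trace, unlike concatenating "+ e", which only prints
      // e.toString().
      LOG.error("Error writing to fileId {} at offset {}", fileId, offset, e);
    }
  }
}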


[46/50] [abbrv] hadoop git commit: MAPREDUCE-7052. TestFixedLengthInputFormat#testFormatCompressedIn is flaky. Contributed by Peter Bacsko

Posted by ae...@apache.org.
MAPREDUCE-7052. TestFixedLengthInputFormat#testFormatCompressedIn is flaky. Contributed by Peter Bacsko


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a53d62ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a53d62ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a53d62ab

Branch: refs/heads/HDFS-7240
Commit: a53d62ab26e170a0338f93e228718da52e9196e4
Parents: da59acd
Author: Jason Lowe <jl...@apache.org>
Authored: Thu Feb 15 15:12:57 2018 -0600
Committer: Jason Lowe <jl...@apache.org>
Committed: Thu Feb 15 15:12:57 2018 -0600

----------------------------------------------------------------------
 .../java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a53d62ab/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
index 4864dd0..5134729 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java
@@ -301,7 +301,7 @@ public class TestFixedLengthInputFormat {
       if (i > 0) {
         if (i == (MAX_TESTS-1)) {
           // Test a split size that is less than record len
-          numSplits = (int)(fileSize/Math.floor(recordLength/2));
+          numSplits = (int)(fileSize/ Math.max(1, Math.floor(recordLength/2)));
         } else {
           if (MAX_TESTS % i == 0) {
             // Let us create a split size that is forced to be 


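The one-line fix above clamps the split-count denominator to at least 1. A standalone sketch of the arithmetic, with illustrative values that are not taken from the test, shows what the clamp guards against: if the record length happens to be 1, integer division makes the old denominator 0.0, the double division yields Infinity, and the int cast collapses to Integer.MAX_VALUE splits.

public class SplitDenominatorSketch {
  public static void main(String[] args) {
    long fileSize = 4096L;   // illustrative values only
    int recordLength = 1;

    // Old expression: recordLength / 2 == 0 (integer division), so the
    // denominator is 0.0, fileSize / 0.0 is Infinity, and the cast to int
    // saturates at Integer.MAX_VALUE.
    int oldNumSplits = (int) (fileSize / Math.floor(recordLength / 2));

    // Patched expression: the denominator is clamped to at least 1.
    int newNumSplits =
        (int) (fileSize / Math.max(1, Math.floor(recordLength / 2)));

    System.out.println("old=" + oldNumSplits + " new=" + newNumSplits);
  }
}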


[19/50] [abbrv] hadoop git commit: YARN-7739. DefaultAMSProcessor should properly check customized resource types against minimum/maximum allocation. (wangda)

Posted by ae...@apache.org.
YARN-7739. DefaultAMSProcessor should properly check customized resource types against minimum/maximum allocation. (wangda)

Change-Id: I10cc9341237d9a2fc0f8c855efb98a36b91389e2


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d02e42ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d02e42ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d02e42ce

Branch: refs/heads/HDFS-7240
Commit: d02e42cee4a08a47ed2835f7a4a100daaa95833f
Parents: d4c9857
Author: Wangda Tan <wa...@apache.org>
Authored: Mon Feb 12 10:29:37 2018 +0800
Committer: Wangda Tan <wa...@apache.org>
Committed: Mon Feb 12 10:29:37 2018 +0800

----------------------------------------------------------------------
 .../scheduler/SchedulerUtils.java               |  36 ++--
 .../TestApplicationMasterService.java           | 190 +++++++++++++++++++
 .../scheduler/capacity/TestUtils.java           |   8 +-
 3 files changed, 214 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d02e42ce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
index 32f5824..0080a29 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.InvalidLabelResourceRequestException;
@@ -51,6 +52,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode;
 import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
 /**
@@ -276,23 +278,23 @@ public class SchedulerUtils {
       throw new InvalidResourceRequestException(ye);
     }
 
-    if (resReq.getCapability().getMemorySize() < 0 ||
-        resReq.getCapability().getMemorySize() > maximumResource.getMemorySize()) {
-      throw new InvalidResourceRequestException("Invalid resource request"
-          + ", requested memory < 0"
-          + ", or requested memory > max configured"
-          + ", requestedMemory=" + resReq.getCapability().getMemorySize()
-          + ", maxMemory=" + maximumResource.getMemorySize());
-    }
-    if (resReq.getCapability().getVirtualCores() < 0 ||
-        resReq.getCapability().getVirtualCores() >
-        maximumResource.getVirtualCores()) {
-      throw new InvalidResourceRequestException("Invalid resource request"
-          + ", requested virtual cores < 0"
-          + ", or requested virtual cores > max configured"
-          + ", requestedVirtualCores="
-          + resReq.getCapability().getVirtualCores()
-          + ", maxVirtualCores=" + maximumResource.getVirtualCores());
+    Resource requestedResource = resReq.getCapability();
+    for (int i = 0; i < ResourceUtils.getNumberOfKnownResourceTypes(); i++) {
+      ResourceInformation reqRI = requestedResource.getResourceInformation(i);
+      ResourceInformation maxRI = maximumResource.getResourceInformation(i);
+      if (reqRI.getValue() < 0 || reqRI.getValue() > maxRI.getValue()) {
+        throw new InvalidResourceRequestException(
+            "Invalid resource request, requested resource type=[" + reqRI
+                .getName()
+                + "] < 0 or greater than maximum allowed allocation. Requested "
+                + "resource=" + requestedResource
+                + ", maximum allowed allocation=" + maximumResource
+                + ", please note that maximum allowed allocation is calculated "
+                + "by scheduler based on maximum resource of registered "
+                + "NodeManagers, which might be less than configured "
+                + "maximum allocation=" + ResourceUtils
+                .getResourceTypesMaximumAllocation());
+      }
     }
     String labelExp = resReq.getNodeLabelExpression();
     // we don't allow specify label expression other than resourceName=ANY now

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d02e42ce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
index 421ddbc..ceaf236 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
@@ -19,6 +19,8 @@
 package org.apache.hadoop.yarn.server.resourcemanager;
 
 import static java.lang.Thread.sleep;
+import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB;
+import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -29,6 +31,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import com.google.common.collect.ImmutableMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -41,6 +44,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRespons
 import org.apache.hadoop.yarn.api.protocolrecords
     .RegisterApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateRequestPBImpl;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Container;
@@ -48,27 +52,39 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.ProfileCapability;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
 import org.apache.hadoop.yarn.exceptions.InvalidContainerReleaseException;
+import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.MockResourceProfileManager;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceProfilesManager;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.TestResourceProfiles;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.Assert;
 import org.junit.Before;
@@ -665,6 +681,180 @@ public class TestApplicationMasterService {
     rm.stop();
   }
 
+  @Test(timeout = 300000)
+  public void testCSValidateRequestCapacityAgainstMinMaxAllocation()
+      throws Exception {
+    testValidateRequestCapacityAgainstMinMaxAllocation(CapacityScheduler.class);
+  }
+
+  @Test(timeout = 300000)
+  public void testFSValidateRequestCapacityAgainstMinMaxAllocation()
+      throws Exception {
+    testValidateRequestCapacityAgainstMinMaxAllocation(FairScheduler.class);
+  }
+
+  private void testValidateRequestCapacityAgainstMinMaxAllocation(Class<?> schedulerCls)
+      throws Exception {
+
+    // Initialize resource map for 2 types.
+    Map<String, ResourceInformation> riMap = new HashMap<>();
+
+    // Initialize mandatory resources
+    ResourceInformation memory = ResourceInformation.newInstance(
+        ResourceInformation.MEMORY_MB.getName(),
+        ResourceInformation.MEMORY_MB.getUnits(),
+        YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
+        DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
+    ResourceInformation vcores = ResourceInformation.newInstance(
+        ResourceInformation.VCORES.getName(),
+        ResourceInformation.VCORES.getUnits(),
+        YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
+        DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
+    riMap.put(ResourceInformation.MEMORY_URI, memory);
+    riMap.put(ResourceInformation.VCORES_URI, vcores);
+
+    ResourceUtils.initializeResourcesFromResourceInformationMap(riMap);
+
+    CapacitySchedulerConfiguration csconf =
+        new CapacitySchedulerConfiguration();
+    csconf.setResourceComparator(DominantResourceCalculator.class);
+
+    YarnConfiguration conf = new YarnConfiguration(csconf);
+    // Don't reset resource types since we have already configured resource
+    // types
+    conf.setBoolean(TestResourceProfiles.TEST_CONF_RESET_RESOURCE_TYPES, false);
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, schedulerCls,
+        ResourceScheduler.class);
+    conf.setBoolean(YarnConfiguration.RM_RESOURCE_PROFILES_ENABLED, false);
+
+    MockRM rm = new MockRM(conf);
+    rm.start();
+
+    MockNM nm1 = rm.registerNode("199.99.99.1:1234", TestUtils
+        .createResource(DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
+            DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, null));
+
+    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1);
+
+    // Now request resource, memory > allowed
+    boolean exception = false;
+    try {
+      am1.allocate(Arrays.asList(ResourceRequest.newBuilder().capability(
+          Resource.newInstance(9 * GB, 1)).numContainers(1).resourceName("*")
+          .build()), null);
+    } catch (InvalidResourceRequestException e) {
+      exception = true;
+    }
+    Assert.assertTrue(exception);
+
+    exception = false;
+    try {
+      // Now request resource, vcore > allowed
+      am1.allocate(Arrays.asList(ResourceRequest.newBuilder().capability(
+          Resource.newInstance(8 * GB, 18)).numContainers(1).resourceName("*")
+          .build()), null);
+    } catch (InvalidResourceRequestException e) {
+      exception = true;
+    }
+    Assert.assertTrue(exception);
+
+    rm.close();
+  }
+
+  @Test(timeout = 300000)
+  public void testValidateRequestCapacityAgainstMinMaxAllocationFor3rdResourceTypes()
+      throws Exception {
+
+    // Initialize resource map for 2 types.
+    Map<String, ResourceInformation> riMap = new HashMap<>();
+
+    // Initialize mandatory resources
+    ResourceInformation memory = ResourceInformation.newInstance(
+        ResourceInformation.MEMORY_MB.getName(),
+        ResourceInformation.MEMORY_MB.getUnits(),
+        YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
+        DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
+    ResourceInformation vcores = ResourceInformation.newInstance(
+        ResourceInformation.VCORES.getName(),
+        ResourceInformation.VCORES.getUnits(),
+        YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
+        DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
+    ResourceInformation res_1 = ResourceInformation.newInstance("res_1",
+        ResourceInformation.VCORES.getUnits(), 0, 4);
+    riMap.put(ResourceInformation.MEMORY_URI, memory);
+    riMap.put(ResourceInformation.VCORES_URI, vcores);
+    riMap.put("res_1", res_1);
+
+    ResourceUtils.initializeResourcesFromResourceInformationMap(riMap);
+
+    CapacitySchedulerConfiguration csconf =
+        new CapacitySchedulerConfiguration();
+    csconf.setResourceComparator(DominantResourceCalculator.class);
+
+    YarnConfiguration conf = new YarnConfiguration(csconf);
+    // Don't reset resource types since we have already configured resource
+    // types
+    conf.setBoolean(TestResourceProfiles.TEST_CONF_RESET_RESOURCE_TYPES, false);
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
+    conf.setBoolean(YarnConfiguration.RM_RESOURCE_PROFILES_ENABLED, false);
+
+    MockRM rm = new MockRM(conf);
+    rm.start();
+
+    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+    LeafQueue leafQueue = (LeafQueue) cs.getQueue("default");
+
+    MockNM nm1 = rm.registerNode("199.99.99.1:1234", TestUtils
+        .createResource(DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
+            DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
+            ImmutableMap.of("res_1", 4)));
+
+    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1);
+
+    Assert.assertEquals(Resource.newInstance(1 * GB, 1),
+        leafQueue.getUsedResources());
+
+    // Now request resource, memory > allowed
+    boolean exception = false;
+    try {
+      am1.allocate(Arrays.asList(ResourceRequest.newBuilder().capability(
+          TestUtils.createResource(9 * GB, 1, ImmutableMap.of("res_1", 1)))
+          .numContainers(1).resourceName("*").build()), null);
+    } catch (InvalidResourceRequestException e) {
+      exception = true;
+    }
+    Assert.assertTrue(exception);
+
+    exception = false;
+    try {
+      // Now request resource, vcore > allowed
+      am1.allocate(Arrays.asList(ResourceRequest.newBuilder().capability(
+          TestUtils.createResource(8 * GB, 18, ImmutableMap.of("res_1", 1)))
+          .numContainers(1).resourceName("*")
+          .build()), null);
+    } catch (InvalidResourceRequestException e) {
+      exception = true;
+    }
+    Assert.assertTrue(exception);
+
+    exception = false;
+    try {
+      // Now request resource, res_1 > allowed
+      am1.allocate(Arrays.asList(ResourceRequest.newBuilder().capability(
+          TestUtils.createResource(8 * GB, 1, ImmutableMap.of("res_1", 100)))
+          .numContainers(1).resourceName("*")
+          .build()), null);
+    } catch (InvalidResourceRequestException e) {
+      exception = true;
+    }
+    Assert.assertTrue(exception);
+
+    rm.close();
+  }
+
   private void sentRMContainerLaunched(MockRM rm, ContainerId containerId) {
     CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
     RMContainer rmContainer = cs.getRMContainer(containerId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d02e42ce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
index 542ba3e..7180e24 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
@@ -478,9 +478,11 @@ public class TestUtils {
   public static Resource createResource(long memory, int vcores,
       Map<String, Integer> nameToValues) {
     Resource res = Resource.newInstance(memory, vcores);
-    for (Map.Entry<String, Integer> entry : nameToValues.entrySet()) {
-      res.setResourceInformation(entry.getKey(), ResourceInformation
-          .newInstance(entry.getKey(), "", entry.getValue()));
+    if (nameToValues != null) {
+      for (Map.Entry<String, Integer> entry : nameToValues.entrySet()) {
+        res.setResourceInformation(entry.getKey(), ResourceInformation
+            .newInstance(entry.getKey(), "", entry.getValue()));
+      }
     }
     return res;
   }


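The SchedulerUtils change at the top of this patch replaces the two hard-coded memory and vcores checks with one loop over every registered resource type, and the new tests exercise it with a custom res_1 type. A framework-free sketch of that validation shape is below; the class, method, and map-based resource representation are illustrative only and are not YARN API.

import java.util.LinkedHashMap;
import java.util.Map;

public class ResourceValidationSketch {

  // Reject a request if any resource dimension is negative or exceeds the
  // per-type maximum, mirroring the generalized check in SchedulerUtils.
  static void validateAgainstMaximum(Map<String, Long> requested,
      Map<String, Long> maximum) {
    for (Map.Entry<String, Long> max : maximum.entrySet()) {
      long req = requested.getOrDefault(max.getKey(), 0L);
      if (req < 0 || req > max.getValue()) {
        throw new IllegalArgumentException(
            "Invalid resource request, requested resource type=["
            + max.getKey() + "] < 0 or greater than maximum allowed allocation "
            + max.getValue());
      }
    }
  }

  public static void main(String[] args) {
    Map<String, Long> maximum = new LinkedHashMap<>();
    maximum.put("memory-mb", 8192L);
    maximum.put("vcores", 16L);
    maximum.put("res_1", 4L);    // custom resource type, as in the new tests

    Map<String, Long> request = new LinkedHashMap<>();
    request.put("memory-mb", 4096L);
    request.put("vcores", 2L);
    request.put("res_1", 100L);  // exceeds the configured maximum

    try {
      validateAgainstMaximum(request, maximum);
    } catch (IllegalArgumentException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}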


[45/50] [abbrv] hadoop git commit: xattr api cleanup

Posted by ae...@apache.org.
xattr api cleanup


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da59acd8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da59acd8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da59acd8

Branch: refs/heads/HDFS-7240
Commit: da59acd8ca9ab5b49b988ffca64e8cce91c5f741
Parents: 481d79f
Author: Kihwal Lee <ki...@apache.org>
Authored: Thu Feb 15 11:11:55 2018 -0600
Committer: Kihwal Lee <ki...@apache.org>
Committed: Thu Feb 15 11:11:55 2018 -0600

----------------------------------------------------------------------
 .../hdfs/server/namenode/FSDirXAttrOp.java      |  3 +-
 .../hdfs/server/namenode/FSXAttrBaseTest.java   | 63 ++++++++++++++++----
 2 files changed, 51 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/da59acd8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 3223467..be3092c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -137,8 +137,7 @@ class FSDirXAttrOp {
     final boolean isRawPath = FSDirectory.isReservedRawName(src);
     final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ);
     if (fsd.isPermissionEnabled()) {
-      /* To access xattr names, you need EXECUTE in the owning directory. */
-      fsd.checkParentAccess(pc, iip, FsAction.EXECUTE);
+      fsd.checkPathAccess(pc, iip, FsAction.READ);
     }
     final List<XAttr> all = FSDirXAttrOp.getXAttrs(fsd, iip);
     return XAttrPermissionFilter.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da59acd8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
index 43eeadf..b5f7573 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
@@ -843,28 +843,37 @@ public class FSXAttrBaseTest {
     }
 
     /*
-     * Check that execute/scan access to the parent dir is sufficient to get
-     * xattr names.
+     * Check that execute/scan access to the parent dir is not
+     * sufficient to get xattr names.
      */
     fs.setPermission(path, new FsPermission((short) 0701));
     user.doAs(new PrivilegedExceptionAction<Object>() {
         @Override
         public Object run() throws Exception {
+        try {
           final FileSystem userFs = dfsCluster.getFileSystem();
           userFs.listXAttrs(childDir);
-          return null;
+          fail("expected AccessControlException");
+        } catch (AccessControlException ace) {
+          GenericTestUtils.assertExceptionContains("Permission denied", ace);
         }
+        return null;
+      }
       });
 
     /*
      * Test that xattrs in the "trusted" namespace are filtered correctly.
      */
+    // Allow the user to read child path.
+    fs.setPermission(childDir, new FsPermission((short) 0704));
     fs.setXAttr(childDir, "trusted.myxattr", "1234".getBytes());
     user.doAs(new PrivilegedExceptionAction<Object>() {
         @Override
         public Object run() throws Exception {
           final FileSystem userFs = dfsCluster.getFileSystem();
-          assertTrue(userFs.listXAttrs(childDir).size() == 1);
+          List<String> xattrs = userFs.listXAttrs(childDir);
+          assertTrue(xattrs.size() == 1);
+          assertEquals(name1, xattrs.get(0));
           return null;
         }
       });
@@ -1109,20 +1118,48 @@ public class FSXAttrBaseTest {
             }
 
             /*
-            * Test that only user who have parent directory execute access
-            *  can see raw.* xattrs returned from listXAttr
+            * Test that user who have parent directory execute access
+            *  can also not see raw.* xattrs returned from listXAttr
             */
-            // non-raw path
-            final List<String> xattrNames = userFs.listXAttrs(path);
-            assertTrue(xattrNames.size() == 0);
+            try {
+              // non-raw path
+              userFs.listXAttrs(path);
+              fail("listXAttr should have thrown AccessControlException");
+            } catch (AccessControlException ace) {
+              // expected
+            }
 
-            // raw path
-            List<String> rawXattrs = userFs.listXAttrs(rawPath);
-            assertTrue(rawXattrs.size() == 1);
-            assertTrue(rawXattrs.get(0).equals(raw1));
+            try {
+              // raw path
+              userFs.listXAttrs(rawPath);
+              fail("listXAttr should have thrown AccessControlException");
+            } catch (AccessControlException ace) {
+              // expected
+            }
             return null;
           }
         });
+      /*
+        Test user who have read access can list xattrs in "raw.*" namespace
+       */
+      fs.setPermission(path, new FsPermission((short) 0751));
+      final Path childDir = new Path(path, "child" + pathCount);
+      FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short)
+          0704));
+      final Path rawChildDir =
+          new Path("/.reserved/raw" + childDir.toString());
+      fs.setXAttr(rawChildDir, raw1, value1);
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final FileSystem userFs = dfsCluster.getFileSystem();
+          // raw path
+          List<String> xattrs = userFs.listXAttrs(rawChildDir);
+          assertEquals(1, xattrs.size());
+          assertEquals(raw1, xattrs.get(0));
+          return null;
+        }
+      });
       fs.removeXAttr(rawPath, raw1);
     }
 


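The FSDirXAttrOp change above switches the permission check for listing xattr names from EXECUTE on the parent directory to READ on the path itself, which is what the updated tests assert. A hedged client-side sketch of the new requirement is below; the path and permission bits are illustrative, and it assumes a Configuration that points at a running HDFS cluster.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class ListXAttrsPermissionSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());

    // Illustrative directory; mode 0704 grants "other" users READ on the
    // directory itself, which is what the patched check requires (plus the
    // usual EXECUTE on ancestors for traversal).
    Path dir = new Path("/tmp/xattr-demo");
    fs.mkdirs(dir, new FsPermission((short) 0704));
    fs.setXAttr(dir, "user.myxattr", "1234".getBytes());

    // With this patch, listXAttrs needs READ on the path being listed;
    // EXECUTE on the parent directory alone is no longer sufficient.
    List<String> names = fs.listXAttrs(dir);
    System.out.println("xattr names: " + names);
  }
}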


[29/50] [abbrv] hadoop git commit: YARN-7813: Capacity Scheduler Intra-queue Preemption should be configurable for each queue

Posted by ae...@apache.org.
YARN-7813: Capacity Scheduler Intra-queue Preemption should be configurable for each queue


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5e6e3de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5e6e3de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5e6e3de

Branch: refs/heads/HDFS-7240
Commit: c5e6e3de1c31eda052f89eddd7bba288625936b9
Parents: 0c5d7d7
Author: Eric Payne <ep...@apache.org>
Authored: Tue Feb 13 10:11:02 2018 -0600
Committer: Eric Payne <ep...@apache.org>
Committed: Tue Feb 13 10:11:02 2018 -0600

----------------------------------------------------------------------
 .../hadoop/yarn/api/records/QueueInfo.java      | 35 +++++++
 .../src/main/proto/yarn_protos.proto            |  1 +
 .../apache/hadoop/yarn/client/cli/QueueCLI.java |  6 ++
 .../hadoop/yarn/client/ProtocolHATestBase.java  |  3 +-
 .../hadoop/yarn/client/cli/TestYarnCLI.java     | 96 ++++++++++++++++++--
 .../api/records/impl/pb/QueueInfoPBImpl.java    | 13 +++
 .../hadoop/yarn/api/TestPBImplRecords.java      |  2 +-
 .../capacity/IntraQueueCandidatesSelector.java  |  4 +-
 .../scheduler/capacity/AbstractCSQueue.java     | 72 +++++++++++++--
 .../scheduler/capacity/CSQueue.java             | 16 +++-
 .../CapacitySchedulerConfiguration.java         | 15 +++
 .../webapp/CapacitySchedulerPage.java           |  5 +-
 .../dao/CapacitySchedulerLeafQueueInfo.java     |  6 ++
 .../TestConfigurationMutationACLPolicies.java   |  2 +-
 .../TestSchedulerApplicationAttempt.java        |  2 +-
 .../scheduler/capacity/TestLeafQueue.java       |  2 +-
 .../src/site/markdown/CapacityScheduler.md      |  3 +-
 17 files changed, 257 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5e6e3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
index 897b442..57ea9bf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
@@ -94,6 +94,26 @@ public abstract class QueueInfo {
     return queueInfo;
   }
 
+  @Private
+  @Unstable
+  public static QueueInfo newInstance(String queueName, float capacity,
+      float maximumCapacity, float currentCapacity,
+      List<QueueInfo> childQueues, List<ApplicationReport> applications,
+      QueueState queueState, Set<String> accessibleNodeLabels,
+      String defaultNodeLabelExpression, QueueStatistics queueStatistics,
+      boolean preemptionDisabled,
+      Map<String, QueueConfigurations> queueConfigurations,
+      boolean intraQueuePreemptionDisabled) {
+    QueueInfo queueInfo = QueueInfo.newInstance(queueName, capacity,
+        maximumCapacity, currentCapacity,
+        childQueues, applications,
+        queueState, accessibleNodeLabels,
+        defaultNodeLabelExpression, queueStatistics,
+        preemptionDisabled, queueConfigurations);
+    queueInfo.setIntraQueuePreemptionDisabled(intraQueuePreemptionDisabled);
+    return queueInfo;
+  }
+
   /**
    * Get the <em>name</em> of the queue.
    * @return <em>name</em> of the queue
@@ -261,4 +281,19 @@ public abstract class QueueInfo {
   @Unstable
   public abstract void setQueueConfigurations(
       Map<String, QueueConfigurations> queueConfigurations);
+
+
+  /**
+   * Get the intra-queue preemption status of the queue.
+   * @return if property is not in proto, return null;
+   *        otherwise, return intra-queue preemption status of the queue
+   */
+  @Public
+  @Stable
+  public abstract Boolean getIntraQueuePreemptionDisabled();
+
+  @Private
+  @Unstable
+  public abstract void setIntraQueuePreemptionDisabled(
+      boolean intraQueuePreemptionDisabled);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5e6e3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 25c8569..b978761 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -569,6 +569,7 @@ message QueueInfoProto {
   optional QueueStatisticsProto queueStatistics = 10;
   optional bool preemptionDisabled = 11;
   repeated QueueConfigurationsMapProto queueConfigurationsMap = 12;
+  optional bool intraQueuePreemptionDisabled = 13;
 }
 
 message QueueConfigurationsProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5e6e3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java
index 330b081..2c3dfd0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java
@@ -158,5 +158,11 @@ public class QueueCLI extends YarnCLI {
       writer.print("\tPreemption : ");
       writer.println(preemptStatus ? "disabled" : "enabled");
     }
+
+    Boolean intraQueuePreemption = queueInfo.getIntraQueuePreemptionDisabled();
+    if (intraQueuePreemption != null) {
+      writer.print("\tIntra-queue Preemption : ");
+      writer.println(intraQueuePreemption ? "disabled" : "enabled");
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5e6e3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
index 54537ce..7937b15 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
@@ -665,7 +665,8 @@ public abstract class ProtocolHATestBase extends ClientBaseWithFixes {
 
     public QueueInfo createFakeQueueInfo() {
       return QueueInfo.newInstance("root", 100f, 100f, 50f, null,
-          createFakeAppReports(), QueueState.RUNNING, null, null, null, false);
+          createFakeAppReports(), QueueState.RUNNING, null, null, null, false,
+          null, false);
     }
 
     public List<QueueUserACLInfo> createFakeQueueUserACLInfoList() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5e6e3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
index fdd3fc8..1f6488d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
@@ -1712,7 +1712,8 @@ public class TestYarnCLI {
     nodeLabels.add("GPU");
     nodeLabels.add("JDK_7");
     QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f,
-        null, null, QueueState.RUNNING, nodeLabels, "GPU", null, false, null);
+        null, null, QueueState.RUNNING, nodeLabels, "GPU", null, false, null,
+        false);
     when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo);
     int result = cli.run(new String[] { "-status", "queueA" });
     assertEquals(0, result);
@@ -1728,13 +1729,14 @@ public class TestYarnCLI {
     pw.println("\tDefault Node Label expression : " + "GPU");
     pw.println("\tAccessible Node Labels : " + "JDK_7,GPU");
     pw.println("\tPreemption : " + "enabled");
+    pw.println("\tIntra-queue Preemption : " + "enabled");
     pw.close();
     String queueInfoStr = baos.toString("UTF-8");
     Assert.assertEquals(queueInfoStr, sysOutStream.toString());
   }
 
   @Test
-  public void testGetQueueInfoPreemptionEnabled() throws Exception {
+  public void testGetQueueInfoOverrideIntraQueuePreemption() throws Exception {
     CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
     ReservationSystemTestUtil.setupQueueConfiguration(conf);
     conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
@@ -1743,9 +1745,80 @@ public class TestYarnCLI {
     conf.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
         "org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity."
         + "ProportionalCapacityPreemptionPolicy");
+    // Turn on cluster-wide intra-queue preemption
+    conf.setBoolean(
+        CapacitySchedulerConfiguration.INTRAQUEUE_PREEMPTION_ENABLED, true);
+    // Disable intra-queue preemption for all queues
+    conf.setBoolean(CapacitySchedulerConfiguration.PREFIX
+        + "root.intra-queue-preemption.disable_preemption", true);
+    // Enable intra-queue preemption for the a1 queue
+    conf.setBoolean(CapacitySchedulerConfiguration.PREFIX
+        + "root.a.a1.intra-queue-preemption.disable_preemption", false);
+    MiniYARNCluster cluster =
+        new MiniYARNCluster("testGetQueueInfoOverrideIntraQueuePreemption",
+            2, 1, 1);
+
+    YarnClient yarnClient = null;
+    try {
+      cluster.init(conf);
+      cluster.start();
+      final Configuration yarnConf = cluster.getConfig();
+      yarnClient = YarnClient.createYarnClient();
+      yarnClient.init(yarnConf);
+      yarnClient.start();
+
+      QueueCLI cli = new QueueCLI();
+      cli.setClient(yarnClient);
+      cli.setSysOutPrintStream(sysOut);
+      cli.setSysErrPrintStream(sysErr);
+      sysOutStream.reset();
+      // Get status for the root.a queue
+      int result = cli.run(new String[] { "-status", "a" });
+      assertEquals(0, result);
+      String queueStatusOut = sysOutStream.toString();
+      Assert.assertTrue(queueStatusOut
+          .contains("\tPreemption : enabled"));
+      // In-queue preemption is disabled at the "root.a" queue level
+      Assert.assertTrue(queueStatusOut
+          .contains("Intra-queue Preemption : disabled"));
+      cli = new QueueCLI();
+      cli.setClient(yarnClient);
+      cli.setSysOutPrintStream(sysOut);
+      cli.setSysErrPrintStream(sysErr);
+      sysOutStream.reset();
+      // Get status for the root.a.a1 queue
+      result = cli.run(new String[] { "-status", "a1" });
+      assertEquals(0, result);
+      queueStatusOut = sysOutStream.toString();
+      Assert.assertTrue(queueStatusOut
+          .contains("\tPreemption : enabled"));
+      // In-queue preemption is enabled at the "root.a.a1" queue level
+      Assert.assertTrue(queueStatusOut
+          .contains("Intra-queue Preemption : enabled"));
+    } finally {
+      // clean-up
+      if (yarnClient != null) {
+        yarnClient.stop();
+      }
+      cluster.stop();
+      cluster.close();
+    }
+  }
+
+  @Test
+  public void testGetQueueInfoPreemptionEnabled() throws Exception {
+    CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
+    ReservationSystemTestUtil.setupQueueConfiguration(conf);
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
     conf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
+    conf.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
+        "org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity."
+        + "ProportionalCapacityPreemptionPolicy");
+    conf.setBoolean(
+        CapacitySchedulerConfiguration.INTRAQUEUE_PREEMPTION_ENABLED, true);
     MiniYARNCluster cluster =
-        new MiniYARNCluster("testReservationAPIs", 2, 1, 1);
+        new MiniYARNCluster("testGetQueueInfoPreemptionEnabled", 2, 1, 1);
 
     YarnClient yarnClient = null;
     try {
@@ -1763,8 +1836,11 @@ public class TestYarnCLI {
       sysOutStream.reset();
       int result = cli.run(new String[] { "-status", "a1" });
       assertEquals(0, result);
-      Assert.assertTrue(sysOutStream.toString()
-          .contains("Preemption : enabled"));
+      String queueStatusOut = sysOutStream.toString();
+      Assert.assertTrue(queueStatusOut
+          .contains("\tPreemption : enabled"));
+      Assert.assertTrue(queueStatusOut
+          .contains("Intra-queue Preemption : enabled"));
     } finally {
       // clean-up
       if (yarnClient != null) {
@@ -1804,8 +1880,11 @@ public class TestYarnCLI {
       sysOutStream.reset();
       int result = cli.run(new String[] { "-status", "a1" });
       assertEquals(0, result);
-      Assert.assertTrue(sysOutStream.toString()
-          .contains("Preemption : disabled"));
+      String queueStatusOut = sysOutStream.toString();
+      Assert.assertTrue(queueStatusOut
+          .contains("\tPreemption : disabled"));
+      Assert.assertTrue(queueStatusOut
+          .contains("Intra-queue Preemption : disabled"));
     }
   }
   
@@ -1813,7 +1892,7 @@ public class TestYarnCLI {
   public void testGetQueueInfoWithEmptyNodeLabel() throws Exception {
     QueueCLI cli = createAndGetQueueCLI();
     QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f,
-        null, null, QueueState.RUNNING, null, null, null, true, null);
+        null, null, QueueState.RUNNING, null, null, null, true, null, true);
     when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo);
     int result = cli.run(new String[] { "-status", "queueA" });
     assertEquals(0, result);
@@ -1830,6 +1909,7 @@ public class TestYarnCLI {
         + NodeLabel.DEFAULT_NODE_LABEL_PARTITION);
     pw.println("\tAccessible Node Labels : ");
     pw.println("\tPreemption : " + "disabled");
+    pw.println("\tIntra-queue Preemption : " + "disabled");
     pw.close();
     String queueInfoStr = baos.toString("UTF-8");
     Assert.assertEquals(queueInfoStr, sysOutStream.toString());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5e6e3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java
index 1d2a6dd..f735139 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java
@@ -500,4 +500,17 @@ public class QueueInfoPBImpl extends QueueInfo {
     this.queueConfigurations.putAll(queueConfigurations);
   }
 
+  @Override
+  public Boolean getIntraQueuePreemptionDisabled() {
+    QueueInfoProtoOrBuilder p = viaProto ? proto : builder;
+    return (p.hasIntraQueuePreemptionDisabled()) ? p
+        .getIntraQueuePreemptionDisabled() : null;
+  }
+
+  @Override
+  public void setIntraQueuePreemptionDisabled(
+      boolean intraQueuePreemptionDisabled) {
+    maybeInitBuilder();
+    builder.setIntraQueuePreemptionDisabled(intraQueuePreemptionDisabled);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5e6e3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
index ae80910..8c41906 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
@@ -414,7 +414,7 @@ public class TestPBImplRecords extends BasePBImplRecordsTest {
     // it is recursive(has sub queues)
     typeValueCache.put(QueueInfo.class, QueueInfo.newInstance("root", 1.0f,
         1.0f, 0.1f, null, null, QueueState.RUNNING, ImmutableSet.of("x", "y"),
-        "x && y", null, false));
+        "x && y", null, false, null, false));
     generateByNewInstance(QueueStatistics.class);
     generateByNewInstance(QueueUserACLInfo.class);
     generateByNewInstance(YarnClusterMetrics.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5e6e3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
index 44fa736..5b6932e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
@@ -114,8 +114,8 @@ public class IntraQueueCandidatesSelector extends PreemptionCandidatesSelector {
           continue;
         }
 
-        // Don't preempt if disabled for this queue.
-        if (leafQueue.getPreemptionDisabled()) {
+        // Don't preempt if intra-queue preemption is disabled for this queue.
+        if (leafQueue.getIntraQueuePreemptionDisabled()) {
           continue;
         }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5e6e3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 9afbdd5..b29ee29 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -97,6 +97,9 @@ public abstract class AbstractCSQueue implements CSQueue {
       new HashMap<AccessType, AccessControlList>();
   volatile boolean reservationsContinueLooking;
   private volatile boolean preemptionDisabled;
+  // Indicates if the in-queue preemption setting is ever disabled within the
+  // hierarchy of this queue.
+  private boolean intraQueuePreemptionDisabledInHierarchy;
 
   // Track resource usage-by-label like used-resource/pending-resource, etc.
   volatile ResourceUsage queueUsage;
@@ -405,6 +408,8 @@ public abstract class AbstractCSQueue implements CSQueue {
 
       this.preemptionDisabled = isQueueHierarchyPreemptionDisabled(this,
           configuration);
+      this.intraQueuePreemptionDisabledInHierarchy =
+          isIntraQueueHierarchyPreemptionDisabled(this, configuration);
 
       this.priority = configuration.getQueuePriority(
           getQueuePath());
@@ -613,6 +618,8 @@ public abstract class AbstractCSQueue implements CSQueue {
     queueInfo.setCurrentCapacity(getUsedCapacity());
     queueInfo.setQueueStatistics(getQueueStatistics());
     queueInfo.setPreemptionDisabled(preemptionDisabled);
+    queueInfo.setIntraQueuePreemptionDisabled(
+        getIntraQueuePreemptionDisabled());
     queueInfo.setQueueConfigurations(getQueueConfigurations());
     return queueInfo;
   }
@@ -735,6 +742,16 @@ public abstract class AbstractCSQueue implements CSQueue {
   public boolean getPreemptionDisabled() {
     return preemptionDisabled;
   }
+
+  @Private
+  public boolean getIntraQueuePreemptionDisabled() {
+    return intraQueuePreemptionDisabledInHierarchy || preemptionDisabled;
+  }
+
+  @Private
+  public boolean getIntraQueuePreemptionDisabledInHierarchy() {
+    return intraQueuePreemptionDisabledInHierarchy;
+  }
   
   @Private
   public QueueCapacities getQueueCapacities() {
@@ -757,17 +774,19 @@ public abstract class AbstractCSQueue implements CSQueue {
   }
 
   /**
-   * The specified queue is preemptable if system-wide preemption is turned on
-   * unless any queue in the <em>qPath</em> hierarchy has explicitly turned
-   * preemption off.
-   * NOTE: Preemptability is inherited from a queue's parent.
-   * 
-   * @return true if queue has preemption disabled, false otherwise
+   * The specified queue is cross-queue preemptable if system-wide cross-queue
+   * preemption is turned on unless any queue in the <em>qPath</em> hierarchy
+   * has explicitly turned cross-queue preemption off.
+   * NOTE: Cross-queue preemptability is inherited from a queue's parent.
+   *
+   * @param q queue to check preemption state
+   * @param configuration capacity scheduler config
+   * @return true if queue has cross-queue preemption disabled, false otherwise
    */
   private boolean isQueueHierarchyPreemptionDisabled(CSQueue q,
       CapacitySchedulerConfiguration configuration) {
     boolean systemWidePreemption =
-        csContext.getConfiguration()
+        configuration
             .getBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS,
                        YarnConfiguration.DEFAULT_RM_SCHEDULER_ENABLE_MONITORS);
     CSQueue parentQ = q.getParent();
@@ -790,7 +809,44 @@ public abstract class AbstractCSQueue implements CSQueue {
     return configuration.getPreemptionDisabled(q.getQueuePath(),
                                         parentQ.getPreemptionDisabled());
   }
-  
+
+  /**
+   * The specified queue is intra-queue preemptable if
+   * 1) system-wide intra-queue preemption is turned on
+   * 2) no queue in the <em>qPath</em> hierarchy has explicitly turned off
+   *    intra-queue preemption.
+   * NOTE: Intra-queue preemptability is inherited from a queue's parent.
+   *
+   * @param q queue to check intra-queue preemption state
+   * @param configuration capacity scheduler config
+   * @return true if queue has intra-queue preemption disabled, false otherwise
+   */
+  private boolean isIntraQueueHierarchyPreemptionDisabled(CSQueue q,
+      CapacitySchedulerConfiguration configuration) {
+    boolean systemWideIntraQueuePreemption =
+        configuration.getBoolean(
+            CapacitySchedulerConfiguration.INTRAQUEUE_PREEMPTION_ENABLED,
+            CapacitySchedulerConfiguration
+            .DEFAULT_INTRAQUEUE_PREEMPTION_ENABLED);
+    // Intra-queue preemption is disabled for this queue if the system-wide
+    // intra-queue preemption flag is false
+    if (!systemWideIntraQueuePreemption) return true;
+
+    // Check if this is the root queue and the root queue's intra-queue
+    // preemption disable switch is set
+    CSQueue parentQ = q.getParent();
+    if (parentQ == null) {
+      return configuration
+          .getIntraQueuePreemptionDisabled(q.getQueuePath(), false);
+    }
+
+    // At this point, the master preemption switch is enabled down to this
+    // queue's level. Determine whether or not intra-queue preemption is enabled
+    // down to this queue's level and return that value.
+    return configuration.getIntraQueuePreemptionDisabled(q.getQueuePath(),
+        parentQ.getIntraQueuePreemptionDisabledInHierarchy());
+  }
+
   private Resource getCurrentLimitResource(String nodePartition,
       Resource clusterResource, ResourceLimits currentResourceLimits,
       SchedulingMode schedulingMode) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5e6e3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
index 5dd307c..3963dc0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
@@ -276,7 +276,21 @@ public interface CSQueue extends SchedulerQueue<CSQueue> {
    * @return true if <em>disable_preemption</em> is set, false if not
    */
   public boolean getPreemptionDisabled();
-  
+
+  /**
+   * Check whether intra-queue preemption is disabled for this queue
+   * @return true if either intra-queue preemption or inter-queue preemption
+   * is disabled for this queue, false if neither is disabled.
+   */
+  public boolean getIntraQueuePreemptionDisabled();
+
+  /**
+   * Determines whether or not the intra-queue preemption disabled switch is set
+   *  at any level in this queue's hierarchy.
+   * @return state of the intra-queue preemption switch at this queue level
+   */
+  public boolean getIntraQueuePreemptionDisabledInHierarchy();
+
   /**
    * Get QueueCapacities of this queue
    * @return queueCapacities

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5e6e3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index 00733a1..a9cf714 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -1216,6 +1216,21 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
   }
 
   /**
+   * Indicates whether intra-queue preemption is disabled on the specified queue
+   *
+   * @param queue queue path to query
+   * @param defaultVal used as default if the property is not set in the
+   * configuration
+   * @return true if intra-queue preemption is disabled on the queue, false otherwise
+   */
+  public boolean getIntraQueuePreemptionDisabled(String queue,
+      boolean defaultVal) {
+    return
+        getBoolean(getQueuePrefix(queue) + INTRA_QUEUE_PREEMPTION_CONFIG_PREFIX
+            + QUEUE_PREEMPTION_DISABLED, defaultVal);
+  }
+
+  /**
    * Get configured node labels in a given queuePath
    */
   public Set<String> getConfiguredNodeLabels(String queuePath) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5e6e3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index 7f025a7..ed2f64e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -200,7 +200,10 @@ class CapacitySchedulerPage extends RmView {
           __("Configured User Limit Factor:", lqinfo.getUserLimitFactor()).
           __("Accessible Node Labels:", StringUtils.join(",", lqinfo.getNodeLabels())).
           __("Ordering Policy: ", lqinfo.getOrderingPolicyInfo()).
-          __("Preemption:", lqinfo.getPreemptionDisabled() ? "disabled" : "enabled").
+          __("Preemption:",
+              lqinfo.getPreemptionDisabled() ? "disabled" : "enabled").
+          __("Intra-queue Preemption:", lqinfo.getIntraQueuePreemptionDisabled()
+                  ? "disabled" : "enabled").
           __("Default Node Label Expression:",
               lqinfo.getDefaultNodeLabelExpression() == null
                   ? NodeLabel.DEFAULT_NODE_LABEL_PARTITION

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5e6e3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
index b5f4e79..a53e921 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
@@ -49,6 +49,7 @@ public class CapacitySchedulerLeafQueueInfo extends CapacitySchedulerQueueInfo {
   protected ResourceInfo usedAMResource;
   protected ResourceInfo userAMResourceLimit;
   protected boolean preemptionDisabled;
+  protected boolean intraQueuePreemptionDisabled;
   protected String defaultNodeLabelExpression;
   protected int defaultPriority;
   protected boolean isAutoCreatedLeafQueue;
@@ -72,6 +73,7 @@ public class CapacitySchedulerLeafQueueInfo extends CapacitySchedulerQueueInfo {
     AMResourceLimit = new ResourceInfo(q.getAMResourceLimit());
     usedAMResource = new ResourceInfo(q.getQueueResourceUsage().getAMUsed());
     preemptionDisabled = q.getPreemptionDisabled();
+    intraQueuePreemptionDisabled = q.getIntraQueuePreemptionDisabled();
     orderingPolicyInfo = q.getOrderingPolicy().getInfo();
     defaultNodeLabelExpression = q.getDefaultNodeLabelExpression();
     defaultPriority = q.getDefaultApplicationPriority().getPriority();
@@ -150,6 +152,10 @@ public class CapacitySchedulerLeafQueueInfo extends CapacitySchedulerQueueInfo {
   public boolean getPreemptionDisabled() {
     return preemptionDisabled;
   }
+
+  public boolean getIntraQueuePreemptionDisabled() {
+    return intraQueuePreemptionDisabled;
+  }
   
   public String getOrderingPolicyInfo() {
     return orderingPolicyInfo;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5e6e3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
index 398e909..9a23c1d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
@@ -67,7 +67,7 @@ public class TestConfigurationMutationACLPolicies {
   private void mockQueue(String queueName, MutableConfScheduler scheduler)
       throws IOException {
     QueueInfo queueInfo = QueueInfo.newInstance(queueName, 0, 0, 0, null, null,
-        null, null, null, null, false);
+        null, null, null, null, false, null, false);
     when(scheduler.getQueueInfo(eq(queueName), anyBoolean(), anyBoolean()))
         .thenReturn(queueInfo);
     Queue queue = mock(Queue.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5e6e3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
index fa16eff..17f9d23 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
@@ -165,7 +165,7 @@ public class TestSchedulerApplicationAttempt {
   private Queue createQueue(String name, Queue parent, float capacity) {
     QueueMetrics metrics = QueueMetrics.forQueue(name, parent, false, conf);
     QueueInfo queueInfo = QueueInfo.newInstance(name, capacity, 1.0f, 0, null,
-        null, QueueState.RUNNING, null, "", null, false);
+        null, QueueState.RUNNING, null, "", null, false, null, false);
     ActiveUsersManager activeUsersManager = new ActiveUsersManager(metrics);
     Queue queue = mock(Queue.class);
     when(queue.getMetrics()).thenReturn(metrics);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5e6e3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index c45bdb4..04bb791 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -4103,7 +4103,7 @@ public class TestLeafQueue {
       float absCap, Resource res) {
     CSQueueMetrics metrics = CSQueueMetrics.forQueue(name, parent, false, cs.getConf());
     QueueInfo queueInfo = QueueInfo.newInstance(name, capacity, 1.0f, 0, null,
-        null, QueueState.RUNNING, null, "", null, false);
+        null, QueueState.RUNNING, null, "", null, false, null, false);
     ActiveUsersManager activeUsersManager = new ActiveUsersManager(metrics);
     AbstractCSQueue queue = mock(AbstractCSQueue.class);
     when(queue.getMetrics()).thenReturn(metrics);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5e6e3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
index 87cfd39..4ecc97a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
@@ -236,6 +236,7 @@ The following configuration parameters can be configured in yarn-site.xml to con
 | Property | Description |
 |:---- |:---- |
 | `yarn.scheduler.capacity.<queue-path>.disable_preemption` | This configuration can be set to `true` to selectively disable preemption of application containers submitted to a given queue. This property applies only when system wide preemption is enabled by configuring `yarn.resourcemanager.scheduler.monitor.enable` to *true* and `yarn.resourcemanager.scheduler.monitor.policies` to *ProportionalCapacityPreemptionPolicy*. If this property is not set for a queue, then the property value is inherited from the queue's parent. Default value is false.
+| `yarn.scheduler.capacity.<queue-path>.intra-queue-preemption.disable_preemption` | This configuration can be set to *true* to selectively disable intra-queue preemption of application containers submitted to a given queue. This property applies only when system wide preemption is enabled by configuring `yarn.resourcemanager.scheduler.monitor.enable` to *true*, `yarn.resourcemanager.scheduler.monitor.policies` to *ProportionalCapacityPreemptionPolicy*, and `yarn.resourcemanager.monitor.capacity.preemption.intra-queue-preemption.enabled` to *true*. If this property is not set for a queue, then the property value is inherited from the queue's parent. Default value is *false*.
 
 ###Reservation Properties
 
@@ -477,4 +478,4 @@ Updating a Container (Experimental - API may change in the future)
   
   The **DECREASE_RESOURCE** and **DEMOTE_EXECUTION_TYPE** container updates are automatic - the AM does not explicitly have to ask the NM to decrease the resources of the container. The other update types require the AM to explicitly ask the NM to update the container.
   
-  If the **yarn.resourcemanager.auto-update.containers** configuration parameter is set to **true** (false by default), The RM will ensure that all container updates are automatic.  
\ No newline at end of file
+  If the **yarn.resourcemanager.auto-update.containers** configuration parameter is set to **true** (false by default), The RM will ensure that all container updates are automatic.
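For context, a minimal sketch (not part of this patch) of how the new per-queue switch might be wired up programmatically. The class name and the queue path root.a are hypothetical; the property key follows the yarn.scheduler.capacity.<queue-path>.intra-queue-preemption.disable_preemption form documented above, and getIntraQueuePreemptionDisabled is the accessor added by this change.

import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;

public class IntraQueuePreemptionConfigSketch {
  public static void main(String[] args) {
    CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();

    // System-wide preemption must be enabled (and the
    // ProportionalCapacityPreemptionPolicy monitor configured, not shown here)
    // together with the global intra-queue flag for the per-queue switch to
    // have any effect.
    conf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
    conf.setBoolean(
        CapacitySchedulerConfiguration.INTRAQUEUE_PREEMPTION_ENABLED, true);

    // Disable intra-queue preemption for the hypothetical queue root.a;
    // queues below root.a inherit the value unless they override it.
    conf.setBoolean(
        "yarn.scheduler.capacity.root.a.intra-queue-preemption.disable_preemption",
        true);

    // Read the per-queue value back, falling back to the supplied default
    // when the property is unset.
    boolean disabled = conf.getIntraQueuePreemptionDisabled("root.a", false);
    System.out.println("root.a intra-queue preemption disabled: " + disabled);
  }
}

In practice the property is normally set in capacity-scheduler.xml rather than in code; the sketch only illustrates how the new accessor resolves the per-queue value.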




[06/50] [abbrv] hadoop git commit: HADOOP-15214. Make Hadoop compatible with Guava 21.0. Contributed by Igor Dvorzhak

Posted by ae...@apache.org.
HADOOP-15214. Make Hadoop compatible with Guava 21.0.
Contributed by Igor Dvorzhak


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/996796f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/996796f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/996796f1

Branch: refs/heads/HDFS-7240
Commit: 996796f1048369e0f307f935ba01af64cc751a85
Parents: 8faf0b5
Author: Steve Loughran <st...@apache.org>
Authored: Thu Feb 8 10:55:54 2018 -0800
Committer: Steve Loughran <st...@apache.org>
Committed: Thu Feb 8 10:55:54 2018 -0800

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/util/RunJar.java             | 2 +-
 .../main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java   | 4 ++--
 .../apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java  | 3 +--
 3 files changed, 4 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/996796f1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
index 0ae9e47..9dd770c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
@@ -38,12 +38,12 @@ import java.util.jar.JarInputStream;
 import java.util.jar.Manifest;
 import java.util.regex.Pattern;
 
-import com.google.common.io.NullOutputStream;
 import org.apache.commons.io.input.TeeInputStream;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.IOUtils.NullOutputStream;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/996796f1/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
index dfc6872..b6b42544 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.crypto.key.kms.server;
 
 import com.google.common.base.Preconditions;
-import com.google.common.base.Stopwatch;
 import org.apache.hadoop.util.KMSUtil;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -32,6 +31,7 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
 import org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;
+import org.apache.hadoop.util.StopWatch;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -555,7 +555,7 @@ public class KMS {
       throws Exception {
     LOG.trace("Entering reencryptEncryptedKeys method.");
     try {
-      final Stopwatch sw = new Stopwatch().start();
+      final StopWatch sw = new StopWatch().start();
       checkNotEmpty(name, "name");
       checkNotNull(jsonPayload, "jsonPayload");
       final UserGroupInformation user = HttpUserGroupInformation.get();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/996796f1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
index 01c2038..65de397 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import com.google.common.base.Stopwatch;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -672,7 +671,7 @@ public class ReencryptionHandler implements Runnable {
       if (batch.isEmpty()) {
         return new ReencryptionTask(zoneNodeId, 0, batch);
       }
-      final Stopwatch kmsSW = new Stopwatch().start();
+      final StopWatch kmsSW = new StopWatch().start();
 
       int numFailures = 0;
       String result = "Completed";
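For context, a minimal sketch (not from the commit) of the substitution this change performs: Guava's Stopwatch, whose no-argument constructor is gone in Guava 21, is replaced by Hadoop's own org.apache.hadoop.util.StopWatch. The class name and the Thread.sleep placeholder are illustrative; the start()/stop()/now(TimeUnit) calls are assumed to match the StopWatch API used in the patched files.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.util.StopWatch;

public class StopWatchMigrationSketch {
  public static void main(String[] args) throws InterruptedException {
    // Before (breaks with Guava 21):
    //   final Stopwatch sw = new Stopwatch().start();
    // After (no Guava dependency):
    StopWatch sw = new StopWatch().start();
    Thread.sleep(50);  // stand-in for the KMS / re-encryption work being timed
    sw.stop();
    System.out.println("elapsed ms: " + sw.now(TimeUnit.MILLISECONDS));
  }
}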




[36/50] [abbrv] hadoop git commit: Revert "HADOOP-12897. KerberosAuthenticator.authenticate to include URL on IO failures. Contributed by Ajay Kumar."

Posted by ae...@apache.org.
Revert "HADOOP-12897. KerberosAuthenticator.authenticate to include URL on IO failures. Contributed by Ajay Kumar."

This reverts commit 332269de065d0f40eb54ee5e53b765217c24081e.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f20f432
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f20f432
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f20f432

Branch: refs/heads/HDFS-7240
Commit: 1f20f432d2472f92797ea01711ca4cc97e7b2b23
Parents: f20dc0d
Author: Xiao Chen <xi...@apache.org>
Authored: Wed Feb 14 10:22:37 2018 -0800
Committer: Xiao Chen <xi...@apache.org>
Committed: Wed Feb 14 10:25:05 2018 -0800

----------------------------------------------------------------------
 .../client/KerberosAuthenticator.java           | 80 +++++++-------------
 .../client/TestKerberosAuthenticator.java       | 29 -------
 2 files changed, 27 insertions(+), 82 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f20f432/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
index 64d4330..942d13c 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
@@ -13,8 +13,6 @@
  */
 package org.apache.hadoop.security.authentication.client;
 
-import com.google.common.annotations.VisibleForTesting;
-import java.lang.reflect.Constructor;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.security.authentication.server.HttpConstants;
 import org.apache.hadoop.security.authentication.util.AuthToken;
@@ -179,62 +177,38 @@ public class KerberosAuthenticator implements Authenticator {
    */
   @Override
   public void authenticate(URL url, AuthenticatedURL.Token token)
-      throws IOException, AuthenticationException {
+    throws IOException, AuthenticationException {
     if (!token.isSet()) {
       this.url = url;
       base64 = new Base64(0);
-      try {
-        HttpURLConnection conn = token.openConnection(url, connConfigurator);
-        conn.setRequestMethod(AUTH_HTTP_METHOD);
-        conn.connect();
-
-        boolean needFallback = false;
-        if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
-          LOG.debug("JDK performed authentication on our behalf.");
-          // If the JDK already did the SPNEGO back-and-forth for
-          // us, just pull out the token.
-          AuthenticatedURL.extractToken(conn, token);
-          if (isTokenKerberos(token)) {
-            return;
-          }
-          needFallback = true;
+      HttpURLConnection conn = token.openConnection(url, connConfigurator);
+      conn.setRequestMethod(AUTH_HTTP_METHOD);
+      conn.connect();
+      
+      boolean needFallback = false;
+      if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
+        LOG.debug("JDK performed authentication on our behalf.");
+        // If the JDK already did the SPNEGO back-and-forth for
+        // us, just pull out the token.
+        AuthenticatedURL.extractToken(conn, token);
+        if (isTokenKerberos(token)) {
+          return;
         }
-        if (!needFallback && isNegotiate(conn)) {
-          LOG.debug("Performing our own SPNEGO sequence.");
-          doSpnegoSequence(token);
-        } else {
-          LOG.debug("Using fallback authenticator sequence.");
-          Authenticator auth = getFallBackAuthenticator();
-          // Make sure that the fall back authenticator have the same
-          // ConnectionConfigurator, since the method might be overridden.
-          // Otherwise the fall back authenticator might not have the
-          // information to make the connection (e.g., SSL certificates)
-          auth.setConnectionConfigurator(connConfigurator);
-          auth.authenticate(url, token);
-        }
-      } catch (IOException ex){
-        throw wrapExceptionWithMessage(ex,
-            "Error while authenticating with endpoint: " + url);
-      } catch (AuthenticationException ex){
-        throw wrapExceptionWithMessage(ex,
-            "Error while authenticating with endpoint: " + url);
+        needFallback = true;
+      }
+      if (!needFallback && isNegotiate(conn)) {
+        LOG.debug("Performing our own SPNEGO sequence.");
+        doSpnegoSequence(token);
+      } else {
+        LOG.debug("Using fallback authenticator sequence.");
+        Authenticator auth = getFallBackAuthenticator();
+        // Make sure that the fall back authenticator have the same
+        // ConnectionConfigurator, since the method might be overridden.
+        // Otherwise the fall back authenticator might not have the information
+        // to make the connection (e.g., SSL certificates)
+        auth.setConnectionConfigurator(connConfigurator);
+        auth.authenticate(url, token);
       }
-    }
-  }
-
-  @VisibleForTesting
-   static <T extends Exception> T wrapExceptionWithMessage(
-      T exception, String msg) {
-    Class<? extends Throwable> exceptionClass = exception.getClass();
-    try {
-      Constructor<? extends Throwable> ctor = exceptionClass
-          .getConstructor(String.class);
-      Throwable t = ctor.newInstance(msg);
-      return (T) (t.initCause(exception));
-    } catch (Throwable e) {
-      LOG.debug("Unable to wrap exception of type {}, it has "
-          + "no (String) constructor.", exceptionClass, e);
-      return exception;
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f20f432/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java
index 4aabb34..7db53ba 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java
@@ -20,9 +20,6 @@ import static org.apache.hadoop.security.authentication.server.KerberosAuthentic
 import static org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler.KEYTAB;
 import static org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler.NAME_RULES;
 
-import java.io.IOException;
-import java.nio.charset.CharacterCodingException;
-import javax.security.sasl.AuthenticationException;
 import org.apache.hadoop.minikdc.KerberosSecurityTestcase;
 import org.apache.hadoop.security.authentication.KerberosTestUtils;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
@@ -221,30 +218,4 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
     });
   }
 
-  @Test(timeout = 60000)
-  public void testWrapExceptionWithMessage() {
-    IOException ex;
-    ex = new IOException("Induced exception");
-    ex = KerberosAuthenticator.wrapExceptionWithMessage(ex, "Error while "
-        + "authenticating with endpoint: localhost");
-    Assert.assertEquals("Induced exception", ex.getCause().getMessage());
-    Assert.assertEquals("Error while authenticating with endpoint: localhost",
-        ex.getMessage());
-
-    ex = new AuthenticationException("Auth exception");
-    ex = KerberosAuthenticator.wrapExceptionWithMessage(ex, "Error while "
-        + "authenticating with endpoint: localhost");
-    Assert.assertEquals("Auth exception", ex.getCause().getMessage());
-    Assert.assertEquals("Error while authenticating with endpoint: localhost",
-        ex.getMessage());
-
-    // Test for Exception with  no (String) constructor
-    // redirect the LOG to and check log message
-    ex = new CharacterCodingException();
-    Exception ex2 = KerberosAuthenticator.wrapExceptionWithMessage(ex,
-        "Error while authenticating with endpoint: localhost");
-    Assert.assertTrue(ex instanceof CharacterCodingException);
-    Assert.assertTrue(ex.equals(ex2));
-  }
-
 }




[49/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240

Posted by ae...@apache.org.
Merge branch 'trunk' into HDFS-7240

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

Added the following code in:
hadoop/ozone/container/common/impl/ContainerManagerImpl.java
  @Override
  public void readLockInterruptibly() throws InterruptedException {
    this.lock.readLock().lockInterruptibly();
  }

and manually updated the version in
modified: hadoop-tools/hadoop-ozone/pom.xml
to
<version>3.2.0-SNAPSHOT</version>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47919787
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47919787
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47919787

Branch: refs/heads/HDFS-7240
Commit: 479197872ba89159ec2160fbdda92a1665362b5d
Parents: fc84744 4747395
Author: Anu Engineer <ae...@apache.org>
Authored: Thu Feb 15 15:28:08 2018 -0800
Committer: Anu Engineer <ae...@apache.org>
Committed: Thu Feb 15 15:28:08 2018 -0800

----------------------------------------------------------------------
 hadoop-assemblies/pom.xml                       |   4 +-
 hadoop-build-tools/pom.xml                      |   2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml |   4 +-
 .../hadoop-client-check-invariants/pom.xml      |   4 +-
 .../hadoop-client-check-test-invariants/pom.xml |   4 +-
 .../hadoop-client-integration-tests/pom.xml     |   4 +-
 .../hadoop-client-minicluster/pom.xml           |   4 +-
 .../hadoop-client-runtime/pom.xml               |   4 +-
 hadoop-client-modules/hadoop-client/pom.xml     |   4 +-
 hadoop-client-modules/pom.xml                   |   2 +-
 .../hadoop-cloud-storage/pom.xml                |   4 +-
 hadoop-cloud-storage-project/pom.xml            |   4 +-
 .../hadoop-annotations/pom.xml                  |   4 +-
 .../hadoop-auth-examples/pom.xml                |   4 +-
 hadoop-common-project/hadoop-auth/pom.xml       |  14 +-
 .../client/AuthenticatorTestCase.java           |  51 +-
 .../client/TestKerberosAuthenticator.java       |  41 +-
 hadoop-common-project/hadoop-common/pom.xml     |   4 +-
 .../hadoop-common/src/main/bin/hadoop           |   4 +
 .../hadoop-common/src/main/bin/hadoop.cmd       |   7 +-
 .../org/apache/hadoop/conf/Configuration.java   |  80 ++
 .../org/apache/hadoop/conf/StorageSize.java     | 106 +++
 .../org/apache/hadoop/conf/StorageUnit.java     | 530 +++++++++++
 .../hadoop/fs/CommonConfigurationKeys.java      |   2 +-
 .../java/org/apache/hadoop/fs/FileUtil.java     | 257 +++++-
 .../org/apache/hadoop/fs/LocalFileSystem.java   |   2 +-
 .../apache/hadoop/ha/ActiveStandbyElector.java  |  30 +-
 .../apache/hadoop/ha/FailoverController.java    |  20 +-
 .../org/apache/hadoop/ha/HealthMonitor.java     |   9 +-
 .../org/apache/hadoop/http/HttpServer2.java     |   2 +-
 .../org/apache/hadoop/io/retry/RetryUtils.java  |  11 +-
 .../main/java/org/apache/hadoop/net/DNS.java    |  39 +-
 .../AbstractDelegationTokenSecretManager.java   |   6 +
 .../apache/hadoop/service/AbstractService.java  |  27 +-
 .../hadoop/service/ServiceOperations.java       |   6 +-
 .../org/apache/hadoop/util/CombinedIPList.java  |  59 ++
 .../hadoop/util/GenericOptionsParser.java       |   8 +-
 .../apache/hadoop/util/JsonSerialization.java   |   8 +
 .../java/org/apache/hadoop/util/RunJar.java     |  69 ++
 .../src/main/resources/core-default.xml         |  13 +-
 .../src/site/markdown/CommandsManual.md         |   6 +
 .../src/site/markdown/SecureMode.md             |  32 +-
 .../apache/hadoop/conf/TestConfiguration.java   |  76 ++
 .../org/apache/hadoop/conf/TestStorageUnit.java | 277 ++++++
 .../hadoop/fs/TestDelegateToFileSystem.java     |   2 +-
 .../java/org/apache/hadoop/fs/TestFileUtil.java |  86 ++
 .../apache/hadoop/ha/TestSshFenceByTcpPort.java |   2 +-
 .../hadoop/service/TestServiceOperations.java   |   3 +-
 .../org/apache/hadoop/test/LambdaTestUtils.java |  40 +-
 .../apache/hadoop/test/TestLambdaTestUtils.java |  36 +
 .../java/org/apache/hadoop/util/TestRunJar.java |  57 ++
 .../src/test/scripts/start-build-env.bats       | 102 +++
 hadoop-common-project/hadoop-kms/pom.xml        |   4 +-
 .../hadoop/crypto/key/kms/server/KMS.java       |   4 +-
 hadoop-common-project/hadoop-minikdc/pom.xml    |   4 +-
 hadoop-common-project/hadoop-nfs/pom.xml        |   4 +-
 hadoop-common-project/pom.xml                   |   4 +-
 hadoop-dist/pom.xml                             |   4 +-
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml  |   4 +-
 .../org/apache/hadoop/hdfs/ClientContext.java   |   3 +-
 .../hadoop/hdfs/DFSStripedOutputStream.java     |   4 +-
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |   6 +-
 .../hdfs/client/HdfsClientConfigKeys.java       |   5 +-
 .../hdfs/client/impl/BlockReaderFactory.java    |  56 +-
 .../hadoop/hdfs/client/impl/DfsClientConf.java  |  19 +-
 .../sasl/SaslDataTransferClient.java            |  12 +-
 .../hdfs/shortcircuit/DomainSocketFactory.java  |  17 +-
 .../hdfs/shortcircuit/ShortCircuitCache.java    |  16 +-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java      |  11 +-
 .../ha/TestRequestHedgingProxyProvider.java     |   6 +-
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml  |   4 +-
 .../hadoop-hdfs-native-client/pom.xml           |   4 +-
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml     |   4 +-
 .../hadoop/hdfs/nfs/nfs3/DFSClientCache.java    |  25 +-
 .../hadoop/hdfs/nfs/nfs3/OpenFileCtx.java       | 314 +++----
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java    | 370 ++++----
 hadoop-hdfs-project/hadoop-hdfs/pom.xml         |   4 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   9 +-
 .../java/org/apache/hadoop/hdfs/HAUtil.java     |   9 +-
 .../BlackListBasedTrustedChannelResolver.java   | 143 +++
 .../protocol/datatransfer/package-info.java     |  24 +
 ...uterAdminProtocolServerSideTranslatorPB.java |  60 ++
 .../RouterAdminProtocolTranslatorPB.java        |  60 +-
 .../protocol/InterQJournalProtocol.java         |   4 +-
 ...rQJournalProtocolServerSideTranslatorPB.java |  11 +-
 .../InterQJournalProtocolTranslatorPB.java      |  13 +-
 .../hadoop/hdfs/qjournal/server/JNStorage.java  |   9 +-
 .../hdfs/qjournal/server/JournalNode.java       | 129 ++-
 .../qjournal/server/JournalNodeRpcServer.java   |  21 +-
 .../hdfs/qjournal/server/JournalNodeSyncer.java |   8 +-
 .../DelegationTokenSecretManager.java           |  53 +-
 .../BlockReconstructionWork.java                |   6 +
 .../blockmanagement/ErasureCodingWork.java      |   3 +-
 .../blockmanagement/LowRedundancyBlocks.java    |   2 +-
 .../server/blockmanagement/ReplicationWork.java |   6 +-
 .../server/blockmanagement/SlowDiskTracker.java |   2 +-
 .../hadoop/hdfs/server/common/Storage.java      |  75 +-
 .../hdfs/server/datanode/BlockPoolManager.java  |  15 +-
 .../server/datanode/BlockPoolSliceStorage.java  |  88 +-
 .../hdfs/server/datanode/BlockSender.java       |  56 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   | 213 ++---
 .../hdfs/server/datanode/DataStorage.java       | 112 ++-
 .../hdfs/server/datanode/DataXceiver.java       | 172 ++--
 .../hdfs/server/datanode/FinalizedReplica.java  |  74 +-
 .../hdfs/server/datanode/ReplicaBuilder.java    |  11 +-
 .../datanode/checker/ThrottledAsyncChecker.java |   2 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 100 ++-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  21 +-
 .../federation/metrics/FederationMBean.java     |   6 +
 .../federation/metrics/FederationMetrics.java   |  71 +-
 .../federation/router/FederationUtil.java       |   8 +-
 .../federation/router/RouterAdminServer.java    |  46 +-
 .../server/federation/router/RouterClient.java  |   4 +
 .../federation/router/RouterStateManager.java   |  50 ++
 .../store/protocol/EnterSafeModeRequest.java    |  32 +
 .../store/protocol/EnterSafeModeResponse.java   |  50 ++
 .../store/protocol/GetSafeModeRequest.java      |  31 +
 .../store/protocol/GetSafeModeResponse.java     |  49 ++
 .../store/protocol/LeaveSafeModeRequest.java    |  32 +
 .../store/protocol/LeaveSafeModeResponse.java   |  50 ++
 .../impl/pb/EnterSafeModeRequestPBImpl.java     |  62 ++
 .../impl/pb/EnterSafeModeResponsePBImpl.java    |  73 ++
 .../impl/pb/GetSafeModeRequestPBImpl.java       |  62 ++
 .../impl/pb/GetSafeModeResponsePBImpl.java      |  73 ++
 .../impl/pb/LeaveSafeModeRequestPBImpl.java     |  62 ++
 .../impl/pb/LeaveSafeModeResponsePBImpl.java    |  73 ++
 .../federation/store/records/RouterState.java   |   8 +-
 .../records/impl/pb/RouterStatePBImpl.java      |  12 +-
 .../hdfs/server/namenode/FSDirConcatOp.java     |   4 +-
 .../server/namenode/FSDirErasureCodingOp.java   |  19 +-
 .../hdfs/server/namenode/FSDirXAttrOp.java      |   3 +-
 .../hdfs/server/namenode/FSNamesystem.java      |  21 +-
 .../hdfs/server/namenode/FSNamesystemLock.java  |   7 +
 .../hdfs/server/namenode/INodeDirectory.java    |   5 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   3 +-
 .../hdfs/server/namenode/LeaseManager.java      |  13 +-
 .../hadoop/hdfs/server/namenode/NNStorage.java  |  61 +-
 .../server/namenode/ReencryptionHandler.java    |   3 +-
 .../server/namenode/ha/StandbyCheckpointer.java |  34 +-
 .../snapshot/AbstractINodeDiffList.java         |  27 +-
 .../hdfs/server/namenode/snapshot/DiffList.java | 140 +++
 .../namenode/snapshot/DiffListByArrayList.java  |  80 ++
 .../snapshot/DirectorySnapshottableFeature.java |  12 +-
 .../snapshot/DirectoryWithSnapshotFeature.java  |  10 +-
 .../snapshot/FSImageFormatPBSnapshot.java       |   7 +-
 .../server/namenode/snapshot/FileDiffList.java  |  11 +-
 .../snapshot/FileWithSnapshotFeature.java       |   2 +-
 .../snapshot/SnapshotFSImageFormat.java         |   4 +-
 .../namenode/snapshot/SnapshotManager.java      |  12 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 192 ++--
 .../hdfs/tools/federation/RouterAdmin.java      |  75 +-
 .../org/apache/hadoop/hdfs/util/RwLock.java     |   5 +-
 .../common/impl/ContainerManagerImpl.java       |   5 +
 .../src/main/proto/FederationProtocol.proto     |  27 +-
 .../src/main/proto/InterQJournalProtocol.proto  |  16 +-
 .../src/main/proto/RouterProtocol.proto         |  15 +
 .../src/main/resources/hdfs-default.xml         |  33 +-
 .../main/webapps/router/federationhealth.html   |  41 +
 .../src/main/webapps/router/federationhealth.js |  26 +
 .../src/site/markdown/HDFSCommands.md           |   2 +
 .../markdown/HDFSHighAvailabilityWithNFS.md     |   6 +-
 .../markdown/HDFSHighAvailabilityWithQJM.md     |   6 +-
 .../src/site/markdown/HDFSRouterFederation.md   |  10 +-
 .../hadoop-hdfs/src/site/markdown/HdfsDesign.md |   2 +-
 .../hadoop-hdfs/src/site/markdown/ViewFs.md     |  32 +-
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |  25 +-
 .../hadoop/hdfs/TestDFSClientFailover.java      |   2 +-
 .../org/apache/hadoop/hdfs/TestDFSUtil.java     |  28 +-
 .../hdfs/TestErasureCodingExerciseAPIs.java     | 546 ++++++++++++
 .../hadoop/hdfs/TestErasureCodingPolicies.java  |  34 +
 .../TestErasureCodingPolicyWithSnapshot.java    |  84 +-
 .../java/org/apache/hadoop/hdfs/TestQuota.java  |   2 +-
 .../hadoop/hdfs/TestSnapshotCommands.java       |  18 +
 .../client/impl/TestBlockReaderFactory.java     | 144 ++-
 ...estBlackListBasedTrustedChannelResolver.java |  89 ++
 .../datatransfer/sasl/TestSaslDataTransfer.java | 154 +++-
 .../hdfs/qjournal/server/TestJournalNode.java   | 148 +++-
 .../blockmanagement/TestBlockTokenWithDFS.java  |   2 +-
 .../TestBlockTokenWithDFSStriped.java           |   2 +-
 .../TestLowRedundancyBlockQueues.java           |  15 +
 .../server/datanode/TestBlockPoolManager.java   |  12 +-
 .../datanode/TestBlockPoolSliceStorage.java     |  14 +-
 .../fsdataset/impl/TestFsDatasetImpl.java       | 103 ++-
 .../server/federation/RouterConfigBuilder.java  |   6 +
 .../metrics/TestFederationMetrics.java          |  50 ++
 .../federation/metrics/TestMetricsBase.java     |  45 +
 .../federation/router/TestRouterAdminCLI.java   |  48 +
 .../store/FederationStateStoreTestUtils.java    |  17 +-
 .../store/TestStateStoreRouterState.java        |   3 +-
 .../store/records/TestRouterState.java          |   6 +-
 .../hadoop/hdfs/server/mover/TestMover.java     | 209 +++++
 .../hdfs/server/namenode/FSXAttrBaseTest.java   |  63 +-
 .../hdfs/server/namenode/TestAllowFormat.java   |   4 +-
 .../namenode/TestFSImageWithSnapshot.java       |   3 +-
 .../namenode/TestListCorruptFileBlocks.java     |   4 +-
 .../namenode/TestSecurityTokenEditLog.java      |  24 +-
 .../namenode/snapshot/TestNestedSnapshots.java  |   8 +-
 .../snapshot/TestRenameWithSnapshots.java       |  40 +-
 .../snapshot/TestSetQuotaWithSnapshot.java      |   3 +-
 .../server/namenode/snapshot/TestSnapshot.java  |   4 +-
 .../namenode/snapshot/TestSnapshotDeletion.java | 126 +++
 .../namenode/snapshot/TestSnapshotRename.java   |   3 +-
 .../hadoop/hdfs/tools/TestDFSAdminWithHA.java   | 464 +++++++++-
 .../apache/hadoop/hdfs/tools/TestGetConf.java   |   6 +-
 .../hadoop/test/MiniDFSClusterManager.java      |  26 +-
 hadoop-hdfs-project/pom.xml                     |   4 +-
 .../hadoop-mapreduce-client-app/pom.xml         |   4 +-
 .../v2/app/rm/TestRMContainerAllocator.java     |  15 +-
 .../hadoop-mapreduce-client-common/pom.xml      |   4 +-
 .../hadoop-mapreduce-client-core/pom.xml        |   4 +-
 .../java/org/apache/hadoop/mapred/MapTask.java  |  12 +
 .../java/org/apache/hadoop/mapred/Task.java     |  16 +-
 .../org/apache/hadoop/mapred/TestMapTask.java   |  87 ++
 .../java/org/apache/hadoop/mapred/TestTask.java |  89 ++
 .../hadoop-mapreduce-client-hs-plugins/pom.xml  |   4 +-
 .../hadoop-mapreduce-client-hs/pom.xml          |   4 +-
 .../src/test/resources/job_0.23.9-FAILED.jhist  |   2 +-
 .../hadoop-mapreduce-client-jobclient/pom.xml   |   4 +-
 .../mapred/TestFixedLengthInputFormat.java      |   2 +-
 .../hadoop-mapreduce-client-nativetask/pom.xml  |   4 +-
 .../hadoop-mapreduce-client-shuffle/pom.xml     |   4 +-
 .../hadoop-mapreduce-client-uploader/pom.xml    |   4 +-
 .../hadoop-mapreduce-client/pom.xml             |   4 +-
 .../hadoop-mapreduce-examples/pom.xml           |   4 +-
 hadoop-mapreduce-project/pom.xml                |   4 +-
 hadoop-maven-plugins/pom.xml                    |   2 +-
 hadoop-minicluster/pom.xml                      |   4 +-
 hadoop-project-dist/pom.xml                     |   4 +-
 hadoop-project/pom.xml                          |  16 +-
 hadoop-tools/hadoop-aliyun/pom.xml              |   2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml        |   4 +-
 hadoop-tools/hadoop-archives/pom.xml            |   4 +-
 hadoop-tools/hadoop-aws/pom.xml                 |   4 +-
 .../fs/s3a/AssumedRoleCredentialProvider.java   | 197 -----
 .../org/apache/hadoop/fs/s3a/Constants.java     |   2 +-
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  17 +-
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  53 +-
 .../s3a/auth/AssumedRoleCredentialProvider.java | 205 +++++
 .../apache/hadoop/fs/s3a/auth/RoleModel.java    | 314 +++++++
 .../apache/hadoop/fs/s3a/auth/RolePolicies.java | 228 +++++
 .../apache/hadoop/fs/s3a/auth/package-info.java |  27 +
 .../hadoop/fs/s3a/commit/CommitOperations.java  |   2 +-
 .../markdown/tools/hadoop-aws/assumed_roles.md  | 274 +++++-
 .../markdown/tools/hadoop-aws/encryption.md     |  21 +-
 .../src/site/markdown/tools/hadoop-aws/index.md |  77 +-
 .../markdown/tools/hadoop-aws/performance.md    | 518 +++++++++++
 .../site/markdown/tools/hadoop-aws/testing.md   |  15 +-
 .../tools/hadoop-aws/troubleshooting_s3a.md     | 753 ++++++++++------
 .../s3a/ITestS3AContractDistCpAssumedRole.java  |  52 --
 .../apache/hadoop/fs/s3a/ITestAssumeRole.java   | 324 -------
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  |  46 +-
 .../fs/s3a/TestS3AAWSCredentialsProvider.java   |  40 +-
 .../hadoop/fs/s3a/auth/ITestAssumeRole.java     | 789 +++++++++++++++++
 .../auth/ITestAssumedRoleCommitOperations.java  | 130 +++
 .../hadoop/fs/s3a/auth/RoleTestUtils.java       | 171 ++++
 .../fs/s3a/commit/AbstractCommitITest.java      |  12 +-
 .../fs/s3a/commit/ITestCommitOperations.java    |   4 +-
 hadoop-tools/hadoop-azure-datalake/pom.xml      |   2 +-
 .../org/apache/hadoop/fs/adl/AdlConfKeys.java   |   5 +
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  81 +-
 .../src/site/markdown/index.md                  |  41 +
 .../src/site/markdown/troubleshooting_adl.md    | 146 ++++
 .../apache/hadoop/fs/adl/AdlMockWebServer.java  | 102 ---
 .../apache/hadoop/fs/adl/TestACLFeatures.java   | 262 ------
 .../org/apache/hadoop/fs/adl/TestAdlRead.java   | 196 -----
 .../adl/TestConcurrentDataReadOperations.java   | 299 -------
 .../hadoop/fs/adl/TestCustomTokenProvider.java  | 140 ---
 .../apache/hadoop/fs/adl/TestGetFileStatus.java | 102 ---
 .../apache/hadoop/fs/adl/TestListStatus.java    | 137 ---
 .../fs/adl/TestValidateConfiguration.java       | 152 +++-
 .../hadoop/fs/adl/TestableAdlFileSystem.java    |  30 -
 .../hadoop/fs/adl/common/ExpectedResponse.java  |  71 --
 .../hadoop/fs/adl/common/Parallelized.java      |   2 +-
 .../hadoop/fs/adl/common/TestDataForRead.java   | 122 ---
 hadoop-tools/hadoop-azure/pom.xml               |   2 +-
 hadoop-tools/hadoop-datajoin/pom.xml            |   4 +-
 hadoop-tools/hadoop-distcp/pom.xml              |   4 +-
 .../org/apache/hadoop/tools/CopyFilter.java     |   2 +-
 .../src/site/markdown/DistCp.md.vm              |  70 +-
 .../apache/hadoop/tools/TestOptionsParser.java  | 294 +++----
 hadoop-tools/hadoop-extras/pom.xml              |   4 +-
 hadoop-tools/hadoop-fs2img/pom.xml              |   4 +-
 hadoop-tools/hadoop-gridmix/pom.xml             |   4 +-
 .../apache/hadoop/mapred/gridmix/Gridmix.java   |  22 +-
 hadoop-tools/hadoop-kafka/pom.xml               |   4 +-
 hadoop-tools/hadoop-openstack/pom.xml           |   4 +-
 .../swift/http/HttpInputStreamWithRelease.java  |  29 +-
 .../hadoop-openstack/src/site/markdown/index.md |   2 +-
 hadoop-tools/hadoop-ozone/pom.xml               |   2 +-
 hadoop-tools/hadoop-pipes/pom.xml               |   4 +-
 hadoop-tools/hadoop-resourceestimator/pom.xml   |   2 +-
 hadoop-tools/hadoop-rumen/pom.xml               |   4 +-
 hadoop-tools/hadoop-sls/pom.xml                 |   4 +-
 .../src/main/data/2jobs2min-rumen-jh.json       |  12 +-
 .../hadoop/yarn/sls/nodemanager/NodeInfo.java   |   6 +
 .../yarn/sls/scheduler/RMNodeWrapper.java       |   6 +
 .../sls/scheduler/SLSCapacityScheduler.java     |  15 +-
 .../yarn/sls/scheduler/SLSFairScheduler.java    |  12 +-
 hadoop-tools/hadoop-streaming/pom.xml           |   4 +-
 hadoop-tools/hadoop-tools-dist/pom.xml          |   4 +-
 hadoop-tools/pom.xml                            |   4 +-
 .../dev-support/findbugs-exclude.xml            |   8 +
 .../hadoop-yarn/hadoop-yarn-api/pom.xml         |   4 +-
 .../yarn/ams/ApplicationMasterServiceUtils.java |  16 +
 .../api/protocolrecords/AllocateRequest.java    |  42 +
 .../api/protocolrecords/AllocateResponse.java   |  23 +
 .../RegisterApplicationMasterRequest.java       |  42 +-
 .../hadoop/yarn/api/records/Container.java      |  15 +
 .../api/records/RejectedSchedulingRequest.java  |  70 ++
 .../yarn/api/records/RejectionReason.java       |  44 +
 .../hadoop/yarn/api/records/ResourceSizing.java |  91 ++
 .../yarn/api/records/SchedulingRequest.java     | 206 +++++
 .../yarn/api/resource/PlacementConstraint.java  | 744 ++++++++++++++++
 .../yarn/api/resource/PlacementConstraints.java | 320 +++++++
 .../hadoop/yarn/api/resource/package-info.java  |  21 +
 .../hadoop/yarn/conf/YarnConfiguration.java     |  31 +
 ...SchedulerInvalidResoureRequestException.java |  47 +
 .../PlacementConstraintParseException.java      |  28 +
 .../constraint/PlacementConstraintParser.java   | 615 +++++++++++++
 .../yarn/util/constraint/package-info.java      |  22 +
 .../src/main/proto/yarn_protos.proto            |  86 ++
 .../src/main/proto/yarn_service_protos.proto    |   3 +
 .../resource/TestPlacementConstraintParser.java | 372 ++++++++
 .../api/resource/TestPlacementConstraints.java  | 107 +++
 .../pom.xml                                     |   4 +-
 .../distributedshell/ApplicationMaster.java     | 132 ++-
 .../applications/distributedshell/Client.java   |  58 +-
 .../distributedshell/PlacementSpec.java         |  95 ++
 .../pom.xml                                     |   4 +-
 .../hadoop-yarn-services-api/pom.xml            |   2 +-
 .../yarn/service/client/ApiServiceClient.java   |  50 +-
 .../hadoop/yarn/service/webapp/ApiServer.java   |  13 +-
 .../hadoop-yarn-services-core/pom.xml           |   2 +-
 .../yarn/service/client/ServiceClient.java      |   4 +
 .../hadoop/yarn/service/ServiceTestUtils.java   |   5 +
 .../yarn/service/TestYarnNativeServices.java    | 128 +++
 .../hadoop-yarn-services/pom.xml                |   2 +-
 .../hadoop-yarn-applications/pom.xml            |   4 +-
 .../hadoop-yarn/hadoop-yarn-client/pom.xml      |   4 +-
 .../hadoop/yarn/client/api/AMRMClient.java      |  38 +-
 .../yarn/client/api/async/AMRMClientAsync.java  |  48 +
 .../api/async/impl/AMRMClientAsyncImpl.java     |  49 +-
 .../yarn/client/api/impl/AMRMClientImpl.java    | 142 ++-
 .../client/api/impl/BaseAMRMClientTest.java     | 212 +++++
 .../yarn/client/api/impl/TestAMRMClient.java    | 156 +---
 .../api/impl/TestAMRMClientOnRMRestart.java     |   9 +-
 .../TestAMRMClientPlacementConstraints.java     | 204 +++++
 .../hadoop-yarn/hadoop-yarn-common/pom.xml      |   4 +-
 .../PlacementConstraintFromProtoConverter.java  | 116 +++
 .../pb/PlacementConstraintToProtoConverter.java | 174 ++++
 .../apache/hadoop/yarn/api/pb/package-info.java |  23 +
 .../impl/pb/AllocateRequestPBImpl.java          |  84 ++
 .../impl/pb/AllocateResponsePBImpl.java         |  85 ++
 .../RegisterApplicationMasterRequestPBImpl.java | 106 ++-
 .../api/records/impl/pb/ContainerPBImpl.java    |  31 +
 .../yarn/api/records/impl/pb/ProtoUtils.java    |  43 +
 .../pb/RejectedSchedulingRequestPBImpl.java     | 148 ++++
 .../records/impl/pb/ResourceSizingPBImpl.java   | 128 +++
 .../impl/pb/SchedulingRequestPBImpl.java        | 296 +++++++
 .../PlacementConstraintTransformations.java     | 200 +++++
 .../hadoop/yarn/api/resource/package-info.java  |  23 +
 .../LogAggregationFileController.java           |   7 +-
 .../ifile/IndexedFileAggregatedLogsBlock.java   |   2 +-
 .../LogAggregationIndexedFileController.java    |  69 +-
 .../tfile/LogAggregationTFileController.java    |   5 +-
 .../yarn/security/ContainerTokenIdentifier.java |  69 +-
 .../DockerCredentialTokenIdentifier.java        | 159 ++++
 .../yarn/util/DockerClientConfigHandler.java    | 183 ++++
 .../org/apache/hadoop/yarn/util/FSDownload.java | 215 +++--
 .../src/main/proto/yarn_security_token.proto    |   6 +
 ...apache.hadoop.security.token.TokenIdentifier |   1 +
 .../src/main/resources/yarn-default.xml         |  36 +
 .../hadoop/yarn/api/BasePBImplRecordsTest.java  |  11 +
 .../hadoop/yarn/api/TestPBImplRecords.java      |  21 +
 .../TestPlacementConstraintPBConversion.java    | 195 +++++
 .../TestPlacementConstraintTransformations.java | 166 ++++
 ...TestLogAggregationFileControllerFactory.java |   5 +-
 .../TestLogAggregationIndexFileController.java  |  21 +
 .../security/TestDockerClientConfigHandler.java | 129 +++
 .../apache/hadoop/yarn/util/TestFSDownload.java |  28 +-
 .../hadoop-yarn/hadoop-yarn-registry/pom.xml    |   4 +-
 .../hadoop/registry/RegistryTestHelper.java     |   4 +-
 .../pom.xml                                     |   4 +-
 .../hadoop-yarn-server-common/pom.xml           |   4 +-
 .../api/protocolrecords/NMContainerStatus.java  |  14 +
 .../impl/pb/NMContainerStatusPBImpl.java        |  33 +
 .../server/scheduler/SchedulerRequestKey.java   |  11 +
 .../yarn_server_common_service_protos.proto     |   1 +
 .../hadoop-yarn-server-nodemanager/pom.xml      |   4 +-
 .../nodemanager/LinuxContainerExecutor.java     |   3 +
 .../containermanager/ContainerManagerImpl.java  |   3 +-
 .../container/ContainerImpl.java                |  19 +-
 .../launcher/ContainerLaunch.java               |  52 +-
 .../launcher/ContainerRelaunch.java             |  10 +-
 .../runtime/DockerLinuxContainerRuntime.java    |  62 +-
 .../runtime/LinuxContainerRuntimeConstants.java |   4 +
 .../linux/runtime/docker/DockerCommand.java     |  16 +
 .../linux/runtime/docker/DockerRunCommand.java  |  14 +
 .../localizer/ResourceLocalizationService.java  |  30 -
 .../executor/ContainerStartContext.java         |  26 +
 .../impl/container-executor.c                   |  92 +-
 .../container-executor/impl/utils/docker-util.c |  92 +-
 .../container-executor/impl/utils/docker-util.h |   3 +-
 .../test/utils/test_docker_util.cc              | 256 ++++--
 .../TestLinuxContainerExecutorWithMocks.java    |   8 +
 .../impl/pb/TestPBRecordImpl.java               |   2 +-
 .../launcher/TestContainerRelaunch.java         |  99 +++
 .../runtime/TestDockerContainerRuntime.java     | 376 ++++----
 .../runtime/docker/TestDockerRunCommand.java    |   8 +
 .../TestResourceLocalizationService.java        | 144 ---
 .../hadoop-yarn-server-resourcemanager/pom.xml  |   4 +-
 .../ApplicationMasterService.java               |  15 +
 .../resourcemanager/DefaultAMSProcessor.java    |  13 +-
 .../resourcemanager/RMActiveServiceContext.java |  30 +
 .../yarn/server/resourcemanager/RMContext.java  |  11 +
 .../server/resourcemanager/RMContextImpl.java   |  25 +
 .../server/resourcemanager/ResourceManager.java |  22 +
 .../rmapp/attempt/RMAppAttemptImpl.java         |   5 +-
 .../rmcontainer/RMContainer.java                |   8 +
 .../rmcontainer/RMContainerImpl.java            |  37 +-
 .../server/resourcemanager/rmnode/RMNode.java   |   7 +
 .../resourcemanager/rmnode/RMNodeImpl.java      |   6 +
 .../scheduler/AbstractYarnScheduler.java        |  21 +-
 .../scheduler/AppSchedulingInfo.java            | 214 ++++-
 .../ApplicationPlacementAllocatorFactory.java   |  68 ++
 .../scheduler/ApplicationPlacementFactory.java  |  63 --
 .../scheduler/ContainerUpdateContext.java       |   4 +-
 .../scheduler/ResourceScheduler.java            |  13 +
 .../scheduler/SchedulerApplicationAttempt.java  |  24 +-
 .../scheduler/SchedulerNode.java                |  20 +-
 .../scheduler/SchedulerUtils.java               |  36 +-
 .../scheduler/YarnScheduler.java                |  15 +-
 .../scheduler/capacity/CapacityScheduler.java   | 158 +++-
 .../CapacitySchedulerConfiguration.java         |   5 +
 .../allocator/RegularContainerAllocator.java    |   3 +-
 .../scheduler/common/ContainerRequest.java      |  12 +
 .../scheduler/common/PendingAsk.java            |   6 +
 .../common/ResourceAllocationCommitter.java     |  12 +-
 .../scheduler/common/fica/FiCaSchedulerApp.java |  43 +-
 .../constraint/AllocationTagsManager.java       | 564 ++++++++++++
 .../InvalidAllocationTagsQueryException.java    |  35 +
 .../MemoryPlacementConstraintManager.java       | 324 +++++++
 .../constraint/PlacementConstraintManager.java  | 164 ++++
 .../PlacementConstraintManagerService.java      |  93 ++
 .../constraint/PlacementConstraintsUtil.java    | 261 ++++++
 .../constraint/algorithm/CircularIterator.java  |  86 ++
 .../algorithm/DefaultPlacementAlgorithm.java    | 285 ++++++
 .../algorithm/LocalAllocationTagsManager.java   | 167 ++++
 .../iterators/PopularTagsIterator.java          |  71 ++
 .../algorithm/iterators/SerialIterator.java     |  53 ++
 .../algorithm/iterators/package-info.java       |  29 +
 .../constraint/algorithm/package-info.java      |  29 +
 .../api/ConstraintPlacementAlgorithm.java       |  43 +
 .../api/ConstraintPlacementAlgorithmInput.java  |  32 +
 .../api/ConstraintPlacementAlgorithmOutput.java |  57 ++
 ...traintPlacementAlgorithmOutputCollector.java |  32 +
 .../constraint/api/PlacedSchedulingRequest.java |  79 ++
 .../SchedulingRequestWithPlacementAttempt.java  |  52 ++
 .../constraint/api/SchedulingResponse.java      |  70 ++
 .../scheduler/constraint/api/package-info.java  |  28 +
 .../scheduler/constraint/package-info.java      |  29 +
 .../constraint/processor/BatchedRequests.java   | 144 +++
 .../processor/NodeCandidateSelector.java        |  38 +
 .../processor/PlacementDispatcher.java          | 145 ++++
 .../processor/PlacementProcessor.java           | 377 ++++++++
 .../constraint/processor/package-info.java      |  29 +
 .../scheduler/fair/AllocationConfiguration.java |  99 +--
 .../fair/AllocationFileLoaderService.java       | 481 ++--------
 .../scheduler/fair/FSPreemptionThread.java      |  62 +-
 .../scheduler/fair/FairScheduler.java           |  12 +-
 .../fair/allocation/AllocationFileParser.java   | 258 ++++++
 .../allocation/AllocationFileQueueParser.java   | 268 ++++++
 .../fair/allocation/QueueProperties.java        | 280 ++++++
 .../scheduler/fifo/FifoScheduler.java           |   7 +-
 .../placement/AppPlacementAllocator.java        |  68 +-
 .../LocalityAppPlacementAllocator.java          |  35 +-
 .../SingleConstraintAppPlacementAllocator.java  | 533 ++++++++++++
 .../security/RMContainerTokenSecretManager.java |  21 +-
 .../resourcemanager/webapp/NodesPage.java       |   3 +
 .../webapp/dao/AllocationTagInfo.java           |  56 ++
 .../webapp/dao/AllocationTagsInfo.java          |  59 ++
 .../resourcemanager/webapp/dao/NodeInfo.java    |  15 +
 .../server/resourcemanager/Application.java     |   9 +-
 .../yarn/server/resourcemanager/MockAM.java     |  77 ++
 .../yarn/server/resourcemanager/MockNodes.java  |   6 +
 .../yarn/server/resourcemanager/MockRM.java     |  14 +
 .../TestApplicationMasterService.java           | 190 ++++
 .../attempt/TestRMAppAttemptTransitions.java    |  10 +-
 .../rmcontainer/TestRMContainerImpl.java        | 151 +++-
 .../scheduler/TestAppSchedulingInfo.java        |   4 +-
 .../capacity/CapacitySchedulerTestBase.java     |  79 ++
 .../capacity/TestCapacityScheduler.java         |  91 +-
 .../TestCapacitySchedulerAsyncScheduling.java   |  12 +-
 .../TestCapacitySchedulerAutoQueueCreation.java |   2 +-
 ...apacitySchedulerSchedulingRequestUpdate.java | 262 ++++++
 ...CapacitySchedulerWithMultiResourceTypes.java |  37 +
 .../capacity/TestContainerAllocation.java       |   5 +-
 .../capacity/TestIncreaseAllocationExpirer.java |   2 +-
 ...estSchedulingRequestContainerAllocation.java | 269 ++++++
 ...hedulingRequestContainerAllocationAsync.java | 138 +++
 .../scheduler/capacity/TestUtils.java           |  19 +-
 .../constraint/TestAllocationTagsManager.java   | 413 +++++++++
 .../TestBatchedRequestsIterators.java           |  82 ++
 .../TestPlacementConstraintManagerService.java  | 264 ++++++
 .../TestPlacementConstraintsUtil.java           | 511 +++++++++++
 .../constraint/TestPlacementProcessor.java      | 870 +++++++++++++++++++
 .../algorithm/TestCircularIterator.java         |  84 ++
 .../TestLocalAllocationTagsManager.java         | 139 +++
 .../scheduler/fair/FairSchedulerTestBase.java   |   6 +-
 .../fair/TestAllocationFileLoaderService.java   | 187 ++--
 .../fair/TestContinuousScheduling.java          |  10 +-
 .../scheduler/fair/TestFairScheduler.java       |  30 +-
 .../fair/TestFairSchedulerPreemption.java       |  55 ++
 .../allocationfile/AllocationFileQueue.java     |  82 ++
 .../AllocationFileQueueBuilder.java             | 115 +++
 .../AllocationFileQueueProperties.java          | 202 +++++
 .../AllocationFileSimpleQueueBuilder.java       |  64 ++
 .../AllocationFileSubQueueBuilder.java          |  54 ++
 .../allocationfile/AllocationFileWriter.java    | 175 ++++
 .../fair/allocationfile/UserSettings.java       |  80 ++
 .../scheduler/fifo/TestFifoScheduler.java       |  33 +-
 ...stSingleConstraintAppPlacementAllocator.java | 408 +++++++++
 .../resourcemanager/webapp/TestNodesPage.java   |   4 +-
 .../webapp/TestRMWebServiceAppsNodelabel.java   |  13 +-
 .../webapp/TestRMWebServicesNodes.java          |  77 +-
 .../hadoop-yarn-server-router/pom.xml           |   4 +-
 .../pom.xml                                     |   4 +-
 .../hadoop-yarn-server-tests/pom.xml            |   4 +-
 .../hadoop/yarn/server/MiniYARNCluster.java     |   5 +-
 .../TestMiniYarnClusterNodeUtilization.java     |  20 +-
 .../pom.xml                                     |   4 +-
 .../pom.xml                                     |   4 +-
 .../pom.xml                                     |   2 +-
 .../hadoop-yarn-server-timelineservice/pom.xml  |   4 +-
 .../hadoop-yarn-server-web-proxy/pom.xml        |   4 +-
 .../hadoop-yarn/hadoop-yarn-server/pom.xml      |   4 +-
 .../hadoop-yarn/hadoop-yarn-site/pom.xml        |   4 +-
 .../src/site/markdown/DockerContainers.md       |  29 +-
 .../site/markdown/PlacementConstraints.md.vm    | 149 ++++
 .../src/site/markdown/yarn-service/Examples.md  |   4 +
 .../hadoop-yarn/hadoop-yarn-ui/pom.xml          |   4 +-
 .../hadoop-yarn-ui/public/crossdomain.xml       |  15 -
 .../main/webapp/app/adapters/yarn-servicedef.js |   9 +-
 .../webapp/app/components/deploy-service.js     |  12 +-
 .../main/webapp/app/components/timeline-view.js |   5 +
 .../webapp/app/controllers/app-table-columns.js |   2 +-
 .../webapp/app/controllers/yarn-app-attempt.js  |   2 +-
 .../src/main/webapp/app/controllers/yarn-app.js |   6 +-
 .../app/controllers/yarn-app/components.js      |   5 +
 .../webapp/app/controllers/yarn-app/info.js     |   4 +-
 .../webapp/app/controllers/yarn-app/logs.js     |  13 +-
 .../webapp/app/controllers/yarn-apps/apps.js    |   1 +
 .../app/controllers/yarn-component-instance.js  |   4 +-
 .../app/controllers/yarn-component-instances.js |   2 +-
 .../yarn-component-instances/info.js            |   5 +
 .../app/controllers/yarn-deploy-service.js      |  12 +-
 .../webapp/app/controllers/yarn-flowrun/info.js |   5 +
 .../app/controllers/yarn-flowrun/metrics.js     |   5 +
 .../webapp/app/controllers/yarn-nodes/table.js  |   4 +-
 .../webapp/app/controllers/yarn-queue/apps.js   |   1 +
 .../webapp/app/controllers/yarn-services.js     |   1 +
 .../app/controllers/yarn-tools/yarn-conf.js     |   4 +
 .../src/main/webapp/app/helpers/lower.js        |   3 +
 .../src/main/webapp/app/initializers/loader.js  |   6 +
 .../src/main/webapp/app/models/yarn-app.js      |  18 +-
 .../src/main/webapp/app/router.js               |   1 +
 .../src/main/webapp/app/routes/application.js   |   2 +
 .../src/main/webapp/app/routes/yarn-app/logs.js |   4 +-
 .../app/templates/components/deploy-service.hbs |  10 +
 .../app/templates/components/timeline-view.hbs  |   2 +-
 .../src/main/webapp/app/templates/notauth.hbs   |  20 +
 .../src/main/webapp/app/templates/yarn-app.hbs  |  16 +-
 .../app/templates/yarn-app/components.hbs       |   2 +-
 .../main/webapp/app/templates/yarn-app/info.hbs |   4 +-
 .../main/webapp/app/templates/yarn-app/logs.hbs |   4 +
 .../templates/yarn-component-instances/info.hbs |   2 +-
 .../webapp/app/templates/yarn-flowrun/info.hbs  |   2 +-
 .../app/templates/yarn-flowrun/metrics.hbs      |   6 +-
 .../app/templates/yarn-tools/yarn-conf.hbs      |   6 +-
 .../src/main/webapp/app/utils/error-utils.js    |   3 +
 .../src/main/webapp/app/utils/info-seeder.js    |   3 +-
 .../src/main/webapp/config/default-config.js    |   2 +-
 .../hadoop-yarn-ui/src/main/webapp/package.json |   6 +-
 .../hadoop-yarn-ui/src/main/webapp/yarn.lock    |   6 +-
 hadoop-yarn-project/hadoop-yarn/pom.xml         |   4 +-
 hadoop-yarn-project/pom.xml                     |   4 +-
 pom.xml                                         |   2 +-
 start-build-env.sh                              |  32 +-
 588 files changed, 30516 insertions(+), 6345 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47919787/hadoop-client-modules/hadoop-client-minicluster/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47919787/hadoop-client-modules/hadoop-client-runtime/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47919787/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47919787/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
----------------------------------------------------------------------
diff --cc hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 80cf5ed,8743be5..63f3376
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@@ -18,26 -18,9 +18,27 @@@
  
  package org.apache.hadoop.fs;
  
 +import com.google.common.base.Preconditions;
 +import org.apache.commons.collections.map.CaseInsensitiveMap;
 +import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
 +import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
 +import org.apache.commons.io.FileUtils;
 +import org.apache.hadoop.classification.InterfaceAudience;
 +import org.apache.hadoop.classification.InterfaceStability;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.permission.FsAction;
 +import org.apache.hadoop.fs.permission.FsPermission;
 +import org.apache.hadoop.io.IOUtils;
 +import org.apache.hadoop.io.nativeio.NativeIO;
 +import org.apache.hadoop.util.Shell;
 +import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 +import org.apache.hadoop.util.StringUtils;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
  import java.io.BufferedInputStream;
  import java.io.BufferedOutputStream;
+ import java.io.BufferedReader;
  import java.io.File;
  import java.io.FileInputStream;
  import java.io.FileNotFoundException;
@@@ -48,12 -32,18 +50,19 @@@ import java.io.OutputStream
  import java.net.InetAddress;
  import java.net.URI;
  import java.net.UnknownHostException;
+ import java.nio.charset.Charset;
  import java.nio.file.AccessDeniedException;
+ import java.nio.file.FileSystems;
+ import java.nio.file.Files;
  import java.util.ArrayList;
  import java.util.Enumeration;
 +import java.util.Iterator;
  import java.util.List;
  import java.util.Map;
+ import java.util.concurrent.ExecutionException;
+ import java.util.concurrent.ExecutorService;
+ import java.util.concurrent.Executors;
+ import java.util.concurrent.Future;
  import java.util.jar.Attributes;
  import java.util.jar.JarOutputStream;
  import java.util.jar.Manifest;
@@@ -62,10 -50,26 +71,11 @@@ import java.util.zip.CheckedOutputStrea
  import java.util.zip.GZIPInputStream;
  import java.util.zip.ZipEntry;
  import java.util.zip.ZipFile;
 +import java.util.zip.ZipOutputStream;
+ import java.util.zip.ZipInputStream;
  
 -import org.apache.commons.collections.map.CaseInsensitiveMap;
 -import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
 -import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
 -import org.apache.hadoop.classification.InterfaceAudience;
 -import org.apache.hadoop.classification.InterfaceStability;
 -import org.apache.hadoop.conf.Configuration;
 -import org.apache.hadoop.fs.permission.FsAction;
 -import org.apache.hadoop.fs.permission.FsPermission;
 -import org.apache.hadoop.io.IOUtils;
 -import org.apache.hadoop.io.nativeio.NativeIO;
 -import org.apache.hadoop.util.Shell;
 -import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 -import org.apache.hadoop.util.StringUtils;
 -import org.slf4j.Logger;
 -import org.slf4j.LoggerFactory;
 -
  /**
 - * A collection of file-processing util methods
 + * A collection of file-processing util methods.
   */
  @InterfaceAudience.Public
  @InterfaceStability.Evolving
@@@ -587,70 -607,48 +613,108 @@@ public class FileUtil 
    }
  
    /**
 -   * Given a stream input it will unzip the it in the unzip directory.
++<<<<<<< HEAD
 +   * Creates a zip archive of the source directory and writes a zip file.
 +   *
 +   * @param sourceDir - The directory to zip.
 +   * @param archiveName - The destination file, the parent directory is assumed
 +   * to exist.
 +   * @return Checksum of the Archive.
 +   * @throws IOException - Thrown if archiveName already exists or if the
 +   *                     sourceDir does not exist.
 +   */
 +  public static Long zip(File sourceDir, File archiveName) throws IOException {
 +    Preconditions.checkNotNull(sourceDir, "source directory cannot be null");
 +    Preconditions.checkState(sourceDir.exists(), "source directory must " +
 +        "exist");
 +
 +    Preconditions.checkNotNull(archiveName, "Destination file cannot be null");
 +    Preconditions.checkNotNull(archiveName.getParent(), "Destination " +
 +        "directory cannot be null");
 +    Preconditions.checkState(new File(archiveName.getParent()).exists(),
 +        "Destination directory must exist");
 +    Preconditions.checkState(!archiveName.exists(), "Destination file " +
 +        "already exists. Refusing to overwrite existing file.");
 +
 +    CheckedOutputStream checksum;
 +    try (FileOutputStream outputStream =
 +             new FileOutputStream(archiveName)) {
 +      checksum = new CheckedOutputStream(outputStream, new CRC32());
 +      byte[] data = new byte[BUFFER_SIZE];
 +      try (ZipOutputStream out =
 +               new ZipOutputStream(new BufferedOutputStream(checksum))) {
 +
 +        Iterator<File> fileIter = FileUtils.iterateFiles(sourceDir, null, true);
 +        while (fileIter.hasNext()) {
 +          File file = fileIter.next();
 +          LOG.debug("Compressing file : " + file.getPath());
 +          try (FileInputStream currentFile = new FileInputStream(file)) {
 +            ZipEntry entry = new ZipEntry(file.getCanonicalPath());
 +            out.putNextEntry(entry);
 +            try (BufferedInputStream sourceFile
 +                     = new BufferedInputStream(currentFile, BUFFER_SIZE)) {
 +              int bytesRead;
 +              while ((bytesRead = sourceFile.read(data, 0, BUFFER_SIZE)) !=
 +                  -1) {
 +                out.write(data, 0, bytesRead);
 +              }
 +            }
 +          }
 +        }
 +        out.flush();
 +      }
 +    }
 +    // Exit condition -- ZipFile must exist.
 +    Preconditions.checkState(archiveName.exists(),
 +        "Expected archive file missing: {}", archiveName.toPath());
 +    long crc32 = checksum.getChecksum().getValue();
 +    checksum.close();
 +    return crc32;
 +  }
 +
 +  /**
 +   * Given a stream input it will unzip it in the unzip directory
     * passed as the second parameter
+    * @param inputStream The zip file as input
+    * @param toDir The unzip directory where to unzip the zip file.
+    * @throws IOException an exception occurred
+    */
+   public static void unZip(InputStream inputStream, File toDir)
+       throws IOException {
+     try (ZipInputStream zip = new ZipInputStream(inputStream)) {
+       int numOfFailedLastModifiedSet = 0;
+       for(ZipEntry entry = zip.getNextEntry();
+           entry != null;
+           entry = zip.getNextEntry()) {
+         if (!entry.isDirectory()) {
+           File file = new File(toDir, entry.getName());
+           File parent = file.getParentFile();
+           if (!parent.mkdirs() &&
+               !parent.isDirectory()) {
+             throw new IOException("Mkdirs failed to create " +
+                 parent.getAbsolutePath());
+           }
+           try (OutputStream out = new FileOutputStream(file)) {
+             IOUtils.copyBytes(zip, out, BUFFER_SIZE);
+           }
+           if (!file.setLastModified(entry.getTime())) {
+             numOfFailedLastModifiedSet++;
+           }
+         }
+       }
+       if (numOfFailedLastModifiedSet > 0) {
+         LOG.warn("Could not set last modfied time for {} file(s)",
+             numOfFailedLastModifiedSet);
+       }
+     }
+   }
+ 
+   /**
+    * Given a File input it will unzip it in the unzip directory
+    * passed as the second parameter.
     * @param inFile The zip file as input
     * @param unzipDir The unzip directory where to unzip the zip file.
-    * @throws IOException
+    * @throws IOException An I/O exception has occurred
     */
    public static void unZip(File inFile, File unzipDir) throws IOException {
      Enumeration<? extends ZipEntry> entries;
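
The hunk above adds a FileUtil.zip(File, File) helper and an InputStream-based
unZip overload. Not part of the commit: a minimal round-trip sketch of the two new
signatures; the paths below are hypothetical.

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import org.apache.hadoop.fs.FileUtil;

public class FileUtilZipRoundTrip {
  public static void main(String[] args) throws IOException {
    // Hypothetical source directory and archive target.
    File sourceDir = new File("/tmp/conf-to-pack");
    File archive = new File("/tmp/conf-to-pack.zip");

    // zip() refuses to overwrite an existing archive and returns a CRC32 checksum.
    long checksum = FileUtil.zip(sourceDir, archive);
    System.out.println("Archive checksum: " + checksum);

    // The new overload reads from any InputStream and unzips into the target dir.
    File unpackDir = new File("/tmp/conf-unpacked");
    try (FileInputStream in = new FileInputStream(archive)) {
      FileUtil.unZip(in, unpackDir);
    }
  }
}

Note that zip() uses each file's canonical path as the entry name, so the unpacked
tree under unpackDir mirrors the absolute paths of the source files.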

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47919787/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47919787/hadoop-dist/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47919787/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47919787/hadoop-hdfs-project/hadoop-hdfs/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47919787/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47919787/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 3f2939e5,30f75ba..61accc9
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@@ -2143,10 -2080,10 +2138,10 @@@ public class DataNode extends Reconfigu
        try {
          this.blockPoolManager.shutDownAll(bposArray);
        } catch (InterruptedException ie) {
-         LOG.warn("Received exception in BlockPoolManager#shutDownAll: ", ie);
+         LOG.warn("Received exception in BlockPoolManager#shutDownAll", ie);
        }
      }
 -    
 +
      if (storage != null) {
        try {
          this.storage.unlockAll();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47919787/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index a842a98,c141293..1852192
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@@ -1689,14 -1745,9 +1745,9 @@@ class FsDatasetImpl implements FsDatase
        }
        assert newReplicaInfo.getState() == ReplicaState.FINALIZED
            : "Replica should be finalized";
-       if(volumeMap.get(bpid, replicaInfo.getBlockId()).getGenerationStamp() <=
-           newReplicaInfo.getGenerationStamp()) {
-         volumeMap.add(bpid, newReplicaInfo);
-         return newReplicaInfo;
-       } else {
-          throw new IOException("Generation Stamp should be monotonically " +
-              "increased. That assumption is violated here.");
-       }
 -
++      
+       volumeMap.add(bpid, newReplicaInfo);
+       return newReplicaInfo;
      }
    }
  




[15/50] [abbrv] hadoop git commit: YARN-7838. Support AND/OR constraints in Distributed Shell. Contributed by Weiwei Yang.

Posted by ae...@apache.org.
YARN-7838. Support AND/OR constraints in Distributed Shell. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a08c0488
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a08c0488
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a08c0488

Branch: refs/heads/HDFS-7240
Commit: a08c048832d68c203fbdfce8d9f0e7dcccb02a55
Parents: 25fbec6
Author: Weiwei Yang <ww...@apache.org>
Authored: Sun Feb 11 14:20:46 2018 +0800
Committer: Weiwei Yang <ww...@apache.org>
Committed: Sun Feb 11 14:20:46 2018 +0800

----------------------------------------------------------------------
 .../PlacementConstraintParseException.java      |  28 +
 .../constraint/PlacementConstraintParser.java   | 615 +++++++++++++++++++
 .../yarn/util/constraint/package-info.java      |  22 +
 .../resource/TestPlacementConstraintParser.java | 372 +++++++++++
 .../distributedshell/ApplicationMaster.java     |  10 +-
 .../applications/distributedshell/Client.java   |   8 +-
 .../distributedshell/PlacementSpec.java         |  86 +--
 7 files changed, 1075 insertions(+), 66 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a08c0488/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParseException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParseException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParseException.java
new file mode 100644
index 0000000..8f3e28c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParseException.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.util.constraint;
+
+/**
+ * Exception when the placement constraint parser fails to parse an expression.
+ */
+public class PlacementConstraintParseException extends Exception {
+
+  public PlacementConstraintParseException(String msg) {
+    super(msg);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a08c0488/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java
new file mode 100644
index 0000000..603e692
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java
@@ -0,0 +1,615 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.util.constraint;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
+
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.LinkedHashMap;
+import java.util.StringTokenizer;
+import java.util.Enumeration;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Stack;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.Optional;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * Placement constraint expression parser.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public final class PlacementConstraintParser {
+
+  private static final char EXPRESSION_DELIM = ':';
+  private static final char KV_SPLIT_DELIM = '=';
+  private static final char EXPRESSION_VAL_DELIM = ',';
+  private static final char BRACKET_START = '(';
+  private static final char BRACKET_END = ')';
+  private static final String IN = "in";
+  private static final String NOT_IN = "notin";
+  private static final String AND = "and";
+  private static final String OR = "or";
+  private static final String CARDINALITY = "cardinality";
+  private static final String SCOPE_NODE = PlacementConstraints.NODE;
+  private static final String SCOPE_RACK = PlacementConstraints.RACK;
+
+  private PlacementConstraintParser() {
+    // Private constructor for this utility class.
+  }
+
+  /**
+   * Constraint Parser used to parse placement constraints from a
+   * given expression.
+   */
+  public static abstract class ConstraintParser {
+
+    private final ConstraintTokenizer tokenizer;
+
+    public ConstraintParser(ConstraintTokenizer tk){
+      this.tokenizer = tk;
+    }
+
+    void validate() throws PlacementConstraintParseException {
+      tokenizer.validate();
+    }
+
+    void shouldHaveNext()
+        throws PlacementConstraintParseException {
+      if (!tokenizer.hasMoreElements()) {
+        throw new PlacementConstraintParseException("Expecting more tokens");
+      }
+    }
+
+    String nextToken() {
+      return this.tokenizer.nextElement().trim();
+    }
+
+    boolean hasMoreTokens() {
+      return this.tokenizer.hasMoreElements();
+    }
+
+    int toInt(String name) throws PlacementConstraintParseException {
+      try {
+        return Integer.parseInt(name);
+      } catch (NumberFormatException e) {
+        throw new PlacementConstraintParseException(
+            "Expecting an Integer, but get " + name);
+      }
+    }
+
+    String parseScope(String scopeString)
+        throws PlacementConstraintParseException {
+      if (scopeString.equalsIgnoreCase(SCOPE_NODE)) {
+        return SCOPE_NODE;
+      } else if (scopeString.equalsIgnoreCase(SCOPE_RACK)) {
+        return SCOPE_RACK;
+      } else {
+        throw new PlacementConstraintParseException(
+            "expecting scope to " + SCOPE_NODE + " or " + SCOPE_RACK
+                + ", but met " + scopeString);
+      }
+    }
+
+    public AbstractConstraint tryParse() {
+      try {
+        return parse();
+      } catch (PlacementConstraintParseException e) {
+        // unable to parse, simply return null
+        return null;
+      }
+    }
+
+    public abstract AbstractConstraint parse()
+        throws PlacementConstraintParseException;
+  }
+
+  /**
+   * Tokenizer interface used to parse an expression. It first
+   * validates that the syntax of the given expression is correct, then
+   * traverses the expression and parses it into an enumeration of strings.
+   * Each parsed string can be further consumed by a {@link ConstraintParser}
+   * and transformed into an {@link AbstractConstraint}.
+   */
+  public interface ConstraintTokenizer extends Enumeration<String> {
+
+    /**
+     * Validate the schema before actually parsing the expression.
+     * @throws PlacementConstraintParseException
+     */
+    default void validate() throws PlacementConstraintParseException {
+      // do nothing
+    }
+  }
+
+  /**
+   * A basic tokenizer that splits an expression by a given delimiter.
+   */
+  public static class BaseStringTokenizer implements ConstraintTokenizer {
+    private final StringTokenizer tokenizer;
+    BaseStringTokenizer(String expr, String delimiter) {
+      this.tokenizer = new StringTokenizer(expr, delimiter);
+    }
+
+    @Override
+    public boolean hasMoreElements() {
+      return tokenizer.hasMoreTokens();
+    }
+
+    @Override
+    public String nextElement() {
+      return tokenizer.nextToken();
+    }
+  }
+
+  /**
+   * Tokenizer used to parse conjunction form of a constraint expression,
+   * [AND|OR](C1:C2:...:Cn). Each Cn is a constraint expression.
+   */
+  public static final class ConjunctionTokenizer
+      implements ConstraintTokenizer {
+
+    private final String expression;
+    private Iterator<String> iterator;
+
+    private ConjunctionTokenizer(String expr) {
+      this.expression = expr;
+    }
+
+    // Traverse the expression and try to get a list of parsed elements
+    // based on schema.
+    @Override
+    public void validate() throws PlacementConstraintParseException {
+      List<String> parsedElements = new ArrayList<>();
+      // expression should start with AND or OR
+      String op;
+      if (expression.startsWith(AND) ||
+          expression.startsWith(AND.toUpperCase())) {
+        op = AND;
+      } else if(expression.startsWith(OR) ||
+          expression.startsWith(OR.toUpperCase())) {
+        op = OR;
+      } else {
+        throw new PlacementConstraintParseException(
+            "Excepting starting with \"" + AND + "\" or \"" + OR + "\","
+                + " but met " + expression);
+      }
+      parsedElements.add(op);
+      Pattern p = Pattern.compile("\\((.*)\\)");
+      Matcher m = p.matcher(expression);
+      if (!m.find()) {
+        throw new PlacementConstraintParseException("Unexpected format,"
+            + " expecting [AND|OR](A:B...) "
+            + "but current expression is " + expression);
+      }
+      String childStrs = m.group(1);
+      MultipleConstraintsTokenizer ct =
+          new MultipleConstraintsTokenizer(childStrs);
+      ct.validate();
+      while(ct.hasMoreElements()) {
+        parsedElements.add(ct.nextElement());
+      }
+      this.iterator = parsedElements.iterator();
+    }
+
+    @Override
+    public boolean hasMoreElements() {
+      return iterator.hasNext();
+    }
+
+    @Override
+    public String nextElement() {
+      return iterator.next();
+    }
+  }
+
+  /**
+   * Tokenizer used to parse an allocation tags expression, which should be
+   * in tag=numOfAllocations syntax.
+   */
+  public static class SourceTagsTokenizer implements ConstraintTokenizer {
+
+    private final String expression;
+    private StringTokenizer st;
+    private Iterator<String> iterator;
+    public SourceTagsTokenizer(String expr) {
+      this.expression = expr;
+      st = new StringTokenizer(expr, String.valueOf(KV_SPLIT_DELIM));
+    }
+
+    @Override
+    public void validate() throws PlacementConstraintParseException {
+      ArrayList<String> parsedValues = new ArrayList<>();
+      if (st.countTokens() != 2) {
+        throw new PlacementConstraintParseException(
+            "Expecting source allocation tag to be specified"
+                + " sourceTag=numOfAllocations syntax,"
+                + " but met " + expression);
+      }
+
+      String sourceTag = st.nextToken();
+      parsedValues.add(sourceTag);
+      String num = st.nextToken();
+      try {
+        Integer.parseInt(num);
+        parsedValues.add(num);
+      } catch (NumberFormatException e) {
+        throw new PlacementConstraintParseException("Value of the expression"
+            + " must be an integer, but met " + num);
+      }
+      iterator = parsedValues.iterator();
+    }
+
+    @Override
+    public boolean hasMoreElements() {
+      return iterator.hasNext();
+    }
+
+    @Override
+    public String nextElement() {
+      return iterator.next();
+    }
+  }
+
+  /**
+   * Tokenizer used to handle a placement spec composed of multiple
+   * constraint expressions, each delimited by the
+   * given delimiter, e.g. ':'.
+   */
+  public static class MultipleConstraintsTokenizer
+      implements ConstraintTokenizer {
+
+    private final String expr;
+    private Iterator<String> iterator;
+
+    public MultipleConstraintsTokenizer(String expression) {
+      this.expr = expression;
+    }
+
+    @Override
+    public void validate() throws PlacementConstraintParseException {
+      ArrayList<String> parsedElements = new ArrayList<>();
+      char[] arr = expr.toCharArray();
+      // Memorize the location of each delimiter in a stack,
+      // discarding delimiters that are enclosed in brackets.
+      Stack<Integer> stack = new Stack<>();
+      for (int i=0; i<arr.length; i++) {
+        char current = arr[i];
+        switch (current) {
+        case EXPRESSION_DELIM:
+          stack.add(i);
+          break;
+        case BRACKET_START:
+          stack.add(i);
+          break;
+        case BRACKET_END:
+          while(!stack.isEmpty()) {
+            if (arr[stack.pop()] == BRACKET_START) {
+              break;
+            }
+          }
+          break;
+        default:
+          break;
+        }
+      }
+
+      if (stack.isEmpty()) {
+        // Single element
+        parsedElements.add(expr);
+      } else {
+        Iterator<Integer> it = stack.iterator();
+        int currentPos = 0;
+        while (it.hasNext()) {
+          int pos = it.next();
+          String sub = expr.substring(currentPos, pos);
+          if (sub != null && !sub.isEmpty()) {
+            parsedElements.add(sub);
+          }
+          currentPos = pos+1;
+        }
+        if (currentPos < expr.length()) {
+          parsedElements.add(expr.substring(currentPos, expr.length()));
+        }
+      }
+      iterator = parsedElements.iterator();
+    }
+
+    @Override
+    public boolean hasMoreElements() {
+      return iterator.hasNext();
+    }
+
+    @Override
+    public String nextElement() {
+      return iterator.next();
+    }
+  }
+
+  /**
+   * Constraint parser used to parse a given target expression, such as
+   * "NOTIN, NODE, foo, bar".
+   */
+  public static class TargetConstraintParser extends ConstraintParser {
+
+    public TargetConstraintParser(String expression) {
+      super(new BaseStringTokenizer(expression,
+          String.valueOf(EXPRESSION_VAL_DELIM)));
+    }
+
+    @Override
+    public AbstractConstraint parse()
+        throws PlacementConstraintParseException {
+      PlacementConstraint.AbstractConstraint placementConstraints;
+      String op = nextToken();
+      if (op.equalsIgnoreCase(IN) || op.equalsIgnoreCase(NOT_IN)) {
+        String scope = nextToken();
+        scope = parseScope(scope);
+
+        Set<String> allocationTags = new TreeSet<>();
+        while(hasMoreTokens()) {
+          String tag = nextToken();
+          allocationTags.add(tag);
+        }
+        PlacementConstraint.TargetExpression target =
+            PlacementConstraints.PlacementTargets.allocationTag(
+                allocationTags.toArray(new String[allocationTags.size()]));
+        if (op.equalsIgnoreCase(IN)) {
+          placementConstraints = PlacementConstraints
+              .targetIn(scope, target);
+        } else {
+          placementConstraints = PlacementConstraints
+              .targetNotIn(scope, target);
+        }
+      } else {
+        throw new PlacementConstraintParseException(
+            "expecting " + IN + " or " + NOT_IN + ", but get " + op);
+      }
+      return placementConstraints;
+    }
+  }
+
+  /**
+   * Constraint parser used to parse a given cardinality expression, such as
+   * "cardinality, NODE, foo, 0, 1".
+   */
+  public static class CardinalityConstraintParser extends ConstraintParser {
+
+    public CardinalityConstraintParser(String expr) {
+      super(new BaseStringTokenizer(expr,
+          String.valueOf(EXPRESSION_VAL_DELIM)));
+    }
+
+    @Override
+    public AbstractConstraint parse()
+        throws PlacementConstraintParseException {
+      String op = nextToken();
+      if (!op.equalsIgnoreCase(CARDINALITY)) {
+        throw new PlacementConstraintParseException("expecting " + CARDINALITY
+            + " , but met " + op);
+      }
+
+      shouldHaveNext();
+      String scope = nextToken();
+      scope = parseScope(scope);
+
+      Stack<String> resetElements = new Stack<>();
+      while(hasMoreTokens()) {
+        resetElements.add(nextToken());
+      }
+
+      // At least 3 elements
+      if (resetElements.size() < 3) {
+        throw new PlacementConstraintParseException(
+            "Invalid syntax for a cardinality expression, expecting"
+                + " \"cardinality,SCOPE,TARGET_TAG,...,TARGET_TAG,"
+                + "MIN_CARDINALITY,MAX_CARDINALITY\" at least 5 elements,"
+                + " but only " + (resetElements.size() + 2) + " is given.");
+      }
+
+      String maxCardinalityStr = resetElements.pop();
+      Integer max = toInt(maxCardinalityStr);
+
+      String minCardinalityStr = resetElements.pop();
+      Integer min = toInt(minCardinalityStr);
+
+      ArrayList<String> targetTags = new ArrayList<>();
+      while (!resetElements.empty()) {
+        targetTags.add(resetElements.pop());
+      }
+
+      return PlacementConstraints.cardinality(scope, min, max,
+          targetTags.toArray(new String[targetTags.size()]));
+    }
+  }
+
+  /**
+   * Parser used to parse the conjunction form of constraints, such as
+   * AND(A, ..., B), OR(A, ..., B).
+   */
+  public static class ConjunctionConstraintParser extends ConstraintParser {
+
+    public ConjunctionConstraintParser(String expr) {
+      super(new ConjunctionTokenizer(expr));
+    }
+
+    @Override
+    public AbstractConstraint parse() throws PlacementConstraintParseException {
+      // do pre-process, validate input.
+      validate();
+      String op = nextToken();
+      shouldHaveNext();
+      List<AbstractConstraint> constraints = new ArrayList<>();
+      while(hasMoreTokens()) {
+        // each child expression can be any valid form of
+        // constraint expressions.
+        String constraintStr = nextToken();
+        AbstractConstraint constraint = parseExpression(constraintStr);
+        constraints.add(constraint);
+      }
+      if (AND.equalsIgnoreCase(op)) {
+        return PlacementConstraints.and(
+            constraints.toArray(
+                new AbstractConstraint[constraints.size()]));
+      } else if (OR.equalsIgnoreCase(op)) {
+        return PlacementConstraints.or(
+            constraints.toArray(
+                new AbstractConstraint[constraints.size()]));
+      } else {
+        throw new PlacementConstraintParseException(
+            "Unexpected conjunction operator : " + op
+                + ", expecting " + AND + " or " + OR);
+      }
+    }
+  }
+
+  /**
+   * A helper class to encapsulate source tags and allocations in the
+   * placement specification.
+   */
+  public static final class SourceTags {
+    private String tag;
+    private int num;
+
+    private SourceTags(String sourceTag, int number) {
+      this.tag = sourceTag;
+      this.num = number;
+    }
+
+    public String getTag() {
+      return this.tag;
+    }
+
+    public int getNumOfAllocations() {
+      return this.num;
+    }
+
+    /**
+     * Parses source tags from expression "sourceTags=numOfAllocations".
+     * @param expr
+     * @return source tags, see {@link SourceTags}
+     * @throws PlacementConstraintParseException
+     */
+    public static SourceTags parseFrom(String expr)
+        throws PlacementConstraintParseException {
+      SourceTagsTokenizer stt = new SourceTagsTokenizer(expr);
+      stt.validate();
+
+      // During validation we already checked the number of parsed elements.
+      String allocTag = stt.nextElement();
+      int allocNum = Integer.parseInt(stt.nextElement());
+      return new SourceTags(allocTag, allocNum);
+    }
+  }
+
+  /**
+   * Parses a given constraint expression into an {@link AbstractConstraint};
+   * the expression can be any valid form of constraint expression.
+   *
+   * @param constraintStr expression string
+   * @return a parsed {@link AbstractConstraint}
+   * @throws PlacementConstraintParseException when given expression
+   * is malformed
+   */
+  public static AbstractConstraint parseExpression(String constraintStr)
+      throws PlacementConstraintParseException {
+    // Try to parse the given expression with all allowed constraint parsers;
+    // fail if none of them can parse it.
+    TargetConstraintParser tp = new TargetConstraintParser(constraintStr);
+    Optional<AbstractConstraint> constraintOptional =
+        Optional.ofNullable(tp.tryParse());
+    if (!constraintOptional.isPresent()) {
+      CardinalityConstraintParser cp =
+          new CardinalityConstraintParser(constraintStr);
+      constraintOptional = Optional.ofNullable(cp.tryParse());
+      if (!constraintOptional.isPresent()) {
+        ConjunctionConstraintParser jp =
+            new ConjunctionConstraintParser(constraintStr);
+        constraintOptional = Optional.ofNullable(jp.tryParse());
+      }
+      if (!constraintOptional.isPresent()) {
+        throw new PlacementConstraintParseException(
+            "Invalid constraint expression " + constraintStr);
+      }
+    }
+    return constraintOptional.get();
+  }
+
+  /**
+   * Parses a placement constraint specification. A placement constraint spec
+   * is a composite expression composed of multiple sub constraint
+   * expressions delimited by ":", with the following syntax:
+   *
+   * <p>Tag1=N1,P1:Tag2=N2,P2:...:TagN=Nn,Pn</p>
+   *
+   * where <b>TagN=Nn</b> is a key value pair to determine the source
+   * allocation tag and the number of allocations, such as:
+   *
+   * <p>foo=3</p>
+   *
+   * and where <b>Pn</b> can be any form of a valid constraint expression,
+   * such as:
+   *
+   * <ul>
+   *   <li>in,node,foo,bar</li>
+   *   <li>notin,node,foo,bar,1,2</li>
+   *   <li>and(notin,node,foo:notin,node,bar)</li>
+   * </ul>
+   * @param expression expression string.
+   * @return a mapping from source tags to placement constraints.
+   * @throws PlacementConstraintParseException when the expression
+   * is malformed
+   */
+  public static Map<SourceTags, PlacementConstraint> parsePlacementSpec(
+      String expression) throws PlacementConstraintParseException {
+    // Respect insertion order.
+    Map<SourceTags, PlacementConstraint> result = new LinkedHashMap<>();
+    PlacementConstraintParser.ConstraintTokenizer tokenizer =
+        new PlacementConstraintParser.MultipleConstraintsTokenizer(expression);
+    tokenizer.validate();
+    while(tokenizer.hasMoreElements()) {
+      String specStr = tokenizer.nextElement();
+      // Each spec starts with sourceAllocationTag=numOfContainers and is
+      // followed by a constraint expression, e.g.
+      // foo=4,Pn
+      String[] splitted = specStr.split(
+          String.valueOf(EXPRESSION_VAL_DELIM), 2);
+      if (splitted.length != 2) {
+        throw new PlacementConstraintParseException(
+            "Unexpected placement constraint expression " + specStr);
+      }
+
+      String tagAlloc = splitted[0];
+      SourceTags st = SourceTags.parseFrom(tagAlloc);
+      String exprs = splitted[1];
+      AbstractConstraint constraint =
+          PlacementConstraintParser.parseExpression(exprs);
+
+      result.put(st, constraint.build());
+    }
+
+    return result;
+  }
+}
\ No newline at end of file

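A minimal usage sketch of the parser added above; the driver class name and the spec string are illustrative assumptions, not part of this patch. It parses a two-tag spec and prints each source tag with its number of allocations:

  import java.util.Map;
  import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
  import org.apache.hadoop.yarn.util.constraint.PlacementConstraintParseException;
  import org.apache.hadoop.yarn.util.constraint.PlacementConstraintParser;
  import org.apache.hadoop.yarn.util.constraint.PlacementConstraintParser.SourceTags;

  public class PlacementSpecParserSketch {
    public static void main(String[] args)
        throws PlacementConstraintParseException {
      // 3 "zk" containers anti-affine to other "zk" containers on a node,
      // plus 2 "hbase" containers affine to "zk" containers on a node.
      String spec = "zk=3,notin,node,zk:hbase=2,in,node,zk";
      Map<SourceTags, PlacementConstraint> parsed =
          PlacementConstraintParser.parsePlacementSpec(spec);
      for (Map.Entry<SourceTags, PlacementConstraint> e : parsed.entrySet()) {
        System.out.println(e.getKey().getTag() + " x "
            + e.getKey().getNumOfAllocations() + " -> " + e.getValue());
      }
    }
  }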
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a08c0488/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/package-info.java
new file mode 100644
index 0000000..890d5ec
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Package org.apache.hadoop.yarn.util.constraint contains classes
+ * which are used as utility classes for placement constraints.
+ */
+package org.apache.hadoop.yarn.util.constraint;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a08c0488/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintParser.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintParser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintParser.java
new file mode 100644
index 0000000..941f971
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintParser.java
@@ -0,0 +1,372 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.resource;
+
+import com.google.common.collect.Sets;
+
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
+import org.apache.hadoop.yarn.util.constraint.PlacementConstraintParseException;
+import org.apache.hadoop.yarn.util.constraint.PlacementConstraintParser;
+import org.apache.hadoop.yarn.util.constraint.PlacementConstraintParser.SourceTags;
+import org.apache.hadoop.yarn.util.constraint.PlacementConstraintParser.TargetConstraintParser;
+import org.apache.hadoop.yarn.util.constraint.PlacementConstraintParser.ConstraintParser;
+import org.apache.hadoop.yarn.util.constraint.PlacementConstraintParser.CardinalityConstraintParser;
+import org.apache.hadoop.yarn.util.constraint.PlacementConstraintParser.ConjunctionConstraintParser;
+import org.apache.hadoop.yarn.util.constraint.PlacementConstraintParser.MultipleConstraintsTokenizer;
+import org.apache.hadoop.yarn.util.constraint.PlacementConstraintParser.SourceTagsTokenizer;
+import org.apache.hadoop.yarn.util.constraint.PlacementConstraintParser.ConstraintTokenizer;
+
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.*;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Class to test placement constraint parser.
+ */
+public class TestPlacementConstraintParser {
+
+  @Test
+  public void testTargetExpressionParser()
+      throws PlacementConstraintParseException {
+    ConstraintParser parser;
+    AbstractConstraint constraint;
+    SingleConstraint single;
+
+    // Anti-affinity with single target tag
+    // NOTIN,NODE,foo
+    parser = new TargetConstraintParser("NOTIN, NODE, foo");
+    constraint = parser.parse();
+    Assert.assertTrue(constraint instanceof SingleConstraint);
+    single = (SingleConstraint) constraint;
+    Assert.assertEquals("node", single.getScope());
+    Assert.assertEquals(0, single.getMinCardinality());
+    Assert.assertEquals(0, single.getMaxCardinality());
+
+    // lower case is also valid
+    parser = new TargetConstraintParser("notin, node, foo");
+    constraint = parser.parse();
+    Assert.assertTrue(constraint instanceof SingleConstraint);
+    single = (SingleConstraint) constraint;
+    Assert.assertEquals("node", single.getScope());
+    Assert.assertEquals(0, single.getMinCardinality());
+    Assert.assertEquals(0, single.getMaxCardinality());
+
+    // Affinity with single target tag
+    // IN,NODE,foo
+    parser = new TargetConstraintParser("IN, NODE, foo");
+    constraint = parser.parse();
+    Assert.assertTrue(constraint instanceof SingleConstraint);
+    single = (SingleConstraint) constraint;
+    Assert.assertEquals("node", single.getScope());
+    Assert.assertEquals(1, single.getMinCardinality());
+    Assert.assertEquals(Integer.MAX_VALUE, single.getMaxCardinality());
+
+    // Anti-affinity with multiple target tags
+    // NOTIN,NODE,foo,bar,exp
+    parser = new TargetConstraintParser("NOTIN, NODE, foo, bar, exp");
+    constraint = parser.parse();
+    Assert.assertTrue(constraint instanceof SingleConstraint);
+    single = (SingleConstraint) constraint;
+    Assert.assertEquals("node", single.getScope());
+    Assert.assertEquals(0, single.getMinCardinality());
+    Assert.assertEquals(0, single.getMaxCardinality());
+    Assert.assertEquals(1, single.getTargetExpressions().size());
+    TargetExpression exp =
+        single.getTargetExpressions().iterator().next();
+    Assert.assertEquals("ALLOCATION_TAG", exp.getTargetType().toString());
+    Assert.assertEquals(3, exp.getTargetValues().size());
+
+    // Invalid OP
+    parser = new TargetConstraintParser("XYZ, NODE, foo");
+    try {
+      parser.parse();
+      Assert.fail("Expecting a parsing failure!");
+    } catch (Exception e) {
+      Assert.assertTrue(e instanceof PlacementConstraintParseException);
+      Assert.assertTrue(e.getMessage().contains("expecting in or notin"));
+    }
+  }
+
+  @Test
+  public void testCardinalityConstraintParser()
+      throws PlacementConstraintParseException {
+    ConstraintParser parser;
+    AbstractConstraint constraint;
+    SingleConstraint single;
+
+    // cardinality,NODE,foo,0,1
+    parser = new CardinalityConstraintParser("cardinality, NODE, foo, 0, 1");
+    constraint = parser.parse();
+    Assert.assertTrue(constraint instanceof SingleConstraint);
+    single = (SingleConstraint) constraint;
+    Assert.assertEquals("node", single.getScope());
+    Assert.assertEquals(0, single.getMinCardinality());
+    Assert.assertEquals(1, single.getMaxCardinality());
+    Assert.assertEquals(1, single.getTargetExpressions().size());
+    TargetExpression exp =
+        single.getTargetExpressions().iterator().next();
+    Assert.assertEquals("ALLOCATION_TAG", exp.getTargetType().toString());
+    Assert.assertEquals(1, exp.getTargetValues().size());
+    Assert.assertEquals("foo", exp.getTargetValues().iterator().next());
+
+    // cardinality,RACK,foo,bar,moo,0,1
+    parser = new CardinalityConstraintParser(
+        "cardinality,RACK,foo,bar,moo,0,1");
+    constraint = parser.parse();
+    Assert.assertTrue(constraint instanceof SingleConstraint);
+    single = (SingleConstraint) constraint;
+    Assert.assertEquals("rack", single.getScope());
+    Assert.assertEquals(0, single.getMinCardinality());
+    Assert.assertEquals(1, single.getMaxCardinality());
+    Assert.assertEquals(1, single.getTargetExpressions().size());
+    exp = single.getTargetExpressions().iterator().next();
+    Assert.assertEquals("ALLOCATION_TAG", exp.getTargetType().toString());
+    Assert.assertEquals(3, exp.getTargetValues().size());
+    Set<String> expectedTags = Sets.newHashSet("foo", "bar", "moo");
+    Assert.assertTrue(Sets.difference(expectedTags, exp.getTargetValues())
+        .isEmpty());
+
+    // Invalid scope string
+    try {
+      parser = new CardinalityConstraintParser(
+          "cardinality,NOWHERE,foo,bar,moo,0,1");
+      parser.parse();
+      Assert.fail("Expecting a parsing failure!");
+    } catch (PlacementConstraintParseException e) {
+      Assert.assertTrue(e.getMessage()
+          .contains("expecting scope to node or rack, but met NOWHERE"));
+    }
+
+    // Invalid number of expression elements
+    try {
+      parser = new CardinalityConstraintParser(
+          "cardinality,NODE,0,1");
+      parser.parse();
+      Assert.fail("Expecting a parsing failure!");
+    } catch (PlacementConstraintParseException e) {
+      Assert.assertTrue(e.getMessage()
+          .contains("at least 5 elements, but only 4 is given"));
+    }
+  }
+
+  @Test
+  public void testAndConstraintParser()
+      throws PlacementConstraintParseException {
+    ConstraintParser parser;
+    AbstractConstraint constraint;
+    And and;
+
+    parser = new ConjunctionConstraintParser(
+        "AND(NOTIN,NODE,foo:NOTIN,NODE,bar)");
+    constraint = parser.parse();
+    Assert.assertTrue(constraint instanceof And);
+    and = (And) constraint;
+    Assert.assertEquals(2, and.getChildren().size());
+
+    parser = new ConjunctionConstraintParser(
+        "AND(NOTIN,NODE,foo:cardinality,NODE,foo,0,1)");
+    constraint = parser.parse();
+    Assert.assertTrue(constraint instanceof And);
+    and = (And) constraint;
+    Assert.assertEquals(2, and.getChildren().size());
+
+    parser = new ConjunctionConstraintParser(
+        "AND(NOTIN,NODE,foo:AND(NOTIN,NODE,foo:cardinality,NODE,foo,0,1))");
+    constraint = parser.parse();
+    Assert.assertTrue(constraint instanceof And);
+    and = (And) constraint;
+    Assert.assertTrue(and.getChildren().get(0) instanceof SingleConstraint);
+    Assert.assertTrue(and.getChildren().get(1) instanceof And);
+    and = (And) and.getChildren().get(1);
+    Assert.assertEquals(2, and.getChildren().size());
+  }
+
+  @Test
+  public void testMultipleConstraintsTokenizer()
+      throws PlacementConstraintParseException {
+    MultipleConstraintsTokenizer ct;
+    SourceTagsTokenizer st;
+    TokenizerTester mp;
+
+    ct = new MultipleConstraintsTokenizer(
+        "foo=1,A1,A2,A3:bar=2,B1,B2:moo=3,C1,C2");
+    mp = new TokenizerTester(ct,
+        "foo=1,A1,A2,A3", "bar=2,B1,B2", "moo=3,C1,C2");
+    mp.verify();
+
+    ct = new MultipleConstraintsTokenizer(
+        "foo=1,AND(A2:A3):bar=2,OR(B1:AND(B2:B3)):moo=3,C1,C2");
+    mp = new TokenizerTester(ct,
+        "foo=1,AND(A2:A3)", "bar=2,OR(B1:AND(B2:B3))", "moo=3,C1,C2");
+    mp.verify();
+
+    ct = new MultipleConstraintsTokenizer("A:B:C");
+    mp = new TokenizerTester(ct, "A", "B", "C");
+    mp.verify();
+
+    ct = new MultipleConstraintsTokenizer("A:AND(B:C):D");
+    mp = new TokenizerTester(ct, "A", "AND(B:C)", "D");
+    mp.verify();
+
+    ct = new MultipleConstraintsTokenizer("A:AND(B:OR(C:D)):E");
+    mp = new TokenizerTester(ct, "A", "AND(B:OR(C:D))", "E");
+    mp.verify();
+
+    ct = new MultipleConstraintsTokenizer("A:AND(B:OR(C:D)):E");
+    mp = new TokenizerTester(ct, "A", "AND(B:OR(C:D))", "E");
+    mp.verify();
+
+    st = new SourceTagsTokenizer("A=4");
+    mp = new TokenizerTester(st, "A", "4");
+    mp.verify();
+
+    try {
+      st = new SourceTagsTokenizer("A=B");
+      mp = new TokenizerTester(st, "A", "B");
+      mp.verify();
+      Assert.fail("Expecting a parsing failure");
+    } catch (PlacementConstraintParseException e) {
+      Assert.assertTrue(e.getMessage()
+          .contains("Value of the expression must be an integer"));
+    }
+  }
+
+  private static class TokenizerTester {
+
+    private ConstraintTokenizer tokenizer;
+    private String[] expectedExtractions;
+
+    protected TokenizerTester(ConstraintTokenizer tk,
+        String... expectedStrings) {
+      this.tokenizer = tk;
+      this.expectedExtractions = expectedStrings;
+    }
+
+    void verify()
+        throws PlacementConstraintParseException {
+      tokenizer.validate();
+      int i = 0;
+      while (tokenizer.hasMoreElements()) {
+        String current = tokenizer.nextElement();
+        Assert.assertTrue(i < expectedExtractions.length);
+        Assert.assertEquals(expectedExtractions[i], current);
+        i++;
+      }
+    }
+  }
+
+  @Test
+  public void testParsePlacementSpec()
+      throws PlacementConstraintParseException {
+    Map<SourceTags, PlacementConstraint> result;
+    PlacementConstraint expectedPc1, expectedPc2;
+    PlacementConstraint actualPc1, actualPc2;
+    SourceTags tag1, tag2;
+
+    // A single anti-affinity constraint
+    result = PlacementConstraintParser
+        .parsePlacementSpec("foo=3,notin,node,foo");
+    Assert.assertEquals(1, result.size());
+    tag1 = result.keySet().iterator().next();
+    Assert.assertEquals("foo", tag1.getTag());
+    Assert.assertEquals(3, tag1.getNumOfAllocations());
+    expectedPc1 = targetNotIn("node", allocationTag("foo")).build();
+    actualPc1 = result.values().iterator().next();
+    Assert.assertEquals(expectedPc1, actualPc1);
+
+    // Upper case
+    result = PlacementConstraintParser
+        .parsePlacementSpec("foo=3,NOTIN,NODE,foo");
+    Assert.assertEquals(1, result.size());
+    tag1 = result.keySet().iterator().next();
+    Assert.assertEquals("foo", tag1.getTag());
+    Assert.assertEquals(3, tag1.getNumOfAllocations());
+    expectedPc1 = targetNotIn("node", allocationTag("foo")).build();
+    actualPc1 = result.values().iterator().next();
+    Assert.assertEquals(expectedPc1, actualPc1);
+
+    // A single cardinality constraint
+    result = PlacementConstraintParser
+        .parsePlacementSpec("foo=10,cardinality,node,foo,bar,0,100");
+    Assert.assertEquals(1, result.size());
+    tag1 = result.keySet().iterator().next();
+    Assert.assertEquals("foo", tag1.getTag());
+    Assert.assertEquals(10, tag1.getNumOfAllocations());
+    expectedPc1 = cardinality("node", 0, 100, "foo", "bar").build();
+    Assert.assertEquals(expectedPc1, result.values().iterator().next());
+
+    // Two constraint expressions
+    result = PlacementConstraintParser
+        .parsePlacementSpec("foo=3,notin,node,foo:bar=2,in,node,foo");
+    Assert.assertEquals(2, result.size());
+    Iterator<SourceTags> keyIt = result.keySet().iterator();
+    tag1 = keyIt.next();
+    Assert.assertEquals("foo", tag1.getTag());
+    Assert.assertEquals(3, tag1.getNumOfAllocations());
+    tag2 = keyIt.next();
+    Assert.assertEquals("bar", tag2.getTag());
+    Assert.assertEquals(2, tag2.getNumOfAllocations());
+    Iterator<PlacementConstraint> valueIt = result.values().iterator();
+    expectedPc1 = targetNotIn("node", allocationTag("foo")).build();
+    expectedPc2 = targetIn("node", allocationTag("foo")).build();
+    Assert.assertEquals(expectedPc1, valueIt.next());
+    Assert.assertEquals(expectedPc2, valueIt.next());
+
+    // And constraint
+    result = PlacementConstraintParser
+        .parsePlacementSpec("foo=1000,and(notin,node,bar:in,node,foo)");
+    Assert.assertEquals(1, result.size());
+    keyIt = result.keySet().iterator();
+    tag1 = keyIt.next();
+    Assert.assertEquals("foo", tag1.getTag());
+    Assert.assertEquals(1000, tag1.getNumOfAllocations());
+    actualPc1 = result.values().iterator().next();
+    expectedPc1 = and(targetNotIn("node", allocationTag("bar")),
+        targetIn("node", allocationTag("foo"))).build();
+    Assert.assertEquals(expectedPc1, actualPc1);
+
+    // Multiple constraints with nested forms.
+    result = PlacementConstraintParser.parsePlacementSpec(
+            "foo=1000,and(notin,node,bar:or(in,node,foo:in,node,moo))"
+                + ":bar=200,notin,node,foo");
+    Assert.assertEquals(2, result.size());
+    keyIt = result.keySet().iterator();
+    tag1 = keyIt.next();
+    tag2 = keyIt.next();
+    Assert.assertEquals("foo", tag1.getTag());
+    Assert.assertEquals(1000, tag1.getNumOfAllocations());
+    Assert.assertEquals("bar", tag2.getTag());
+    Assert.assertEquals(200, tag2.getNumOfAllocations());
+    valueIt = result.values().iterator();
+    actualPc1 = valueIt.next();
+    actualPc2 = valueIt.next();
+
+    expectedPc1 = and(targetNotIn("node", allocationTag("bar")),
+        or(targetIn("node", allocationTag("foo")),
+            targetIn("node", allocationTag("moo")))).build();
+    Assert.assertEquals(expectedPc1, actualPc1);
+    expectedPc2 = targetNotIn("node", allocationTag("foo")).build();
+    Assert.assertEquals(expectedPc2, actualPc2);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a08c0488/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index 9ba2138..a06ee7c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -28,6 +28,7 @@ import java.lang.reflect.UndeclaredThrowableException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -38,6 +39,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.Vector;
+import java.util.Base64;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -671,8 +673,14 @@ public class ApplicationMaster {
   }
 
   private void parsePlacementSpecs(String placementSpecifications) {
+    // Client sends placement spec in encoded format
+    Base64.Decoder decoder = Base64.getDecoder();
+    byte[] decodedBytes = decoder.decode(
+        placementSpecifications.getBytes(StandardCharsets.UTF_8));
+    String decodedSpec = new String(decodedBytes, StandardCharsets.UTF_8);
+    LOG.info("Decoded placement spec: " + decodedSpec);
     Map<String, PlacementSpec> pSpecs =
-        PlacementSpec.parse(placementSpecifications);
+        PlacementSpec.parse(decodedSpec);
     this.placementSpecs = new HashMap<>();
     this.numTotalContainers = 0;
     for (PlacementSpec pSpec : pSpecs.values()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a08c0488/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
index 0aef83f..ac58662 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.applications.distributedshell;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -28,6 +29,7 @@ import java.util.Map;
 import java.util.Set;
 import java.util.Vector;
 import java.util.Arrays;
+import java.util.Base64;
 
 import com.google.common.base.Joiner;
 import org.apache.commons.cli.CommandLine;
@@ -857,7 +859,11 @@ public class Client {
     }
     vargs.add("--num_containers " + String.valueOf(numContainers));
     if (placementSpec != null && placementSpec.length() > 0) {
-      vargs.add("--placement_spec " + placementSpec);
+      // Encode the spec to avoid passing special chars via shell arguments.
+      String encodedSpec = Base64.getEncoder()
+          .encodeToString(placementSpec.getBytes(StandardCharsets.UTF_8));
+      LOG.info("Encoded placement spec: " + encodedSpec);
+      vargs.add("--placement_spec " + encodedSpec);
     }
     if (null != nodeLabelExpression) {
       appContext.setNodeLabelExpression(nodeLabelExpression);

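A quick standalone sanity check of the Base64 handshake introduced above, where Client encodes the spec before passing it as a shell argument and ApplicationMaster decodes it back; the class name and spec string are illustrative assumptions:

  import java.nio.charset.StandardCharsets;
  import java.util.Base64;

  public class PlacementSpecCodecSketch {
    public static void main(String[] args) {
      String placementSpec = "zk=3,notin,node,zk:hbase=2,in,node,zk";

      // Client side: encode before passing the spec via --placement_spec.
      String encoded = Base64.getEncoder()
          .encodeToString(placementSpec.getBytes(StandardCharsets.UTF_8));

      // AM side: decode the argument back to the original spec string.
      String decoded = new String(
          Base64.getDecoder().decode(encoded.getBytes(StandardCharsets.UTF_8)),
          StandardCharsets.UTF_8);

      // The round trip must be lossless, otherwise the AM cannot parse the spec.
      if (!placementSpec.equals(decoded)) {
        throw new AssertionError("Base64 round trip changed the spec");
      }
      System.out.println("encoded=" + encoded);
    }
  }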
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a08c0488/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/PlacementSpec.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/PlacementSpec.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/PlacementSpec.java
index ed13ee0..2909259 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/PlacementSpec.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/PlacementSpec.java
@@ -18,13 +18,14 @@
 package org.apache.hadoop.yarn.applications.distributedshell;
 
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
-import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
+import org.apache.hadoop.yarn.util.constraint.PlacementConstraintParseException;
+import org.apache.hadoop.yarn.util.constraint.PlacementConstraintParser;
+import org.apache.hadoop.yarn.util.constraint.PlacementConstraintParser.SourceTags;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.util.HashMap;
 import java.util.Map;
-import java.util.Scanner;
 
 /**
  * Class encapsulating a SourceTag, number of container and a Placement
@@ -34,12 +35,6 @@ public class PlacementSpec {
 
   private static final Logger LOG =
       LoggerFactory.getLogger(PlacementSpec.class);
-  private static final String SPEC_DELIM = ":";
-  private static final String KV_SPLIT_DELIM = "=";
-  private static final String SPEC_VAL_DELIM = ",";
-  private static final String IN = "in";
-  private static final String NOT_IN = "notin";
-  private static final String CARDINALITY = "cardinality";
 
   public final String sourceTag;
   public final int numContainers;
@@ -73,65 +68,28 @@ public class PlacementSpec {
    * @param specs Placement spec.
    * @return Mapping from source tag to placement constraint.
    */
-  public static Map<String, PlacementSpec> parse(String specs) {
+  public static Map<String, PlacementSpec> parse(String specs)
+      throws IllegalArgumentException {
     LOG.info("Parsing Placement Specs: [{}]", specs);
-    Scanner s = new Scanner(specs).useDelimiter(SPEC_DELIM);
     Map<String, PlacementSpec> pSpecs = new HashMap<>();
-    while (s.hasNext()) {
-      String sp = s.next();
-      LOG.info("Parsing Spec: [{}]", sp);
-      String[] specSplit = sp.split(KV_SPLIT_DELIM);
-      String sourceTag = specSplit[0];
-      Scanner ps = new Scanner(specSplit[1]).useDelimiter(SPEC_VAL_DELIM);
-      int numContainers = ps.nextInt();
-      if (!ps.hasNext()) {
-        pSpecs.put(sourceTag,
-            new PlacementSpec(sourceTag, numContainers, null));
-        LOG.info("Creating Spec without constraint {}: num[{}]",
-            sourceTag, numContainers);
-        continue;
+    Map<SourceTags, PlacementConstraint> parsed;
+    try {
+      parsed = PlacementConstraintParser.parsePlacementSpec(specs);
+      for (Map.Entry<SourceTags, PlacementConstraint> entry :
+          parsed.entrySet()) {
+        LOG.info("Parsed source tag: {}, number of allocations: {}",
+            entry.getKey().getTag(), entry.getKey().getNumOfAllocations());
+        LOG.info("Parsed constraint: {}", entry.getValue()
+            .getConstraintExpr().getClass().getSimpleName());
+        pSpecs.put(entry.getKey().getTag(), new PlacementSpec(
+            entry.getKey().getTag(),
+            entry.getKey().getNumOfAllocations(),
+            entry.getValue()));
       }
-      String cType = ps.next().toLowerCase();
-      String scope = ps.next().toLowerCase();
-
-      String targetTag = ps.next();
-      scope = scope.equals("rack") ? PlacementConstraints.RACK :
-          PlacementConstraints.NODE;
-
-      PlacementConstraint pc;
-      if (cType.equals(IN)) {
-        pc = PlacementConstraints.build(
-            PlacementConstraints.targetIn(scope,
-                PlacementConstraints.PlacementTargets.allocationTag(
-                    targetTag)));
-        LOG.info("Creating IN Constraint for source tag [{}], num[{}]: " +
-                "scope[{}], target[{}]",
-            sourceTag, numContainers, scope, targetTag);
-      } else if (cType.equals(NOT_IN)) {
-        pc = PlacementConstraints.build(
-            PlacementConstraints.targetNotIn(scope,
-                PlacementConstraints.PlacementTargets.allocationTag(
-                    targetTag)));
-        LOG.info("Creating NOT_IN Constraint for source tag [{}], num[{}]: " +
-                "scope[{}], target[{}]",
-            sourceTag, numContainers, scope, targetTag);
-      } else if (cType.equals(CARDINALITY)) {
-        int minCard = ps.nextInt();
-        int maxCard = ps.nextInt();
-        pc = PlacementConstraints.build(
-            PlacementConstraints.targetCardinality(scope, minCard, maxCard,
-                PlacementConstraints.PlacementTargets.allocationTag(
-                    targetTag)));
-        LOG.info("Creating CARDINALITY Constraint source tag [{}], num[{}]: " +
-                "scope[{}], min[{}], max[{}], target[{}]",
-            sourceTag, numContainers, scope, minCard, maxCard, targetTag);
-      } else {
-        throw new RuntimeException(
-            "Could not parse constraintType [" + cType + "]" +
-                " in [" + specSplit[1] + "]");
-      }
-      pSpecs.put(sourceTag, new PlacementSpec(sourceTag, numContainers, pc));
+      return pSpecs;
+    } catch (PlacementConstraintParseException e) {
+      throw new IllegalArgumentException(
+          "Invalid placement spec: " + specs, e);
     }
-    return pSpecs;
   }
 }


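With the refactor above, distributed shell callers interact with the spec parser roughly as sketched below; the class name and spec strings are illustrative, and the key behavioral change is that malformed specs now surface as IllegalArgumentException instead of a raw RuntimeException:

  import java.util.Map;
  import org.apache.hadoop.yarn.applications.distributedshell.PlacementSpec;

  public class PlacementSpecParseSketch {
    public static void main(String[] args) {
      // Well-formed spec: delegated to PlacementConstraintParser internally.
      Map<String, PlacementSpec> specs =
          PlacementSpec.parse("zk=3,notin,node,zk:hbase=2,in,node,zk");
      for (PlacementSpec ps : specs.values()) {
        System.out.println(ps.sourceTag + " -> " + ps.numContainers
            + " containers");
      }

      // Malformed spec: the parse error is wrapped in IllegalArgumentException.
      try {
        PlacementSpec.parse("zk=3,bogus,node,zk");
      } catch (IllegalArgumentException expected) {
        System.out.println("Rejected invalid spec: " + expected.getMessage());
      }
    }
  }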


[13/50] [abbrv] hadoop git commit: Preparing for 3.2.0 development

Posted by ae...@apache.org.
Preparing for 3.2.0 development

Change-Id: I6d0e01f3d665d26573ef2b957add1cf0cddf7938


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60f9e60b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60f9e60b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60f9e60b

Branch: refs/heads/HDFS-7240
Commit: 60f9e60b3b417c800683c87669b6f5410ac65066
Parents: c97d5bc
Author: Wangda Tan <wa...@apache.org>
Authored: Sun Feb 11 11:17:38 2018 +0800
Committer: Wangda Tan <wa...@apache.org>
Committed: Sun Feb 11 11:17:38 2018 +0800

----------------------------------------------------------------------
 hadoop-assemblies/pom.xml                                        | 4 ++--
 hadoop-build-tools/pom.xml                                       | 2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml                  | 4 ++--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml     | 4 ++--
 .../hadoop-client-check-test-invariants/pom.xml                  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml    | 4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml          | 4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml              | 4 ++--
 hadoop-client-modules/hadoop-client/pom.xml                      | 4 ++--
 hadoop-client-modules/pom.xml                                    | 2 +-
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml        | 4 ++--
 hadoop-cloud-storage-project/pom.xml                             | 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml                 | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml               | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml                        | 4 ++--
 hadoop-common-project/hadoop-common/pom.xml                      | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml                         | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml                     | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml                         | 4 ++--
 hadoop-common-project/pom.xml                                    | 4 ++--
 hadoop-dist/pom.xml                                              | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml                   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml                   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml            | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml                      | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml                          | 4 ++--
 hadoop-hdfs-project/pom.xml                                      | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml                       | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml                   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml                    | 4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml                   | 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml                      | 4 ++--
 .../hadoop-mapreduce-client-uploader/pom.xml                     | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml         | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml       | 4 ++--
 hadoop-mapreduce-project/pom.xml                                 | 4 ++--
 hadoop-maven-plugins/pom.xml                                     | 2 +-
 hadoop-minicluster/pom.xml                                       | 4 ++--
 hadoop-project-dist/pom.xml                                      | 4 ++--
 hadoop-project/pom.xml                                           | 4 ++--
 hadoop-tools/hadoop-aliyun/pom.xml                               | 2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml                         | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml                             | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml                                  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml                       | 2 +-
 hadoop-tools/hadoop-azure/pom.xml                                | 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml                             | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml                               | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml                               | 4 ++--
 hadoop-tools/hadoop-fs2img/pom.xml                               | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml                              | 4 ++--
 hadoop-tools/hadoop-kafka/pom.xml                                | 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml                            | 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml                                | 4 ++--
 hadoop-tools/hadoop-resourceestimator/pom.xml                    | 2 +-
 hadoop-tools/hadoop-rumen/pom.xml                                | 4 ++--
 hadoop-tools/hadoop-sls/pom.xml                                  | 4 ++--
 hadoop-tools/hadoop-streaming/pom.xml                            | 4 ++--
 hadoop-tools/hadoop-tools-dist/pom.xml                           | 4 ++--
 hadoop-tools/pom.xml                                             | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml          | 4 ++--
 .../hadoop-yarn-applications-distributedshell/pom.xml            | 4 ++--
 .../hadoop-yarn-applications-unmanaged-am-launcher/pom.xml       | 4 ++--
 .../hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml    | 2 +-
 .../hadoop-yarn-services/hadoop-yarn-services-core/pom.xml       | 2 +-
 .../hadoop-yarn-applications/hadoop-yarn-services/pom.xml        | 2 +-
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml       | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml       | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml     | 4 ++--
 .../hadoop-yarn-server-applicationhistoryservice/pom.xml         | 4 ++--
 .../hadoop-yarn-server/hadoop-yarn-server-common/pom.xml         | 4 ++--
 .../hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml    | 4 ++--
 .../hadoop-yarn-server-resourcemanager/pom.xml                   | 4 ++--
 .../hadoop-yarn-server/hadoop-yarn-server-router/pom.xml         | 4 ++--
 .../hadoop-yarn-server-sharedcachemanager/pom.xml                | 4 ++--
 .../hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml          | 4 ++--
 .../hadoop-yarn-server-timeline-pluginstorage/pom.xml            | 4 ++--
 .../hadoop-yarn-server-timelineservice-hbase-tests/pom.xml       | 4 ++--
 .../hadoop-yarn-server-timelineservice-hbase/pom.xml             | 2 +-
 .../hadoop-yarn-server-timelineservice/pom.xml                   | 4 ++--
 .../hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml      | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml       | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml         | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml           | 4 ++--
 hadoop-yarn-project/hadoop-yarn/pom.xml                          | 4 ++--
 hadoop-yarn-project/pom.xml                                      | 4 ++--
 pom.xml                                                          | 2 +-
 91 files changed, 170 insertions(+), 170 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-assemblies/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-assemblies/pom.xml b/hadoop-assemblies/pom.xml
index 9f99503..6979311 100644
--- a/hadoop-assemblies/pom.xml
+++ b/hadoop-assemblies/pom.xml
@@ -23,11 +23,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-assemblies</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop Assemblies</name>
   <description>Apache Hadoop Assemblies</description>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-build-tools/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-build-tools/pom.xml b/hadoop-build-tools/pom.xml
index 6597889..655550e 100644
--- a/hadoop-build-tools/pom.xml
+++ b/hadoop-build-tools/pom.xml
@@ -18,7 +18,7 @@
   <parent>
     <artifactId>hadoop-main</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-build-tools</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-client-modules/hadoop-client-api/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-api/pom.xml b/hadoop-client-modules/hadoop-client-api/pom.xml
index edbddc9..024f6ba 100644
--- a/hadoop-client-modules/hadoop-client-api/pom.xml
+++ b/hadoop-client-modules/hadoop-client-api/pom.xml
@@ -18,11 +18,11 @@
 <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-   <version>3.1.0-SNAPSHOT</version>
+   <version>3.2.0-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
 </parent>
   <artifactId>hadoop-client-api</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Client</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
index c6a8552..3989c30 100644
--- a/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
+++ b/hadoop-client-modules/hadoop-client-check-invariants/pom.xml
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-check-invariants</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>pom</packaging>
 
   <description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
index 691b545..4f35ba3 100644
--- a/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
+++ b/hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-check-test-invariants</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>pom</packaging>
 
   <description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-client-modules/hadoop-client-integration-tests/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-integration-tests/pom.xml b/hadoop-client-modules/hadoop-client-integration-tests/pom.xml
index 89b9645..68d2969 100644
--- a/hadoop-client-modules/hadoop-client-integration-tests/pom.xml
+++ b/hadoop-client-modules/hadoop-client-integration-tests/pom.xml
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-integration-tests</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
 
   <description>Checks that we can use the generated artifacts</description>
   <name>Apache Hadoop Client Packaging Integration Tests</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index 00f2d25..905d53a 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-minicluster</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Minicluster for Clients</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-client-modules/hadoop-client-runtime/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index 7ed5ba7..363adf5 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -18,11 +18,11 @@
 <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-   <version>3.1.0-SNAPSHOT</version>
+   <version>3.2.0-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
 </parent>
   <artifactId>hadoop-client-runtime</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Client</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-client-modules/hadoop-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client/pom.xml b/hadoop-client-modules/hadoop-client/pom.xml
index a738d47..19c313e 100644
--- a/hadoop-client-modules/hadoop-client/pom.xml
+++ b/hadoop-client-modules/hadoop-client/pom.xml
@@ -18,11 +18,11 @@
 <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project-dist</artifactId>
-   <version>3.1.0-SNAPSHOT</version>
+   <version>3.2.0-SNAPSHOT</version>
    <relativePath>../../hadoop-project-dist</relativePath>
 </parent>
   <artifactId>hadoop-client</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
 
   <description>Apache Hadoop Client aggregation pom with dependencies exposed</description>
   <name>Apache Hadoop Client Aggregator</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-client-modules/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/pom.xml b/hadoop-client-modules/pom.xml
index 7baca38..9ddd526 100644
--- a/hadoop-client-modules/pom.xml
+++ b/hadoop-client-modules/pom.xml
@@ -18,7 +18,7 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-modules</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
index 73a9d41..38264df 100644
--- a/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
+++ b/hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-cloud-storage</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Cloud Storage</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-cloud-storage-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-cloud-storage-project/pom.xml b/hadoop-cloud-storage-project/pom.xml
index e07b284..6ede052 100644
--- a/hadoop-cloud-storage-project/pom.xml
+++ b/hadoop-cloud-storage-project/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-cloud-storage-project</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Cloud Storage Project</description>
   <name>Apache Hadoop Cloud Storage Project</name>
   <packaging>pom</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-common-project/hadoop-annotations/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-annotations/pom.xml b/hadoop-common-project/hadoop-annotations/pom.xml
index 17e49eb..4506551 100644
--- a/hadoop-common-project/hadoop-annotations/pom.xml
+++ b/hadoop-common-project/hadoop-annotations/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-annotations</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Annotations</description>
   <name>Apache Hadoop Annotations</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-common-project/hadoop-auth-examples/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth-examples/pom.xml b/hadoop-common-project/hadoop-auth-examples/pom.xml
index 66a97af..f2f4a5d 100644
--- a/hadoop-common-project/hadoop-auth-examples/pom.xml
+++ b/hadoop-common-project/hadoop-auth-examples/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-auth-examples</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>war</packaging>
 
   <name>Apache Hadoop Auth Examples</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-common-project/hadoop-auth/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml
index 9308d0c..12fe971 100644
--- a/hadoop-common-project/hadoop-auth/pom.xml
+++ b/hadoop-common-project/hadoop-auth/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-auth</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop Auth</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-common-project/hadoop-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index aae040a..078a943 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-common</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Common</description>
   <name>Apache Hadoop Common</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-common-project/hadoop-kms/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/pom.xml b/hadoop-common-project/hadoop-kms/pom.xml
index c3e612c..c1c5285 100644
--- a/hadoop-common-project/hadoop-kms/pom.xml
+++ b/hadoop-common-project/hadoop-kms/pom.xml
@@ -22,11 +22,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-kms</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop KMS</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-common-project/hadoop-minikdc/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-minikdc/pom.xml b/hadoop-common-project/hadoop-minikdc/pom.xml
index 8305ec0..c92b6b1 100644
--- a/hadoop-common-project/hadoop-minikdc/pom.xml
+++ b/hadoop-common-project/hadoop-minikdc/pom.xml
@@ -18,12 +18,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-minikdc</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop MiniKDC</description>
   <name>Apache Hadoop MiniKDC</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-common-project/hadoop-nfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/pom.xml b/hadoop-common-project/hadoop-nfs/pom.xml
index 90fdf61..8546112 100644
--- a/hadoop-common-project/hadoop-nfs/pom.xml
+++ b/hadoop-common-project/hadoop-nfs/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-nfs</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop NFS</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-common-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/pom.xml b/hadoop-common-project/pom.xml
index 9413c20..a1a511f 100644
--- a/hadoop-common-project/pom.xml
+++ b/hadoop-common-project/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-common-project</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Common Project</description>
   <name>Apache Hadoop Common Project</name>
   <packaging>pom</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index 65a352c..8a0453f 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-dist</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Distribution</description>
   <name>Apache Hadoop Distribution</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
index baf63ed..a5ed7a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-client</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop HDFS Client</description>
   <name>Apache Hadoop HDFS Client</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
index 0e63527..eaf5c52 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
@@ -22,11 +22,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-httpfs</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop HttpFS</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
index 54871d7..fa0e174 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-native-client</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop HDFS Native Client</description>
   <name>Apache Hadoop HDFS Native Client</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
index 7fc7d32..e9525e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-nfs</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop HDFS-NFS</description>
   <name>Apache Hadoop HDFS-NFS</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 2094f23..d6afed1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-hdfs</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop HDFS</description>
   <name>Apache Hadoop HDFS</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-hdfs-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/pom.xml b/hadoop-hdfs-project/pom.xml
index dc343a2..33930fa 100644
--- a/hadoop-hdfs-project/pom.xml
+++ b/hadoop-hdfs-project/pom.xml
@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-project</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop HDFS Project</description>
   <name>Apache Hadoop HDFS Project</name>
   <packaging>pom</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
index b040112..fe0a6ba 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-app</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce App</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
index 8fc8a07..5c4b83a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-common</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce Common</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
index 5e902d5..f5bc301 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-core</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce Core</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml
index 4138d02..715d0f6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-hs-plugins</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce HistoryServer Plugins</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
index e56c007..c174140 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-hs</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce HistoryServer</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
index 7b2390e..aec0fe3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce JobClient</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml
index df892cc..c583576 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-nativetask</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce NativeTask</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
index 7fc68f0..d5c7a9a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-shuffle</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce Shuffle</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml
index a721404..12ba3f9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-uploader/pom.xml
@@ -18,11 +18,11 @@
     <parent>
         <artifactId>hadoop-mapreduce-client</artifactId>
         <groupId>org.apache.hadoop</groupId>
-        <version>3.1.0-SNAPSHOT</version>
+        <version>3.2.0-SNAPSHOT</version>
     </parent>
     <modelVersion>4.0.0</modelVersion>
     <artifactId>hadoop-mapreduce-client-uploader</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <name>Apache Hadoop MapReduce Uploader</name>
 
     <dependencies>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
index a8350cb..324825f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-mapreduce-client</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce Client</name>
   <packaging>pom</packaging>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
index 5e5dd8f..7f2fb8f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-mapreduce-examples</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop MapReduce Examples</description>
   <name>Apache Hadoop MapReduce Examples</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-mapreduce-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml
index 773eb1f..e75461a 100644
--- a/hadoop-mapreduce-project/pom.xml
+++ b/hadoop-mapreduce-project/pom.xml
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-mapreduce</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>pom</packaging>
   <name>Apache Hadoop MapReduce</name>
   <url>http://hadoop.apache.org/mapreduce/</url>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-maven-plugins/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-maven-plugins/pom.xml b/hadoop-maven-plugins/pom.xml
index d0f4a73..bd347d6 100644
--- a/hadoop-maven-plugins/pom.xml
+++ b/hadoop-maven-plugins/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-maven-plugins</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-minicluster/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-minicluster/pom.xml b/hadoop-minicluster/pom.xml
index 4558e99..636ee35 100644
--- a/hadoop-minicluster/pom.xml
+++ b/hadoop-minicluster/pom.xml
@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-minicluster</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Mini-Cluster</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-project-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index 9118a71..b0ed311 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-project-dist</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Project Dist POM</description>
   <name>Apache Hadoop Project Dist POM</name>
   <packaging>pom</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 82f3b55..dd8465a 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -20,10 +20,10 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-main</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <artifactId>hadoop-project</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Project POM</description>
   <name>Apache Hadoop Project POM</name>
   <packaging>pom</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-tools/hadoop-aliyun/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/pom.xml b/hadoop-tools/hadoop-aliyun/pom.xml
index a298efa..cd8cc5d 100644
--- a/hadoop-tools/hadoop-aliyun/pom.xml
+++ b/hadoop-tools/hadoop-aliyun/pom.xml
@@ -18,7 +18,7 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-aliyun</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-tools/hadoop-archive-logs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-archive-logs/pom.xml b/hadoop-tools/hadoop-archive-logs/pom.xml
index 17d444b..3ec3cfd 100644
--- a/hadoop-tools/hadoop-archive-logs/pom.xml
+++ b/hadoop-tools/hadoop-archive-logs/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-archive-logs</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Archive Logs</description>
   <name>Apache Hadoop Archive Logs</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-tools/hadoop-archives/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-archives/pom.xml b/hadoop-tools/hadoop-archives/pom.xml
index d4641ea..cfd9790 100644
--- a/hadoop-tools/hadoop-archives/pom.xml
+++ b/hadoop-tools/hadoop-archives/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-archives</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Archives</description>
   <name>Apache Hadoop Archives</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-tools/hadoop-aws/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index 748537c..c704a62 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-aws</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop Amazon Web Services support</name>
   <description>
     This module contains code to support integration with Amazon Web Services.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-tools/hadoop-azure-datalake/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml b/hadoop-tools/hadoop-azure-datalake/pom.xml
index 70c8f6c..19fa6c0 100644
--- a/hadoop-tools/hadoop-azure-datalake/pom.xml
+++ b/hadoop-tools/hadoop-azure-datalake/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-azure-datalake</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-tools/hadoop-azure/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml
index 1728b60..2f19311 100644
--- a/hadoop-tools/hadoop-azure/pom.xml
+++ b/hadoop-tools/hadoop-azure/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-azure</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-tools/hadoop-datajoin/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-datajoin/pom.xml b/hadoop-tools/hadoop-datajoin/pom.xml
index b37f465..1f0a151 100644
--- a/hadoop-tools/hadoop-datajoin/pom.xml
+++ b/hadoop-tools/hadoop-datajoin/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-datajoin</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Data Join</description>
   <name>Apache Hadoop Data Join</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-tools/hadoop-distcp/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/pom.xml b/hadoop-tools/hadoop-distcp/pom.xml
index a044a81..7fee80b 100644
--- a/hadoop-tools/hadoop-distcp/pom.xml
+++ b/hadoop-tools/hadoop-distcp/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-distcp</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Distributed Copy</description>
   <name>Apache Hadoop Distributed Copy</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-tools/hadoop-extras/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-extras/pom.xml b/hadoop-tools/hadoop-extras/pom.xml
index f28b5cd..606b7d7 100644
--- a/hadoop-tools/hadoop-extras/pom.xml
+++ b/hadoop-tools/hadoop-extras/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-extras</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Extras</description>
   <name>Apache Hadoop Extras</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-tools/hadoop-fs2img/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/pom.xml b/hadoop-tools/hadoop-fs2img/pom.xml
index 1ae17dc..747e0b1 100644
--- a/hadoop-tools/hadoop-fs2img/pom.xml
+++ b/hadoop-tools/hadoop-fs2img/pom.xml
@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-fs2img</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Image Generation Tool</description>
   <name>Apache Hadoop Image Generation Tool</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-tools/hadoop-gridmix/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-gridmix/pom.xml b/hadoop-tools/hadoop-gridmix/pom.xml
index 7171c9a..8625291 100644
--- a/hadoop-tools/hadoop-gridmix/pom.xml
+++ b/hadoop-tools/hadoop-gridmix/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-gridmix</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Gridmix</description>
   <name>Apache Hadoop Gridmix</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-tools/hadoop-kafka/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-kafka/pom.xml b/hadoop-tools/hadoop-kafka/pom.xml
index 1ae7270..1a3443a 100644
--- a/hadoop-tools/hadoop-kafka/pom.xml
+++ b/hadoop-tools/hadoop-kafka/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-kafka</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop Kafka Library support</name>
   <description>
     This module contains code to support integration with Kafka.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-tools/hadoop-openstack/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/pom.xml b/hadoop-tools/hadoop-openstack/pom.xml
index 9e492ef..b840cc3 100644
--- a/hadoop-tools/hadoop-openstack/pom.xml
+++ b/hadoop-tools/hadoop-openstack/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-openstack</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop OpenStack support</name>
   <description>
     This module contains code to support integration with OpenStack.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-tools/hadoop-pipes/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-pipes/pom.xml b/hadoop-tools/hadoop-pipes/pom.xml
index 4259385..367a3d8 100644
--- a/hadoop-tools/hadoop-pipes/pom.xml
+++ b/hadoop-tools/hadoop-pipes/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-pipes</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Pipes</description>
   <name>Apache Hadoop Pipes</name>
   <packaging>pom</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-tools/hadoop-resourceestimator/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-resourceestimator/pom.xml b/hadoop-tools/hadoop-resourceestimator/pom.xml
index 5fc929e..182264b 100644
--- a/hadoop-tools/hadoop-resourceestimator/pom.xml
+++ b/hadoop-tools/hadoop-resourceestimator/pom.xml
@@ -25,7 +25,7 @@
     <parent>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-project</artifactId>
-        <version>3.1.0-SNAPSHOT</version>
+        <version>3.2.0-SNAPSHOT</version>
         <relativePath>../../hadoop-project</relativePath>
     </parent>
     <artifactId>hadoop-resourceestimator</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-tools/hadoop-rumen/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/pom.xml b/hadoop-tools/hadoop-rumen/pom.xml
index b6e7e6b..9608717 100644
--- a/hadoop-tools/hadoop-rumen/pom.xml
+++ b/hadoop-tools/hadoop-rumen/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-rumen</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Rumen</description>
   <name>Apache Hadoop Rumen</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-tools/hadoop-sls/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/pom.xml b/hadoop-tools/hadoop-sls/pom.xml
index 0882f77..a7cb9b2 100644
--- a/hadoop-tools/hadoop-sls/pom.xml
+++ b/hadoop-tools/hadoop-sls/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-sls</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Scheduler Load Simulator</description>
   <name>Apache Hadoop Scheduler Load Simulator</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-tools/hadoop-streaming/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-streaming/pom.xml b/hadoop-tools/hadoop-streaming/pom.xml
index 7c9cff4..c4c916a 100644
--- a/hadoop-tools/hadoop-streaming/pom.xml
+++ b/hadoop-tools/hadoop-streaming/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-streaming</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop MapReduce Streaming</description>
   <name>Apache Hadoop MapReduce Streaming</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-tools/hadoop-tools-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-tools-dist/pom.xml b/hadoop-tools/hadoop-tools-dist/pom.xml
index 4b90361..42ce94c 100644
--- a/hadoop-tools/hadoop-tools-dist/pom.xml
+++ b/hadoop-tools/hadoop-tools-dist/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-tools-dist</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Tools Dist</description>
   <name>Apache Hadoop Tools Dist</name>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-tools/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml
index c030045..92f585f 100644
--- a/hadoop-tools/pom.xml
+++ b/hadoop-tools/pom.xml
@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-tools</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Tools</description>
   <name>Apache Hadoop Tools</name>
   <packaging>pom</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
index 450207c..8750d58 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-api</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop YARN API</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
index 529f5ba..f3a8712 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn-applications</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-applications-distributedshell</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop YARN DistributedShell</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml
index 4e0fca9..d296c16 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn-applications</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-applications-unmanaged-am-launcher</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop YARN Unmanaged Am Launcher</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
index bae62c6..7fe2ef6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-yarn-applications</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <artifactId>hadoop-yarn-services-api</artifactId>
   <name>Apache Hadoop YARN Services API</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
index 6c9a875..3ce8876 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-yarn-services</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <artifactId>hadoop-yarn-services-core</artifactId>
   <packaging>jar</packaging>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/pom.xml
index 716fdb7..5f5e70b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/pom.xml
@@ -19,7 +19,7 @@
     <parent>
         <artifactId>hadoop-yarn-applications</artifactId>
         <groupId>org.apache.hadoop</groupId>
-        <version>3.1.0-SNAPSHOT</version>
+        <version>3.2.0-SNAPSHOT</version>
     </parent>
     <modelVersion>4.0.0</modelVersion>
     <artifactId>hadoop-yarn-services</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
index 4fb579c..b2b34ec 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-applications</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop YARN Applications</name>
   <packaging>pom</packaging>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
index 4654000..37e5d73 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
@@ -17,10 +17,10 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <artifactId>hadoop-yarn-client</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop YARN Client</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index e46eeda..1662b32 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-common</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop YARN Common</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
index 4e805cd..132624b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-registry</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop YARN Registry</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
index f81a5d7..f310518 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
@@ -22,11 +22,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-applicationhistoryservice</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop YARN ApplicationHistoryService</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index cd5195d..52a1c0e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-common</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop YARN Server Common</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
index 70b29f4..072395a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-nodemanager</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop YARN NodeManager</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
index a4d7afc..cb651c7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop YARN ResourceManager</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
index edfc8ac..22b9b49 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
@@ -19,12 +19,12 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-yarn-server-router</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop YARN Router</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/pom.xml
index dcc44ea..65395bc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-sharedcachemanager/pom.xml
@@ -17,10 +17,10 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <artifactId>hadoop-yarn-server-sharedcachemanager</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop YARN SharedCacheManager</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
index a7b4a6d..8329fd7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
@@ -19,10 +19,10 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <artifactId>hadoop-yarn-server-tests</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop YARN Server Tests</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml
index e970fc2..c50b575 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/pom.xml
@@ -22,11 +22,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-timeline-pluginstorage</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop YARN Timeline Plugin Storage</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
index ffba1fe..f36897b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
@@ -22,11 +22,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-timelineservice-hbase-tests</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop YARN Timeline Service HBase tests</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
index 3739301..6369864 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
@@ -22,7 +22,7 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-timelineservice-hbase</artifactId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
index 4236c51..dd47fd5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
@@ -22,11 +22,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-timelineservice</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop YARN Timeline Service</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
index d185bfd..61e0429 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-web-proxy</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop YARN Web Proxy</name>
 
   <properties>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
index 8156016..de4484c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop YARN Server</name>
   <packaging>pom</packaging>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60f9e60b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml
index 128c328..e9ecb65 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/pom.xml
@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-yarn</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-site</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop YARN Site</name>
   <packaging>pom</packaging>
 




[02/50] [abbrv] hadoop git commit: YARN-7815. Make the YARN mounts added to Docker containers more restrictive. Contributed by Shane Kumpf

Posted by ae...@apache.org.
YARN-7815. Make the YARN mounts added to Docker containers more restrictive. Contributed by Shane Kumpf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/456705a0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/456705a0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/456705a0

Branch: refs/heads/HDFS-7240
Commit: 456705a07c8b80658950acc99f23086244c6b20f
Parents: 01bd6ab
Author: Jason Lowe <jl...@apache.org>
Authored: Wed Feb 7 13:09:08 2018 -0600
Committer: Jason Lowe <jl...@apache.org>
Committed: Wed Feb 7 13:09:08 2018 -0600

----------------------------------------------------------------------
 .../nodemanager/LinuxContainerExecutor.java     |   3 +
 .../launcher/ContainerLaunch.java               |  52 +++-
 .../launcher/ContainerRelaunch.java             |   5 +
 .../runtime/DockerLinuxContainerRuntime.java    |  23 +-
 .../runtime/LinuxContainerRuntimeConstants.java |   4 +
 .../linux/runtime/docker/DockerRunCommand.java  |  14 +
 .../executor/ContainerStartContext.java         |  26 ++
 .../TestLinuxContainerExecutorWithMocks.java    |   8 +
 .../launcher/TestContainerRelaunch.java         |   2 +
 .../runtime/TestDockerContainerRuntime.java     | 271 ++++++++-----------
 10 files changed, 232 insertions(+), 176 deletions(-)
----------------------------------------------------------------------
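
The substance of this change sits in the DockerLinuxContainerRuntime and DockerRunCommand hunks below: instead of bind-mounting every localizer directory into the container read-write, the runtime now mounts only the container log directories and the per-application local directories read-write, while the NM filecache and the per-user filecache become read-only mounts. As a rough illustration of how the new bulk helpers compose, the sketch below wires them together; MountWiring itself is hypothetical glue (it only compiles against the hadoop-yarn-server-nodemanager module), and only the two addAll* methods come from the patch.

  import java.util.List;

  import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerRunCommand;

  /**
   * Hypothetical wrapper showing how the bulk-mount helpers added by this
   * patch are meant to be used; the real call site is the
   * DockerLinuxContainerRuntime hunk below.
   */
  final class MountWiring {

    private MountWiring() {
    }

    static DockerRunCommand applyYarnMounts(DockerRunCommand runCommand,
        List<String> containerLogDirs, List<String> applicationLocalDirs,
        List<String> filecacheDirs, List<String> userFilecacheDirs) {
      // Writable mounts: only what the container legitimately needs to write.
      runCommand.addAllReadWriteMountLocations(containerLogDirs);
      runCommand.addAllReadWriteMountLocations(applicationLocalDirs);
      // Read-only mounts: localized resources in the NM and per-user filecaches.
      runCommand.addAllReadOnlyMountLocations(filecacheDirs);
      runCommand.addAllReadOnlyMountLocations(userFilecacheDirs);
      return runCommand;
    }
  }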


http://git-wip-us.apache.org/repos/asf/hadoop/blob/456705a0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index d359f31..fe54e2c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -651,6 +651,9 @@ public class LinuxContainerExecutor extends ContainerExecutor {
       .setExecutionAttribute(FILECACHE_DIRS, ctx.getFilecacheDirs())
       .setExecutionAttribute(USER_LOCAL_DIRS, ctx.getUserLocalDirs())
       .setExecutionAttribute(CONTAINER_LOCAL_DIRS, ctx.getContainerLocalDirs())
+      .setExecutionAttribute(USER_FILECACHE_DIRS, ctx.getUserFilecacheDirs())
+      .setExecutionAttribute(APPLICATION_LOCAL_DIRS,
+          ctx.getApplicationLocalDirs())
       .setExecutionAttribute(CONTAINER_LOG_DIRS, ctx.getContainerLogDirs())
       .setExecutionAttribute(RESOURCES_OPTIONS, resourcesOptions);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/456705a0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index 7f43458..112f54a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -169,6 +169,17 @@ public class ContainerLaunch implements Callable<Integer> {
     return var;
   }
 
+  private Map<String, String> expandAllEnvironmentVars(
+      ContainerLaunchContext launchContext, Path containerLogDir) {
+    Map<String, String> environment = launchContext.getEnvironment();
+    for (Entry<String, String> entry : environment.entrySet()) {
+      String value = entry.getValue();
+      value = expandEnvironment(value, containerLogDir);
+      entry.setValue(value);
+    }
+    return environment;
+  }
+
   @Override
   @SuppressWarnings("unchecked") // dispatcher not typed
   public Integer call() {
@@ -202,13 +213,8 @@ public class ContainerLaunch implements Callable<Integer> {
       }
       launchContext.setCommands(newCmds);
 
-      Map<String, String> environment = launchContext.getEnvironment();
-      // Make a copy of env to iterate & do variable expansion
-      for (Entry<String, String> entry : environment.entrySet()) {
-        String value = entry.getValue();
-        value = expandEnvironment(value, containerLogDir);
-        entry.setValue(value);
-      }
+      Map<String, String> environment = expandAllEnvironmentVars(
+          launchContext, containerLogDir);
       // /////////////////////////// End of variable expansion
 
       FileContext lfs = FileContext.getLocalFSFileContext();
@@ -237,6 +243,9 @@ public class ContainerLaunch implements Callable<Integer> {
       List<String> userLocalDirs = getUserLocalDirs(localDirs);
       List<String> containerLocalDirs = getContainerLocalDirs(localDirs);
       List<String> containerLogDirs = getContainerLogDirs(logDirs);
+      List<String> userFilecacheDirs = getUserFilecacheDirs(localDirs);
+      List<String> applicationLocalDirs = getApplicationLocalDirs(localDirs,
+          appIdStr);
 
       if (!dirsHandler.areDisksHealthy()) {
         ret = ContainerExitStatus.DISKS_FAILED;
@@ -295,7 +304,9 @@ public class ContainerLaunch implements Callable<Integer> {
           .setFilecacheDirs(filecacheDirs)
           .setUserLocalDirs(userLocalDirs)
           .setContainerLocalDirs(containerLocalDirs)
-          .setContainerLogDirs(containerLogDirs).build());
+          .setContainerLogDirs(containerLogDirs)
+          .setUserFilecacheDirs(userFilecacheDirs)
+          .setApplicationLocalDirs(applicationLocalDirs).build());
     } catch (ConfigurationException e) {
       LOG.error("Failed to launch container due to configuration error.", e);
       dispatcher.getEventHandler().handle(new ContainerExitEvent(
@@ -426,6 +437,31 @@ public class ContainerLaunch implements Callable<Integer> {
     return filecacheDirs;
   }
 
+  protected List<String> getUserFilecacheDirs(List<String> localDirs) {
+    List<String> userFilecacheDirs = new ArrayList<>(localDirs.size());
+    String user = container.getUser();
+    for (String localDir : localDirs) {
+      String userFilecacheDir = localDir + Path.SEPARATOR +
+          ContainerLocalizer.USERCACHE + Path.SEPARATOR + user
+          + Path.SEPARATOR + ContainerLocalizer.FILECACHE;
+      userFilecacheDirs.add(userFilecacheDir);
+    }
+    return userFilecacheDirs;
+  }
+
+  protected List<String> getApplicationLocalDirs(List<String> localDirs,
+      String appIdStr) {
+    List<String> applicationLocalDirs = new ArrayList<>(localDirs.size());
+    String user = container.getUser();
+    for (String localDir : localDirs) {
+      String appLocalDir = localDir + Path.SEPARATOR +
+          ContainerLocalizer.USERCACHE + Path.SEPARATOR + user
+          + Path.SEPARATOR + ContainerLocalizer.APPCACHE
+          + Path.SEPARATOR + appIdStr;
+      applicationLocalDirs.add(appLocalDir);
+    }
+    return applicationLocalDirs;
+  }
 
   protected Map<Path, List<String>> getLocalizedResources()
       throws YarnException {
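
The two helpers added above resolve, per NM local dir, the usercache sub-trees the Docker runtime needs to mount. A minimal standalone sketch of the resulting layout follows; it uses plain strings in place of the ContainerLocalizer constants referenced above ("usercache", "filecache", "appcache"), and the user name and application id in main are hypothetical example values.

  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.List;

  // Standalone sketch of the directory layout produced by
  // getUserFilecacheDirs and getApplicationLocalDirs.
  public class LocalDirLayout {

    static List<String> userFilecacheDirs(List<String> localDirs, String user) {
      List<String> dirs = new ArrayList<>(localDirs.size());
      for (String localDir : localDirs) {
        dirs.add(localDir + "/usercache/" + user + "/filecache");
      }
      return dirs;
    }

    static List<String> applicationLocalDirs(List<String> localDirs,
        String user, String appId) {
      List<String> dirs = new ArrayList<>(localDirs.size());
      for (String localDir : localDirs) {
        dirs.add(localDir + "/usercache/" + user + "/appcache/" + appId);
      }
      return dirs;
    }

    public static void main(String[] args) {
      List<String> localDirs = Arrays.asList("/test_local_dir");
      System.out.println(userFilecacheDirs(localDirs, "nobody"));
      // -> [/test_local_dir/usercache/nobody/filecache]
      System.out.println(applicationLocalDirs(localDirs, "nobody",
          "application_1518000000000_0001"));
      // -> [/test_local_dir/usercache/nobody/appcache/application_1518000000000_0001]
    }
  }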

http://git-wip-us.apache.org/repos/asf/hadoop/blob/456705a0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerRelaunch.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerRelaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerRelaunch.java
index 6a0761a..b3add30 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerRelaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerRelaunch.java
@@ -98,6 +98,9 @@ public class ContainerRelaunch extends ContainerLaunch {
       List<String> containerLogDirs = getContainerLogDirs(logDirs);
       List<String> filecacheDirs = getNMFilecacheDirs(localDirs);
       List<String> userLocalDirs = getUserLocalDirs(localDirs);
+      List<String> userFilecacheDirs = getUserFilecacheDirs(localDirs);
+      List<String> applicationLocalDirs = getApplicationLocalDirs(localDirs,
+          appIdStr);
 
       if (!dirsHandler.areDisksHealthy()) {
         ret = ContainerExitStatus.DISKS_FAILED;
@@ -119,6 +122,8 @@ public class ContainerRelaunch extends ContainerLaunch {
           .setUserLocalDirs(userLocalDirs)
           .setContainerLocalDirs(containerLocalDirs)
           .setContainerLogDirs(containerLogDirs)
+          .setUserFilecacheDirs(userFilecacheDirs)
+          .setApplicationLocalDirs(applicationLocalDirs)
           .build());
     } catch (ConfigurationException e) {
       LOG.error("Failed to launch container due to configuration error.", e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/456705a0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 601c32c..f95642b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -769,16 +769,17 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
     @SuppressWarnings("unchecked")
     List<String> filecacheDirs = ctx.getExecutionAttribute(FILECACHE_DIRS);
     @SuppressWarnings("unchecked")
-    List<String> containerLocalDirs = ctx.getExecutionAttribute(
-        CONTAINER_LOCAL_DIRS);
-    @SuppressWarnings("unchecked")
     List<String> containerLogDirs = ctx.getExecutionAttribute(
         CONTAINER_LOG_DIRS);
     @SuppressWarnings("unchecked")
+    List<String> userFilecacheDirs =
+        ctx.getExecutionAttribute(USER_FILECACHE_DIRS);
+    @SuppressWarnings("unchecked")
+    List<String> applicationLocalDirs =
+        ctx.getExecutionAttribute(APPLICATION_LOCAL_DIRS);
+    @SuppressWarnings("unchecked")
     Map<Path, List<String>> localizedResources = ctx.getExecutionAttribute(
         LOCALIZED_RESOURCES);
-    @SuppressWarnings("unchecked")
-    List<String> userLocalDirs = ctx.getExecutionAttribute(USER_LOCAL_DIRS);
 
     @SuppressWarnings("unchecked")
     DockerRunCommand runCommand = new DockerRunCommand(containerIdStr,
@@ -789,14 +790,10 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
     setHostname(runCommand, containerIdStr, hostname);
     runCommand.setCapabilities(capabilities);
 
-    List<String> allDirs = new ArrayList<>(containerLocalDirs);
-    allDirs.addAll(filecacheDirs);
-    allDirs.add(containerWorkDir.toString());
-    allDirs.addAll(containerLogDirs);
-    allDirs.addAll(userLocalDirs);
-    for (String dir: allDirs) {
-      runCommand.addMountLocation(dir, dir, true);
-    }
+    runCommand.addAllReadWriteMountLocations(containerLogDirs);
+    runCommand.addAllReadWriteMountLocations(applicationLocalDirs);
+    runCommand.addAllReadOnlyMountLocations(filecacheDirs);
+    runCommand.addAllReadOnlyMountLocations(userFilecacheDirs);
 
     if (environment.containsKey(ENV_DOCKER_CONTAINER_LOCAL_RESOURCE_MOUNTS)) {
       String mounts = environment.get(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/456705a0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java
index 3a47523..579e03b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java
@@ -70,6 +70,10 @@ public final class LinuxContainerRuntimeConstants {
       List.class, "user_local_dirs");
   public static final Attribute<List> CONTAINER_LOCAL_DIRS = Attribute
       .attribute(List.class, "container_local_dirs");
+  public static final Attribute<List> USER_FILECACHE_DIRS = Attribute
+      .attribute(List.class, "user_filecache_dirs");
+  public static final Attribute<List> APPLICATION_LOCAL_DIRS = Attribute
+      .attribute(List.class, "application_local_dirs");
   public static final Attribute<List> CONTAINER_LOG_DIRS = Attribute.attribute(
       List.class, "container_log_dirs");
   public static final Attribute<String> RESOURCES_OPTIONS = Attribute.attribute(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/456705a0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
index b7e84d7..bfeeaf5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
@@ -77,6 +77,13 @@ public class DockerRunCommand extends DockerCommand {
     return this;
   }
 
+  public DockerRunCommand addAllReadWriteMountLocations(List<String> paths) {
+    for (String dir: paths) {
+      this.addReadWriteMountLocation(dir, dir);
+    }
+    return this;
+  }
+
   public DockerRunCommand addReadOnlyMountLocation(String sourcePath, String
       destinationPath, boolean createSource) {
     boolean sourceExists = new File(sourcePath).exists();
@@ -93,6 +100,13 @@ public class DockerRunCommand extends DockerCommand {
     return this;
   }
 
+  public DockerRunCommand addAllReadOnlyMountLocations(List<String> paths) {
+    for (String dir: paths) {
+      this.addReadOnlyMountLocation(dir, dir);
+    }
+    return this;
+  }
+
   public DockerRunCommand setVolumeDriver(String volumeDriver) {
     super.addCommandArguments("volume-driver", volumeDriver);
     return this;
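
Note that both bulk helpers return this, so they chain with the existing fluent setters, and each entry is added as an identical source:destination pair. Those pairs end up as the ro-mounts and rw-mounts keys in the docker command file, which container-executor then turns into read-only and read-write bind mounts; the updated TestDockerContainerRuntime expectations further below assert exactly those two lines.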

http://git-wip-us.apache.org/repos/asf/hadoop/blob/456705a0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerStartContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerStartContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerStartContext.java
index 9c5f3a3..ff41572 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerStartContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerStartContext.java
@@ -49,6 +49,8 @@ public final class ContainerStartContext {
   private final List<String> userLocalDirs;
   private final List<String> containerLocalDirs;
   private final List<String> containerLogDirs;
+  private final List<String> userFilecacheDirs;
+  private final List<String> applicationLocalDirs;
 
   public static final class Builder {
     private Container container;
@@ -64,6 +66,8 @@ public final class ContainerStartContext {
     private List<String> userLocalDirs;
     private List<String> containerLocalDirs;
     private List<String> containerLogDirs;
+    private List<String> userFilecacheDirs;
+    private List<String> applicationLocalDirs;
 
     public Builder() {
     }
@@ -135,6 +139,18 @@ public final class ContainerStartContext {
       return this;
     }
 
+    @SuppressWarnings("checkstyle:hiddenfield")
+    public Builder setUserFilecacheDirs(List<String> userFilecacheDirs) {
+      this.userFilecacheDirs = userFilecacheDirs;
+      return this;
+    }
+
+    @SuppressWarnings("checkstyle:hiddenfield")
+    public Builder setApplicationLocalDirs(List<String> applicationLocalDirs) {
+      this.applicationLocalDirs = applicationLocalDirs;
+      return this;
+    }
+
     public ContainerStartContext build() {
       return new ContainerStartContext(this);
     }
@@ -154,6 +170,8 @@ public final class ContainerStartContext {
     this.userLocalDirs = builder.userLocalDirs;
     this.containerLocalDirs = builder.containerLocalDirs;
     this.containerLogDirs = builder.containerLogDirs;
+    this.userFilecacheDirs = builder.userFilecacheDirs;
+    this.applicationLocalDirs = builder.applicationLocalDirs;
   }
 
   public Container getContainer() {
@@ -212,4 +230,12 @@ public final class ContainerStartContext {
     return Collections.unmodifiableList(this
         .containerLogDirs);
   }
+
+  public List<String> getUserFilecacheDirs() {
+    return Collections.unmodifiableList(this.userFilecacheDirs);
+  }
+
+  public List<String> getApplicationLocalDirs() {
+    return Collections.unmodifiableList(this.applicationLocalDirs);
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/456705a0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
index 0632482..e7a333e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java
@@ -209,6 +209,8 @@ public class TestLinuxContainerExecutorWithMocks {
         .setUserLocalDirs(new ArrayList<>())
         .setContainerLocalDirs(new ArrayList<>())
         .setContainerLogDirs(new ArrayList<>())
+        .setUserFilecacheDirs(new ArrayList<>())
+        .setApplicationLocalDirs(new ArrayList<>())
         .build());
     assertEquals(0, ret);
     assertEquals(Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,
@@ -398,6 +400,8 @@ public class TestLinuxContainerExecutorWithMocks {
             .setUserLocalDirs(new ArrayList<>())
             .setContainerLocalDirs(new ArrayList<>())
             .setContainerLogDirs(new ArrayList<>())
+            .setUserFilecacheDirs(new ArrayList<>())
+            .setApplicationLocalDirs(new ArrayList<>())
             .build());
 
         Assert.assertNotSame(0, ret);
@@ -611,6 +615,8 @@ public class TestLinuxContainerExecutorWithMocks {
         .setUserLocalDirs(new ArrayList<>())
         .setContainerLocalDirs(new ArrayList<>())
         .setContainerLogDirs(new ArrayList<>())
+        .setUserFilecacheDirs(new ArrayList<>())
+        .setApplicationLocalDirs(new ArrayList<>())
         .build());
     lce.deleteAsUser(new DeletionAsUserContext.Builder()
         .setUser(appSubmitter)
@@ -665,6 +671,8 @@ public class TestLinuxContainerExecutorWithMocks {
         .setUserLocalDirs(new ArrayList<>())
         .setContainerLocalDirs(new ArrayList<>())
         .setContainerLogDirs(new ArrayList<>())
+        .setUserFilecacheDirs(new ArrayList<>())
+        .setApplicationLocalDirs(new ArrayList<>())
         .build());
     ArgumentCaptor<PrivilegedOperation> opCaptor = ArgumentCaptor.forClass(
         PrivilegedOperation.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/456705a0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerRelaunch.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerRelaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerRelaunch.java
index 95f706c..f3dd972 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerRelaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerRelaunch.java
@@ -93,5 +93,7 @@ public class TestContainerRelaunch {
     assertNotNull("tokens path null", csc.getNmPrivateTokensPath());
     assertNotNull("user null", csc.getUser());
     assertNotNull("user local dirs null", csc.getUserLocalDirs());
+    assertNotNull("user filecache dirs null", csc.getUserFilecacheDirs());
+    assertNotNull("application local dirs null", csc.getApplicationLocalDirs());
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/456705a0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index fe4e238..2015ab0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -24,7 +24,6 @@ import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -64,19 +63,16 @@ import java.nio.charset.Charset;
 import java.nio.file.Files;
 import java.nio.file.Paths;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
-import java.util.Set;
 
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.APPID;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.APPLICATION_LOCAL_DIRS;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_ID_STR;
-import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_LOCAL_DIRS;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_LOG_DIRS;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.CONTAINER_WORK_DIR;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.FILECACHE_DIRS;
@@ -91,7 +87,7 @@ import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.RUN_AS_USER;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.SIGNAL;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.USER;
-import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.USER_LOCAL_DIRS;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.USER_FILECACHE_DIRS;
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.anyBoolean;
 import static org.mockito.Mockito.anyList;
@@ -109,7 +105,6 @@ public class TestDockerContainerRuntime {
   private PrivilegedOperationExecutor mockExecutor;
   private CGroupsHandler mockCGroupsHandler;
   private String containerId;
-  private String defaultHostname;
   private Container container;
   private ContainerId cId;
   private ContainerLaunchContext context;
@@ -128,8 +123,8 @@ public class TestDockerContainerRuntime {
   private List<String> localDirs;
   private List<String> logDirs;
   private List<String> filecacheDirs;
-  private List<String> userLocalDirs;
-  private List<String> containerLocalDirs;
+  private List<String> userFilecacheDirs;
+  private List<String> applicationLocalDirs;
   private List<String> containerLogDirs;
   private Map<Path, List<String>> localizedResources;
   private String resourcesOptions;
@@ -151,7 +146,6 @@ public class TestDockerContainerRuntime {
         .mock(PrivilegedOperationExecutor.class);
     mockCGroupsHandler = Mockito.mock(CGroupsHandler.class);
     containerId = "container_id";
-    defaultHostname = RegistryPathUtils.encodeYarnID(containerId);
     container = mock(Container.class);
     cId = mock(ContainerId.class);
     context = mock(ContainerLaunchContext.class);
@@ -208,16 +202,16 @@ public class TestDockerContainerRuntime {
     logDirs = new ArrayList<>();
     filecacheDirs = new ArrayList<>();
     resourcesOptions = "cgroups=none";
-    userLocalDirs = new ArrayList<>();
-    containerLocalDirs = new ArrayList<>();
+    userFilecacheDirs = new ArrayList<>();
+    applicationLocalDirs = new ArrayList<>();
     containerLogDirs = new ArrayList<>();
     localizedResources = new HashMap<>();
 
     localDirs.add("/test_local_dir");
     logDirs.add("/test_log_dir");
     filecacheDirs.add("/test_filecache_dir");
-    userLocalDirs.add("/test_user_local_dir");
-    containerLocalDirs.add("/test_container_local_dir");
+    userFilecacheDirs.add("/test_user_filecache_dir");
+    applicationLocalDirs.add("/test_application_local_dir");
     containerLogDirs.add("/test_container_log_dir");
     localizedResources.put(new Path("/test_local_dir/test_resource_file"),
         Collections.singletonList("test_dir/test_resource_file"));
@@ -241,8 +235,8 @@ public class TestDockerContainerRuntime {
         .setExecutionAttribute(LOCAL_DIRS, localDirs)
         .setExecutionAttribute(LOG_DIRS, logDirs)
         .setExecutionAttribute(FILECACHE_DIRS, filecacheDirs)
-        .setExecutionAttribute(USER_LOCAL_DIRS, userLocalDirs)
-        .setExecutionAttribute(CONTAINER_LOCAL_DIRS, containerLocalDirs)
+        .setExecutionAttribute(USER_FILECACHE_DIRS, userFilecacheDirs)
+        .setExecutionAttribute(APPLICATION_LOCAL_DIRS, applicationLocalDirs)
         .setExecutionAttribute(CONTAINER_LOG_DIRS, containerLogDirs)
         .setExecutionAttribute(LOCALIZED_RESOURCES, localizedResources)
         .setExecutionAttribute(RESOURCES_OPTIONS, resourcesOptions);
@@ -296,41 +290,28 @@ public class TestDockerContainerRuntime {
     List<String> args = op.getArguments();
 
     //This invocation of container-executor should use 13 arguments in a
-    // specific order (sigh.)
-    Assert.assertEquals(13, args.size());
-
-    //verify arguments
-    Assert.assertEquals(user, args.get(1));
+    // specific order
+    int expected = 13;
+    int counter = 1;
+    Assert.assertEquals(expected, args.size());
+    Assert.assertEquals(user, args.get(counter++));
     Assert.assertEquals(Integer.toString(PrivilegedOperation.RunAsUserCommand
-        .LAUNCH_DOCKER_CONTAINER.getValue()), args.get(2));
-    Assert.assertEquals(appId, args.get(3));
-    Assert.assertEquals(containerId, args.get(4));
-    Assert.assertEquals(containerWorkDir.toString(), args.get(5));
+        .LAUNCH_DOCKER_CONTAINER.getValue()), args.get(counter++));
+    Assert.assertEquals(appId, args.get(counter++));
+    Assert.assertEquals(containerId, args.get(counter++));
+    Assert.assertEquals(containerWorkDir.toString(), args.get(counter++));
     Assert.assertEquals(nmPrivateContainerScriptPath.toUri()
-        .toString(), args.get(6));
-    Assert.assertEquals(nmPrivateTokensPath.toUri().getPath(), args.get(7));
-    Assert.assertEquals(pidFilePath.toString(), args.get(8));
-    Assert.assertEquals(localDirs.get(0), args.get(9));
-    Assert.assertEquals(logDirs.get(0), args.get(10));
-    Assert.assertEquals(resourcesOptions, args.get(12));
+        .toString(), args.get(counter++));
+    Assert.assertEquals(nmPrivateTokensPath.toUri().getPath(),
+        args.get(counter++));
+    Assert.assertEquals(pidFilePath.toString(), args.get(counter++));
+    Assert.assertEquals(localDirs.get(0), args.get(counter++));
+    Assert.assertEquals(logDirs.get(0), args.get(counter++));
+    Assert.assertEquals(resourcesOptions, args.get(++counter));
 
     return op;
   }
 
-  private String getExpectedTestCapabilitiesArgumentString()  {
-    /* Ordering of capabilities depends on HashSet ordering. */
-    Set<String> capabilitySet = new HashSet<>(Arrays.asList(testCapabilities));
-    StringBuilder expectedCapabilitiesString = new StringBuilder(
-        "--cap-drop=ALL ");
-
-    for(String capability : capabilitySet) {
-      expectedCapabilitiesString.append("--cap-add=").append(capability)
-          .append(" ");
-    }
-
-    return expectedCapabilitiesString.toString();
-  }
-
   @Test
   public void testDockerContainerLaunch()
       throws ContainerExecutionException, PrivilegedOperationException,
@@ -347,7 +328,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(Paths.get
             (dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 14;
+    int expected = 15;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -367,16 +348,16 @@ public class TestDockerContainerRuntime {
         dockerCommands.get(counter++));
     Assert.assertEquals("  name=container_id", dockerCommands.get(counter++));
     Assert.assertEquals("  net=host", dockerCommands.get(counter++));
+    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
+        + "/test_user_filecache_dir:/test_user_filecache_dir",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
-        "  rw-mounts=/test_container_local_dir:/test_container_local_dir,"
-            + "/test_filecache_dir:/test_filecache_dir,"
-            + "/test_container_work_dir:/test_container_work_dir,"
-            + "/test_container_log_dir:/test_container_log_dir,"
-            + "/test_user_local_dir:/test_user_local_dir",
+        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
+            + "/test_application_local_dir:/test_application_local_dir",
         dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
-        dockerCommands.get(counter++));
+        dockerCommands.get(counter));
   }
 
   @Test
@@ -397,7 +378,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(
         Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
 
-    Assert.assertEquals(14, dockerCommands.size());
+    Assert.assertEquals(15, dockerCommands.size());
     int counter = 0;
     Assert.assertEquals("[docker-command-execution]",
         dockerCommands.get(counter++));
@@ -418,16 +399,16 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals("  name=container_id", dockerCommands.get(counter++));
     Assert
         .assertEquals("  net=host", dockerCommands.get(counter++));
+    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
-        "  rw-mounts=/test_container_local_dir:/test_container_local_dir,"
-            + "/test_filecache_dir:/test_filecache_dir,"
-            + "/test_container_work_dir:/test_container_work_dir,"
-            + "/test_container_log_dir:/test_container_log_dir,"
-            + "/test_user_local_dir:/test_user_local_dir",
+        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
+            + "/test_application_local_dir:/test_application_local_dir",
         dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
-        dockerCommands.get(counter++));
+        dockerCommands.get(counter));
   }
 
   @Test
@@ -515,7 +496,7 @@ public class TestDockerContainerRuntime {
     //This is the expected docker invocation for this case
     List<String> dockerCommands = Files
         .readAllLines(Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
-    int expected = 14;
+    int expected = 15;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -537,16 +518,16 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals("  name=container_id", dockerCommands.get(counter++));
     Assert
         .assertEquals("  net=" + allowedNetwork, dockerCommands.get(counter++));
+    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
-        "  rw-mounts=/test_container_local_dir:/test_container_local_dir,"
-            + "/test_filecache_dir:/test_filecache_dir,"
-            + "/test_container_work_dir:/test_container_work_dir,"
-            + "/test_container_log_dir:/test_container_log_dir,"
-            + "/test_user_local_dir:/test_user_local_dir",
+        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
+            + "/test_application_local_dir:/test_application_local_dir",
         dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
-        dockerCommands.get(counter++));
+        dockerCommands.get(counter));
   }
 
   @Test
@@ -583,7 +564,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files
         .readAllLines(Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 14;
+    int expected = 15;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -603,16 +584,16 @@ public class TestDockerContainerRuntime {
         dockerCommands.get(counter++));
     Assert.assertEquals("  name=container_id", dockerCommands.get(counter++));
     Assert.assertEquals("  net=sdn1", dockerCommands.get(counter++));
+    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
-        "  rw-mounts=/test_container_local_dir:/test_container_local_dir,"
-            + "/test_filecache_dir:/test_filecache_dir,"
-            + "/test_container_work_dir:/test_container_work_dir,"
-            + "/test_container_log_dir:/test_container_log_dir,"
-            + "/test_user_local_dir:/test_user_local_dir",
+        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
+            + "/test_application_local_dir:/test_application_local_dir",
         dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
-        dockerCommands.get(counter++));
+        dockerCommands.get(counter));
 
     //now set an explicit (non-default) allowedNetwork and ensure that it is
     // used.
@@ -649,16 +630,16 @@ public class TestDockerContainerRuntime {
 
     Assert.assertEquals("  name=container_id", dockerCommands.get(counter++));
     Assert.assertEquals("  net=sdn2", dockerCommands.get(counter++));
+    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
-        "  rw-mounts=/test_container_local_dir:/test_container_local_dir,"
-            + "/test_filecache_dir:/test_filecache_dir,"
-            + "/test_container_work_dir:/test_container_work_dir,"
-            + "/test_container_log_dir:/test_container_log_dir,"
-            + "/test_user_local_dir:/test_user_local_dir",
+        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
+            + "/test_application_local_dir:/test_application_local_dir",
         dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
-        dockerCommands.get(counter++));
+        dockerCommands.get(counter));
 
 
     //disallowed network should trigger a launch failure
@@ -677,7 +658,7 @@ public class TestDockerContainerRuntime {
   @Test
   public void testLaunchPidNamespaceContainersInvalidEnvVar()
       throws ContainerExecutionException, PrivilegedOperationException,
-      IOException{
+      IOException {
     DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
         mockExecutor, mockCGroupsHandler);
     runtime.initialize(conf, null);
@@ -693,7 +674,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(Paths.get
         (dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 14;
+    int expected = 15;
     Assert.assertEquals(expected, dockerCommands.size());
 
     String command = dockerCommands.get(0);
@@ -724,7 +705,7 @@ public class TestDockerContainerRuntime {
   @Test
   public void testLaunchPidNamespaceContainersEnabled()
       throws ContainerExecutionException, PrivilegedOperationException,
-      IOException{
+      IOException {
     //Enable host pid namespace containers.
     conf.setBoolean(YarnConfiguration.NM_DOCKER_ALLOW_HOST_PID_NAMESPACE,
         true);
@@ -744,7 +725,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(
         Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 15;
+    int expected = 16;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -765,22 +746,22 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals("  name=container_id", dockerCommands.get(counter++));
     Assert.assertEquals("  net=host", dockerCommands.get(counter++));
     Assert.assertEquals("  pid=host", dockerCommands.get(counter++));
+    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
-        "  rw-mounts=/test_container_local_dir:/test_container_local_dir,"
-            + "/test_filecache_dir:/test_filecache_dir,"
-            + "/test_container_work_dir:/test_container_work_dir,"
-            + "/test_container_log_dir:/test_container_log_dir,"
-            + "/test_user_local_dir:/test_user_local_dir",
+        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
+            + "/test_application_local_dir:/test_application_local_dir",
         dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
-        dockerCommands.get(counter++));
+        dockerCommands.get(counter));
   }
 
   @Test
   public void testLaunchPrivilegedContainersInvalidEnvVar()
       throws ContainerExecutionException, PrivilegedOperationException,
-      IOException{
+      IOException {
     DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
         mockExecutor, mockCGroupsHandler);
     runtime.initialize(conf, null);
@@ -796,7 +777,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(
         Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 14;
+    int expected = 15;
     Assert.assertEquals(expected, dockerCommands.size());
 
     String command = dockerCommands.get(0);
@@ -808,8 +789,7 @@ public class TestDockerContainerRuntime {
 
   @Test
   public void testLaunchPrivilegedContainersWithDisabledSetting()
-      throws ContainerExecutionException, PrivilegedOperationException,
-      IOException{
+      throws ContainerExecutionException {
     DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
         mockExecutor, mockCGroupsHandler);
     runtime.initialize(conf, null);
@@ -827,8 +807,7 @@ public class TestDockerContainerRuntime {
 
   @Test
   public void testLaunchPrivilegedContainersWithEnabledSettingAndDefaultACL()
-      throws ContainerExecutionException, PrivilegedOperationException,
-      IOException{
+      throws ContainerExecutionException {
     //Enable privileged containers.
     conf.setBoolean(YarnConfiguration.NM_DOCKER_ALLOW_PRIVILEGED_CONTAINERS,
         true);
@@ -854,8 +833,7 @@ public class TestDockerContainerRuntime {
   @Test
   public void
   testLaunchPrivilegedContainersEnabledAndUserNotInWhitelist()
-      throws ContainerExecutionException, PrivilegedOperationException,
-      IOException{
+      throws ContainerExecutionException {
     //Enable privileged containers.
     conf.setBoolean(YarnConfiguration.NM_DOCKER_ALLOW_PRIVILEGED_CONTAINERS,
         true);
@@ -882,7 +860,7 @@ public class TestDockerContainerRuntime {
   public void
   testLaunchPrivilegedContainersEnabledAndUserInWhitelist()
       throws ContainerExecutionException, PrivilegedOperationException,
-      IOException{
+      IOException {
     //Enable privileged containers.
     conf.setBoolean(YarnConfiguration.NM_DOCKER_ALLOW_PRIVILEGED_CONTAINERS,
         true);
@@ -905,7 +883,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files.readAllLines(Paths.get
         (dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 15;
+    int expected = 16;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -926,16 +904,16 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals("  name=container_id", dockerCommands.get(counter++));
     Assert.assertEquals("  net=host", dockerCommands.get(counter++));
     Assert.assertEquals("  privileged=true", dockerCommands.get(counter++));
+    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
-        "  rw-mounts=/test_container_local_dir:/test_container_local_dir,"
-            + "/test_filecache_dir:/test_filecache_dir,"
-            + "/test_container_work_dir:/test_container_work_dir,"
-            + "/test_container_log_dir:/test_container_log_dir,"
-            + "/test_user_local_dir:/test_user_local_dir",
+        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
+            + "/test_application_local_dir:/test_application_local_dir",
         dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
-        dockerCommands.get(counter++));
+        dockerCommands.get(counter));
   }
 
   @Test
@@ -985,9 +963,7 @@ public class TestDockerContainerRuntime {
   }
 
   @Test
-  public void testMountSourceOnly()
-      throws ContainerExecutionException, PrivilegedOperationException,
-      IOException{
+  public void testMountSourceOnly() throws ContainerExecutionException {
     DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
         mockExecutor, mockCGroupsHandler);
     runtime.initialize(conf, null);
@@ -1007,7 +983,7 @@ public class TestDockerContainerRuntime {
   @Test
   public void testMountSourceTarget()
       throws ContainerExecutionException, PrivilegedOperationException,
-      IOException{
+      IOException {
     DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
         mockExecutor, mockCGroupsHandler);
     runtime.initialize(conf, null);
@@ -1045,24 +1021,21 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals("  name=container_id", dockerCommands.get(counter++));
     Assert.assertEquals("  net=host", dockerCommands.get(counter++));
     Assert.assertEquals(
-        "  ro-mounts=/test_local_dir/test_resource_file:test_mount",
+        "  ro-mounts=/test_filecache_dir:/test_filecache_dir,/"
+            + "test_user_filecache_dir:/test_user_filecache_dir,"
+            + "/test_local_dir/test_resource_file:test_mount",
         dockerCommands.get(counter++));
     Assert.assertEquals(
-        "  rw-mounts=/test_container_local_dir:/test_container_local_dir,"
-            + "/test_filecache_dir:/test_filecache_dir,"
-            + "/test_container_work_dir:/test_container_work_dir,"
-            + "/test_container_log_dir:/test_container_log_dir,"
-            + "/test_user_local_dir:/test_user_local_dir",
+        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
+            + "/test_application_local_dir:/test_application_local_dir",
         dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
-        dockerCommands.get(counter++));
+        dockerCommands.get(counter));
   }
 
   @Test
-  public void testMountInvalid()
-      throws ContainerExecutionException, PrivilegedOperationException,
-      IOException{
+  public void testMountInvalid() throws ContainerExecutionException {
     DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
         mockExecutor, mockCGroupsHandler);
     runtime.initialize(conf, null);
@@ -1082,7 +1055,7 @@ public class TestDockerContainerRuntime {
   @Test
   public void testMountMultiple()
       throws ContainerExecutionException, PrivilegedOperationException,
-      IOException{
+      IOException {
     DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
         mockExecutor, mockCGroupsHandler);
     runtime.initialize(conf, null);
@@ -1121,26 +1094,24 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals("  name=container_id", dockerCommands.get(counter++));
     Assert.assertEquals("  net=host", dockerCommands.get(counter++));
     Assert.assertEquals(
-        "  ro-mounts=/test_local_dir/test_resource_file:test_mount1,"
+        "  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir,"
+            + "/test_local_dir/test_resource_file:test_mount1,"
             + "/test_local_dir/test_resource_file:test_mount2",
         dockerCommands.get(counter++));
     Assert.assertEquals(
-        "  rw-mounts=/test_container_local_dir:/test_container_local_dir,"
-            + "/test_filecache_dir:/test_filecache_dir,"
-            + "/test_container_work_dir:/test_container_work_dir,"
-            + "/test_container_log_dir:/test_container_log_dir,"
-            + "/test_user_local_dir:/test_user_local_dir",
+        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
+            + "/test_application_local_dir:/test_application_local_dir",
         dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
-        dockerCommands.get(counter++));
-
+        dockerCommands.get(counter));
   }
 
   @Test
   public void testUserMounts()
       throws ContainerExecutionException, PrivilegedOperationException,
-      IOException{
+      IOException {
     DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
         mockExecutor, mockCGroupsHandler);
     runtime.initialize(conf, null);
@@ -1177,25 +1148,22 @@ public class TestDockerContainerRuntime {
         dockerCommands.get(counter++));
     Assert.assertEquals("  name=container_id", dockerCommands.get(counter++));
     Assert.assertEquals("  net=host", dockerCommands.get(counter++));
-    Assert.assertEquals("  ro-mounts=/tmp/foo:/tmp/foo",
+    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir,"
+            + "/tmp/foo:/tmp/foo",
         dockerCommands.get(counter++));
     Assert.assertEquals(
-        "  rw-mounts=/test_container_local_dir:/test_container_local_dir,"
-            + "/test_filecache_dir:/test_filecache_dir,"
-            + "/test_container_work_dir:/test_container_work_dir,"
-            + "/test_container_log_dir:/test_container_log_dir,"
-            + "/test_user_local_dir:/test_user_local_dir,"
+        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
+            + "/test_application_local_dir:/test_application_local_dir,"
             + "/tmp/bar:/tmp/bar",
         dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
-        dockerCommands.get(counter++));
+        dockerCommands.get(counter));
   }
 
   @Test
-  public void testUserMountInvalid()
-      throws ContainerExecutionException, PrivilegedOperationException,
-      IOException{
+  public void testUserMountInvalid() throws ContainerExecutionException {
     DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
         mockExecutor, mockCGroupsHandler);
     runtime.initialize(conf, null);
@@ -1213,9 +1181,7 @@ public class TestDockerContainerRuntime {
   }
 
   @Test
-  public void testUserMountModeInvalid()
-      throws ContainerExecutionException, PrivilegedOperationException,
-      IOException{
+  public void testUserMountModeInvalid() throws ContainerExecutionException {
     DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
         mockExecutor, mockCGroupsHandler);
     runtime.initialize(conf, null);
@@ -1233,9 +1199,7 @@ public class TestDockerContainerRuntime {
   }
 
   @Test
-  public void testUserMountModeNulInvalid()
-      throws ContainerExecutionException, PrivilegedOperationException,
-      IOException{
+  public void testUserMountModeNulInvalid() throws ContainerExecutionException {
     DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
         mockExecutor, mockCGroupsHandler);
     runtime.initialize(conf, null);
@@ -1679,14 +1643,13 @@ public class TestDockerContainerRuntime {
         dockerCommands.get(counter++));
     Assert.assertEquals("  name=container_id", dockerCommands.get(counter++));
     Assert.assertEquals("  net=host", dockerCommands.get(counter++));
-    Assert.assertEquals("  ro-mounts=/source/path:/destination/path",
+    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir,"
+            + "/source/path:/destination/path",
         dockerCommands.get(counter++));
     Assert.assertEquals(
-        "  rw-mounts=/test_container_local_dir:/test_container_local_dir,"
-            + "/test_filecache_dir:/test_filecache_dir,"
-            + "/test_container_work_dir:/test_container_work_dir,"
-            + "/test_container_log_dir:/test_container_log_dir,"
-            + "/test_user_local_dir:/test_user_local_dir",
+        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
+            + "/test_application_local_dir:/test_application_local_dir",
         dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
 
@@ -1694,13 +1657,11 @@ public class TestDockerContainerRuntime {
     Assert.assertEquals("  volume-driver=driver-1",
         dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",
-        dockerCommands.get(counter++));
+        dockerCommands.get(counter));
   }
 
   @Test
-  public void testDockerCapabilities()
-      throws ContainerExecutionException, PrivilegedOperationException,
-      IOException {
+  public void testDockerCapabilities() throws ContainerExecutionException {
     DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
         mockExecutor, mockCGroupsHandler);
     try {




[48/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240

Posted by ae...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47919787/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index 58076f5,0000000..65b8726
mode 100644,000000..100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@@ -1,1095 -1,0 +1,1100 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + *  with the License.  You may obtain a copy of the License at
 + *
 + *      http://www.apache.org/licenses/LICENSE-2.0
 + *
 + *  Unless required by applicable law or agreed to in writing, software
 + *  distributed under the License is distributed on an "AS IS" BASIS,
 + *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + *  See the License for the specific language governing permissions and
 + *  limitations under the License.
 + */
 +
 +package org.apache.hadoop.ozone.container.common.impl;
 +
 +import com.google.common.annotations.VisibleForTesting;
 +import com.google.common.base.Preconditions;
 +import org.apache.commons.codec.digest.DigestUtils;
 +import org.apache.commons.io.FileUtils;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
 +import org.apache.hadoop.hdfs.protocol.DatanodeID;
 +import org.apache.hadoop.ozone.container.common.helpers.KeyData;
 +import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
 +import org.apache.hadoop.ozone.container.common.interfaces.*;
 +import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
 +import org.apache.hadoop.scm.container.common.helpers.StorageContainerException;
 +import org.apache.hadoop.util.ReflectionUtils;
 +import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos;
 +import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState;
 +import org.apache.hadoop.ozone.protocol.proto
 +    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
 +import org.apache.hadoop.ozone.protocol.proto
 +    .StorageContainerDatanodeProtocolProtos.SCMStorageReport;
 +import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 +import org.apache.hadoop.io.IOUtils;
 +import org.apache.hadoop.ozone.OzoneConsts;
 +import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 +import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 +import org.apache.hadoop.scm.ScmConfigKeys;
 +import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 +import org.apache.hadoop.utils.MetadataKeyFilters;
 +import org.apache.hadoop.utils.MetadataStore;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import java.io.File;
 +import java.io.FileInputStream;
 +import java.io.FileOutputStream;
 +import java.io.FilenameFilter;
 +import java.io.IOException;
 +import java.nio.file.Path;
 +import java.nio.file.Paths;
 +import java.security.DigestInputStream;
 +import java.security.DigestOutputStream;
 +import java.security.MessageDigest;
 +import java.security.NoSuchAlgorithmException;
 +import java.util.LinkedList;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.concurrent.ConcurrentNavigableMap;
 +import java.util.concurrent.ConcurrentSkipListMap;
 +import java.util.concurrent.locks.ReentrantReadWriteLock;
 +import java.util.stream.Collectors;
 +
 +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 +import static org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
 +    .Result.CONTAINER_EXISTS;
 +import static org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
 +    .Result.CONTAINER_INTERNAL_ERROR;
 +import static org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
 +    .Result.CONTAINER_NOT_FOUND;
 +import static org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
 +    .Result.INVALID_CONFIG;
 +import static org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
 +    .Result.IO_EXCEPTION;
 +import static org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
 +    .Result.NO_SUCH_ALGORITHM;
 +import static org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
 +    .Result.UNABLE_TO_READ_METADATA_DB;
 +import static org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
 +    .Result.UNSUPPORTED_REQUEST;
 +import static org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
 +   .Result.ERROR_IN_COMPACT_DB;
 +import static org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
 +    .Result.UNCLOSED_CONTAINER_IO;
 +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION;
 +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_META;
 +
 +/**
 + * A Generic ContainerManagerImpl that will be called from Ozone
 + * ContainerManagerImpl. This allows us to support delta changes to ozone
 + * version without having to rewrite the containerManager.
 + */
 +public class ContainerManagerImpl implements ContainerManager {
 +  static final Logger LOG =
 +      LoggerFactory.getLogger(ContainerManagerImpl.class);
 +
 +  private final ConcurrentSkipListMap<String, ContainerStatus>
 +      containerMap = new ConcurrentSkipListMap<>();
 +
 +  // Use a non-fair RW lock for better throughput; we may revisit this
 +  // decision if it causes fairness issues.
 +  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
 +  private ContainerLocationManager locationManager;
 +  private ChunkManager chunkManager;
 +  private KeyManager keyManager;
 +  private Configuration conf;
 +  private DatanodeID datanodeID;
 +
 +  private ContainerDeletionChoosingPolicy containerDeletionChooser;
 +  private ContainerReportManager containerReportManager;
 +
 +  /**
 +   * Init call that sets up a container Manager.
 +   *
 +   * @param config - Configuration.
 +   * @param containerDirs - List of Metadata Container locations.
 +   * @param datanode - Datanode ID.
 +   * @throws IOException
 +   */
 +  @Override
 +  public void init(
 +      Configuration config, List<StorageLocation> containerDirs,
 +      DatanodeID datanode) throws IOException {
 +    Preconditions.checkNotNull(config, "Config must not be null");
 +    Preconditions.checkNotNull(containerDirs, "Container directories cannot " +
 +        "be null");
 +    Preconditions.checkNotNull(datanode, "Datanode ID cannot " +
 +        "be null");
 +
 +    Preconditions.checkState(containerDirs.size() > 0, "Number of container" +
 +        " directories must be greater than zero.");
 +
 +    this.conf = config;
 +    this.datanodeID = datanode;
 +
 +    readLock();
 +    try {
 +      containerDeletionChooser = ReflectionUtils.newInstance(conf.getClass(
 +          ScmConfigKeys.OZONE_SCM_CONTAINER_DELETION_CHOOSING_POLICY,
 +          TopNOrderedContainerDeletionChoosingPolicy.class,
 +          ContainerDeletionChoosingPolicy.class), conf);
 +
 +      for (StorageLocation path : containerDirs) {
 +        File directory = Paths.get(path.getNormalizedUri()).toFile();
 +        if (!directory.exists() && !directory.mkdirs()) {
 +          LOG.error("Container metadata directory doesn't exist "
 +              + "and cannot be created. Path: {}", path.toString());
 +          throw new StorageContainerException("Container metadata "
 +              + "directory doesn't exist and cannot be created " + path
 +              .toString(), INVALID_CONFIG);
 +        }
 +
 +        // TODO: This will fail if any directory is invalid.
 +        // We should fix this to handle invalid directories and continue.
 +        // Leaving it this way to fail fast for time being.
 +        if (!directory.isDirectory()) {
 +          LOG.error("Invalid path to container metadata directory. path: {}",
 +              path.toString());
 +          throw new StorageContainerException("Invalid path to container " +
 +              "metadata directory." + path, INVALID_CONFIG);
 +        }
 +        LOG.info("Loading containers under {}", path);
 +        File[] files = directory.listFiles(new ContainerFilter());
 +        if (files != null) {
 +          for (File containerFile : files) {
 +            LOG.debug("Loading container {}", containerFile);
 +            String containerPath =
 +                ContainerUtils.getContainerNameFromFile(containerFile);
 +            Preconditions.checkNotNull(containerPath, "Container path cannot" +
 +                " be null");
 +            readContainerInfo(containerPath);
 +          }
 +        }
 +      }
 +
 +      List<StorageLocation> dataDirs = new LinkedList<>();
 +      for (String dir : config.getStrings(DFS_DATANODE_DATA_DIR_KEY)) {
 +        StorageLocation location = StorageLocation.parse(dir);
 +        dataDirs.add(location);
 +      }
 +      this.locationManager =
 +          new ContainerLocationManagerImpl(containerDirs, dataDirs, config);
 +
 +      this.containerReportManager =
 +          new ContainerReportManagerImpl(config);
 +    } finally {
 +      readUnlock();
 +    }
 +  }
 +
 +  /**
 +   * Reads the Container Info from a file and verifies that checksums match. If
 +   * the checksums match, then that file is added to containerMap.
 +   *
 +   * @param containerName - Name which points to the persisted container.
 +   * @throws StorageContainerException
 +   */
 +  private void readContainerInfo(String containerName)
 +      throws StorageContainerException {
 +    Preconditions.checkState(containerName.length() > 0,
 +        "Container name length cannot be zero.");
 +    FileInputStream containerStream = null;
 +    DigestInputStream dis = null;
 +    FileInputStream metaStream = null;
 +    Path cPath = Paths.get(containerName).getFileName();
 +    String keyName = null;
 +    if (cPath != null) {
 +      keyName = cPath.toString();
 +    }
 +    Preconditions.checkNotNull(keyName,
 +        "Container name to container key mapping is null");
 +
 +    try {
 +      String containerFileName = containerName.concat(CONTAINER_EXTENSION);
 +      String metaFileName = containerName.concat(CONTAINER_META);
 +
 +      containerStream = new FileInputStream(containerFileName);
 +
 +      metaStream = new FileInputStream(metaFileName);
 +
 +      MessageDigest sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
 +
 +      dis = new DigestInputStream(containerStream, sha);
 +
 +      ContainerProtos.ContainerData containerDataProto =
 +          ContainerProtos.ContainerData.parseDelimitedFrom(dis);
 +      ContainerData containerData;
 +      if (containerDataProto == null) {
 +        // Sometimes container metadata has been created but is empty; when
 +        // loading the info we get a null. This usually means that SCM
 +        // stopped at some intermediate phase last time, leaving the metadata
 +        // unpopulated. Such containers are marked as inactive.
 +        containerMap.put(keyName, new ContainerStatus(null));
 +        return;
 +      }
 +      containerData = ContainerData.getFromProtBuf(containerDataProto, conf);
 +      ContainerProtos.ContainerMeta meta =
 +          ContainerProtos.ContainerMeta.parseDelimitedFrom(metaStream);
 +      if (meta != null && !DigestUtils.sha256Hex(sha.digest())
 +          .equals(meta.getHash())) {
 +        // This means we were not able to read data from the disk when the
 +        // datanode booted. We are going to rely on SCM understanding that we
 +        // don't have valid data for this container when we send container
 +        // reports. Hopefully SCM will ask us to delete it and rebuild it.
 +        LOG.error("Invalid SHA found for container data. Name: {}, "
 +            + "cowardly refusing to read invalid data", containerName);
 +        containerMap.put(keyName, new ContainerStatus(null));
 +        return;
 +      }
 +
 +      ContainerStatus containerStatus = new ContainerStatus(containerData);
 +      // Initialize pending deletion blocks count in in-memory
 +      // container status.
 +      MetadataStore metadata = KeyUtils.getDB(containerData, conf);
 +      List<Map.Entry<byte[], byte[]>> underDeletionBlocks = metadata
 +          .getSequentialRangeKVs(null, Integer.MAX_VALUE,
 +              MetadataKeyFilters.getDeletingKeyFilter());
 +      containerStatus.incrPendingDeletionBlocks(underDeletionBlocks.size());
 +
 +      List<Map.Entry<byte[], byte[]>> liveKeys = metadata
 +          .getRangeKVs(null, Integer.MAX_VALUE,
 +              MetadataKeyFilters.getNormalKeyFilter());
 +
 +      // Get container bytesUsed upon loading container
 +      // The in-memory state is updated upon key write or delete
 +      // TODO: update containerDataProto and persist it into container MetaFile
 +      long bytesUsed = 0;
 +      bytesUsed = liveKeys.parallelStream().mapToLong(e-> {
 +        KeyData keyData;
 +        try {
 +          keyData = KeyUtils.getKeyData(e.getValue());
 +          return keyData.getSize();
 +        } catch (IOException ex) {
 +          return 0L;
 +        }
 +      }).sum();
 +      containerStatus.setBytesUsed(bytesUsed);
 +
 +      containerMap.put(keyName, containerStatus);
 +    } catch (IOException | NoSuchAlgorithmException ex) {
 +      LOG.error("read failed for file: {} ex: {}", containerName,
 +          ex.getMessage());
 +
 +      // TODO : Add this file to a recovery Queue.
 +
 +      // Remember that this container is busted and we cannot use it.
 +      containerMap.put(keyName, new ContainerStatus(null));
 +      throw new StorageContainerException("Unable to read container info",
 +          UNABLE_TO_READ_METADATA_DB);
 +    } finally {
 +      IOUtils.closeStream(dis);
 +      IOUtils.closeStream(containerStream);
 +      IOUtils.closeStream(metaStream);
 +    }
 +  }
 +
 +  /**
 +   * Creates a container with the given name.
 +   *
 +   * @param pipeline -- Nodes which make up this container.
 +   * @param containerData - Container Name and metadata.
 +   * @throws StorageContainerException - Exception
 +   */
 +  @Override
 +  public void createContainer(Pipeline pipeline, ContainerData containerData)
 +      throws StorageContainerException {
 +    Preconditions.checkNotNull(containerData, "Container data cannot be null");
 +    writeLock();
 +    try {
 +      if (containerMap.containsKey(containerData.getName())) {
 +        LOG.debug("container already exists. {}", containerData.getName());
 +        throw new StorageContainerException("container already exists.",
 +            CONTAINER_EXISTS);
 +      }
 +
 +      // This is by design. We first write and close the
 +      // container Info and metadata to a directory.
 +      // Then read back and put that info into the containerMap.
 +      // This allows us to make sure that our write is consistent.
 +
 +      writeContainerInfo(containerData, false);
 +      File cFile = new File(containerData.getContainerPath());
 +      readContainerInfo(ContainerUtils.getContainerNameFromFile(cFile));
 +    } catch (NoSuchAlgorithmException ex) {
 +      LOG.error("Internal error: We seem to be running a JVM without a " +
 +          "needed hash algorithm.");
 +      throw new StorageContainerException("failed to create container",
 +          NO_SUCH_ALGORITHM);
 +    } finally {
 +      writeUnlock();
 +    }
 +
 +  }
 +
 +  /**
 +   * Writes a container to a chosen location and updates the container Map.
 +   *
 +   * The file formats of ContainerData and ContainerMeta are the following.
 +   *
 +   * message ContainerData {
 +   * required string name = 1;
 +   * repeated KeyValue metadata = 2;
 +   * optional string dbPath = 3;
 +   * optional string containerPath = 4;
 +   * optional int64 bytesUsed = 5;
 +   * optional int64 size = 6;
 +   * }
 +   *
 +   * message ContainerMeta {
 +   * required string fileName = 1;
 +   * required string hash = 2;
 +   * }
 +   *
 +   * @param containerData - container Data
 +   * @param overwrite - Whether we are overwriting.
 +   * @throws StorageContainerException, NoSuchAlgorithmException
 +   */
 +  private void writeContainerInfo(ContainerData containerData,
 +      boolean  overwrite)
 +      throws StorageContainerException, NoSuchAlgorithmException {
 +
 +    Preconditions.checkNotNull(this.locationManager,
 +        "Internal error: location manager cannot be null");
 +
 +    FileOutputStream containerStream = null;
 +    DigestOutputStream dos = null;
 +    FileOutputStream metaStream = null;
 +
 +    try {
 +      Path metadataPath = null;
 +      Path location = (!overwrite) ? locationManager.getContainerPath():
 +          Paths.get(containerData.getContainerPath()).getParent();
 +      if (location == null) {
 +        throw new StorageContainerException(
 +            "Failed to get container file path.",
 +            CONTAINER_INTERNAL_ERROR);
 +      }
 +
 +      File containerFile = ContainerUtils.getContainerFile(containerData,
 +          location);
 +      File metadataFile = ContainerUtils.getMetadataFile(containerData,
 +          location);
 +      String containerName = containerData.getContainerName();
 +
 +      if(!overwrite) {
 +        ContainerUtils.verifyIsNewContainer(containerFile, metadataFile);
 +        metadataPath = this.locationManager.getDataPath(containerName);
 +        metadataPath = ContainerUtils.createMetadata(metadataPath,
 +            containerName, conf);
 +      }  else {
 +        metadataPath = ContainerUtils.getMetadataDirectory(containerData);
 +      }
 +
 +      containerStream = new FileOutputStream(containerFile);
 +      metaStream = new FileOutputStream(metadataFile);
 +      MessageDigest sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
 +
 +      dos = new DigestOutputStream(containerStream, sha);
 +      containerData.setDBPath(metadataPath.resolve(
 +          ContainerUtils.getContainerDbFileName(containerName))
 +          .toString());
 +      containerData.setContainerPath(containerFile.toString());
 +
 +      ContainerProtos.ContainerData protoData = containerData
 +          .getProtoBufMessage();
 +      protoData.writeDelimitedTo(dos);
 +
 +      ContainerProtos.ContainerMeta protoMeta = ContainerProtos
 +          .ContainerMeta.newBuilder()
 +          .setFileName(containerFile.toString())
 +          .setHash(DigestUtils.sha256Hex(sha.digest()))
 +          .build();
 +      protoMeta.writeDelimitedTo(metaStream);
 +
 +    } catch (IOException ex) {
 +      // TODO : we need to clean up partially constructed files.
 +      // The proper way to do this would be for a thread
 +      // to read all these 3 artifacts and make sure they are
 +      // sane. That info needs to come from the replication
 +      // pipeline, and if they are not consistent, delete these files.
 +
 +      // In case of ozone this is *not* a deal breaker since
 +      // SCM is guaranteed to generate unique container names.
 +      // The saving grace is that we check if we have residue files
 +      // lying around when creating a new container. We need to queue
 +      // this information to a cleaner thread.
 +
 +      LOG.error("Creation of container failed. Name: {}, we might need to " +
 +              "cleanup partially created artifacts. ",
 +          containerData.getContainerName(), ex);
 +      throw new StorageContainerException("Container creation failed. ",
 +          ex, CONTAINER_INTERNAL_ERROR);
 +    } finally {
 +      IOUtils.closeStream(dos);
 +      IOUtils.closeStream(containerStream);
 +      IOUtils.closeStream(metaStream);
 +    }
 +  }
 +
 +  /**
 +   * Deletes an existing container.
 +   *
 +   * @param pipeline - nodes that make this container.
 +   * @param containerName - name of the container.
 +   * @param forceDelete - whether this container should be deleted forcibly.
 +   * @throws StorageContainerException
 +   */
 +  @Override
 +  public void deleteContainer(Pipeline pipeline, String containerName,
 +      boolean forceDelete) throws StorageContainerException {
 +    Preconditions.checkNotNull(containerName, "Container name cannot be null");
 +    Preconditions.checkState(containerName.length() > 0,
 +        "Container name length cannot be zero.");
 +    writeLock();
 +    try {
 +      if (isOpen(pipeline.getContainerName())) {
 +        throw new StorageContainerException(
 +            "Deleting an open container is not allowed.",
 +            UNCLOSED_CONTAINER_IO);
 +      }
 +
 +      ContainerStatus status = containerMap.get(containerName);
 +      if (status == null) {
 +        LOG.debug("No such container. Name: {}", containerName);
 +        throw new StorageContainerException("No such container. Name : " +
 +            containerName, CONTAINER_NOT_FOUND);
 +      }
 +      if (status.getContainer() == null) {
 +        LOG.debug("Invalid container data. Name: {}", containerName);
 +        throw new StorageContainerException("Invalid container data. Name : " +
 +            containerName, CONTAINER_NOT_FOUND);
 +      }
 +      ContainerUtils.removeContainer(status.getContainer(), conf, forceDelete);
 +      containerMap.remove(containerName);
 +    } catch (StorageContainerException e) {
 +      throw e;
 +    } catch (IOException e) {
 +      // TODO : An I/O error during delete can leave partial artifacts on the
 +      // disk. We will need the cleaner thread to cleanup this information.
 +      LOG.error("Failed to cleanup container. Name: {}", containerName, e);
 +      throw new StorageContainerException(containerName, e, IO_EXCEPTION);
 +    } finally {
 +      writeUnlock();
 +    }
 +  }
 +
 +  /**
 +   * A simple interface for container Iterations.
 +   * <p/>
 +   * This call makes no guarantees about consistency of the data between
 +   * different list calls. It just returns the best known data at that point
 +   * in time. It is possible that using this iteration you can miss certain
 +   * containers from the listing.
 +   *
 +   * @param prefix -  Return keys that match this prefix.
 +   * @param count - how many to return
 +   * @param prevKey - Previous Key Value or empty String.
 +   * @param data - Actual containerData
 +   * @throws StorageContainerException
 +   */
 +  @Override
 +  public void listContainer(String prefix, long count, String prevKey,
 +      List<ContainerData> data) throws StorageContainerException {
 +    // TODO : Support list with Prefix and PrevKey
 +    Preconditions.checkNotNull(data,
 +        "Internal assertion: data cannot be null");
 +    readLock();
 +    try {
 +      ConcurrentNavigableMap<String, ContainerStatus> map;
 +      if (prevKey == null || prevKey.isEmpty()) {
 +        map = containerMap.tailMap(containerMap.firstKey(), true);
 +      } else {
 +        map = containerMap.tailMap(prevKey, false);
 +      }
 +
 +      int currentCount = 0;
 +      for (ContainerStatus entry : map.values()) {
 +        if (currentCount < count) {
 +          data.add(entry.getContainer());
 +          currentCount++;
 +        } else {
 +          return;
 +        }
 +      }
 +    } finally {
 +      readUnlock();
 +    }
 +  }
 +
 +  /**
 +   * Get metadata about a specific container.
 +   *
 +   * @param containerName - Name of the container
 +   * @return ContainerData - Container Data.
 +   * @throws StorageContainerException
 +   */
 +  @Override
 +  public ContainerData readContainer(String containerName) throws
 +      StorageContainerException {
 +    Preconditions.checkNotNull(containerName, "Container name cannot be null");
 +    Preconditions.checkState(containerName.length() > 0,
 +        "Container name length cannot be zero.");
 +    if (!containerMap.containsKey(containerName)) {
 +      throw new StorageContainerException("Unable to find the container. Name: "
 +          + containerName, CONTAINER_NOT_FOUND);
 +    }
 +    ContainerData cData = containerMap.get(containerName).getContainer();
 +    if (cData == null) {
 +      throw new StorageContainerException("Invalid container data. Name: "
 +          + containerName, CONTAINER_INTERNAL_ERROR);
 +    }
 +    return cData;
 +  }
 +
 +  /**
 +   * Closes an open container; if it is already closed or does not exist, a
 +   * StorageContainerException is thrown.
 +   *
 +   * @param containerName - Name of the container.
 +   * @throws StorageContainerException
 +   */
 +  @Override
 +  public void closeContainer(String containerName)
 +      throws StorageContainerException, NoSuchAlgorithmException {
 +    ContainerData containerData = readContainer(containerName);
 +    containerData.closeContainer();
 +    writeContainerInfo(containerData, true);
 +    MetadataStore db = KeyUtils.getDB(containerData, conf);
 +
 +    // It is ok if this operation takes a bit of time.
 +    // Close container is not expected to be instantaneous.
 +    try {
 +      db.compactDB();
 +    } catch (IOException e) {
 +      LOG.error("Error in DB compaction while closing container", e);
 +      throw new StorageContainerException(e, ERROR_IN_COMPACT_DB);
 +    }
 +
 +    // Active is different from closed. Closed means the container is
 +    // immutable; active == false means some internal error is affecting this
 +    // container. This is a way to track damaged containers: if we have an
 +    // I/O failure, it allows us to take quick action in case of container
 +    // issues.
 +
 +    ContainerStatus status = new ContainerStatus(containerData);
 +    containerMap.put(containerName, status);
 +  }
 +
 +  @Override
 +  public void updateContainer(Pipeline pipeline, String containerName,
 +      ContainerData data, boolean forceUpdate)
 +      throws StorageContainerException {
 +    Preconditions.checkNotNull(pipeline, "Pipeline cannot be null");
 +    Preconditions.checkNotNull(containerName, "Container name cannot be null");
 +    Preconditions.checkNotNull(data, "Container data cannot be null");
 +    FileOutputStream containerStream = null;
 +    DigestOutputStream dos = null;
 +    MessageDigest sha = null;
 +    File containerFileBK = null, containerFile = null;
 +    boolean deleted = false;
 +
 +    if(!containerMap.containsKey(containerName)) {
 +      throw new StorageContainerException("Container doesn't exist. Name :"
 +          + containerName, CONTAINER_NOT_FOUND);
 +    }
 +
 +    try {
 +      sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
 +    } catch (NoSuchAlgorithmException e) {
 +      throw new StorageContainerException("Unable to create Message Digest,"
 +          + " usually this is a java configuration issue.",
 +          NO_SUCH_ALGORITHM);
 +    }
 +
 +    try {
 +      Path location = locationManager.getContainerPath();
 +      ContainerData orgData = containerMap.get(containerName).getContainer();
 +      if (orgData == null) {
 +        // updating an invalid container
 +        throw new StorageContainerException("Update a container with invalid" +
 +            " container metadata", CONTAINER_INTERNAL_ERROR);
 +      }
 +
 +      if (!forceUpdate && !orgData.isOpen()) {
 +        throw new StorageContainerException(
 +            "Update a closed container is not allowed. Name: " + containerName,
 +            UNSUPPORTED_REQUEST);
 +      }
 +
 +      containerFile = ContainerUtils.getContainerFile(orgData, location);
 +      // If forceUpdate is true, there is no need to check
 +      // whether the container file exists.
 +      if (!forceUpdate) {
 +        if (!containerFile.exists() || !containerFile.canWrite()) {
 +          throw new StorageContainerException(
 +              "Container file does not exist or is corrupted. Name: " + containerName,
 +              CONTAINER_INTERNAL_ERROR);
 +        }
 +
 +        // Backup the container file
 +        containerFileBK = File.createTempFile(
 +            "tmp_" + System.currentTimeMillis() + "_",
 +            containerFile.getName(), containerFile.getParentFile());
 +        FileUtils.copyFile(containerFile, containerFileBK);
 +
 +        deleted = containerFile.delete();
 +        containerStream = new FileOutputStream(containerFile);
 +        dos = new DigestOutputStream(containerStream, sha);
 +
 +        ContainerProtos.ContainerData protoData = data.getProtoBufMessage();
 +        protoData.writeDelimitedTo(dos);
 +      }
 +
 +      // Update the in-memory map
 +      ContainerStatus newStatus = new ContainerStatus(data);
 +      containerMap.replace(containerName, newStatus);
 +    } catch (IOException e) {
 +      // Restore the container file from backup
 +      if(containerFileBK != null && containerFileBK.exists() && deleted) {
 +        if(containerFile.delete()
 +            && containerFileBK.renameTo(containerFile)) {
 +          throw new StorageContainerException("Container update failed,"
 +              + " container data restored from the backup.",
 +              CONTAINER_INTERNAL_ERROR);
 +        } else {
 +          throw new StorageContainerException(
 +              "Failed to restore container data from the backup. Name: "
 +                  + containerName, CONTAINER_INTERNAL_ERROR);
 +        }
 +      } else {
 +        throw new StorageContainerException(
 +            e.getMessage(), CONTAINER_INTERNAL_ERROR);
 +      }
 +    } finally {
 +      if (containerFileBK != null && containerFileBK.exists()) {
 +        if(!containerFileBK.delete()) {
 +          LOG.warn("Unable to delete container file backup : {}.",
 +              containerFileBK.getAbsolutePath());
 +        }
 +      }
 +      IOUtils.closeStream(dos);
 +      IOUtils.closeStream(containerStream);
 +    }
 +  }
 +
 +  @VisibleForTesting
 +  protected File getContainerFile(ContainerData data) throws IOException {
 +    return ContainerUtils.getContainerFile(data,
 +        this.locationManager.getContainerPath());
 +  }
 +
 +  /**
 +   * Checks if a container is open.
 +   *
 +   * @param containerName - Name of the container.
 +   * @return true if the container is open, false otherwise.
 +   * @throws StorageContainerException - Throws Exception if we are not able to
 +   *                                   find the container.
 +   */
 +  @Override
 +  public boolean isOpen(String containerName) throws StorageContainerException {
 +    final ContainerStatus status = containerMap.get(containerName);
 +    if (status == null) {
 +      throw new StorageContainerException(
 +          "Container status not found: " + containerName, CONTAINER_NOT_FOUND);
 +    }
 +    final ContainerData cData = status.getContainer();
 +    if (cData == null) {
 +      throw new StorageContainerException(
 +          "Container not found: " + containerName, CONTAINER_NOT_FOUND);
 +    }
 +    return cData.isOpen();
 +  }
 +
 +  /**
 +   * Supports clean shutdown of container.
 +   *
 +   * @throws IOException
 +   */
 +  @Override
 +  public void shutdown() throws IOException {
 +    Preconditions.checkState(this.hasWriteLock(),
 +        "Assumption that we are holding the lock violated.");
 +    this.containerMap.clear();
 +    this.locationManager.shutdown();
 +  }
 +
 +
 +  @VisibleForTesting
 +  public ConcurrentSkipListMap<String, ContainerStatus> getContainerMap() {
 +    return containerMap;
 +  }
 +
 +  /**
 +   * Acquire read lock.
 +   */
 +  @Override
 +  public void readLock() {
 +    this.lock.readLock().lock();
 +
 +  }
 +
++  @Override
++  public void readLockInterruptibly() throws InterruptedException {
++    this.lock.readLock().lockInterruptibly();
++  }
++
 +  /**
 +   * Release read lock.
 +   */
 +  @Override
 +  public void readUnlock() {
 +    this.lock.readLock().unlock();
 +  }
 +
 +  /**
 +   * Check if the current thread holds read lock.
 +   */
 +  @Override
 +  public boolean hasReadLock() {
 +    return this.lock.readLock().tryLock();
 +  }
 +
 +  /**
 +   * Acquire write lock.
 +   */
 +  @Override
 +  public void writeLock() {
 +    this.lock.writeLock().lock();
 +  }
 +
 +  /**
 +   * Acquire write lock, unless interrupted while waiting.
 +   */
 +  @Override
 +  public void writeLockInterruptibly() throws InterruptedException {
 +    this.lock.writeLock().lockInterruptibly();
 +
 +  }
 +
 +  /**
 +   * Release write lock.
 +   */
 +  @Override
 +  public void writeUnlock() {
 +    this.lock.writeLock().unlock();
 +
 +  }
 +
 +  /**
 +   * Check if the current thread holds write lock.
 +   */
 +  @Override
 +  public boolean hasWriteLock() {
 +    return this.lock.writeLock().isHeldByCurrentThread();
 +  }
 +
 +  public ChunkManager getChunkManager() {
 +    return this.chunkManager;
 +  }
 +
 +  /**
 +   * Sets the chunk Manager.
 +   *
 +   * @param chunkManager - Chunk Manager
 +   */
 +  public void setChunkManager(ChunkManager chunkManager) {
 +    this.chunkManager = chunkManager;
 +  }
 +
 +  /**
 +   * Gets the Key Manager.
 +   *
 +   * @return KeyManager.
 +   */
 +  @Override
 +  public KeyManager getKeyManager() {
 +    return this.keyManager;
 +  }
 +
 +  /**
 +   * Get the node report.
 +   * @return node report.
 +   */
 +  @Override
 +  public SCMNodeReport getNodeReport() throws IOException {
 +    StorageLocationReport[] reports = locationManager.getLocationReport();
 +    SCMNodeReport.Builder nrb = SCMNodeReport.newBuilder();
 +    for (int i = 0; i < reports.length; i++) {
 +      SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
 +      nrb.addStorageReport(i, srb.setStorageUuid(reports[i].getId())
 +          .setCapacity(reports[i].getCapacity())
 +          .setScmUsed(reports[i].getScmUsed())
 +          .setRemaining(reports[i].getRemaining())
 +          .build());
 +    }
 +    return nrb.build();
 +  }
 +
 +
 +  /**
 +   * Gets container reports.
 +   *
 +   * @return List of all closed containers.
 +   * @throws IOException
 +   */
 +  @Override
 +  public List<ContainerData> getContainerReports() throws IOException {
 +    LOG.debug("Starting container report iteration.");
 +    // No need for locking since containerMap is a ConcurrentSkipListMap
 +    // And we can never get the exact state since close might happen
 +    // after we iterate a point.
 +    return containerMap.entrySet().stream()
 +        .filter(containerStatus ->
 +            !containerStatus.getValue().getContainer().isOpen())
 +        .map(containerStatus -> containerStatus.getValue().getContainer())
 +        .collect(Collectors.toList());
 +  }
 +
 +  /**
 +   * Get container report.
 +   *
 +   * @return The container report.
 +   * @throws IOException
 +   */
 +  @Override
 +  public ContainerReportsRequestProto getContainerReport() throws IOException {
 +    LOG.debug("Starting container report iteration.");
 +    // No need for locking since containerMap is a ConcurrentSkipListMap
 +    // And we can never get the exact state since close might happen
 +    // after we iterate a point.
 +    List<ContainerStatus> containers = containerMap.values().stream()
 +        .collect(Collectors.toList());
 +
 +    ContainerReportsRequestProto.Builder crBuilder =
 +        ContainerReportsRequestProto.newBuilder();
 +
 +    // TODO: support delta based container report
 +    crBuilder.setDatanodeID(datanodeID.getProtoBufMessage())
 +        .setType(ContainerReportsRequestProto.reportType.fullReport);
 +
 +    for (ContainerStatus container: containers) {
 +      StorageContainerDatanodeProtocolProtos.ContainerInfo.Builder ciBuilder =
 +          StorageContainerDatanodeProtocolProtos.ContainerInfo.newBuilder();
 +      ciBuilder.setContainerName(container.getContainer().getContainerName())
 +          .setSize(container.getContainer().getMaxSize())
 +          .setUsed(container.getContainer().getBytesUsed())
 +          .setKeyCount(container.getContainer().getKeyCount())
 +          .setReadCount(container.getReadCount())
 +          .setWriteCount(container.getWriteCount())
 +          .setReadBytes(container.getReadBytes())
 +          .setWriteBytes(container.getWriteBytes());
 +
 +      if (container.getContainer().getHash() != null) {
 +        ciBuilder.setFinalhash(container.getContainer().getHash());
 +      }
 +      crBuilder.addReports(ciBuilder.build());
 +    }
 +
 +    return crBuilder.build();
 +  }
 +
 +  /**
 +   * Sets the Key Manager.
 +   *
 +   * @param keyManager - Key Manager.
 +   */
 +  @Override
 +  public void setKeyManager(KeyManager keyManager) {
 +    this.keyManager = keyManager;
 +  }
 +
 +  /**
 +   * Filter out only container files from the container metadata dir.
 +   */
 +  private static class ContainerFilter implements FilenameFilter {
 +    /**
 +     * Tests if a specified file should be included in a file list.
 +     *
 +     * @param dir the directory in which the file was found.
 +     * @param name the name of the file.
 +     * @return <code>true</code> if and only if the name should be included in
 +     * the file list; <code>false</code> otherwise.
 +     */
 +    @Override
 +    public boolean accept(File dir, String name) {
 +      return name.endsWith(CONTAINER_EXTENSION);
 +    }
 +  }
 +
 +  @Override
 +  public List<ContainerData> chooseContainerForBlockDeletion(
 +      int count) throws StorageContainerException {
 +    readLock();
 +    try {
 +      return containerDeletionChooser.chooseContainerForBlockDeletion(
 +          count, containerMap);
 +    } finally {
 +      readUnlock();
 +    }
 +  }
 +
 +  @VisibleForTesting
 +  public ContainerDeletionChoosingPolicy getContainerDeletionChooser() {
 +    return containerDeletionChooser;
 +  }
 +
 +  @Override
 +  public void incrPendingDeletionBlocks(int numBlocks, String containerId) {
 +    writeLock();
 +    try {
 +      ContainerStatus status = containerMap.get(containerId);
 +      status.incrPendingDeletionBlocks(numBlocks);
 +    } finally {
 +      writeUnlock();
 +    }
 +  }
 +
 +  @Override
 +  public void decrPendingDeletionBlocks(int numBlocks, String containerId) {
 +    writeLock();
 +    try {
 +      ContainerStatus status = containerMap.get(containerId);
 +      status.decrPendingDeletionBlocks(numBlocks);
 +    } finally {
 +      writeUnlock();
 +    }
 +  }
 +
 +  /**
 +   * Increase the read count of the container.
 +   *
 +   * @param containerName - Name of the container.
 +   */
 +  @Override
 +  public void incrReadCount(String containerName) {
 +    ContainerStatus status = containerMap.get(containerName);
 +    status.incrReadCount();
 +  }
 +
 +  public long getReadCount(String containerName) {
 +    ContainerStatus status = containerMap.get(containerName);
 +    return status.getReadCount();
 +  }
 +
 +  /**
 +   * Increase the read counter for bytes read from the container.
 +   *
 +   * @param containerName - Name of the container.
 +   * @param readBytes     - bytes read from the container.
 +   */
 +  @Override
 +  public void incrReadBytes(String containerName, long readBytes) {
 +    ContainerStatus status = containerMap.get(containerName);
 +    status.incrReadBytes(readBytes);
 +  }
 +
 +  public long getReadBytes(String containerName) {
 +    readLock();
 +    try {
 +      ContainerStatus status = containerMap.get(containerName);
 +      return status.getReadBytes();
 +    } finally {
 +      readUnlock();
 +    }
 +  }
 +
 +  /**
 +   * Increase the write count of the container.
 +   *
 +   * @param containerName - Name of the container.
 +   */
 +  @Override
 +  public void incrWriteCount(String containerName) {
 +    ContainerStatus status = containerMap.get(containerName);
 +    status.incrWriteCount();
 +  }
 +
 +  public long getWriteCount(String containerName) {
 +    ContainerStatus status = containerMap.get(containerName);
 +    return status.getWriteCount();
 +  }
 +
 +  /**
 +   * Increase the write counter for bytes written into the container.
 +   *
 +   * @param containerName - Name of the container.
 +   * @param writeBytes    - bytes written into the container.
 +   */
 +  @Override
 +  public void incrWriteBytes(String containerName, long writeBytes) {
 +    ContainerStatus status = containerMap.get(containerName);
 +    status.incrWriteBytes(writeBytes);
 +  }
 +
 +  public long getWriteBytes(String containerName) {
 +    ContainerStatus status = containerMap.get(containerName);
 +    return status.getWriteBytes();
 +  }
 +
 +  /**
 +   * Increase the bytes used by the container.
 +   *
 +   * @param containerName - Name of the container.
 +   * @param used          - additional bytes used by the container.
 +   * @return the current bytes used.
 +   */
 +  @Override
 +  public long incrBytesUsed(String containerName, long used) {
 +    ContainerStatus status = containerMap.get(containerName);
 +    return status.incrBytesUsed(used);
 +  }
 +
 +  /**
 +   * Decrease the bytes used by the container.
 +   *
 +   * @param containerName - Name of the container.
 +   * @param used          - additional bytes reclaimed by the container.
 +   * @return the current bytes used.
 +   */
 +  @Override
 +  public long decrBytesUsed(String containerName, long used) {
 +    ContainerStatus status = containerMap.get(containerName);
 +    return status.decrBytesUsed(used);
 +  }
 +
 +  public long getBytesUsed(String containerName) {
 +    ContainerStatus status = containerMap.get(containerName);
 +    return status.getBytesUsed();
 +  }
 +
 +  /**
 +   * Get the number of keys in the container.
 +   *
 +   * @param containerName - Name of the container.
 +   * @return the current key count.
 +   */
 +  @Override
 +  public long getNumKeys(String containerName) {
 +    ContainerStatus status = containerMap.get(containerName);
 +    return status.getNumKeys();
 +  }
 +
 +  /**
 +   * Get the container report state to send via HB to SCM.
 +   *
 +   * @return container report state.
 +   */
 +  @Override
 +  public ReportState getContainerReportState() {
 +    return containerReportManager.getContainerReportState();
 +  }
 +
 +}
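
For readers skimming this merged file: below is a minimal, hedged sketch of how
the ContainerManagerImpl above is typically driven, using only the methods that
appear in this diff. Construction of the Configuration, StorageLocation list,
DatanodeID, Pipeline and ContainerData arguments is assumed (it is not part of
this file), and exception handling is elided.

  // Sketch only; every call below corresponds to a method shown in the diff above.
  void containerLifecycle(Configuration config, List<StorageLocation> containerDirs,
      DatanodeID datanodeID, Pipeline pipeline, ContainerData containerData)
      throws Exception {
    ContainerManagerImpl manager = new ContainerManagerImpl();
    manager.init(config, containerDirs, datanodeID);   // scans containerDirs, loads existing containers
    manager.createContainer(pipeline, containerData);  // writes container info and meta, then re-reads them
    ContainerData data = manager.readContainer(containerData.getContainerName());
    manager.closeContainer(data.getContainerName());   // marks it immutable and compacts its metadata DB
    manager.writeLock();                               // shutdown() asserts the write lock is held
    try {
      manager.shutdown();
    } finally {
      manager.writeUnlock();
    }
  }

Note the lock discipline at the end: shutdown() checks hasWriteLock(), so the
caller has to take and release the write lock around it, as sketched above.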

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47919787/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47919787/hadoop-minicluster/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47919787/hadoop-project/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47919787/hadoop-tools/hadoop-ozone/pom.xml
----------------------------------------------------------------------
diff --cc hadoop-tools/hadoop-ozone/pom.xml
index 758eeae,0000000..df78a31
mode 100644,000000..100644
--- a/hadoop-tools/hadoop-ozone/pom.xml
+++ b/hadoop-tools/hadoop-ozone/pom.xml
@@@ -1,122 -1,0 +1,122 @@@
 +<?xml version="1.0" encoding="UTF-8"?>
 +<!--
 +  Licensed under the Apache License, Version 2.0 (the "License");
 +  you may not use this file except in compliance with the License.
 +  You may obtain a copy of the License at
 +
 +    http://www.apache.org/licenses/LICENSE-2.0
 +
 +  Unless required by applicable law or agreed to in writing, software
 +  distributed under the License is distributed on an "AS IS" BASIS,
 +  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 +  See the License for the specific language governing permissions and
 +  limitations under the License. See accompanying LICENSE file.
 +-->
 +<project xmlns="http://maven.apache.org/POM/4.0.0"
 +         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
 +         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
 +  <modelVersion>4.0.0</modelVersion>
 +  <parent>
 +    <groupId>org.apache.hadoop</groupId>
 +    <artifactId>hadoop-project</artifactId>
-     <version>3.1.0-SNAPSHOT</version>
++    <version>3.2.0-SNAPSHOT</version>
 +    <relativePath>../../hadoop-project</relativePath>
 +  </parent>
 +  <artifactId>hadoop-ozone</artifactId>
 +  <name>Apache Hadoop Ozone FileSystem</name>
 +  <packaging>jar</packaging>
 +
 +  <properties>
 +    <file.encoding>UTF-8</file.encoding>
 +    <downloadSources>true</downloadSources>
 +  </properties>
 +
 +  <build>
 +    <plugins>
 +      <plugin>
 +        <groupId>org.apache.maven.plugins</groupId>
 +        <artifactId>maven-jar-plugin</artifactId>
 +        <executions>
 +          <execution>
 +            <goals>
 +              <goal>test-jar</goal>
 +            </goals>
 +          </execution>
 +        </executions>
 +      </plugin>
 +      <plugin>
 +        <groupId>org.apache.maven.plugins</groupId>
 +        <artifactId>maven-dependency-plugin</artifactId>
 +        <executions>
 +          <execution>
 +            <id>deplist</id>
 +            <phase>compile</phase>
 +            <goals>
 +              <goal>list</goal>
 +            </goals>
 +            <configuration>
 +              <!-- build a shellprofile -->
 +              <outputFile>${project.basedir}/target/hadoop-tools-deps/${project.artifactId}.tools-optional.txt</outputFile>
 +            </configuration>
 +          </execution>
 +        </executions>
 +      </plugin>
 +    </plugins>
 +  </build>
 +
 +  <dependencies>
 +    <dependency>
 +      <groupId>org.apache.hadoop</groupId>
 +      <artifactId>hadoop-common</artifactId>
 +    </dependency>
 +    <dependency>
 +      <groupId>org.apache.hadoop</groupId>
 +      <artifactId>hadoop-common</artifactId>
 +      <scope>test</scope>
 +      <type>test-jar</type>
 +    </dependency>
 +    <dependency>
 +      <groupId>org.apache.hadoop</groupId>
 +      <artifactId>hadoop-hdfs</artifactId>
 +      <scope>compile</scope>
 +    </dependency>
 +    <dependency>
 +      <groupId>org.apache.hadoop</groupId>
 +      <artifactId>hadoop-hdfs</artifactId>
 +      <scope>test</scope>
 +      <type>test-jar</type>
 +    </dependency>
 +    <dependency>
 +      <groupId>org.apache.hadoop</groupId>
 +      <artifactId>hadoop-hdfs-client</artifactId>
 +      <scope>compile</scope>
 +    </dependency>
 +
 +    <dependency>
 +      <groupId>org.mockito</groupId>
 +      <artifactId>mockito-all</artifactId>
 +      <scope>test</scope>
 +    </dependency>
 +    <dependency>
 +      <groupId>junit</groupId>
 +      <artifactId>junit</artifactId>
 +      <scope>test</scope>
 +    </dependency>
 +    <dependency>
 +      <groupId>org.apache.hadoop</groupId>
 +      <artifactId>hadoop-distcp</artifactId>
 +      <scope>test</scope>
 +    </dependency>
 +    <dependency>
 +      <groupId>org.apache.hadoop</groupId>
 +      <artifactId>hadoop-distcp</artifactId>
 +      <scope>test</scope>
 +      <type>test-jar</type>
 +    </dependency>
 +    <dependency>
 +      <groupId>org.apache.hadoop</groupId>
 +      <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
 +      <scope>test</scope>
 +    </dependency>
 +  </dependencies>
 +</project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47919787/hadoop-tools/hadoop-tools-dist/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47919787/hadoop-tools/pom.xml
----------------------------------------------------------------------




[38/50] [abbrv] hadoop git commit: HADOOP-15204. Add Configuration API for parsing storage sizes. Contributed by Anu Engineer.

Posted by ae...@apache.org.
HADOOP-15204. Add Configuration API for parsing storage sizes. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8f66affd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8f66affd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8f66affd

Branch: refs/heads/HDFS-7240
Commit: 8f66affd6265c9e4231e18d7ca352fb3035dae9a
Parents: bddfe42
Author: Anu Engineer <ae...@apache.org>
Authored: Wed Feb 14 13:11:37 2018 -0800
Committer: Anu Engineer <ae...@apache.org>
Committed: Wed Feb 14 13:11:37 2018 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/conf/Configuration.java   |  80 +++
 .../org/apache/hadoop/conf/StorageSize.java     | 106 ++++
 .../org/apache/hadoop/conf/StorageUnit.java     | 530 +++++++++++++++++++
 .../apache/hadoop/conf/TestConfiguration.java   |  76 +++
 .../org/apache/hadoop/conf/TestStorageUnit.java | 277 ++++++++++
 5 files changed, 1069 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f66affd/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index fce2194..f8e4638 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -109,6 +109,9 @@ import org.w3c.dom.Element;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 
+import static org.apache.commons.lang3.StringUtils.isBlank;
+import static org.apache.commons.lang3.StringUtils.isNotBlank;
+
 /**
  * Provides access to configuration parameters.
  *
@@ -1818,6 +1821,83 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     }
     return durations;
   }
+  /**
+   * Gets the Storage Size from the config, or returns the defaultValue. The
+   * unit of return value is specified in target unit.
+   *
+   * @param name - Key Name
+   * @param defaultValue - Default Value -- e.g. 100MB
+   * @param targetUnit - The units that we want result to be in.
+   * @return double -- formatted in target Units
+   */
+  public double getStorageSize(String name, String defaultValue,
+      StorageUnit targetUnit) {
+    Preconditions.checkState(isNotBlank(name), "Key cannot be blank.");
+    String vString = get(name);
+    if (isBlank(vString)) {
+      vString = defaultValue;
+    }
+
+    // Please note: There is a bit of subtlety here. If the user specifies
+    // the default unit as "1GB", but the requested unit is MB, we will return
+    // the value in MB even though the default string is specified in GB.
+
+    // Converts a string like "1GB" to the unit specified in targetUnit.
+
+    StorageSize measure = StorageSize.parse(vString);
+    return convertStorageUnit(measure.getValue(), measure.getUnit(),
+        targetUnit);
+  }
+
+  /**
+   * Gets storage size from a config file.
+   *
+   * @param name - Key to read.
+   * @param defaultValue - The default value to return in case the key is
+   * not present.
+   * @param targetUnit - The Storage unit that should be used
+   * for the return value.
+   * @return - double value in the Storage Unit specified.
+   */
+  public double getStorageSize(String name, double defaultValue,
+      StorageUnit targetUnit) {
+    Preconditions.checkNotNull(targetUnit, "Conversion unit cannot be null.");
+    Preconditions.checkState(isNotBlank(name), "Name cannot be blank.");
+    String vString = get(name);
+    if (isBlank(vString)) {
+      return targetUnit.getDefault(defaultValue);
+    }
+
+    StorageSize measure = StorageSize.parse(vString);
+    return convertStorageUnit(measure.getValue(), measure.getUnit(),
+        targetUnit);
+
+  }
+
+  /**
+   * Sets Storage Size for the specified key.
+   *
+   * @param name - Key to set.
+   * @param value - The numeric value to set.
+   * @param unit - Storage Unit to be used.
+   */
+  public void setStorageSize(String name, double value, StorageUnit unit) {
+    set(name, value + unit.getShortName());
+  }
+
+  /**
+   * convert the value from one storage unit to another.
+   *
+   * @param value - value
+   * @param sourceUnit - Source unit to convert from
+   * @param targetUnit - target unit.
+   * @return double.
+   */
+  private double convertStorageUnit(double value, StorageUnit sourceUnit,
+      StorageUnit targetUnit) {
+    double byteValue = sourceUnit.toBytes(value);
+    return targetUnit.fromBytes(byteValue);
+  }
 
   /**
    * Get the value of the <code>name</code> property as a <code>Pattern</code>.
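
For quick reference, a minimal usage sketch of the storage-size accessors added to
Configuration above. The key names below are invented for illustration only; they are
not real Hadoop configuration keys.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.StorageUnit;

    public class StorageSizeConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);

        // Stored as the string "512.0mb" by setStorageSize.
        conf.setStorageSize("example.cache.size", 512, StorageUnit.MB);

        // Read back in a different unit: 0.5 (GB).
        double inGb = conf.getStorageSize("example.cache.size", "1GB", StorageUnit.GB);

        // Key missing: the default "1GB" is parsed and converted to KB (1048576.0).
        double inKb = conf.getStorageSize("example.missing.key", "1GB", StorageUnit.KB);

        System.out.println(inGb + " GB, " + inKb + " KB");
      }
    }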

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f66affd/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/StorageSize.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/StorageSize.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/StorageSize.java
new file mode 100644
index 0000000..6cad6f7
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/StorageSize.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ *
+ */
+
+package org.apache.hadoop.conf;
+
+import java.util.Locale;
+
+import static org.apache.commons.lang3.StringUtils.isNotBlank;
+
+/**
+ * A class that contains the numeric value and the unit of measure.
+ */
+public class StorageSize {
+  private final StorageUnit unit;
+  private final double value;
+
+  /**
+   * Constructs a Storage Measure, which contains the value and the unit of
+   * measure.
+   *
+   * @param unit - Unit of Measure
+   * @param value - Numeric value.
+   */
+  public StorageSize(StorageUnit unit, double value) {
+    this.unit = unit;
+    this.value = value;
+  }
+
+  private static void checkState(boolean state, String errorString){
+    if(!state) {
+      throw new IllegalStateException(errorString);
+    }
+  }
+
+  public static StorageSize parse(String value) {
+    checkState(isNotBlank(value), "value cannot be blank");
+    String sanitizedValue = value.trim().toLowerCase(Locale.ENGLISH);
+    StorageUnit parsedUnit = null;
+    for (StorageUnit unit : StorageUnit.values()) {
+      if (sanitizedValue.endsWith(unit.getShortName()) ||
+          sanitizedValue.endsWith(unit.getLongName()) ||
+          sanitizedValue.endsWith(unit.getSuffixChar())) {
+        parsedUnit = unit;
+        break;
+      }
+    }
+
+    if (parsedUnit == null) {
+      throw new IllegalArgumentException(value + " is not in expected format. " +
+          "Expected format is <number><unit>. e.g. 1000MB");
+    }
+
+
+    String suffix = "";
+    boolean found = false;
+
+    // We are trying to get the longest match first, so the order of
+    // matching is getLongName, getShortName and then getSuffixChar.
+    if (!found && sanitizedValue.endsWith(parsedUnit.getLongName())) {
+      found = true;
+      suffix = parsedUnit.getLongName();
+    }
+
+    if (!found && sanitizedValue.endsWith(parsedUnit.getShortName())) {
+      found = true;
+      suffix = parsedUnit.getShortName();
+    }
+
+    if (!found && sanitizedValue.endsWith(parsedUnit.getSuffixChar())) {
+      found = true;
+      suffix = parsedUnit.getSuffixChar();
+    }
+
+    checkState(found, "Something is wrong, we have to find a " +
+        "match. Internal error.");
+
+    String valString =
+        sanitizedValue.substring(0, value.length() - suffix.length());
+    return new StorageSize(parsedUnit, Double.parseDouble(valString));
+
+  }
+
+  public StorageUnit getUnit() {
+    return unit;
+  }
+
+  public double getValue() {
+    return value;
+  }
+
+}
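
As a standalone sketch of the parser just added, with illustrative inputs that exercise
the long-name, short-name and single-character suffixes:

    import org.apache.hadoop.conf.StorageSize;
    import org.apache.hadoop.conf.StorageUnit;

    public class StorageSizeParseExample {
      public static void main(String[] args) {
        StorageSize a = StorageSize.parse("2kilobytes"); // value 2.0, unit KB
        StorageSize b = StorageSize.parse("2kb");        // value 2.0, unit KB
        StorageSize c = StorageSize.parse("2k");         // value 2.0, unit KB

        System.out.println(a.getValue() + " " + a.getUnit()); // 2.0 kilobytes
        System.out.println(b.getUnit() == c.getUnit());       // true

        // Input with no recognizable suffix is rejected.
        try {
          StorageSize.parse("100");
        } catch (IllegalArgumentException expected) {
          System.out.println("rejected: " + expected.getMessage());
        }
      }
    }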

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f66affd/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/StorageUnit.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/StorageUnit.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/StorageUnit.java
new file mode 100644
index 0000000..fe3c6f8
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/StorageUnit.java
@@ -0,0 +1,530 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ *
+ */
+
+package org.apache.hadoop.conf;
+
+import java.math.BigDecimal;
+import java.math.RoundingMode;
+
+/**
+ * Class that maintains different forms of Storage Units.
+ */
+public enum StorageUnit {
+  /*
+    We rely on BYTES being the last to get longest matching short names first.
+    The short name of bytes is b and it will match with other longer names.
+
+    if we change this order, the corresponding code in
+    Configuration#parseStorageUnit needs to be changed too, since values()
+    call returns the Enums in declared order and we depend on it.
+   */
+
+  EB {
+    @Override
+    public double toBytes(double value) {
+      return multiply(value, EXABYTES);
+    }
+
+    @Override
+    public double toKBs(double value) {
+      return multiply(value, EXABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toMBs(double value) {
+      return multiply(value, EXABYTES / MEGABYTES);
+    }
+
+    @Override
+    public double toGBs(double value) {
+      return multiply(value, EXABYTES / GIGABYTES);
+    }
+
+    @Override
+    public double toTBs(double value) {
+      return multiply(value, EXABYTES / TERABYTES);
+    }
+
+    @Override
+    public double toPBs(double value) {
+      return multiply(value, EXABYTES / PETABYTES);
+    }
+
+    @Override
+    public double toEBs(double value) {
+      return value;
+    }
+
+    @Override
+    public String getLongName() {
+      return "exabytes";
+    }
+
+    @Override
+    public String getShortName() {
+      return "eb";
+    }
+
+    @Override
+    public String getSuffixChar() {
+      return "e";
+    }
+
+    @Override
+    public double getDefault(double value) {
+      return toEBs(value);
+    }
+
+    @Override
+    public double fromBytes(double value) {
+      return divide(value, EXABYTES);
+    }
+  },
+  PB {
+    @Override
+    public double toBytes(double value) {
+      return multiply(value, PETABYTES);
+    }
+
+    @Override
+    public double toKBs(double value) {
+      return multiply(value, PETABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toMBs(double value) {
+      return multiply(value, PETABYTES / MEGABYTES);
+    }
+
+    @Override
+    public double toGBs(double value) {
+      return multiply(value, PETABYTES / GIGABYTES);
+    }
+
+    @Override
+    public double toTBs(double value) {
+      return multiply(value, PETABYTES / TERABYTES);
+    }
+
+    @Override
+    public double toPBs(double value) {
+      return value;
+    }
+
+    @Override
+    public double toEBs(double value) {
+      return divide(value, EXABYTES / PETABYTES);
+    }
+
+    @Override
+    public String getLongName() {
+      return "petabytes";
+    }
+
+    @Override
+    public String getShortName() {
+      return "pb";
+    }
+
+    @Override
+    public String getSuffixChar() {
+      return "p";
+    }
+
+    @Override
+    public double getDefault(double value) {
+      return toPBs(value);
+    }
+
+    @Override
+    public double fromBytes(double value) {
+      return divide(value, PETABYTES);
+    }
+  },
+  TB {
+    @Override
+    public double toBytes(double value) {
+      return multiply(value, TERABYTES);
+    }
+
+    @Override
+    public double toKBs(double value) {
+      return multiply(value, TERABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toMBs(double value) {
+      return multiply(value, TERABYTES / MEGABYTES);
+    }
+
+    @Override
+    public double toGBs(double value) {
+      return multiply(value, TERABYTES / GIGABYTES);
+    }
+
+    @Override
+    public double toTBs(double value) {
+      return value;
+    }
+
+    @Override
+    public double toPBs(double value) {
+      return divide(value, PETABYTES / TERABYTES);
+    }
+
+    @Override
+    public double toEBs(double value) {
+      return divide(value, EXABYTES / TERABYTES);
+    }
+
+    @Override
+    public String getLongName() {
+      return "terabytes";
+    }
+
+    @Override
+    public String getShortName() {
+      return "tb";
+    }
+
+    @Override
+    public String getSuffixChar() {
+      return "t";
+    }
+
+    @Override
+    public double getDefault(double value) {
+      return toTBs(value);
+    }
+
+    @Override
+    public double fromBytes(double value) {
+      return divide(value, TERABYTES);
+    }
+  },
+  GB {
+    @Override
+    public double toBytes(double value) {
+      return multiply(value, GIGABYTES);
+    }
+
+    @Override
+    public double toKBs(double value) {
+      return multiply(value, GIGABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toMBs(double value) {
+      return multiply(value, GIGABYTES / MEGABYTES);
+    }
+
+    @Override
+    public double toGBs(double value) {
+      return value;
+    }
+
+    @Override
+    public double toTBs(double value) {
+      return divide(value, TERABYTES / GIGABYTES);
+    }
+
+    @Override
+    public double toPBs(double value) {
+      return divide(value, PETABYTES / GIGABYTES);
+    }
+
+    @Override
+    public double toEBs(double value) {
+      return divide(value, EXABYTES / GIGABYTES);
+    }
+
+    @Override
+    public String getLongName() {
+      return "gigabytes";
+    }
+
+    @Override
+    public String getShortName() {
+      return "gb";
+    }
+
+    @Override
+    public String getSuffixChar() {
+      return "g";
+    }
+
+    @Override
+    public double getDefault(double value) {
+      return toGBs(value);
+    }
+
+    @Override
+    public double fromBytes(double value) {
+      return divide(value, GIGABYTES);
+    }
+  },
+  MB {
+    @Override
+    public double toBytes(double value) {
+      return multiply(value, MEGABYTES);
+    }
+
+    @Override
+    public double toKBs(double value) {
+      return multiply(value, MEGABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toMBs(double value) {
+      return value;
+    }
+
+    @Override
+    public double toGBs(double value) {
+      return divide(value, GIGABYTES / MEGABYTES);
+    }
+
+    @Override
+    public double toTBs(double value) {
+      return divide(value, TERABYTES / MEGABYTES);
+    }
+
+    @Override
+    public double toPBs(double value) {
+      return divide(value, PETABYTES / MEGABYTES);
+    }
+
+    @Override
+    public double toEBs(double value) {
+      return divide(value, EXABYTES / MEGABYTES);
+    }
+
+    @Override
+    public String getLongName() {
+      return "megabytes";
+    }
+
+    @Override
+    public String getShortName() {
+      return "mb";
+    }
+
+    @Override
+    public String getSuffixChar() {
+      return "m";
+    }
+
+    @Override
+    public double fromBytes(double value) {
+      return divide(value, MEGABYTES);
+    }
+
+    @Override
+    public double getDefault(double value) {
+      return toMBs(value);
+    }
+  },
+  KB {
+    @Override
+    public double toBytes(double value) {
+      return multiply(value, KILOBYTES);
+    }
+
+    @Override
+    public double toKBs(double value) {
+      return value;
+    }
+
+    @Override
+    public double toMBs(double value) {
+      return divide(value, MEGABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toGBs(double value) {
+      return divide(value, GIGABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toTBs(double value) {
+      return divide(value, TERABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toPBs(double value) {
+      return divide(value, PETABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toEBs(double value) {
+      return divide(value, EXABYTES / KILOBYTES);
+    }
+
+    @Override
+    public String getLongName() {
+      return "kilobytes";
+    }
+
+    @Override
+    public String getShortName() {
+      return "kb";
+    }
+
+    @Override
+    public String getSuffixChar() {
+      return "k";
+    }
+
+    @Override
+    public double getDefault(double value) {
+      return toKBs(value);
+    }
+
+    @Override
+    public double fromBytes(double value) {
+      return divide(value, KILOBYTES);
+    }
+  },
+  BYTES {
+    @Override
+    public double toBytes(double value) {
+      return value;
+    }
+
+    @Override
+    public double toKBs(double value) {
+      return divide(value, KILOBYTES);
+    }
+
+    @Override
+    public double toMBs(double value) {
+      return divide(value, MEGABYTES);
+    }
+
+    @Override
+    public double toGBs(double value) {
+      return divide(value, GIGABYTES);
+    }
+
+    @Override
+    public double toTBs(double value) {
+      return divide(value, TERABYTES);
+    }
+
+    @Override
+    public double toPBs(double value) {
+      return divide(value, PETABYTES);
+    }
+
+    @Override
+    public double toEBs(double value) {
+      return divide(value, EXABYTES);
+    }
+
+    @Override
+    public String getLongName() {
+      return "bytes";
+    }
+
+    @Override
+    public String getShortName() {
+      return "b";
+    }
+
+    @Override
+    public String getSuffixChar() {
+      return "b";
+    }
+
+    @Override
+    public double getDefault(double value) {
+      return toBytes(value);
+    }
+
+    @Override
+    public double fromBytes(double value) {
+      return value;
+    }
+  };
+
+  private static final double BYTE = 1L;
+  private static final double KILOBYTES = BYTE * 1024L;
+  private static final double MEGABYTES = KILOBYTES * 1024L;
+  private static final double GIGABYTES = MEGABYTES * 1024L;
+  private static final double TERABYTES = GIGABYTES * 1024L;
+  private static final double PETABYTES = TERABYTES * 1024L;
+  private static final double EXABYTES = PETABYTES * 1024L;
+  private static final int PRECISION = 4;
+
+  /**
+   * Using BigDecimal to avoid issues with overflow and underflow.
+   *
+   * @param value - value
+   * @param divisor - divisor.
+   * @return -- returns a double that represents this value
+   */
+  private static double divide(double value, double divisor) {
+    BigDecimal val = new BigDecimal(value);
+    BigDecimal bDivisor = new BigDecimal(divisor);
+    return val.divide(bDivisor).setScale(PRECISION, RoundingMode.HALF_UP)
+        .doubleValue();
+  }
+
+  /**
+   * Using BigDecimal so we can throw if we are overflowing the Long.Max.
+   *
+   * @param first - First Num.
+   * @param second - Second Num.
+   * @return Returns a double
+   */
+  private static double multiply(double first, double second) {
+    BigDecimal firstVal = new BigDecimal(first);
+    BigDecimal secondVal = new BigDecimal(second);
+    return firstVal.multiply(secondVal)
+        .setScale(PRECISION, RoundingMode.HALF_UP).doubleValue();
+  }
+
+  public abstract double toBytes(double value);
+
+  public abstract double toKBs(double value);
+
+  public abstract double toMBs(double value);
+
+  public abstract double toGBs(double value);
+
+  public abstract double toTBs(double value);
+
+  public abstract double toPBs(double value);
+
+  public abstract double toEBs(double value);
+
+  public abstract String getLongName();
+
+  public abstract String getShortName();
+
+  public abstract String getSuffixChar();
+
+  public abstract double getDefault(double value);
+
+  public abstract double fromBytes(double value);
+
+  public String toString() {
+    return getLongName();
+  }
+
+}
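
The enum can also be used directly for conversions; a small sketch, with inputs chosen so
the results stay exact under the four-decimal rounding applied by the divide/multiply
helpers above:

    import org.apache.hadoop.conf.StorageUnit;

    public class StorageUnitExample {
      public static void main(String[] args) {
        System.out.println(StorageUnit.GB.toMBs(4));     // 4096.0
        System.out.println(StorageUnit.GB.toBytes(4));   // 4.294967296E9
        System.out.println(StorageUnit.GB.toTBs(4096));  // 4.0

        // Round-trip through bytes: 1.5 TB is 1536 GB.
        double bytes = StorageUnit.TB.toBytes(1.5);
        System.out.println(StorageUnit.GB.fromBytes(bytes)); // 1536.0
      }
    }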

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f66affd/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index 78dcdd6..24ec4fc 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -48,7 +48,15 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+
+import static org.apache.hadoop.conf.StorageUnit.BYTES;
+import static org.apache.hadoop.conf.StorageUnit.GB;
+import static org.apache.hadoop.conf.StorageUnit.KB;
+import static org.apache.hadoop.conf.StorageUnit.MB;
+import static org.apache.hadoop.conf.StorageUnit.TB;
+import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.*;
 import static org.junit.Assert.assertArrayEquals;
 
@@ -68,10 +76,13 @@ import org.apache.log4j.AppenderSkeleton;
 import org.apache.log4j.Logger;
 import org.apache.log4j.spi.LoggingEvent;
 import org.hamcrest.CoreMatchers;
+import org.junit.rules.ExpectedException;
 import org.mockito.Mockito;
 
 public class TestConfiguration {
 
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
   private static final double DOUBLE_DELTA = 0.000000001f;
   private Configuration conf;
   final static String CONFIG = new File("./test-config-TestConfiguration.xml").getAbsolutePath();
@@ -1326,6 +1337,71 @@ public class TestConfiguration {
   }
 
   @Test
+  public void testStorageUnit() {
+    final String key = "valid.key";
+    final String nonKey = "not.a.key";
+    Configuration conf = new Configuration(false);
+
+    conf.setStorageSize(key, 10, MB);
+    // This call returns the value specified in the Key as a double in MBs.
+    assertThat(conf.getStorageSize(key, "1GB", MB),
+        is(10.0));
+
+    // Since this key is missing, this call converts the default value of 1GB
+    // to MBs and returns that value.
+    assertThat(conf.getStorageSize(nonKey, "1GB", MB),
+        is(1024.0));
+
+
+    conf.setStorageSize(key, 1024, BYTES);
+    assertThat(conf.getStorageSize(key, 100, KB), is(1.0));
+
+    assertThat(conf.getStorageSize(nonKey, 100.0, KB), is(100.0));
+
+    // We try out different kinds of String formats to see if they work and,
+    // during read, we also try to read using different Storage Units.
+    conf.setStrings(key, "1TB");
+    assertThat(conf.getStorageSize(key, "1PB", GB), is(1024.0));
+
+    conf.setStrings(key, "1bytes");
+    assertThat(conf.getStorageSize(key, "1PB", KB), is(0.001));
+
+    conf.setStrings(key, "2048b");
+    assertThat(conf.getStorageSize(key, "1PB", KB), is(2.0));
+
+    conf.setStrings(key, "64 GB");
+    assertThat(conf.getStorageSize(key, "1PB", GB), is(64.0));
+
+    // Match the parsing patterns of getLongBytes, which takes single char
+    // suffix.
+    conf.setStrings(key, "1T");
+    assertThat(conf.getStorageSize(key, "1GB", TB), is(1.0));
+
+    conf.setStrings(key, "1k");
+    assertThat(conf.getStorageSize(key, "1GB", KB), is(1.0));
+
+    conf.setStrings(key, "10m");
+    assertThat(conf.getStorageSize(key, "1GB", MB), is(10.0));
+
+
+
+    // Missing format specification, this should throw.
+    conf.setStrings(key, "100");
+    thrown.expect(IllegalArgumentException.class);
+    conf.getStorageSize(key, "1PB", GB);
+
+    // illegal format specification, this should throw.
+    conf.setStrings(key, "1HB");
+    thrown.expect(IllegalArgumentException.class);
+    conf.getStorageSize(key, "1PB", GB);
+
+    // Illegal number  specification, this should throw.
+    conf.setStrings(key, "HadoopGB");
+    thrown.expect(IllegalArgumentException.class);
+    conf.getStorageSize(key, "1PB", GB);
+  }
+
+  @Test
   public void testTimeDurationWarning() {
     // check warn for possible loss of precision
     final String warnFormat = "Possible loss of precision converting %s" +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f66affd/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestStorageUnit.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestStorageUnit.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestStorageUnit.java
new file mode 100644
index 0000000..e29345d
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestStorageUnit.java
@@ -0,0 +1,277 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ *
+ */
+
+package org.apache.hadoop.conf;
+
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.MatcherAssert.assertThat;
+
+/**
+ * Tests that Storage Units work as expected.
+ */
+public class TestStorageUnit {
+  final static double KB = 1024.0;
+  final static double MB = KB * 1024.0;
+  final static double GB = MB * 1024.0;
+  final static double TB = GB * 1024.0;
+  final static double PB = TB * 1024.0;
+  final static double EB = PB * 1024.0;
+
+  @Test
+  public void testByteToKiloBytes() {
+    Map<Double, Double> results = new HashMap<>();
+    results.put(1024.0, 1.0);
+    results.put(2048.0, 2.0);
+    results.put(-1024.0, -1.0);
+    results.put(34565.0, 33.7549);
+    results.put(223344332.0, 218109.6992);
+    results.put(1234983.0, 1206.0381);
+    results.put(1234332.0, 1205.4023);
+    results.put(0.0, 0.0);
+
+    for (Map.Entry<Double, Double> entry : results.entrySet()) {
+      assertThat(StorageUnit.BYTES.toKBs(entry.getKey()), is(entry.getValue()));
+    }
+  }
+
+  @Test
+  public void testBytesToMegaBytes() {
+    Map<Double, Double> results = new HashMap<>();
+    results.put(1048576.0, 1.0);
+    results.put(24117248.0, 23.0);
+    results.put(459920023.0, 438.6139);
+    results.put(234443233.0, 223.5825);
+    results.put(-35651584.0, -34.0);
+    results.put(0.0, 0.0);
+    for (Map.Entry<Double, Double> entry : results.entrySet()) {
+      assertThat(StorageUnit.BYTES.toMBs(entry.getKey()), is(entry.getValue()));
+    }
+  }
+
+  @Test
+  public void testBytesToGigaBytes() {
+    Map<Double, Double> results = new HashMap<>();
+    results.put(1073741824.0, 1.0);
+    results.put(24696061952.0, 23.0);
+    results.put(459920023.0, 0.4283);
+    results.put(234443233.0, 0.2183);
+    results.put(-36507222016.0, -34.0);
+    results.put(0.0, 0.0);
+    for (Map.Entry<Double, Double> entry : results.entrySet()) {
+      assertThat(StorageUnit.BYTES.toGBs(entry.getKey()), is(entry.getValue()));
+    }
+  }
+
+  @Test
+  public void testBytesToTerraBytes() {
+    Map<Double, Double> results = new HashMap<>();
+    results.put(1.09951E+12, 1.0);
+    results.put(2.52888E+13, 23.0);
+    results.put(459920023.0, 0.0004);
+    results.put(234443233.0, 0.0002);
+    results.put(-3.73834E+13, -34.0);
+    results.put(0.0, 0.0);
+    for (Map.Entry<Double, Double> entry : results.entrySet()) {
+      assertThat(StorageUnit.BYTES.toTBs(entry.getKey()), is(entry.getValue()));
+    }
+  }
+
+  @Test
+  public void testBytesToPetaBytes() {
+    Map<Double, Double> results = new HashMap<>();
+    results.put(1.1259E+15, 1.0);
+    results.put(2.58957E+16, 23.0);
+    results.put(4.70958E+11, 0.0004);
+    results.put(234443233.0, 0.0000); // Out of precision window.
+    results.put(-3.82806E+16, -34.0);
+    results.put(0.0, 0.0);
+    for (Map.Entry<Double, Double> entry : results.entrySet()) {
+      assertThat(StorageUnit.BYTES.toPBs(entry.getKey()), is(entry.getValue()));
+    }
+  }
+
+  @Test
+  public void testBytesToExaBytes() {
+    Map<Double, Double> results = new HashMap<>();
+    results.put(1.15292E+18, 1.0);
+    results.put(2.65172E+19, 23.0);
+    results.put(4.82261E+14, 0.0004);
+    results.put(234443233.0, 0.0000); // Out of precision window.
+    results.put(-3.91993E+19, -34.0);
+    results.put(0.0, 0.0);
+    for (Map.Entry<Double, Double> entry : results.entrySet()) {
+      assertThat(StorageUnit.BYTES.toEBs(entry.getKey()), is(entry.getValue()));
+    }
+  }
+
+  @Test
+  public void testByteConversions() {
+    assertThat(StorageUnit.BYTES.getShortName(), is("b"));
+    assertThat(StorageUnit.BYTES.getSuffixChar(), is("b"));
+
+    assertThat(StorageUnit.BYTES.getLongName(), is("bytes"));
+    assertThat(StorageUnit.BYTES.toString(), is("bytes"));
+    assertThat(StorageUnit.BYTES.toBytes(1), is(1.0));
+    assertThat(StorageUnit.BYTES.toBytes(1024),
+        is(StorageUnit.BYTES.getDefault(1024)));
+    assertThat(StorageUnit.BYTES.fromBytes(10), is(10.0));
+  }
+
+  @Test
+  public void testKBConversions() {
+    assertThat(StorageUnit.KB.getShortName(), is("kb"));
+    assertThat(StorageUnit.KB.getSuffixChar(), is("k"));
+    assertThat(StorageUnit.KB.getLongName(), is("kilobytes"));
+    assertThat(StorageUnit.KB.toString(), is("kilobytes"));
+    assertThat(StorageUnit.KB.toKBs(1024),
+        is(StorageUnit.KB.getDefault(1024)));
+
+
+    assertThat(StorageUnit.KB.toBytes(1), is(KB));
+    assertThat(StorageUnit.KB.fromBytes(KB), is(1.0));
+
+    assertThat(StorageUnit.KB.toKBs(10), is(10.0));
+    assertThat(StorageUnit.KB.toMBs(3.0 * 1024.0), is(3.0));
+    assertThat(StorageUnit.KB.toGBs(1073741824), is(1024.0));
+    assertThat(StorageUnit.KB.toTBs(1073741824), is(1.0));
+    assertThat(StorageUnit.KB.toPBs(1.0995116e+12), is(1.0));
+    assertThat(StorageUnit.KB.toEBs(1.1258999e+15), is(1.0));
+  }
+
+  @Test
+  public void testMBConversions() {
+    assertThat(StorageUnit.MB.getShortName(), is("mb"));
+    assertThat(StorageUnit.MB.getSuffixChar(), is("m"));
+    assertThat(StorageUnit.MB.getLongName(), is("megabytes"));
+    assertThat(StorageUnit.MB.toString(), is("megabytes"));
+    assertThat(StorageUnit.MB.toMBs(1024),
+        is(StorageUnit.MB.getDefault(1024)));
+
+
+
+    assertThat(StorageUnit.MB.toBytes(1), is(MB));
+    assertThat(StorageUnit.MB.fromBytes(MB), is(1.0));
+
+    assertThat(StorageUnit.MB.toKBs(1), is(1024.0));
+    assertThat(StorageUnit.MB.toMBs(10), is(10.0));
+
+    assertThat(StorageUnit.MB.toGBs(44040192), is(43008.0));
+    assertThat(StorageUnit.MB.toTBs(1073741824), is(1024.0));
+    assertThat(StorageUnit.MB.toPBs(1073741824), is(1.0));
+    assertThat(StorageUnit.MB.toEBs(1 * (EB/MB)), is(1.0));
+  }
+
+  @Test
+  public void testGBConversions() {
+    assertThat(StorageUnit.GB.getShortName(), is("gb"));
+    assertThat(StorageUnit.GB.getSuffixChar(), is("g"));
+    assertThat(StorageUnit.GB.getLongName(), is("gigabytes"));
+    assertThat(StorageUnit.GB.toString(), is("gigabytes"));
+    assertThat(StorageUnit.GB.toGBs(1024),
+        is(StorageUnit.GB.getDefault(1024)));
+
+
+    assertThat(StorageUnit.GB.toBytes(1), is(GB));
+    assertThat(StorageUnit.GB.fromBytes(GB), is(1.0));
+
+    assertThat(StorageUnit.GB.toKBs(1), is(1024.0 * 1024));
+    assertThat(StorageUnit.GB.toMBs(10), is(10.0 * 1024));
+
+    assertThat(StorageUnit.GB.toGBs(44040192.0), is(44040192.0));
+    assertThat(StorageUnit.GB.toTBs(1073741824), is(1048576.0));
+    assertThat(StorageUnit.GB.toPBs(1.07375e+9), is(1024.0078));
+    assertThat(StorageUnit.GB.toEBs(1 * (EB/GB)), is(1.0));
+  }
+
+  @Test
+  public void testTBConversions() {
+    assertThat(StorageUnit.TB.getShortName(), is("tb"));
+    assertThat(StorageUnit.TB.getSuffixChar(), is("t"));
+    assertThat(StorageUnit.TB.getLongName(), is("terabytes"));
+    assertThat(StorageUnit.TB.toString(), is("terabytes"));
+    assertThat(StorageUnit.TB.toTBs(1024),
+        is(StorageUnit.TB.getDefault(1024)));
+
+    assertThat(StorageUnit.TB.toBytes(1), is(TB));
+    assertThat(StorageUnit.TB.fromBytes(TB), is(1.0));
+
+    assertThat(StorageUnit.TB.toKBs(1), is(1024.0 * 1024* 1024));
+    assertThat(StorageUnit.TB.toMBs(10), is(10.0 * 1024 * 1024));
+
+    assertThat(StorageUnit.TB.toGBs(44040192.0), is(45097156608.0));
+    assertThat(StorageUnit.TB.toTBs(1073741824.0), is(1073741824.0));
+    assertThat(StorageUnit.TB.toPBs(1024), is(1.0));
+    assertThat(StorageUnit.TB.toEBs(1 * (EB/TB)), is(1.0));
+  }
+
+  @Test
+  public void testPBConversions() {
+    assertThat(StorageUnit.PB.getShortName(), is("pb"));
+    assertThat(StorageUnit.PB.getSuffixChar(), is("p"));
+    assertThat(StorageUnit.PB.getLongName(), is("petabytes"));
+    assertThat(StorageUnit.PB.toString(), is("petabytes"));
+    assertThat(StorageUnit.PB.toPBs(1024),
+        is(StorageUnit.PB.getDefault(1024)));
+
+
+    assertThat(StorageUnit.PB.toBytes(1), is(PB));
+    assertThat(StorageUnit.PB.fromBytes(PB), is(1.0));
+
+    assertThat(StorageUnit.PB.toKBs(1), is(PB/KB));
+    assertThat(StorageUnit.PB.toMBs(10), is(10.0 * (PB / MB)));
+
+    assertThat(StorageUnit.PB.toGBs(44040192.0),
+        is(44040192.0 * PB/GB));
+    assertThat(StorageUnit.PB.toTBs(1073741824.0),
+        is(1073741824.0 * (PB/TB)));
+    assertThat(StorageUnit.PB.toPBs(1024.0), is(1024.0));
+    assertThat(StorageUnit.PB.toEBs(1024.0), is(1.0));
+  }
+
+
+  @Test
+  public void testEBConversions() {
+    assertThat(StorageUnit.EB.getShortName(), is("eb"));
+    assertThat(StorageUnit.EB.getSuffixChar(), is("e"));
+
+    assertThat(StorageUnit.EB.getLongName(), is("exabytes"));
+    assertThat(StorageUnit.EB.toString(), is("exabytes"));
+    assertThat(StorageUnit.EB.toEBs(1024),
+        is(StorageUnit.EB.getDefault(1024)));
+
+    assertThat(StorageUnit.EB.toBytes(1), is(EB));
+    assertThat(StorageUnit.EB.fromBytes(EB), is(1.0));
+
+    assertThat(StorageUnit.EB.toKBs(1), is(EB/KB));
+    assertThat(StorageUnit.EB.toMBs(10), is(10.0 * (EB / MB)));
+
+    assertThat(StorageUnit.EB.toGBs(44040192.0),
+        is(44040192.0 * EB/GB));
+    assertThat(StorageUnit.EB.toTBs(1073741824.0),
+        is(1073741824.0 * (EB/TB)));
+    assertThat(StorageUnit.EB.toPBs(1.0), is(1024.0));
+    assertThat(StorageUnit.EB.toEBs(42.0), is(42.0));
+  }
+
+
+}




[44/50] [abbrv] hadoop git commit: HADOOP-13972. ADLS to support per-store configuration. Contributed by Sharad Sonker.

Posted by ae...@apache.org.
HADOOP-13972. ADLS to support per-store configuration.
Contributed by Sharad Sonker.

(cherry picked from commit 050f5287b79324b7f6231b879c0bfc608203b980)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/481d79fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/481d79fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/481d79fe

Branch: refs/heads/HDFS-7240
Commit: 481d79fedc48942654dab08e23e71e80c8eb2aca
Parents: 9a013b2
Author: Steve Loughran <st...@apache.org>
Authored: Thu Feb 15 16:25:55 2018 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Thu Feb 15 16:27:31 2018 +0000

----------------------------------------------------------------------
 .../org/apache/hadoop/fs/adl/AdlConfKeys.java   |   5 +
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  81 +++++++++-
 .../src/site/markdown/index.md                  |  37 +++++
 .../fs/adl/TestValidateConfiguration.java       | 152 +++++++++++++++----
 .../hadoop/fs/adl/common/Parallelized.java      |   2 +-
 5 files changed, 239 insertions(+), 38 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/481d79fe/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
index 790902c..e3a4ad6 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
@@ -33,6 +33,11 @@ public final class AdlConfKeys {
   public static final String AZURE_AD_REFRESH_URL_KEY =
       "fs.adl.oauth2.refresh.url";
 
+  public static final String AZURE_AD_ACCOUNT_PREFIX =
+          "fs.adl.account.";
+  public static final String AZURE_AD_PREFIX =
+          "fs.adl.";
+
   // optional when provider type is refresh or client id.
   public static final String AZURE_AD_TOKEN_PROVIDER_CLASS_KEY =
       "fs.adl.oauth2.access.token.provider";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481d79fe/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index a496595..9f54a36 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -24,8 +24,10 @@ import java.net.URI;
 import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.List;
+import java.util.Map;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import com.microsoft.azure.datalake.store.ADLStoreClient;
 import com.microsoft.azure.datalake.store.ADLStoreOptions;
 import com.microsoft.azure.datalake.store.DirectoryEntry;
@@ -37,6 +39,8 @@ import com.microsoft.azure.datalake.store.oauth2.ClientCredsTokenProvider;
 import com.microsoft.azure.datalake.store.oauth2.DeviceCodeTokenProvider;
 import com.microsoft.azure.datalake.store.oauth2.MsiTokenProvider;
 import com.microsoft.azure.datalake.store.oauth2.RefreshTokenBasedTokenProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -74,6 +78,8 @@ import static org.apache.hadoop.fs.adl.AdlConfKeys.*;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class AdlFileSystem extends FileSystem {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(AdlFileSystem.class);
   public static final String SCHEME = "adl";
   static final int DEFAULT_PORT = 443;
   private URI uri;
@@ -115,12 +121,19 @@ public class AdlFileSystem extends FileSystem {
   /**
    * Called after a new FileSystem instance is constructed.
    *
-   * @param storeUri a uri whose authority section names the host, port, etc.
-   *                 for this FileSystem
-   * @param conf     the configuration
+   * @param storeUri      a uri whose authority section names the host, port,
+   *                      etc. for this FileSystem
+   * @param originalConf  the configuration to use for the FS. The account-
+   *                      specific options are patched over the base ones
+   *                      before any use is made of the config.
    */
   @Override
-  public void initialize(URI storeUri, Configuration conf) throws IOException {
+  public void initialize(URI storeUri, Configuration originalConf)
+      throws IOException {
+    String hostname = storeUri.getHost();
+    String accountName = getAccountNameFromFQDN(hostname);
+    Configuration conf = propagateAccountOptions(originalConf, accountName);
+
     super.initialize(storeUri, conf);
     this.setConf(conf);
     this.uri = URI
@@ -144,7 +157,6 @@ public class AdlFileSystem extends FileSystem {
 
     String accountFQDN = null;
     String mountPoint = null;
-    String hostname = storeUri.getHost();
     if (!hostname.contains(".") && !hostname.equalsIgnoreCase(
         "localhost")) {  // this is a symbolic name. Resolve it.
       String hostNameProperty = "dfs.adls." + hostname + ".hostname";
@@ -985,4 +997,63 @@ public class AdlFileSystem extends FileSystem {
     oidOrUpn = enableUPN ? UserGroupRepresentation.UPN :
         UserGroupRepresentation.OID;
   }
+
+  /**
+   * Gets ADL account name from ADL FQDN.
+   * @param accountFQDN ADL account fqdn
+   * @return ADL account name
+   */
+  public static String getAccountNameFromFQDN(String accountFQDN) {
+    return accountFQDN.contains(".")
+            ? accountFQDN.substring(0, accountFQDN.indexOf("."))
+            : accountFQDN;
+  }
+
+  /**
+   * Propagates account-specific settings into generic ADL configuration keys.
+   * This is done by propagating the values of the form
+   * {@code fs.adl.account.${account_name}.key} to
+   * {@code fs.adl.key}, for all values of "key"
+   *
+   * The source of the updated property is set to the key name of the account
+   * property, to aid in diagnostics of where things came from.
+   *
+   * Returns a new configuration. Why the clone?
+   * You can use the same conf for different filesystems, and the original
+   * values are not updated.
+   *
+   *
+   * @param source Source Configuration object
+   * @param accountName account name. Must not be empty
+   * @return a (potentially) patched clone of the original
+   */
+  public static Configuration propagateAccountOptions(Configuration source,
+      String accountName) {
+
+    Preconditions.checkArgument(StringUtils.isNotEmpty(accountName),
+        "accountName");
+    final String accountPrefix = AZURE_AD_ACCOUNT_PREFIX + accountName +'.';
+    LOG.debug("Propagating entries under {}", accountPrefix);
+    final Configuration dest = new Configuration(source);
+    for (Map.Entry<String, String> entry : source) {
+      final String key = entry.getKey();
+      // get the (unexpanded) value.
+      final String value = entry.getValue();
+      if (!key.startsWith(accountPrefix) || accountPrefix.equals(key)) {
+        continue;
+      }
+      // there's an account prefix, so strip it
+      final String stripped = key.substring(accountPrefix.length());
+
+      // propagate the value, building a new origin field.
+      // to track overwrites, the generic key is overwritten even if it
+      // already matches the new one.
+      String origin = "[" + StringUtils.join(
+              source.getPropertySources(key), ", ") +"]";
+      final String generic = AZURE_AD_PREFIX + stripped;
+      LOG.debug("Updating {} from {}", generic, origin);
+      dest.set(generic, value, key + " via " + origin);
+    }
+    return dest;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481d79fe/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
index d2b6edf..ad2a983 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
@@ -36,6 +36,7 @@ This support comes via the JAR file `azure-datalake-store.jar`.
 * Tested for scale.
 * API `setOwner()`, `setAcl`, `removeAclEntries()`, `modifyAclEntries()` accepts UPN or OID
   (Object ID) as user and group names.
+* Supports per-account configuration.
 
 ## Limitations
 
@@ -328,6 +329,42 @@ Add the following properties to `core-site.xml`
   </description>
 </property>
 ```
+## Configurations for different ADL accounts
+Different ADL accounts can be accessed with different ADL client configurations.
+This also allows for different login details.
+
+1. All `fs.adl` options can be set on a per-account basis.
+1. The account-specific option is set by replacing the `fs.adl.` prefix on an option
+with `fs.adl.account.ACCOUNTNAME.`, where `ACCOUNTNAME` is the name of the account.
+1. When connecting to an account, all options explicitly set will override
+the base `fs.adl.` values.
+
+As an example, a configuration could have a base configuration to use the public account
+`adl://<some-public-account>.azuredatalakestore.net/` and an account-specific configuration
+to use some private account `adl://myprivateaccount.azuredatalakestore.net/`.
+
+```xml
+<property>
+  <name>fs.adl.oauth2.client.id</name>
+  <value>CLIENTID</value>
+</property>
+
+<property>
+  <name>fs.adl.oauth2.credential</name>
+  <value>CREDENTIAL</value>
+</property>
+
+<property>
+  <name>fs.adl.account.myprivateaccount.oauth2.client.id</name>
+  <value>CLIENTID1</value>
+</property>
+
+<property>
+  <name>fs.adl.account.myprivateaccount.oauth2.credential</name>
+  <value>CREDENTIAL1</value>
+</property>
+```
+
 ## Testing the azure-datalake-store Module
 The `hadoop-azure` module includes a full suite of unit tests.
 Most of the tests will run without additional configuration by running `mvn test`.
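
To complement the XML above, a rough Java sketch of opening both accounts side by side
from the same Configuration. The account names are placeholders, and real credentials in
core-site.xml are of course required for this to actually run:

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TwoAdlAccountsExample {
      public static void main(String[] args) throws Exception {
        // Carries both the base fs.adl.* options and the
        // fs.adl.account.myprivateaccount.* overrides shown above.
        Configuration conf = new Configuration();

        FileSystem publicFs = FileSystem.get(
            new URI("adl://somepublicaccount.azuredatalakestore.net/"), conf);
        FileSystem privateFs = FileSystem.get(
            new URI("adl://myprivateaccount.azuredatalakestore.net/"), conf);

        // Each instance authenticates with its own client id/credential,
        // resolved from the same Configuration object.
        System.out.println(publicFs.exists(new Path("/")));
        System.out.println(privateFs.exists(new Path("/")));
      }
    }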

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481d79fe/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestValidateConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestValidateConfiguration.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestValidateConfiguration.java
index 3d51b42..0308a69 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestValidateConfiguration.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestValidateConfiguration.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.fs.adl;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
 import org.junit.Test;
 
 import static org.apache.hadoop.fs.adl.AdlConfKeys.ADL_BLOCK_SIZE;
@@ -58,6 +57,8 @@ import static org.apache.hadoop.fs.adl.AdlConfKeys
     .TOKEN_PROVIDER_TYPE_REFRESH_TOKEN;
 import static org.apache.hadoop.fs.adl.AdlConfKeys.WRITE_BUFFER_SIZE_KEY;
 
+import static org.junit.Assert.assertEquals;
+
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
@@ -70,50 +71,48 @@ public class TestValidateConfiguration {
 
   @Test
   public void validateConfigurationKeys() {
-    Assert
-        .assertEquals("fs.adl.oauth2.refresh.url", AZURE_AD_REFRESH_URL_KEY);
-    Assert.assertEquals("fs.adl.oauth2.access.token.provider",
+    assertEquals("fs.adl.oauth2.refresh.url", AZURE_AD_REFRESH_URL_KEY);
+    assertEquals("fs.adl.oauth2.access.token.provider",
         AZURE_AD_TOKEN_PROVIDER_CLASS_KEY);
-    Assert.assertEquals("fs.adl.oauth2.client.id", AZURE_AD_CLIENT_ID_KEY);
-    Assert.assertEquals("fs.adl.oauth2.refresh.token",
+    assertEquals("fs.adl.oauth2.client.id", AZURE_AD_CLIENT_ID_KEY);
+    assertEquals("fs.adl.oauth2.refresh.token",
         AZURE_AD_REFRESH_TOKEN_KEY);
-    Assert
-        .assertEquals("fs.adl.oauth2.credential", AZURE_AD_CLIENT_SECRET_KEY);
-    Assert.assertEquals("adl.debug.override.localuserasfileowner",
+    assertEquals("fs.adl.oauth2.credential", AZURE_AD_CLIENT_SECRET_KEY);
+    assertEquals("adl.debug.override.localuserasfileowner",
         ADL_DEBUG_OVERRIDE_LOCAL_USER_AS_OWNER);
 
-    Assert.assertEquals("fs.adl.oauth2.access.token.provider.type",
+    assertEquals("fs.adl.oauth2.access.token.provider.type",
         AZURE_AD_TOKEN_PROVIDER_TYPE_KEY);
 
-    Assert.assertEquals("adl.feature.client.cache.readahead",
+    assertEquals("adl.feature.client.cache.readahead",
         READ_AHEAD_BUFFER_SIZE_KEY);
 
-    Assert.assertEquals("adl.feature.client.cache.drop.behind.writes",
+    assertEquals("adl.feature.client.cache.drop.behind.writes",
         WRITE_BUFFER_SIZE_KEY);
 
-    Assert.assertEquals("RefreshToken", TOKEN_PROVIDER_TYPE_REFRESH_TOKEN);
+    assertEquals("RefreshToken", TOKEN_PROVIDER_TYPE_REFRESH_TOKEN);
 
-    Assert.assertEquals("ClientCredential", TOKEN_PROVIDER_TYPE_CLIENT_CRED);
+    assertEquals("ClientCredential", TOKEN_PROVIDER_TYPE_CLIENT_CRED);
 
-    Assert.assertEquals("adl.enable.client.latency.tracker",
+    assertEquals("adl.enable.client.latency.tracker",
         LATENCY_TRACKER_KEY);
 
-    Assert.assertEquals(true, LATENCY_TRACKER_DEFAULT);
+    assertEquals(true, LATENCY_TRACKER_DEFAULT);
 
-    Assert.assertEquals(true, ADL_EXPERIMENT_POSITIONAL_READ_DEFAULT);
+    assertEquals(true, ADL_EXPERIMENT_POSITIONAL_READ_DEFAULT);
 
-    Assert.assertEquals("adl.feature.experiment.positional.read.enable",
+    assertEquals("adl.feature.experiment.positional.read.enable",
         ADL_EXPERIMENT_POSITIONAL_READ_KEY);
 
-    Assert.assertEquals(1, ADL_REPLICATION_FACTOR);
-    Assert.assertEquals(256 * 1024 * 1024, ADL_BLOCK_SIZE);
-    Assert.assertEquals(false, ADL_DEBUG_SET_LOCAL_USER_AS_OWNER_DEFAULT);
-    Assert.assertEquals(4 * 1024 * 1024, DEFAULT_READ_AHEAD_BUFFER_SIZE);
-    Assert.assertEquals(4 * 1024 * 1024, DEFAULT_WRITE_AHEAD_BUFFER_SIZE);
+    assertEquals(1, ADL_REPLICATION_FACTOR);
+    assertEquals(256 * 1024 * 1024, ADL_BLOCK_SIZE);
+    assertEquals(false, ADL_DEBUG_SET_LOCAL_USER_AS_OWNER_DEFAULT);
+    assertEquals(4 * 1024 * 1024, DEFAULT_READ_AHEAD_BUFFER_SIZE);
+    assertEquals(4 * 1024 * 1024, DEFAULT_WRITE_AHEAD_BUFFER_SIZE);
 
-    Assert.assertEquals("adl.feature.ownerandgroup.enableupn",
+    assertEquals("adl.feature.ownerandgroup.enableupn",
         ADL_ENABLEUPN_FOR_OWNERGROUP_KEY);
-    Assert.assertEquals(false,
+    assertEquals(false,
         ADL_ENABLEUPN_FOR_OWNERGROUP_DEFAULT);
   }
 
@@ -152,6 +151,95 @@ public class TestValidateConfiguration {
     assertDeprecatedKeys(conf);
   }
 
+  @Test
+  public void testGetAccountNameFromFQDN() {
+    assertEquals("dummy", AdlFileSystem.
+        getAccountNameFromFQDN("dummy.azuredatalakestore.net"));
+    assertEquals("localhost", AdlFileSystem.
+        getAccountNameFromFQDN("localhost"));
+  }
+
+  @Test
+  public void testPropagateAccountOptionsDefault() {
+    Configuration conf = new Configuration(false);
+    conf.set("fs.adl.oauth2.client.id", "defaultClientId");
+    conf.set("fs.adl.oauth2.credential", "defaultCredential");
+    conf.set("some.other.config", "someValue");
+    Configuration propagatedConf =
+        AdlFileSystem.propagateAccountOptions(conf, "dummy");
+    assertEquals("defaultClientId",
+        propagatedConf.get(AZURE_AD_CLIENT_ID_KEY));
+    assertEquals("defaultCredential",
+        propagatedConf.get(AZURE_AD_CLIENT_SECRET_KEY));
+    assertEquals("someValue",
+        propagatedConf.get("some.other.config"));
+  }
+
+  @Test
+  public void testPropagateAccountOptionsSpecified() {
+    Configuration conf = new Configuration(false);
+    conf.set("fs.adl.account.dummy.oauth2.client.id", "dummyClientId");
+    conf.set("fs.adl.account.dummy.oauth2.credential", "dummyCredential");
+    conf.set("some.other.config", "someValue");
+
+    Configuration propagatedConf =
+        AdlFileSystem.propagateAccountOptions(conf, "dummy");
+    assertEquals("dummyClientId",
+        propagatedConf.get(AZURE_AD_CLIENT_ID_KEY));
+    assertEquals("dummyCredential",
+        propagatedConf.get(AZURE_AD_CLIENT_SECRET_KEY));
+    assertEquals("someValue",
+        propagatedConf.get("some.other.config"));
+
+    propagatedConf =
+        AdlFileSystem.propagateAccountOptions(conf, "anotherDummy");
+    assertEquals(null,
+        propagatedConf.get(AZURE_AD_CLIENT_ID_KEY));
+    assertEquals(null,
+        propagatedConf.get(AZURE_AD_CLIENT_SECRET_KEY));
+    assertEquals("someValue",
+        propagatedConf.get("some.other.config"));
+  }
+
+  @Test
+  public void testPropagateAccountOptionsAll() {
+    Configuration conf = new Configuration(false);
+    conf.set("fs.adl.oauth2.client.id", "defaultClientId");
+    conf.set("fs.adl.oauth2.credential", "defaultCredential");
+    conf.set("some.other.config", "someValue");
+    conf.set("fs.adl.account.dummy1.oauth2.client.id", "dummyClientId1");
+    conf.set("fs.adl.account.dummy1.oauth2.credential", "dummyCredential1");
+    conf.set("fs.adl.account.dummy2.oauth2.client.id", "dummyClientId2");
+    conf.set("fs.adl.account.dummy2.oauth2.credential", "dummyCredential2");
+
+    Configuration propagatedConf =
+        AdlFileSystem.propagateAccountOptions(conf, "dummy1");
+    assertEquals("dummyClientId1",
+        propagatedConf.get(AZURE_AD_CLIENT_ID_KEY));
+    assertEquals("dummyCredential1",
+        propagatedConf.get(AZURE_AD_CLIENT_SECRET_KEY));
+    assertEquals("someValue",
+        propagatedConf.get("some.other.config"));
+
+    propagatedConf =
+        AdlFileSystem.propagateAccountOptions(conf, "dummy2");
+    assertEquals("dummyClientId2",
+        propagatedConf.get(AZURE_AD_CLIENT_ID_KEY));
+    assertEquals("dummyCredential2",
+        propagatedConf.get(AZURE_AD_CLIENT_SECRET_KEY));
+    assertEquals("someValue",
+        propagatedConf.get("some.other.config"));
+
+    propagatedConf =
+        AdlFileSystem.propagateAccountOptions(conf, "anotherDummy");
+    assertEquals("defaultClientId",
+        propagatedConf.get(AZURE_AD_CLIENT_ID_KEY));
+    assertEquals("defaultCredential",
+        propagatedConf.get(AZURE_AD_CLIENT_SECRET_KEY));
+    assertEquals("someValue",
+        propagatedConf.get("some.other.config"));
+  }
+
   private void setDeprecatedKeys(Configuration conf) {
     conf.set("dfs.adls.oauth2.access.token.provider.type", "dummyType");
     conf.set("dfs.adls.oauth2.client.id", "dummyClientId");
@@ -163,19 +251,19 @@ public class TestValidateConfiguration {
   }
 
   private void assertDeprecatedKeys(Configuration conf) {
-    Assert.assertEquals("dummyType",
+    assertEquals("dummyType",
         conf.get(AZURE_AD_TOKEN_PROVIDER_TYPE_KEY));
-    Assert.assertEquals("dummyClientId",
+    assertEquals("dummyClientId",
         conf.get(AZURE_AD_CLIENT_ID_KEY));
-    Assert.assertEquals("dummyRefreshToken",
+    assertEquals("dummyRefreshToken",
         conf.get(AZURE_AD_REFRESH_TOKEN_KEY));
-    Assert.assertEquals("dummyRefreshUrl",
+    assertEquals("dummyRefreshUrl",
         conf.get(AZURE_AD_REFRESH_URL_KEY));
-    Assert.assertEquals("dummyCredential",
+    assertEquals("dummyCredential",
         conf.get(AZURE_AD_CLIENT_SECRET_KEY));
-    Assert.assertEquals("dummyClass",
+    assertEquals("dummyClass",
         conf.get(AZURE_AD_TOKEN_PROVIDER_CLASS_KEY));
-    Assert.assertEquals("dummyTracker",
+    assertEquals("dummyTracker",
         conf.get(LATENCY_TRACKER_KEY));
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/481d79fe/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/common/Parallelized.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/common/Parallelized.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/common/Parallelized.java
index b08a892..7ac010c 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/common/Parallelized.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/common/Parallelized.java
@@ -39,7 +39,7 @@ public class Parallelized extends Parameterized {
   private static class ThreadPoolScheduler implements RunnerScheduler {
     private ExecutorService executor;
 
-    public ThreadPoolScheduler() {
+    ThreadPoolScheduler() {
       int numThreads = 10;
       executor = Executors.newFixedThreadPool(numThreads);
     }




[32/50] [abbrv] hadoop git commit: YARN-7789. Should fail RM if 3rd resource type is configured but RM uses DefaultResourceCalculator. (Zian Chen via wangda)

Posted by ae...@apache.org.
YARN-7789. Should fail RM if 3rd resource type is configured but RM uses DefaultResourceCalculator. (Zian Chen via wangda)

Change-Id: I1f6a451fe16758def6f47c046a9b8a67ad7255af


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/042ef2fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/042ef2fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/042ef2fa

Branch: refs/heads/HDFS-7240
Commit: 042ef2fa7bcc22e3ca4eb8205c34d83e594bc7de
Parents: 60971b8
Author: Wangda Tan <wa...@apache.org>
Authored: Wed Feb 14 23:11:10 2018 +0800
Committer: Wangda Tan <wa...@apache.org>
Committed: Wed Feb 14 23:11:10 2018 +0800

----------------------------------------------------------------------
 .../scheduler/capacity/CapacityScheduler.java   |  8 +++++
 ...CapacitySchedulerWithMultiResourceTypes.java | 37 ++++++++++++++++++++
 2 files changed, 45 insertions(+)
----------------------------------------------------------------------
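
The check this patch adds means a cluster that configures extra resource types must also switch the scheduler away from DefaultResourceCalculator. A short sketch of the two settings involved, using only the configuration hooks exercised by the new test below; the resource names are illustrative.

import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;

public class MultiResourceTypeConfigSketch {
  public static YarnConfiguration build() {
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    // With a third resource type present, DefaultResourceCalculator now makes
    // CapacityScheduler initialization throw YarnRuntimeException, so the
    // dominant-resource calculator has to be selected instead.
    csConf.setResourceComparator(DominantResourceCalculator.class);

    YarnConfiguration conf = new YarnConfiguration(csConf);
    // Two custom resource types on top of memory and vcores (the names are
    // placeholders, as in the test).
    conf.set(YarnConfiguration.RESOURCE_TYPES, "resource1,resource2");
    return conf;
  }
}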


http://git-wip-us.apache.org/repos/asf/hadoop/blob/042ef2fa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index d3aa5cb..cd9d1373 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -338,6 +338,14 @@ public class CapacityScheduler extends
       this.minimumAllocation = super.getMinimumAllocation();
       initMaximumResourceCapability(super.getMaximumAllocation());
       this.calculator = this.conf.getResourceCalculator();
+      if (this.calculator instanceof DefaultResourceCalculator
+          && ResourceUtils.getNumberOfKnownResourceTypes() > 2) {
+        throw new YarnRuntimeException("RM uses DefaultResourceCalculator, which"
+            + " supports only memory as a resource type, but additional"
+            + " resource types were specified: " + ResourceUtils.getResourceTypes()
+            + ". Use DominantResourceCalculator instead to make effective use"
+            + " of these resource types");
+      }
       this.usePortForNodeName = this.conf.getUsePortForNodeName();
       this.applications = new ConcurrentHashMap<>();
       this.labelManager = rmContext.getNodeLabelManager();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/042ef2fa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWithMultiResourceTypes.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWithMultiResourceTypes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWithMultiResourceTypes.java
index 2da2cdd..ea29f7f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWithMultiResourceTypes.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerWithMultiResourceTypes.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
@@ -38,6 +39,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.junit.Assert;
@@ -286,4 +288,39 @@ public class TestCapacitySchedulerWithMultiResourceTypes {
 
     rm.close();
   }
+
+  @Test
+  public void testDefaultResourceCalculatorWithThirdResourceTypes() throws Exception {
+
+    CapacitySchedulerConfiguration csconf =
+        new CapacitySchedulerConfiguration();
+    csconf.setResourceComparator(DefaultResourceCalculator.class);
+
+    YarnConfiguration conf = new YarnConfiguration(csconf);
+
+    String[] res1 = {"resource1", "M"};
+    String[] res2 = {"resource2", "G"};
+    String[] res3 = {"resource3", "H"};
+
+    String[][] test = {res1, res2, res3};
+
+    String resSt = "";
+    for (String[] resources : test) {
+      resSt += (resources[0] + ",");
+    }
+    resSt = resSt.substring(0, resSt.length() - 1);
+    conf.set(YarnConfiguration.RESOURCE_TYPES, resSt);
+
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
+
+    boolean exception = false;
+    try {
+      MockRM rm = new MockRM(conf);
+    } catch (YarnRuntimeException e) {
+      exception = true;
+    }
+
+    Assert.assertTrue("Should have exception in CS", exception);
+  }
 }




[21/50] [abbrv] hadoop git commit: HDFS-10453. ReplicationMonitor thread could get stuck for a long time due to the race between replication and deletion of the same file in a large cluster. Contributed by He Xiaoqiao.

Posted by ae...@apache.org.
HDFS-10453. ReplicationMonitor thread could get stuck for a long time due to the race between replication and deletion of the same file in a large cluster. Contributed by He Xiaoqiao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96bb6a51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96bb6a51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96bb6a51

Branch: refs/heads/HDFS-7240
Commit: 96bb6a51ec4a470e9b287c94e377444a9f97c410
Parents: 8cf88fc
Author: Arpit Agarwal <ar...@apache.org>
Authored: Mon Feb 12 07:00:50 2018 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Feb 12 07:17:40 2018 -0800

----------------------------------------------------------------------
 .../hdfs/server/blockmanagement/BlockReconstructionWork.java   | 6 ++++++
 .../hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java  | 3 +--
 .../hadoop/hdfs/server/blockmanagement/ReplicationWork.java    | 6 ++----
 3 files changed, 9 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
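
The fix follows a snapshot-at-construction pattern: the block size is copied into BlockReconstructionWork while the block is still known to be valid, so the later call to the placement policy never dereferences block metadata that a concurrent delete may have invalidated. A generic sketch of the pattern with hypothetical names, not the actual HDFS classes:

/**
 * Sketch of the snapshot-at-construction pattern used by this patch: copy the
 * values a long-running task will need while the source object is still
 * valid, so later work cannot race with a concurrent delete. All names here
 * are hypothetical.
 */
class ReconstructionWorkSketch {
  private final String srcPath;
  private final long blockSize;  // captured once, up front

  ReconstructionWorkSketch(String srcPath, long blockSizeAtScheduleTime) {
    this.srcPath = srcPath;
    this.blockSize = blockSizeAtScheduleTime;
  }

  /** Safe to call later; no shared, mutable block object is touched. */
  long getBlockSize() {
    return blockSize;
  }

  String getSrcPath() {
    return srcPath;
  }
}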


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96bb6a51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
index 3f591e8..d383191 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
@@ -33,6 +33,7 @@ abstract class BlockReconstructionWork {
   private final BlockInfo block;
 
   private final String srcPath;
+  private final long blockSize;
   private final byte storagePolicyID;
 
   /**
@@ -59,6 +60,7 @@ abstract class BlockReconstructionWork {
       int priority) {
     this.block = block;
     this.srcPath = bc.getName();
+    this.blockSize = block.getNumBytes();
     this.storagePolicyID = bc.getStoragePolicyID();
     this.srcNodes = srcNodes;
     this.containingNodes = containingNodes;
@@ -100,6 +102,10 @@ abstract class BlockReconstructionWork {
     return srcPath;
   }
 
+  public long getBlockSize() {
+    return blockSize;
+  }
+
   public byte getStoragePolicyID() {
     return storagePolicyID;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96bb6a51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
index a23b1d5..147f8cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
@@ -59,8 +59,7 @@ class ErasureCodingWork extends BlockReconstructionWork {
     // TODO: new placement policy for EC considering multiple writers
     DatanodeStorageInfo[] chosenTargets = blockplacement.chooseTarget(
         getSrcPath(), getAdditionalReplRequired(), getSrcNodes()[0],
-        getLiveReplicaStorages(), false, excludedNodes,
-        getBlock().getNumBytes(),
+        getLiveReplicaStorages(), false, excludedNodes, getBlockSize(),
         storagePolicySuite.getPolicy(getStoragePolicyID()), null);
     setTargets(chosenTargets);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96bb6a51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
index 26c38cb..f250bcb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
@@ -45,10 +45,8 @@ class ReplicationWork extends BlockReconstructionWork {
     try {
       DatanodeStorageInfo[] chosenTargets = blockplacement.chooseTarget(
           getSrcPath(), getAdditionalReplRequired(), getSrcNodes()[0],
-          getLiveReplicaStorages(), false, excludedNodes,
-          getBlock().getNumBytes(),
-          storagePolicySuite.getPolicy(getStoragePolicyID()),
-          null);
+          getLiveReplicaStorages(), false, excludedNodes, getBlockSize(),
+          storagePolicySuite.getPolicy(getStoragePolicyID()), null);
       setTargets(chosenTargets);
     } finally {
       getSrcNodes()[0].decrementPendingReplicationWithoutTargets();




[22/50] [abbrv] hadoop git commit: YARN-7917. Fix failing test TestDockerContainerRuntime#testLaunchContainerWithDockerTokens. Contributed by Shane Kumpf

Posted by ae...@apache.org.
YARN-7917. Fix failing test TestDockerContainerRuntime#testLaunchContainerWithDockerTokens. Contributed by Shane Kumpf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3414fd12
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3414fd12
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3414fd12

Branch: refs/heads/HDFS-7240
Commit: 3414fd1243c0b0dcbb9cea986af5f096300eba97
Parents: 96bb6a5
Author: Jason Lowe <jl...@apache.org>
Authored: Mon Feb 12 09:27:43 2018 -0600
Committer: Jason Lowe <jl...@apache.org>
Committed: Mon Feb 12 09:27:43 2018 -0600

----------------------------------------------------------------------
 .../linux/runtime/TestDockerContainerRuntime.java       | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3414fd12/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index e9cf765..4c53eb1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -1775,7 +1775,7 @@ public class TestDockerContainerRuntime {
     List<String> dockerCommands = Files
         .readAllLines(Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
 
-    int expected = 15;
+    int expected = 16;
     int counter = 0;
     Assert.assertEquals(expected, dockerCommands.size());
     Assert.assertEquals("[docker-command-execution]",
@@ -1797,12 +1797,12 @@ public class TestDockerContainerRuntime {
         dockerCommands.get(counter++));
     Assert.assertEquals("  name=container_id", dockerCommands.get(counter++));
     Assert.assertEquals("  net=host", dockerCommands.get(counter++));
+    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
+            + "/test_user_filecache_dir:/test_user_filecache_dir",
+        dockerCommands.get(counter++));
     Assert.assertEquals(
-        "  rw-mounts=/test_container_local_dir:/test_container_local_dir,"
-            + "/test_filecache_dir:/test_filecache_dir,"
-            + "/test_container_work_dir:/test_container_work_dir,"
-            + "/test_container_log_dir:/test_container_log_dir,"
-            + "/test_user_local_dir:/test_user_local_dir",
+        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
+            + "/test_application_local_dir:/test_application_local_dir",
         dockerCommands.get(counter++));
     Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
     Assert.assertEquals("  workdir=/test_container_work_dir",




[43/50] [abbrv] hadoop git commit: HADOOP-15176. Enhance IAM Assumed Role support in S3A client. Contributed by Steve Loughran

Posted by ae...@apache.org.
HADOOP-15176. Enhance IAM Assumed Role support in S3A client.
Contributed by Steve Loughran

(cherry picked from commit 96c047fbb98c2378eed9693a724d4cbbd03c00fd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a013b25
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a013b25
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a013b25

Branch: refs/heads/HDFS-7240
Commit: 9a013b255f301c557c3868dc1ad657202e9e7a67
Parents: b27ab7d
Author: Steve Loughran <st...@apache.org>
Authored: Thu Feb 15 15:56:10 2018 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Thu Feb 15 15:57:10 2018 +0000

----------------------------------------------------------------------
 .../apache/hadoop/util/JsonSerialization.java   |   8 +
 .../src/main/resources/core-default.xml         |  13 +-
 .../org/apache/hadoop/test/LambdaTestUtils.java |  40 +-
 .../apache/hadoop/test/TestLambdaTestUtils.java |  36 +
 .../fs/s3a/AssumedRoleCredentialProvider.java   | 197 -----
 .../org/apache/hadoop/fs/s3a/Constants.java     |   2 +-
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  17 +-
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  53 +-
 .../s3a/auth/AssumedRoleCredentialProvider.java | 205 +++++
 .../apache/hadoop/fs/s3a/auth/RoleModel.java    | 314 ++++++++
 .../apache/hadoop/fs/s3a/auth/RolePolicies.java | 228 ++++++
 .../apache/hadoop/fs/s3a/auth/package-info.java |  27 +
 .../hadoop/fs/s3a/commit/CommitOperations.java  |   2 +-
 .../markdown/tools/hadoop-aws/assumed_roles.md  | 274 ++++++-
 .../site/markdown/tools/hadoop-aws/testing.md   |  15 +-
 .../s3a/ITestS3AContractDistCpAssumedRole.java  |  52 --
 .../apache/hadoop/fs/s3a/ITestAssumeRole.java   | 324 --------
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  |  46 +-
 .../fs/s3a/TestS3AAWSCredentialsProvider.java   |  40 +-
 .../hadoop/fs/s3a/auth/ITestAssumeRole.java     | 789 +++++++++++++++++++
 .../auth/ITestAssumedRoleCommitOperations.java  | 130 +++
 .../hadoop/fs/s3a/auth/RoleTestUtils.java       | 171 ++++
 .../fs/s3a/commit/AbstractCommitITest.java      |  12 +-
 .../fs/s3a/commit/ITestCommitOperations.java    |   4 +-
 24 files changed, 2345 insertions(+), 654 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
index 15f4fef..86c4df6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
@@ -90,6 +90,14 @@ public class JsonSerialization<T> {
   }
 
   /**
+   * Get the mapper of this class.
+   * @return the mapper
+   */
+  public ObjectMapper getMapper() {
+    return mapper;
+  }
+
+  /**
    * Convert from JSON.
    *
    * @param json input

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index ede1f1c..ece54c4 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -977,20 +977,21 @@
 </property>
 
 <property>
-  <name>fs.s3a.assumed.role.session.duration</name>
-  <value>30m</value>
+  <name>fs.s3a.assumed.role.policy</name>
+  <value/>
   <description>
-    Duration of assumed roles before a refresh is attempted.
+    JSON policy to apply to the role.
     Only used if AssumedRoleCredentialProvider is the AWS credential provider.
   </description>
 </property>
 
 <property>
-  <name>fs.s3a.assumed.role.policy</name>
-  <value/>
+  <name>fs.s3a.assumed.role.session.duration</name>
+  <value>30m</value>
   <description>
-    JSON policy containing more restrictions to apply to the role.
+    Duration of assumed roles before a refresh is attempted.
     Only used if AssumedRoleCredentialProvider is the AWS credential provider.
+    Range: 15m to 1h
   </description>
 </property>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
index 22208f7..cbb5288 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
@@ -604,8 +604,44 @@ public final class LambdaTestUtils {
   public static <T> void assertOptionalUnset(String message,
       Optional<T> actual) {
     Assert.assertNotNull(message, actual);
-    if (actual.isPresent()) {
-      Assert.fail("Expected empty option, got " + actual.get().toString());
+    actual.ifPresent(
+        t -> Assert.fail("Expected empty option, got " + t.toString()));
+  }
+
+  /**
+   * Invoke a callable; wrap all checked exceptions with an
+   * AssertionError.
+   * @param closure closure to execute
+   * @param <T> return type of closure
+   * @return the value of the closure
+   * @throws AssertionError if the operation raised an IOE or
+   * other checked exception.
+   */
+  public static <T> T eval(Callable<T> closure) {
+    try {
+      return closure.call();
+    } catch (RuntimeException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new AssertionError(e.toString(), e);
+    }
+  }
+
+  /**
+   * Invoke a callable; wrap all checked exceptions with an
+   * AssertionError.
+   * @param closure closure to execute
+   * @return the value of the closure
+   * @throws AssertionError if the operation raised an IOE or
+   * other checked exception.
+   */
+  public static void eval(VoidCallable closure) {
+    try {
+      closure.call();
+    } catch (RuntimeException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new AssertionError(e.toString(), e);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestLambdaTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestLambdaTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestLambdaTestUtils.java
index c790a18..694fe73 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestLambdaTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestLambdaTestUtils.java
@@ -493,4 +493,40 @@ public class TestLambdaTestUtils extends Assert {
     assertMinRetryCount(0);
   }
 
+  @Test
+  public void testEvalToSuccess() {
+    assertTrue("Eval to success", eval(() -> true));
+  }
+
+  /**
+   * There's no attempt to wrap an unchecked exception
+   * with an AssertionError.
+   */
+  @Test
+  public void testEvalDoesntWrapRTEs() throws Throwable {
+    intercept(RuntimeException.class, "",
+        () -> eval(() -> {
+          throw new RuntimeException("t");
+        }));
+  }
+
+  /**
+   * Verify that IOEs are caught and wrapped, and that the
+   * inner cause is the original IOE.
+   */
+  @Test
+  public void testEvalDoesWrapIOEs() throws Throwable {
+    AssertionError ex = intercept(AssertionError.class, "ioe",
+        () -> eval(() -> {
+          throw new IOException("ioe");
+        }));
+    Throwable cause = ex.getCause();
+    if (cause == null) {
+      throw ex;
+    }
+    if (!(cause instanceof IOException)) {
+      throw cause;
+    }
+  }
+
 }
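
The new eval() helpers let test code invoke IOException-throwing operations from lambdas that cannot declare checked exceptions, surfacing failures as AssertionError. A small usage sketch under that assumption; the checked-exception operation here is a stand-in, not a real Hadoop call.

import static org.apache.hadoop.test.LambdaTestUtils.eval;

import java.io.IOException;

public class EvalUsageSketch {
  /** Stand-in for an operation that declares a checked exception. */
  static long sizeOf(String path) throws IOException {
    return path.length();
  }

  public static void main(String[] args) {
    // Value-returning form: the checked exception stays inside the closure;
    // any IOException would surface as an AssertionError.
    Long size = eval(() -> {
      return sizeOf("/example/path");
    });
    System.out.println(size);  // 13

    // Void form: the same wrapping for an operation with no result.
    eval(() -> {
      if (sizeOf("/tmp") == 0) {
        throw new IOException("unexpected empty path");
      }
    });
  }
}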

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AssumedRoleCredentialProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AssumedRoleCredentialProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AssumedRoleCredentialProvider.java
deleted file mode 100644
index 26f1f4e..0000000
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AssumedRoleCredentialProvider.java
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3a;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.Locale;
-import java.util.concurrent.TimeUnit;
-
-import com.amazonaws.auth.AWSCredentials;
-import com.amazonaws.auth.AWSCredentialsProvider;
-import com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider;
-import com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceException;
-import com.google.common.annotations.VisibleForTesting;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import static org.apache.hadoop.fs.s3a.Constants.*;
-import static org.apache.hadoop.fs.s3a.S3AUtils.createAWSCredentialProvider;
-import static org.apache.hadoop.fs.s3a.S3AUtils.loadAWSProviderClasses;
-
-/**
- * Support IAM Assumed roles by instantiating an instance of
- * {@code STSAssumeRoleSessionCredentialsProvider} from configuration
- * properties, including wiring up the inner authenticator, and,
- * unless overridden, creating a session name from the current user.
- */
-public class AssumedRoleCredentialProvider implements AWSCredentialsProvider,
-    Closeable {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(AssumedRoleCredentialProvider.class);
-  public static final String NAME
-      = "org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider";
-
-  static final String E_FORBIDDEN_PROVIDER =
-      "AssumedRoleCredentialProvider cannot be in "
-          + ASSUMED_ROLE_CREDENTIALS_PROVIDER;
-
-  public static final String E_NO_ROLE = "Unset property "
-      + ASSUMED_ROLE_ARN;
-
-  private final STSAssumeRoleSessionCredentialsProvider stsProvider;
-
-  private final String sessionName;
-
-  private final long duration;
-
-  private final String arn;
-
-  /**
-   * Instantiate.
-   * This calls {@link #getCredentials()} to fail fast on the inner
-   * role credential retrieval.
-   * @param conf configuration
-   * @throws IOException on IO problems and some parameter checking
-   * @throws IllegalArgumentException invalid parameters
-   * @throws AWSSecurityTokenServiceException problems getting credentials
-   */
-  public AssumedRoleCredentialProvider(Configuration conf) throws IOException {
-
-    arn = conf.getTrimmed(ASSUMED_ROLE_ARN, "");
-    if (StringUtils.isEmpty(arn)) {
-      throw new IOException(E_NO_ROLE);
-    }
-
-    // build up the base provider
-    Class<?>[] awsClasses = loadAWSProviderClasses(conf,
-        ASSUMED_ROLE_CREDENTIALS_PROVIDER,
-        SimpleAWSCredentialsProvider.class);
-    AWSCredentialProviderList credentials = new AWSCredentialProviderList();
-    for (Class<?> aClass : awsClasses) {
-      if (this.getClass().equals(aClass)) {
-        throw new IOException(E_FORBIDDEN_PROVIDER);
-      }
-      credentials.add(createAWSCredentialProvider(conf, aClass));
-    }
-
-    // then the STS binding
-    sessionName = conf.getTrimmed(ASSUMED_ROLE_SESSION_NAME,
-        buildSessionName());
-    duration = conf.getTimeDuration(ASSUMED_ROLE_SESSION_DURATION,
-        ASSUMED_ROLE_SESSION_DURATION_DEFAULT, TimeUnit.SECONDS);
-    String policy = conf.getTrimmed(ASSUMED_ROLE_POLICY, "");
-
-    LOG.info("{}", this);
-    STSAssumeRoleSessionCredentialsProvider.Builder builder
-        = new STSAssumeRoleSessionCredentialsProvider.Builder(arn, sessionName);
-    builder.withRoleSessionDurationSeconds((int) duration);
-    if (StringUtils.isNotEmpty(policy)) {
-      LOG.debug("Scope down policy {}", policy);
-      builder.withScopeDownPolicy(policy);
-    }
-    String epr = conf.get(ASSUMED_ROLE_STS_ENDPOINT, "");
-    if (StringUtils.isNotEmpty(epr)) {
-      LOG.debug("STS Endpoint: {}", epr);
-      builder.withServiceEndpoint(epr);
-    }
-    LOG.debug("Credentials to obtain role credentials: {}", credentials);
-    builder.withLongLivedCredentialsProvider(credentials);
-    stsProvider = builder.build();
-    // and force in a fail-fast check just to keep the stack traces less
-    // convoluted
-    getCredentials();
-  }
-
-  /**
-   * Get credentials.
-   * @return the credentials
-   * @throws AWSSecurityTokenServiceException if none could be obtained.
-   */
-  @Override
-  public AWSCredentials getCredentials() {
-    try {
-      return stsProvider.getCredentials();
-    } catch (AWSSecurityTokenServiceException e) {
-      LOG.error("Failed to get credentials for role {}",
-          arn, e);
-      throw e;
-    }
-  }
-
-  @Override
-  public void refresh() {
-    stsProvider.refresh();
-  }
-
-  /**
-   * Propagate the close() call to the inner stsProvider.
-   */
-  @Override
-  public void close() {
-    stsProvider.close();
-  }
-
-  @Override
-  public String toString() {
-    final StringBuilder sb = new StringBuilder(
-        "AssumedRoleCredentialProvider{");
-    sb.append("role='").append(arn).append('\'');
-    sb.append(", session'").append(sessionName).append('\'');
-    sb.append(", duration=").append(duration);
-    sb.append('}');
-    return sb.toString();
-  }
-
-  /**
-   * Build the session name from the current user's shortname.
-   * @return a string for the session name.
-   * @throws IOException failure to get the current user
-   */
-  static String buildSessionName() throws IOException {
-    return sanitize(UserGroupInformation.getCurrentUser()
-        .getShortUserName());
-  }
-
-  /**
-   * Build a session name from the string, sanitizing it for the permitted
-   * characters.
-   * @param session source session
-   * @return a string for use in role requests.
-   */
-  @VisibleForTesting
-  static String sanitize(String session) {
-    StringBuilder r = new StringBuilder(session.length());
-    for (char c: session.toCharArray()) {
-      if ("abcdefghijklmnopqrstuvwxyz0123456789,.@-".contains(
-          Character.toString(c).toLowerCase(Locale.ENGLISH))) {
-        r.append(c);
-      } else {
-        r.append('-');
-      }
-    }
-    return r.toString();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index 38aaeaa..faec784 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -94,7 +94,7 @@ public final class Constants {
   public static final String ASSUMED_ROLE_CREDENTIALS_PROVIDER =
       "fs.s3a.assumed.role.credentials.provider";
 
-  /** JSON policy containing more restrictions to apply to the role. */
+  /** JSON policy containing the policy to apply to the role. */
   public static final String ASSUMED_ROLE_POLICY =
       "fs.s3a.assumed.role.policy";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index fced494..da8f38b 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -1399,9 +1399,9 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
     } catch (MultiObjectDeleteException e) {
       // one or more of the operations failed.
       List<MultiObjectDeleteException.DeleteError> errors = e.getErrors();
-      LOG.error("Partial failure of delete, {} errors", errors.size(), e);
+      LOG.debug("Partial failure of delete, {} errors", errors.size(), e);
       for (MultiObjectDeleteException.DeleteError error : errors) {
-        LOG.error("{}: \"{}\" - {}",
+        LOG.debug("{}: \"{}\" - {}",
             error.getKey(), error.getCode(), error.getMessage());
       }
       throw e;
@@ -1649,7 +1649,9 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
       blockRootDelete(keyVersion.getKey());
     }
     if (enableMultiObjectsDelete) {
-      deleteObjects(new DeleteObjectsRequest(bucket).withKeys(keysToDelete));
+      deleteObjects(new DeleteObjectsRequest(bucket)
+          .withKeys(keysToDelete)
+          .withQuiet(true));
     } else {
       for (DeleteObjectsRequest.KeyVersion keyVersion : keysToDelete) {
         deleteObject(keyVersion.getKey());
@@ -1684,7 +1686,13 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
       entryPoint(INVOCATION_DELETE);
       boolean outcome = innerDelete(innerGetFileStatus(f, true), recursive);
       if (outcome) {
-        maybeCreateFakeParentDirectory(f);
+        try {
+          maybeCreateFakeParentDirectory(f);
+        } catch (AccessDeniedException e) {
+          LOG.warn("Cannot create directory marker at {}: {}",
+              f.getParent(), e.toString());
+          LOG.debug("Failed to create fake dir above {}", f, e);
+        }
       }
       return outcome;
     } catch (FileNotFoundException e) {
@@ -1827,6 +1835,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
    * @throws IOException IO problem
    * @throws AmazonClientException untranslated AWS client problem
    */
+  @Retries.RetryTranslated
   void maybeCreateFakeParentDirectory(Path path)
       throws IOException, AmazonClientException {
     Path parent = path.getParent();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
index 4dd6ed1..2b64a76 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
@@ -30,6 +30,7 @@ import com.amazonaws.services.dynamodbv2.model.LimitExceededException;
 import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughputExceededException;
 import com.amazonaws.services.dynamodbv2.model.ResourceNotFoundException;
 import com.amazonaws.services.s3.model.AmazonS3Exception;
+import com.amazonaws.services.s3.model.MultiObjectDeleteException;
 import com.amazonaws.services.s3.model.S3ObjectSummary;
 
 import com.google.common.base.Preconditions;
@@ -248,6 +249,14 @@ public final class S3AUtils {
         ioe = new AWSStatus500Exception(message, ase);
         break;
 
+      case 200:
+        if (exception instanceof MultiObjectDeleteException) {
+          // failure during a bulk delete
+          return translateMultiObjectDeleteException(message,
+              (MultiObjectDeleteException) exception);
+        }
+        // other 200: FALL THROUGH
+
       default:
         // no specific exit code. Choose an IOE subclass based on the class
         // of the caught exception
@@ -379,6 +388,40 @@ public final class S3AUtils {
   }
 
   /**
+   * A MultiObjectDeleteException is raised if one or more delete objects
+   * listed in a bulk DELETE operation failed.
+   * The top-level exception is therefore just "something wasn't deleted",
+   * but doesn't include the what or the why.
+   * This translation will extract an AccessDeniedException if that's one of
+   * the causes, otherwise grabs the status code and uses it in the
+   * returned exception.
+   * @param message text for the exception
+   * @param ex exception to translate
+   * @return an IOE with more detail.
+   */
+  public static IOException translateMultiObjectDeleteException(String message,
+      MultiObjectDeleteException ex) {
+    List<String> keys;
+    StringBuffer result = new StringBuffer(ex.getErrors().size() * 100);
+    result.append(message).append(": ");
+    String exitCode = "";
+    for (MultiObjectDeleteException.DeleteError error : ex.getErrors()) {
+      String code = error.getCode();
+      result.append(String.format("%s: %s: %s%n", code, error.getKey(),
+          error.getMessage()));
+      if (exitCode.isEmpty() ||  "AccessDenied".equals(code)) {
+        exitCode = code;
+      }
+    }
+    if ("AccessDenied".equals(exitCode)) {
+      return (IOException) new AccessDeniedException(result.toString())
+          .initCause(ex);
+    } else {
+      return new AWSS3IOException(result.toString(), ex);
+    }
+  }
+
+  /**
    * Get low level details of an amazon exception for logging; multi-line.
    * @param e exception
    * @return string details
@@ -534,7 +577,7 @@ public final class S3AUtils {
    * @return the list of classes, possibly empty
    * @throws IOException on a failure to load the list.
    */
-  static Class<?>[] loadAWSProviderClasses(Configuration conf,
+  public static Class<?>[] loadAWSProviderClasses(Configuration conf,
       String key,
       Class<?>... defaultValue) throws IOException {
     try {
@@ -564,7 +607,7 @@ public final class S3AUtils {
    * @return the instantiated class
    * @throws IOException on any instantiation failure.
    */
-  static AWSCredentialsProvider createAWSCredentialProvider(
+  public static AWSCredentialsProvider createAWSCredentialProvider(
       Configuration conf, Class<?> credClass) throws IOException {
     AWSCredentialsProvider credentials;
     String className = credClass.getName();
@@ -973,14 +1016,18 @@ public final class S3AUtils {
    * iterator.
    * @param iterator iterator from a list
    * @param eval closure to evaluate
+   * @return the number of files processed
    * @throws IOException anything in the closure, or iteration logic.
    */
-  public static void applyLocatedFiles(
+  public static long applyLocatedFiles(
       RemoteIterator<LocatedFileStatus> iterator,
       CallOnLocatedFileStatus eval) throws IOException {
+    long count = 0;
     while (iterator.hasNext()) {
+      count++;
       eval.call(iterator.next());
     }
+    return count;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java
new file mode 100644
index 0000000..42809c8
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java
@@ -0,0 +1,205 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.auth;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Locale;
+import java.util.concurrent.TimeUnit;
+
+import com.amazonaws.auth.AWSCredentials;
+import com.amazonaws.auth.AWSCredentialsProvider;
+import com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider;
+import com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceException;
+import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.s3a.AWSCredentialProviderList;
+import org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import static org.apache.hadoop.fs.s3a.Constants.*;
+import static org.apache.hadoop.fs.s3a.S3AUtils.createAWSCredentialProvider;
+import static org.apache.hadoop.fs.s3a.S3AUtils.loadAWSProviderClasses;
+
+/**
+ * Support IAM Assumed roles by instantiating an instance of
+ * {@code STSAssumeRoleSessionCredentialsProvider} from configuration
+ * properties, including wiring up the inner authenticator, and,
+ * unless overridden, creating a session name from the current user.
+ *
+ * Classname is used in configuration files; do not move.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class AssumedRoleCredentialProvider implements AWSCredentialsProvider,
+    Closeable {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(AssumedRoleCredentialProvider.class);
+  public static final String NAME
+      = "org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider";
+
+  static final String E_FORBIDDEN_PROVIDER =
+      "AssumedRoleCredentialProvider cannot be in "
+          + ASSUMED_ROLE_CREDENTIALS_PROVIDER;
+
+  public static final String E_NO_ROLE = "Unset property "
+      + ASSUMED_ROLE_ARN;
+
+  private final STSAssumeRoleSessionCredentialsProvider stsProvider;
+
+  private final String sessionName;
+
+  private final long duration;
+
+  private final String arn;
+
+  /**
+   * Instantiate.
+   * This calls {@link #getCredentials()} to fail fast on the inner
+   * role credential retrieval.
+   * @param conf configuration
+   * @throws IOException on IO problems and some parameter checking
+   * @throws IllegalArgumentException invalid parameters
+   * @throws AWSSecurityTokenServiceException problems getting credentials
+   */
+  public AssumedRoleCredentialProvider(Configuration conf) throws IOException {
+
+    arn = conf.getTrimmed(ASSUMED_ROLE_ARN, "");
+    if (StringUtils.isEmpty(arn)) {
+      throw new IOException(E_NO_ROLE);
+    }
+
+    // build up the base provider
+    Class<?>[] awsClasses = loadAWSProviderClasses(conf,
+        ASSUMED_ROLE_CREDENTIALS_PROVIDER,
+        SimpleAWSCredentialsProvider.class);
+    AWSCredentialProviderList credentials = new AWSCredentialProviderList();
+    for (Class<?> aClass : awsClasses) {
+      if (this.getClass().equals(aClass)) {
+        throw new IOException(E_FORBIDDEN_PROVIDER);
+      }
+      credentials.add(createAWSCredentialProvider(conf, aClass));
+    }
+
+    // then the STS binding
+    sessionName = conf.getTrimmed(ASSUMED_ROLE_SESSION_NAME,
+        buildSessionName());
+    duration = conf.getTimeDuration(ASSUMED_ROLE_SESSION_DURATION,
+        ASSUMED_ROLE_SESSION_DURATION_DEFAULT, TimeUnit.SECONDS);
+    String policy = conf.getTrimmed(ASSUMED_ROLE_POLICY, "");
+
+    LOG.debug("{}", this);
+    STSAssumeRoleSessionCredentialsProvider.Builder builder
+        = new STSAssumeRoleSessionCredentialsProvider.Builder(arn, sessionName);
+    builder.withRoleSessionDurationSeconds((int) duration);
+    if (StringUtils.isNotEmpty(policy)) {
+      LOG.debug("Scope down policy {}", policy);
+      builder.withScopeDownPolicy(policy);
+    }
+    String epr = conf.get(ASSUMED_ROLE_STS_ENDPOINT, "");
+    if (StringUtils.isNotEmpty(epr)) {
+      LOG.debug("STS Endpoint: {}", epr);
+      builder.withServiceEndpoint(epr);
+    }
+    LOG.debug("Credentials to obtain role credentials: {}", credentials);
+    builder.withLongLivedCredentialsProvider(credentials);
+    stsProvider = builder.build();
+    // and force in a fail-fast check just to keep the stack traces less
+    // convoluted
+    getCredentials();
+  }
+
+  /**
+   * Get credentials.
+   * @return the credentials
+   * @throws AWSSecurityTokenServiceException if none could be obtained.
+   */
+  @Override
+  public AWSCredentials getCredentials() {
+    try {
+      return stsProvider.getCredentials();
+    } catch (AWSSecurityTokenServiceException e) {
+      LOG.error("Failed to get credentials for role {}",
+          arn, e);
+      throw e;
+    }
+  }
+
+  @Override
+  public void refresh() {
+    stsProvider.refresh();
+  }
+
+  /**
+   * Propagate the close() call to the inner stsProvider.
+   */
+  @Override
+  public void close() {
+    stsProvider.close();
+  }
+
+  @Override
+  public String toString() {
+    final StringBuilder sb = new StringBuilder(
+        "AssumedRoleCredentialProvider{");
+    sb.append("role='").append(arn).append('\'');
+    sb.append(", session='").append(sessionName).append('\'');
+    sb.append(", duration=").append(duration);
+    sb.append('}');
+    return sb.toString();
+  }
+
+  /**
+   * Build the session name from the current user's shortname.
+   * @return a string for the session name.
+   * @throws IOException failure to get the current user
+   */
+  static String buildSessionName() throws IOException {
+    return sanitize(UserGroupInformation.getCurrentUser()
+        .getShortUserName());
+  }
+
+  /**
+   * Build a session name from the string, sanitizing it for the permitted
+   * characters.
+   * @param session source session
+   * @return a string for use in role requests.
+   */
+  @VisibleForTesting
+  static String sanitize(String session) {
+    StringBuilder r = new StringBuilder(session.length());
+    for (char c: session.toCharArray()) {
+      if ("abcdefghijklmnopqrstuvwxyz0123456789,.@-".contains(
+          Character.toString(c).toLowerCase(Locale.ENGLISH))) {
+        r.append(c);
+      } else {
+        r.append('-');
+      }
+    }
+    return r.toString();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RoleModel.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RoleModel.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RoleModel.java
new file mode 100644
index 0000000..ca2c993
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RoleModel.java
@@ -0,0 +1,314 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.auth;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.SerializationFeature;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.util.JsonSerialization;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static com.google.common.base.Preconditions.checkState;
+
+/**
+ * Jackson Role Model for Role Properties, for API clients and tests.
+ *
+ * Doesn't have complete coverage of the entire AWS IAM policy model;
+ * don't expect to be able to parse everything.
+ * It can generate simple models.
+ * @see <a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html">Example S3 Policies</a>
+ * @see <a href="https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/api-permissions-reference.html">DynamoDB Permissions</a>
+ */
+@InterfaceAudience.LimitedPrivate("Tests")
+@InterfaceStability.Unstable
+public class RoleModel {
+
+  public static final String VERSION = "2012-10-17";
+
+  public static final String BUCKET_RESOURCE_F = "arn:aws:s3:::%s/%s";
+
+
+  private static final AtomicLong SID_COUNTER = new AtomicLong(0);
+
+
+  private final JsonSerialization<Policy> serialization =
+      new JsonSerialization<>(Policy.class, false, true);
+
+  public RoleModel() {
+    ObjectMapper mapper = serialization.getMapper();
+    mapper.enable(SerializationFeature.WRITE_SINGLE_ELEM_ARRAYS_UNWRAPPED);
+  }
+
+  public String toJson(Policy policy) throws JsonProcessingException {
+    return serialization.toJson(policy);
+  }
+
+  /**
+   * Statement ID factory.
+   * @return a statement ID unique for this JVM's life.
+   */
+  public static String newSid() {
+    SID_COUNTER.incrementAndGet();
+    return SID_COUNTER.toString();
+  }
+
+  /**
+   * Map a bool to an effect.
+   * @param allowed is the statement to allow actions?
+   * @return the appropriate effect.
+   */
+  public static Effects effect(final boolean allowed) {
+    return allowed ? Effects.Allow : Effects.Deny;
+  }
+
+  /**
+   * Create a resource.
+   * @param bucket bucket
+   * @param key key
+   * @param addWildcard add a * to the tail of the key?
+   * @return a resource for a statement.
+   */
+  @SuppressWarnings("StringConcatenationMissingWhitespace")
+  public static String resource(String bucket, String key,
+      boolean addWildcard) {
+    return String.format(BUCKET_RESOURCE_F, bucket,
+        key + (addWildcard ? "*" : ""));
+  }
+
+  /**
+   * Given a path, return the S3 resource to it.
+   * If {@code isDirectory} is true, a "/" is added to the path.
+   * This is critical when adding wildcard permissions under
+   * a directory, and also needed when locking down dir-as-file
+   * and dir-as-directory-marker access.
+   * @param path a path
+   * @param isDirectory is this a directory?
+   * @param addWildcard add a * to the tail of the key?
+   * @return a resource for a statement.
+   */
+  public static String resource(Path path,
+      final boolean isDirectory,
+      boolean addWildcard) {
+    String key = pathToKey(path);
+    if (isDirectory && !key.isEmpty()) {
+      key = key + "/";
+    }
+    return resource(path.toUri().getHost(), key, addWildcard);
+  }
+
+  /**
+   * Given a directory path, return the S3 resource to it.
+   * @param path a path
+   * @return a resource for a statement.
+   */
+  public static String[] directory(Path path) {
+    String host = path.toUri().getHost();
+    String key = pathToKey(path);
+    if (!key.isEmpty()) {
+      return new String[] {
+          resource(host, key + "/", true),
+          resource(host, key, false),
+          resource(host, key + "/", false),
+      };
+    } else {
+      return new String[]{
+          resource(host, key, true),
+      };
+    }
+  }
+
+  /**
+   * Variant of {@link S3AFileSystem#pathToKey(Path)} which doesn't care
+   * about working directories, so can be static and stateless.
+   * @param path path to map
+   * @return key or ""
+   */
+  public static String pathToKey(Path path) {
+    if (path.toUri().getScheme() != null && path.toUri().getPath().isEmpty()) {
+      return "";
+    }
+    return path.toUri().getPath().substring(1);
+  }
+
+  /**
+   * Create a statement.
+   * @param allow allow or deny
+   * @param scope scope
+   * @param actions actions
+   * @return the formatted json statement
+   */
+  public static Statement statement(boolean allow,
+      String scope,
+      String... actions) {
+    return new Statement(RoleModel.effect(allow))
+        .addActions(actions)
+        .addResources(scope);
+  }
+
+  /**
+   * Create a statement.
+   * If {@code isDirectory} is true, a "/" is added to the path.
+   * This is critical when adding wildcard permissions under
+   * a directory, and also needed when locking down dir-as-file
+   * and dir-as-directory-marker access.
+   * @param allow allow or deny
+   * @param path path
+   * @param isDirectory is this a directory?
+   * @param actions action
+   * @return the formatted json statement
+   */
+  public static Statement statement(
+      final boolean allow,
+      final Path path,
+      final boolean isDirectory,
+      final boolean wildcards,
+      final String... actions) {
+    return new Statement(RoleModel.effect(allow))
+        .addActions(actions)
+        .addResources(resource(path, isDirectory, wildcards));
+  }
+
+  /**
+   * From a set of statements, create a policy.
+   * @param statements statements
+   * @return the policy
+   */
+  public static Policy policy(Statement... statements) {
+    return new Policy(statements);
+  }
+
+
+  /**
+   * Effect options.
+   */
+  public enum Effects {
+    Allow,
+    Deny
+  }
+
+  /**
+   * Any element in a role.
+   */
+  public static abstract class RoleElt {
+
+    protected RoleElt() {
+    }
+
+    /**
+     * validation operation.
+     */
+    public void validate() {
+
+    }
+  }
+
+  /**
+   * A single statement.
+   */
+  public static class Statement extends RoleElt {
+
+    @JsonProperty("Sid")
+    public String sid = newSid();
+
+    /**
+     * Default effect is Deny; forces callers to switch on Allow.
+     */
+    @JsonProperty("Effect")
+    public Effects effect;
+
+    @JsonProperty("Action")
+    public List<String> action = new ArrayList<>(1);
+
+    @JsonProperty("Resource")
+    public List<String> resource = new ArrayList<>(1);
+
+    public Statement(final Effects effect) {
+      this.effect = effect;
+    }
+
+    @Override
+    public void validate() {
+      checkNotNull(sid, "Sid");
+      checkNotNull(effect, "Effect");
+      checkState(!(action.isEmpty()), "Empty Action");
+      checkState(!(resource.isEmpty()), "Empty Resource");
+    }
+
+    public Statement setAllowed(boolean f) {
+      effect = effect(f);
+      return this;
+    }
+
+    public Statement addActions(String... actions) {
+      Collections.addAll(action, actions);
+      return this;
+    }
+
+    public Statement addResources(String... resources) {
+      Collections.addAll(resource, resources);
+      return this;
+    }
+
+  }
+
+  /**
+   * A policy is one or more statements.
+   */
+  public static class Policy extends RoleElt {
+
+    @JsonProperty("Version")
+    public String version = VERSION;
+
+    @JsonProperty("Statement")
+    public List<Statement> statement;
+
+    public Policy(final List<RoleModel.Statement> statement) {
+      this.statement = statement;
+    }
+
+    public Policy(RoleModel.Statement... statements) {
+      statement = Arrays.asList(statements);
+    }
+
+    /**
+     * Validation includes validating all statements.
+     */
+    @Override
+    public void validate() {
+      checkNotNull(statement, "Statement");
+      checkState(VERSION.equals(version), "Invalid Version: %s", version);
+      statement.stream().forEach((a) -> a.validate());
+    }
+
+  }
+
+
+}
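As a hedged usage sketch of the RoleModel builders above (illustrative only, not part of
the patch): the bucket ARN, action strings and class name are assumptions, but the
statement()/policy()/validate()/toJson() calls are the ones defined in the class.

```java
import com.fasterxml.jackson.core.JsonProcessingException;

import org.apache.hadoop.fs.s3a.auth.RoleModel;

import static org.apache.hadoop.fs.s3a.auth.RoleModel.policy;
import static org.apache.hadoop.fs.s3a.auth.RoleModel.statement;

public class RoleModelSketch {
  public static void main(String[] args) throws JsonProcessingException {
    // Allow reads under an illustrative bucket and deny deletes there;
    // statement(true, ...) maps to Effect=Allow, statement(false, ...) to Deny.
    RoleModel.Policy restricted = policy(
        statement(true, "arn:aws:s3:::example-bucket/*",
            "s3:GetObject", "s3:ListBucket"),
        statement(false, "arn:aws:s3:::example-bucket/*",
            "s3:DeleteObject"));
    // validate() checks the Version field and that every statement has
    // a Sid, an Effect and non-empty Action/Resource lists.
    restricted.validate();
    // Serialize with the Jackson-backed RoleModel; single-element arrays
    // are unwrapped to match the usual AWS policy JSON layout.
    System.out.println(new RoleModel().toJson(restricted));
  }
}
```

Run standalone, this would print a two-statement JSON policy of the same shape as the
examples in the assumed_roles.md changes further down.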

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RolePolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RolePolicies.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RolePolicies.java
new file mode 100644
index 0000000..6711eee
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RolePolicies.java
@@ -0,0 +1,228 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.auth;
+
+import static org.apache.hadoop.fs.s3a.auth.RoleModel.*;
+
+/**
+ * Operations, statements and policies covering the operations
+ * needed to work with S3 and S3Guard.
+ */
+public final class RolePolicies {
+
+  private RolePolicies() {
+  }
+
+  /**
+   * All S3 operations: {@value}.
+   */
+  public static final String S3_ALL_OPERATIONS = "s3:*";
+
+  /**
+   * All S3 buckets: {@value}.
+   */
+  public static final String S3_ALL_BUCKETS = "arn:aws:s3:::*";
+
+
+  public static final String S3_ALL_LIST_OPERATIONS = "s3:List*";
+
+  public static final String S3_ALL_LIST_BUCKET = "s3:ListBucket*";
+
+  public static final String S3_LIST_BUCKET = "s3:ListBucket";
+
+  /**
+   * This is used by the abort operation in S3A commit work.
+   */
+  public static final String S3_LIST_BUCKET_MULTPART_UPLOADS =
+      "s3:ListBucketMultipartUploads";
+
+
+  /**
+   * List multipart upload is needed for the S3A Commit protocols.
+   */
+  public static final String S3_LIST_MULTIPART_UPLOAD_PARTS
+      = "s3:ListMultipartUploadParts";
+
+  /**
+   * Abort multipart upload is needed for the S3A Commit protocols.
+   */
+  public static final String S3_ABORT_MULTIPART_UPLOAD
+      = "s3:AbortMultipartUpload";
+
+  /**
+   * All s3:Delete* operations.
+   */
+  public static final String S3_ALL_DELETE = "s3:Delete*";
+
+
+  public static final String S3_DELETE_OBJECT = "s3:DeleteObject";
+
+  public static final String S3_DELETE_OBJECT_TAGGING
+      = "s3:DeleteObjectTagging";
+
+  public static final String S3_DELETE_OBJECT_VERSION
+      = "s3:DeleteObjectVersion";
+
+  public static final String S3_DELETE_OBJECT_VERSION_TAGGING
+      = "s3:DeleteObjectVersionTagging";
+
+  /**
+   * All s3:Get* operations.
+   */
+  public static final String S3_ALL_GET = "s3:Get*";
+
+  public static final String S3_GET_OBJECT = "s3:GetObject";
+
+  public static final String S3_GET_OBJECT_ACL = "s3:GetObjectAcl";
+
+  public static final String S3_GET_OBJECT_TAGGING = "s3:GetObjectTagging";
+
+  public static final String S3_GET_OBJECT_TORRENT = "s3:GetObjectTorrent";
+
+  public static final String S3_GET_OBJECT_VERSION = "s3:GetObjectVersion";
+
+  public static final String S3_GET_OBJECT_VERSION_ACL
+      = "s3:GetObjectVersionAcl";
+
+  public static final String S3_GET_OBJECT_VERSION_TAGGING
+      = "s3:GetObjectVersionTagging";
+
+  public static final String S3_GET_OBJECT_VERSION_TORRENT
+      = "s3:GetObjectVersionTorrent";
+
+
+  /**
+   * S3 Put*.
+   * This covers single and multipart uploads, but not list/abort of the latter.
+   */
+  public static final String S3_ALL_PUT = "s3:Put*";
+
+  public static final String S3_PUT_OBJECT = "s3:PutObject";
+
+  public static final String S3_PUT_OBJECT_ACL = "s3:PutObjectAcl";
+
+  public static final String S3_PUT_OBJECT_TAGGING = "s3:PutObjectTagging";
+
+  public static final String S3_PUT_OBJECT_VERSION_ACL
+      = "s3:PutObjectVersionAcl";
+
+  public static final String S3_PUT_OBJECT_VERSION_TAGGING
+      = "s3:PutObjectVersionTagging";
+
+  public static final String S3_RESTORE_OBJECT = "s3:RestoreObject";
+
+  /**
+   * Actions needed to read data from S3 through S3A.
+   */
+  public static final String[] S3_PATH_READ_OPERATIONS =
+      new String[]{
+          S3_GET_OBJECT,
+      };
+
+  /**
+   * Actions needed to read data from anywhere in a bucket through S3A.
+   */
+  public static final String[] S3_ROOT_READ_OPERATIONS =
+      new String[]{
+          S3_LIST_BUCKET,
+          S3_LIST_BUCKET_MULTPART_UPLOADS,
+          S3_GET_OBJECT,
+      };
+
+  /**
+   * Actions needed to write data to an S3A Path.
+   * This includes the appropriate read operations.
+   */
+  public static final String[] S3_PATH_RW_OPERATIONS =
+      new String[]{
+          S3_ALL_GET,
+          S3_PUT_OBJECT,
+          S3_DELETE_OBJECT,
+          S3_ABORT_MULTIPART_UPLOAD,
+          S3_LIST_MULTIPART_UPLOAD_PARTS,
+      };
+
+  /**
+   * Actions needed to write data to an S3A Path.
+   * This is purely the extra operations needed for writing atop
+   * of the read operation set.
+   * Deny these and a path is still readable, but not writeable.
+   */
+  public static final String[] S3_PATH_WRITE_OPERATIONS =
+      new String[]{
+          S3_PUT_OBJECT,
+          S3_DELETE_OBJECT,
+          S3_ABORT_MULTIPART_UPLOAD
+      };
+
+  /**
+   * Actions needed for R/W IO from the root of a bucket.
+   */
+  public static final String[] S3_ROOT_RW_OPERATIONS =
+      new String[]{
+          S3_LIST_BUCKET,
+          S3_ALL_GET,
+          S3_PUT_OBJECT,
+          S3_DELETE_OBJECT,
+          S3_ABORT_MULTIPART_UPLOAD,
+          S3_LIST_MULTIPART_UPLOAD_PARTS,
+          S3_ALL_LIST_BUCKET,
+      };
+
+  /**
+   * All DynamoDB operations: {@value}.
+   */
+  public static final String DDB_ALL_OPERATIONS = "dynamodb:*";
+
+  public static final String DDB_ADMIN = "dynamodb:*";
+
+
+  public static final String DDB_BATCH_WRITE = "dynamodb:BatchWriteItem";
+
+  /**
+   * All DynamoDB tables: {@value}.
+   */
+  public static final String ALL_DDB_TABLES = "arn:aws:dynamodb:::*";
+
+
+
+  public static final String WILDCARD = "*";
+
+  /**
+   * Allow all S3 Operations.
+   */
+  public static final Statement STATEMENT_ALL_S3 = statement(true,
+      S3_ALL_BUCKETS,
+      S3_ALL_OPERATIONS);
+
+  /**
+   * Statement to allow all DDB access.
+   */
+  public static final Statement STATEMENT_ALL_DDB = statement(true,
+      ALL_DDB_TABLES, DDB_ALL_OPERATIONS);
+
+  /**
+   * Allow all S3 and S3Guard operations.
+   */
+  public static final Policy ALLOW_S3_AND_SGUARD = policy(
+      STATEMENT_ALL_S3,
+      STATEMENT_ALL_DDB
+  );
+
+}
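As a second hedged sketch (again not part of the patch), the action-group constants above
can be passed straight into the RoleModel.statement() varargs overloads to scope access to
a path; the s3a:// URI and class name below are assumptions for illustration.

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.auth.RoleModel;

import static org.apache.hadoop.fs.s3a.auth.RoleModel.policy;
import static org.apache.hadoop.fs.s3a.auth.RoleModel.statement;
import static org.apache.hadoop.fs.s3a.auth.RolePolicies.S3_ALL_BUCKETS;
import static org.apache.hadoop.fs.s3a.auth.RolePolicies.S3_PATH_RW_OPERATIONS;
import static org.apache.hadoop.fs.s3a.auth.RolePolicies.S3_ROOT_READ_OPERATIONS;

public class RolePoliciesSketch {

  /**
   * Build a policy granting bucket-wide read access plus full read/write
   * access under one directory; the path is illustrative only.
   */
  public static RoleModel.Policy restrictedRwPolicy() {
    Path home = new Path("s3a://example-bucket/users/alice");
    return policy(
        // Read-only statement across all buckets (the String[] constant
        // expands into the String... varargs parameter).
        statement(true, S3_ALL_BUCKETS, S3_ROOT_READ_OPERATIONS),
        // R/W statement over the directory subtree: isDirectory=true adds the
        // trailing "/", wildcards=true appends "*" to the resource key.
        statement(true, home, true, true, S3_PATH_RW_OPERATIONS));
  }
}
```

This mirrors the "read access to the base, R/W to the path underneath" example given
later in the assumed_roles.md changes.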

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/package-info.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/package-info.java
new file mode 100644
index 0000000..e34d68e
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/package-info.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Authentication and permissions support.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.fs.s3a.auth;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitOperations.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitOperations.java
index f6e12f4..55ace17 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitOperations.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/CommitOperations.java
@@ -309,7 +309,7 @@ public class CommitOperations {
         } catch (FileNotFoundException e) {
           LOG.debug("listed file already deleted: {}", pendingFile);
         } catch (IOException | IllegalArgumentException e) {
-          if (outcome == null) {
+          if (MaybeIOE.NONE.equals(outcome)) {
             outcome = new MaybeIOE(makeIOE(pendingFile.toString(), e));
           }
         } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/assumed_roles.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/assumed_roles.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/assumed_roles.md
index 50a9a0d..3afd63f 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/assumed_roles.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/assumed_roles.md
@@ -26,6 +26,9 @@ to obtain the assumed role and refresh it regularly.
 By using per-filesystem configuration, it is possible to use different
 assumed roles for different buckets.
 
+*IAM Assumed Roles are unlikely to be supported by third-party systems
+supporting the S3 APIs.*
+
 ## Using IAM Assumed Roles
 
 ### Before You Begin
@@ -38,14 +41,13 @@ are, how to configure their policies, etc.
 * Have the AWS CLI installed, and test that it works there.
 * Give the role access to S3, and, if using S3Guard, to DynamoDB.
 
-
 Trying to learn how IAM Assumed Roles work by debugging stack traces from
 the S3A client is "suboptimal".
 
 ### <a name="how_it_works"></a> How the S3A connector supports IAM Assumed Roles.
 
 To use assumed roles, the client must be configured to use the
-*Assumed Role Credential Provider*, `org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider`,
+*Assumed Role Credential Provider*, `org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider`,
 in the configuration option `fs.s3a.aws.credentials.provider`.
 
 This AWS Credential provider will read in the `fs.s3a.assumed.role` options needed to connect to the
@@ -54,7 +56,8 @@ first authenticating with the full credentials, then assuming the specific role
 specified. It will then refresh this login at the configured rate of
 `fs.s3a.assumed.role.session.duration`
 
-To authenticate with the STS service both for the initial credential retrieval
+To authenticate with the [AWS STS service](https://docs.aws.amazon.com/STS/latest/APIReference/Welcome.html)
+both for the initial credential retrieval
 and for background refreshes, a different credential provider must be
 created, one which uses long-lived credentials (secret keys, environment variables).
 Short lived credentials (e.g other session tokens, EC2 instance credentials) cannot be used.
@@ -76,6 +79,7 @@ the previously created ARN.
 <property>
   <name>fs.s3a.aws.credentials.provider</name>
   <value>org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider</value>
+  <value>org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider</value>
 </property>
 
 <property>
@@ -116,7 +120,7 @@ Here are the full set of configuration options.
   <value />
   <description>
     AWS ARN for the role to be assumed.
-    Requires the fs.s3a.aws.credentials.provider list to contain
+    Required if the fs.s3a.aws.credentials.provider contains
     org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider
   </description>
 </property>
@@ -127,23 +131,27 @@ Here are the full set of configuration options.
   <description>
     Session name for the assumed role, must be valid characters according to
     the AWS APIs.
+    Only used if AssumedRoleCredentialProvider is the AWS credential provider.
     If not set, one is generated from the current Hadoop/Kerberos username.
   </description>
 </property>
 
 <property>
-  <name>fs.s3a.assumed.role.session.duration</name>
-  <value>30m</value>
+  <name>fs.s3a.assumed.role.policy</name>
+  <value/>
   <description>
-    Duration of assumed roles before a refresh is attempted.
+    JSON policy to apply to the role.
+    Only used if AssumedRoleCredentialProvider is the AWS credential provider.
   </description>
 </property>
 
 <property>
-  <name>fs.s3a.assumed.role.policy</name>
-  <value/>
+  <name>fs.s3a.assumed.role.session.duration</name>
+  <value>30m</value>
   <description>
-    Extra policy containing more restrictions to apply to the role.
+    Duration of assumed roles before a refresh is attempted.
+    Only used if AssumedRoleCredentialProvider is the AWS credential provider.
+    Range: 15m to 1h
   </description>
 </property>
 
@@ -152,37 +160,173 @@ Here are the full set of configuration options.
   <value/>
   <description>
     AWS Simple Token Service Endpoint. If unset, uses the default endpoint.
+    Only used if AssumedRoleCredentialProvider is the AWS credential provider.
   </description>
 </property>
 
 <property>
   <name>fs.s3a.assumed.role.credentials.provider</name>
-  <value/>
+  <value>org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider</value>
   <description>
-    Credential providers used to authenticate with the STS endpoint and retrieve
-    the role tokens.
+    List of credential providers to authenticate with the STS endpoint and
+    retrieve short-lived role credentials.
+    Only used if AssumedRoleCredentialProvider is the AWS credential provider.
     If unset, uses "org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider".
   </description>
 </property>
 ```
 
+## <a name="polices"></a> Restricting S3A operations through AWS Policies
+
+The S3A client needs to be granted specific permissions in order
+to work with a bucket.
+Here is a non-normative list of the permissions which must be granted
+for FileSystem operations to work.
+
+*Disclaimer:* The specific set of actions which the S3A connector needs
+will change over time.
+
+As more operations are added to the S3A connector, and as the
+means by which existing operations are implemented change, the
+AWS actions which are required by the client will change.
+
+These lists represent the minimum set of actions which the client's principal
+must be granted in order to work with a bucket.
+
+
+### Read Access Permissions
+
+Permissions which must be granted when reading from a bucket:
+
+
+| Action | S3A operations |
+|--------|----------|
+| `s3:ListBucket` | `listStatus()`, `getFileStatus()` and elsewhere |
+| `s3:GetObject` | `getFileStatus()`, `open()` and elsewhere |
+| `s3:ListBucketMultipartUploads` |  Aborting/cleaning up S3A commit operations|
+
+
+The `s3:ListBucketMultipartUploads` permission is only needed when committing work
+via the [S3A committers](committers.html).
+However, it must be granted to the root path in order to safely clean up jobs.
+It is simplest to permit this in all buckets, even if it is only actually
+needed when writing data.
+
+
+### Write Access Permissions
+
+These permissions must *also* be granted for write access:
+
+
+| Action | S3A operations |
+|--------|----------|
+| `s3:PutObject` | `mkdir()`, `create()`, `rename()`, `delete()` |
+| `s3:DeleteObject` | `mkdir()`, `create()`, `rename()`, `delete()` |
+| `s3:AbortMultipartUpload` | S3A committer `abortJob()` and `cleanup()` operations |
+| `s3:ListMultipartUploadParts` | S3A committer `abortJob()` and `cleanup()` operations |
+
+
+### Mixed Permissions in a single S3 Bucket
+
+Mixing permissions down the "directory tree" is limited
+only to the extent of supporting writeable directories under
+read-only parent paths.
+
+*Disclaimer:* When a client lacks write access up the entire
+directory tree, there are no guarantees of consistent filesystem
+views or operations.
+
+Particular troublespots are "directory markers" and
+failures of non-atomic operations, particularly `rename()` and `delete()`.
+
+A directory marker such as `/users/` will not be deleted if the user `alice`
+creates a directory `/users/alice` *and* she only has access to `/users/alice`.
+
+When a path or directory is deleted, the parent directory may not exist afterwards.
+In the example above, if `alice` deletes `/users/alice` and there are no
+other entries under `/users/alice`, then the directory marker `/users/` cannot
+be created. The directory `/users` will not exist in listings,
+`getFileStatus("/users")` or similar.
+
+Rename will fail if it cannot delete the items it has just copied; that is,
+`rename(read-only-source, writeable-dest)` will fail, but only after
+performing the COPY of the data.
+Even though the operation failed, for a single file copy, the destination
+file will exist.
+For a directory copy, only a partial copy of the source data may take place
+before the permission failure is raised.
+
+
+*S3Guard*: if [S3Guard](s3guard.html) is used to manage the directory listings,
+then after partial failures of rename/copy the DynamoDB tables can get out of sync.
+
+### Example: Read access to the base, R/W to the path underneath
+
+This example has the base bucket read only, and a directory underneath,
+`/users/alice/` granted full R/W access.
+
+```json
+{
+  "Version" : "2012-10-17",
+  "Statement" : [ {
+    "Sid" : "4",
+    "Effect" : "Allow",
+    "Action" : [
+      "s3:ListBucket",
+      "s3:ListBucketMultipartUploads",
+      "s3:GetObject"
+      ],
+    "Resource" : "arn:aws:s3:::example-bucket/*"
+  }, {
+    "Sid" : "5",
+    "Effect" : "Allow",
+    "Action" : [
+      "s3:Get*",
+      "s3:PutObject",
+      "s3:DeleteObject",
+      "s3:AbortMultipartUpload",
+      "s3:ListMultipartUploadParts" ],
+    "Resource" : [
+      "arn:aws:s3:::example-bucket/users/alice/*",
+      "arn:aws:s3:::example-bucket/users/alice",
+      "arn:aws:s3:::example-bucket/users/alice/"
+      ]
+  } ]
+}
+```
+
+Note how three resources are provided to represent the path `/users/alice`
+
+|  Path | Matches |
+|-------|----------|
+| `/users/alice` |  Any file `alice` created under `/users` |
+| `/users/alice/` |  The directory marker `alice/` created under `/users` |
+| `/users/alice/*` |  All files and directories under the path `/users/alice` |
+
+Note that the resource `arn:aws:s3:::example-bucket/users/alice*` cannot
+be used to refer to all of these paths, because it would also cover
+adjacent paths like `/users/alice2` and `/users/alicebob`.
+
+
 ## <a name="troubleshooting"></a> Troubleshooting Assumed Roles
 
 1. Make sure the role works and the user trying to enter it can do so from the AWS
 command line before trying to use the S3A client.
 1. Try to access the S3 bucket with reads and writes from the AWS CLI.
-1. Then, with the hadoop settings updated, try to read data from the `hadoop fs` CLI:
+1. With the Hadoop configuration set to use the role,
+ try to read data from the `hadoop fs` CLI:
 `hadoop fs -ls -p s3a://bucket/`
-1. Then, with the hadoop CLI, try to create a new directory with a request such as
+1. With the hadoop CLI, try to create a new directory with a request such as
 `hadoop fs -mkdir -p s3a://bucket/path/p1/`
 
+
 ### <a name="no_role"></a>IOException: "Unset property fs.s3a.assumed.role.arn"
 
 The Assumed Role Credential Provider is enabled, but `fs.s3a.assumed.role.arn` is unset.
 
 ```
 java.io.IOException: Unset property fs.s3a.assumed.role.arn
-  at org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider.<init>(AssumedRoleCredentialProvider.java:76)
+  at org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider.<init>(AssumedRoleCredentialProvider.java:76)
   at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
   at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
   at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
@@ -201,7 +345,7 @@ This can arise if the role ARN set in `fs.s3a.assumed.role.arn` is invalid
 or one to which the caller has no access.
 
 ```
-java.nio.file.AccessDeniedException: : Instantiate org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider
+java.nio.file.AccessDeniedException: : Instantiate org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider
  on : com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceException:
   Not authorized to perform sts:AssumeRole (Service: AWSSecurityTokenService; Status Code: 403;
    Error Code: AccessDenied; Request ID: aad4e59a-f4b0-11e7-8c78-f36aaa9457f6):AccessDenied
@@ -217,12 +361,12 @@ java.nio.file.AccessDeniedException: : Instantiate org.apache.hadoop.fs.s3a.Assu
 
 ### <a name="root_account"></a> "Roles may not be assumed by root accounts"
 
-You can't use assume a role with the root acount of an AWS account;
+You can't assume a role with the root account of an AWS account;
 you need to create a new user and give it the permission to change into
 the role.
 
 ```
-java.nio.file.AccessDeniedException: : Instantiate org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider
+java.nio.file.AccessDeniedException: : Instantiate org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider
  on : com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceException:
     Roles may not be assumed by root accounts. (Service: AWSSecurityTokenService; Status Code: 403; Error Code: AccessDenied;
     Request ID: e86dfd8f-e758-11e7-88e7-ad127c04b5e2):
@@ -257,7 +401,7 @@ The value of `fs.s3a.assumed.role.session.duration` is out of range.
 ```
 java.lang.IllegalArgumentException: Assume Role session duration should be in the range of 15min - 1Hr
   at com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider$Builder.withRoleSessionDurationSeconds(STSAssumeRoleSessionCredentialsProvider.java:437)
-  at org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider.<init>(AssumedRoleCredentialProvider.java:86)
+  at org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider.<init>(AssumedRoleCredentialProvider.java:86)
 ```
 
 
@@ -268,7 +412,7 @@ The policy set in `fs.s3a.assumed.role.policy` is not valid according to the
 AWS specification of Role Policies.
 
 ```
-rg.apache.hadoop.fs.s3a.AWSBadRequestException: Instantiate org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider on :
+rg.apache.hadoop.fs.s3a.AWSBadRequestException: Instantiate org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider on :
  com.amazonaws.services.securitytoken.model.MalformedPolicyDocumentException:
   The policy is not in the valid JSON format. (Service: AWSSecurityTokenService; Status Code: 400;
    Error Code: MalformedPolicyDocument; Request ID: baf8cb62-f552-11e7-9768-9df3b384e40c):
@@ -308,8 +452,8 @@ Caused by: com.amazonaws.services.securitytoken.model.MalformedPolicyDocumentExc
   at com.amazonaws.auth.RefreshableTask.blockingRefresh(RefreshableTask.java:212)
   at com.amazonaws.auth.RefreshableTask.getValue(RefreshableTask.java:153)
   at com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider.getCredentials(STSAssumeRoleSessionCredentialsProvider.java:299)
-  at org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider.getCredentials(AssumedRoleCredentialProvider.java:127)
-  at org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider.<init>(AssumedRoleCredentialProvider.java:116)
+  at org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider.getCredentials(AssumedRoleCredentialProvider.java:127)
+  at org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider.<init>(AssumedRoleCredentialProvider.java:116)
   at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
   at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
   at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
@@ -324,7 +468,7 @@ The policy set in `fs.s3a.assumed.role.policy` is not valid JSON.
 
 ```
 org.apache.hadoop.fs.s3a.AWSBadRequestException:
-Instantiate org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider on :
+Instantiate org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider on :
  com.amazonaws.services.securitytoken.model.MalformedPolicyDocumentException:
   Syntax errors in policy. (Service: AWSSecurityTokenService;
   Status Code: 400; Error Code: MalformedPolicyDocument;
@@ -363,8 +507,8 @@ Instantiate org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider on :
   at com.amazonaws.auth.RefreshableTask.blockingRefresh(RefreshableTask.java:212)
   at com.amazonaws.auth.RefreshableTask.getValue(RefreshableTask.java:153)
   at com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider.getCredentials(STSAssumeRoleSessionCredentialsProvider.java:299)
-  at org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider.getCredentials(AssumedRoleCredentialProvider.java:127)
-  at org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider.<init>(AssumedRoleCredentialProvider.java:116)
+  at org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider.getCredentials(AssumedRoleCredentialProvider.java:127)
+  at org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider.<init>(AssumedRoleCredentialProvider.java:116)
   at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
   at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
   at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
@@ -380,7 +524,7 @@ You can't use the Assumed Role Credential Provider as the provider in
 
 ```
 java.io.IOException: AssumedRoleCredentialProvider cannot be in fs.s3a.assumed.role.credentials.provider
-  at org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider.<init>(AssumedRoleCredentialProvider.java:86)
+  at org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider.<init>(AssumedRoleCredentialProvider.java:86)
   at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
   at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
   at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
 There's a space or other typo in the `fs.s3a.access.key` or `fs.s3a.secret.key` values used for the
 inner authentication which is breaking signature creation.
 
 ```
- org.apache.hadoop.fs.s3a.AWSBadRequestException: Instantiate org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider
+ org.apache.hadoop.fs.s3a.AWSBadRequestException: Instantiate org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider
   on : com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceException:
    'valid/20180109/us-east-1/sts/aws4_request' not a valid key=value pair (missing equal-sign) in Authorization header:
     'AWS4-HMAC-SHA256 Credential=not valid/20180109/us-east-1/sts/aws4_request,
@@ -447,8 +591,8 @@ Caused by: com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceExc
   at com.amazonaws.auth.RefreshableTask.blockingRefresh(RefreshableTask.java:212)
   at com.amazonaws.auth.RefreshableTask.getValue(RefreshableTask.java:153)
   at com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider.getCredentials(STSAssumeRoleSessionCredentialsProvider.java:299)
-  at org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider.getCredentials(AssumedRoleCredentialProvider.java:127)
-  at org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider.<init>(AssumedRoleCredentialProvider.java:116)
+  at org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider.getCredentials(AssumedRoleCredentialProvider.java:127)
+  at org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider.<init>(AssumedRoleCredentialProvider.java:116)
   at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
   at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
   at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
@@ -463,7 +607,7 @@ The credentials used to authenticate with the AWS Simple Token Service are inval
 
 ```
 [ERROR] Failures:
-[ERROR] java.nio.file.AccessDeniedException: : Instantiate org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider on :
+[ERROR] java.nio.file.AccessDeniedException: : Instantiate org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider on :
  com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceException:
   The security token included in the request is invalid.
   (Service: AWSSecurityTokenService; Status Code: 403; Error Code: InvalidClientTokenId;
@@ -501,8 +645,8 @@ The security token included in the request is invalid.
   at com.amazonaws.auth.RefreshableTask.blockingRefresh(RefreshableTask.java:212)
   at com.amazonaws.auth.RefreshableTask.getValue(RefreshableTask.java:153)
   at com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider.getCredentials(STSAssumeRoleSessionCredentialsProvider.java:299)
-  at org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider.getCredentials(AssumedRoleCredentialProvider.java:127)
-  at org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider.<init>(AssumedRoleCredentialProvider.java:116)
+  at org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider.getCredentials(AssumedRoleCredentialProvider.java:127)
+  at org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider.<init>(AssumedRoleCredentialProvider.java:116)
   at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
   at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
   at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
@@ -521,7 +665,7 @@ match these constraints.
 If set explicitly, it must be valid.
 
 ```
-org.apache.hadoop.fs.s3a.AWSBadRequestException: Instantiate org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider on
+org.apache.hadoop.fs.s3a.AWSBadRequestException: Instantiate org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider on
     com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceException:
     1 validation error detected: Value 'Session Names cannot Hava Spaces!' at 'roleSessionName'
     failed to satisfy constraint: Member must satisfy regular expression pattern: [\w+=,.@-]*
@@ -584,8 +728,8 @@ Caused by: com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceExc
   at com.amazonaws.auth.RefreshableTask.blockingRefresh(RefreshableTask.java:212)
   at com.amazonaws.auth.RefreshableTask.getValue(RefreshableTask.java:153)
   at com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider.getCredentials(STSAssumeRoleSessionCredentialsProvider.java:299)
-  at org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider.getCredentials(AssumedRoleCredentialProvider.java:135)
-  at org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider.<init>(AssumedRoleCredentialProvider.java:124)
+  at org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider.getCredentials(AssumedRoleCredentialProvider.java:135)
+  at org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider.<init>(AssumedRoleCredentialProvider.java:124)
   at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
   at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
   at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
@@ -593,3 +737,61 @@ Caused by: com.amazonaws.services.securitytoken.model.AWSSecurityTokenServiceExc
   at org.apache.hadoop.fs.s3a.S3AUtils.createAWSCredentialProvider(S3AUtils.java:583)
   ... 26 more
 ```
+
+
+### <a name="access_denied"></a> `java.nio.file.AccessDeniedException` within a FileSystem API call
+
+If an operation fails with an `AccessDeniedException`, then the role does not have
+the permission for the S3 operation invoked during the call.
+
+```
+java.nio.file.AccessDeniedException: s3a://bucket/readonlyDir: rename(s3a://bucket/readonlyDir, s3a://bucket/renameDest)
+ on s3a://bucket/readonlyDir:
+  com.amazonaws.services.s3.model.AmazonS3Exception: Access Denied
+  (Service: Amazon S3; Status Code: 403; Error Code: AccessDenied; Request ID: 2805F2ABF5246BB1;
+   S3 Extended Request ID: iEXDVzjIyRbnkAc40MS8Sjv+uUQNvERRcqLsJsy9B0oyrjHLdkRKwJ/phFfA17Kjn483KSlyJNw=),
+   S3 Extended Request ID: iEXDVzjIyRbnkAc40MS8Sjv+uUQNvERRcqLsJsy9B0oyrjHLdkRKwJ/phFfA17Kjn483KSlyJNw=:AccessDenied
+  at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:216)
+  at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:143)
+  at org.apache.hadoop.fs.s3a.S3AFileSystem.rename(S3AFileSystem.java:853)
+ ...
+Caused by: com.amazonaws.services.s3.model.AmazonS3Exception: Access Denied
+ (Service: Amazon S3; Status Code: 403; Error Code: AccessDenied; Request ID: 2805F2ABF5246BB1;
+  S3 Extended Request ID: iEXDVzjIyRbnkAc40MS8Sjv+uUQNvERRcqLsJsy9B0oyrjHLdkRKwJ/phFfA17Kjn483KSlyJNw=),
+  S3 Extended Request ID: iEXDVzjIyRbnkAc40MS8Sjv+uUQNvERRcqLsJsy9B0oyrjHLdkRKwJ/phFfA17Kjn483KSlyJNw=
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.handleErrorResponse(AmazonHttpClient.java:1638)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeOneRequest(AmazonHttpClient.java:1303)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeHelper(AmazonHttpClient.java:1055)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.doExecute(AmazonHttpClient.java:743)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeWithTimer(AmazonHttpClient.java:717)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.execute(AmazonHttpClient.java:699)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutor.access$500(AmazonHttpClient.java:667)
+  at com.amazonaws.http.AmazonHttpClient$RequestExecutionBuilderImpl.execute(AmazonHttpClient.java:649)
+  at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:513)
+  at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4229)
+  at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4176)
+  at com.amazonaws.services.s3.AmazonS3Client.deleteObject(AmazonS3Client.java:2066)
+  at com.amazonaws.services.s3.AmazonS3Client.deleteObject(AmazonS3Client.java:2052)
+  at org.apache.hadoop.fs.s3a.S3AFileSystem.lambda$deleteObject$7(S3AFileSystem.java:1338)
+  at org.apache.hadoop.fs.s3a.Invoker.retryUntranslated(Invoker.java:314)
+  at org.apache.hadoop.fs.s3a.Invoker.retryUntranslated(Invoker.java:280)
+  at org.apache.hadoop.fs.s3a.S3AFileSystem.deleteObject(S3AFileSystem.java:1334)
+  at org.apache.hadoop.fs.s3a.S3AFileSystem.removeKeys(S3AFileSystem.java:1657)
+  at org.apache.hadoop.fs.s3a.S3AFileSystem.innerRename(S3AFileSystem.java:1046)
+  at org.apache.hadoop.fs.s3a.S3AFileSystem.rename(S3AFileSystem.java:851)
+```
+
+This is the policy restriction behaving as intended: the caller is trying to
+perform an action which is forbidden.
+
+1. If a policy has been set in `fs.s3a.assumed.role.policy` then it must declare *all*
+permissions which the caller needs. The existing role policies
+act as an outer constraint on what the caller can perform, but are not inherited.
+
+1. If the policy for a bucket is set up with complex rules on different paths,
+check the path for the operation.
+
+1. The policy may have omitted one or more actions which are required.
+Make sure that all the read and write permissions are allowed for any bucket/path
+to which data is written, and read permissions for all
+buckets read from.
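A final hedged sketch, not part of the patch: the RoleModel/RolePolicies classes added
earlier in this commit could be used to generate the JSON placed in
`fs.s3a.assumed.role.policy`. The configuration key comes from the documentation above;
the method and class names here are illustrative assumptions.

```java
import com.fasterxml.jackson.core.JsonProcessingException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.s3a.auth.RoleModel;

import static org.apache.hadoop.fs.s3a.auth.RoleModel.policy;
import static org.apache.hadoop.fs.s3a.auth.RolePolicies.STATEMENT_ALL_DDB;
import static org.apache.hadoop.fs.s3a.auth.RolePolicies.STATEMENT_ALL_S3;

public class AssumedRolePolicySketch {

  /**
   * Set a role policy declaring every permission the caller needs:
   * here all of S3 plus DynamoDB (for S3Guard), serialized to JSON.
   */
  public static Configuration withRolePolicy(Configuration conf)
      throws JsonProcessingException {
    conf.set("fs.s3a.assumed.role.policy",
        new RoleModel().toJson(policy(STATEMENT_ALL_S3, STATEMENT_ALL_DDB)));
    return conf;
  }
}
```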

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
index d2edce2..4924b45 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md
@@ -1043,24 +1043,25 @@ If this role is not set, the tests which require it will be skipped.
 
 To run the tests in `ITestAssumeRole`, you need:
 
-1. A role in your AWS account with the relevant access rights to
-the S3 buckets used in the tests, and ideally DynamoDB, for S3Guard.
+1. A role in your AWS account with full read and write access rights to
+the S3 bucket used in the tests, and ideally DynamoDB, for S3Guard.
 If your bucket is set up by default to use S3Guard, the role must have access
 to that service.
 
-1.  Your IAM User  to have the permissions to adopt that role.
+1.  Your IAM user must have the permissions to adopt that role.
 
 1. The role ARN must be set in `fs.s3a.assumed.role.arn`.
 
 ```xml
 <property>
   <name>fs.s3a.assumed.role.arn</name>
-  <value>arn:aws:kms:eu-west-1:00000000000:key/0000000-16c9-4832-a1a9-c8bbef25ec8b</value>
+  <value>arn:aws:iam::9878543210123:role/role-s3-restricted</value>
 </property>
 ```
 
-The tests don't do much other than verify that basic file IO works with the role,
-and trigger various failures.
+The tests assume the role with different subsets of permissions and verify
+that the S3A client (mostly) works when the caller has only write access
+to part of the directory tree.
 
 You can also run the entire test suite in an assumed role, a more
 thorough test, by switching to the credentials provider.
@@ -1068,7 +1069,7 @@ thorough test, by switching to the credentials provider.
 ```xml
 <property>
   <name>fs.s3a.aws.credentials.provider</name>
-  <value>org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider</value>
+  <value>org.apache.hadoop.fs.s3a.auth.AssumedRoleCredentialProvider</value>
 </property>
 ```
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a013b25/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractDistCpAssumedRole.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractDistCpAssumedRole.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractDistCpAssumedRole.java
deleted file mode 100644
index 94e7adf..0000000
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractDistCpAssumedRole.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.contract.s3a;
-
-import org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider;
-
-import static org.apache.hadoop.fs.s3a.Constants.ASSUMED_ROLE_ARN;
-import static org.apache.hadoop.fs.s3a.S3ATestUtils.assume;
-import static org.apache.hadoop.fs.s3a.S3ATestUtils.authenticationContains;
-
-/**
- * Run DistCP under an assumed role.
- * This is skipped if the FS is already set to run under an assumed role,
- * because it would duplicate that of the superclass.
- */
-public class ITestS3AContractDistCpAssumedRole extends ITestS3AContractDistCp {
-
-  @Override
-  public void setup() throws Exception {
-
-    super.setup();
-    // check for the fs having assumed roles
-    assume("No ARN for role tests", !getAssumedRoleARN().isEmpty());
-    assume("Already running as an assumed role",
-        !authenticationContains(getFileSystem().getConf(),
-            AssumedRoleCredentialProvider.NAME));
-  }
-
-  /**
-   * Probe for an ARN for the test FS.
-   * @return any ARN for the (previous created) filesystem.
-   */
-  private String getAssumedRoleARN() {
-    return getFileSystem().getConf().getTrimmed(ASSUMED_ROLE_ARN, "");
-  }
-}




[08/50] [abbrv] hadoop git commit: YARN-7655. Avoid AM preemption caused by RRs for specific nodes or racks. Contributed by Steven Rand.

Posted by ae...@apache.org.
YARN-7655. Avoid AM preemption caused by RRs for specific nodes or racks. Contributed by Steven Rand.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1bc03ddf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1bc03ddf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1bc03ddf

Branch: refs/heads/HDFS-7240
Commit: 1bc03ddf97f3f0e0ecc1b00217438d3c91d29be5
Parents: eb2449d
Author: Yufei Gu <yu...@apache.org>
Authored: Thu Feb 8 12:32:43 2018 -0800
Committer: Yufei Gu <yu...@apache.org>
Committed: Thu Feb 8 12:32:43 2018 -0800

----------------------------------------------------------------------
 .../scheduler/fair/FSPreemptionThread.java      | 62 +++++++++++++-------
 .../fair/TestFairSchedulerPreemption.java       | 55 +++++++++++++++++
 2 files changed, 96 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bc03ddf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
index c05bff9..c32565f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
@@ -99,7 +99,10 @@ class FSPreemptionThread extends Thread {
    * starvation.
    * 2. For each {@link ResourceRequest}, iterate through matching
    * nodes and identify containers to preempt all on one node, also
-   * optimizing for least number of AM container preemptions.
+   * optimizing for least number of AM container preemptions. Only nodes
+   * that match the locality level specified in the {@link ResourceRequest}
+   * are considered. However, if this would lead to AM preemption, and locality
+   * relaxation is allowed, then the search space is expanded to all nodes.
    *
    * @param starvedApp starved application for which we are identifying
    *                   preemption targets
@@ -111,27 +114,21 @@ class FSPreemptionThread extends Thread {
 
     // Iterate through enough RRs to address app's starvation
     for (ResourceRequest rr : starvedApp.getStarvedResourceRequests()) {
+      List<FSSchedulerNode> potentialNodes = scheduler.getNodeTracker()
+              .getNodesByResourceName(rr.getResourceName());
       for (int i = 0; i < rr.getNumContainers(); i++) {
-        PreemptableContainers bestContainers = null;
-        List<FSSchedulerNode> potentialNodes = scheduler.getNodeTracker()
-            .getNodesByResourceName(rr.getResourceName());
-        int maxAMContainers = Integer.MAX_VALUE;
-
-        for (FSSchedulerNode node : potentialNodes) {
-          PreemptableContainers preemptableContainers =
-              identifyContainersToPreemptOnNode(
-                  rr.getCapability(), node, maxAMContainers);
-
-          if (preemptableContainers != null) {
-            // This set is better than any previously identified set.
-            bestContainers = preemptableContainers;
-            maxAMContainers = bestContainers.numAMContainers;
-
-            if (maxAMContainers == 0) {
-              break;
-            }
-          }
-        } // End of iteration through nodes for one RR
+        PreemptableContainers bestContainers =
+                identifyContainersToPreemptForOneContainer(potentialNodes, rr);
+
+        // Don't preempt AM containers just to satisfy local requests if relax
+        // locality is enabled.
+        if (bestContainers != null
+                && bestContainers.numAMContainers > 0
+                && !ResourceRequest.isAnyLocation(rr.getResourceName())
+                && rr.getRelaxLocality()) {
+          bestContainers = identifyContainersToPreemptForOneContainer(
+                  scheduler.getNodeTracker().getAllNodes(), rr);
+        }
 
         if (bestContainers != null) {
           List<RMContainer> containers = bestContainers.getAllContainers();
@@ -154,6 +151,29 @@ class FSPreemptionThread extends Thread {
     return containersToPreempt;
   }
 
+  private PreemptableContainers identifyContainersToPreemptForOneContainer(
+          List<FSSchedulerNode> potentialNodes, ResourceRequest rr) {
+    PreemptableContainers bestContainers = null;
+    int maxAMContainers = Integer.MAX_VALUE;
+
+    for (FSSchedulerNode node : potentialNodes) {
+      PreemptableContainers preemptableContainers =
+              identifyContainersToPreemptOnNode(
+                      rr.getCapability(), node, maxAMContainers);
+
+      if (preemptableContainers != null) {
+        // This set is better than any previously identified set.
+        bestContainers = preemptableContainers;
+        maxAMContainers = bestContainers.numAMContainers;
+
+        if (maxAMContainers == 0) {
+          break;
+        }
+      }
+    }
+    return bestContainers;
+  }
+
   /**
    * Identify containers to preempt on a given node. Try to find a list with
    * least AM containers to avoid preempting AM containers. This method returns

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bc03ddf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
index ac5d9fe..da6428a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
@@ -18,8 +18,11 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl;
@@ -384,6 +387,13 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
     }
   }
 
+  private void setAllAMContainersOnNode(NodeId nodeId) {
+    SchedulerNode node = scheduler.getNodeTracker().getNode(nodeId);
+    for (RMContainer container: node.getCopiedListOfRunningContainers()) {
+      ((RMContainerImpl) container).setAMContainer(true);
+    }
+  }
+
   @Test
   public void testPreemptionSelectNonAMContainer() throws Exception {
     takeAllResources("root.preemptable.child-1");
@@ -403,6 +413,51 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
   }
 
   @Test
+  public void testRelaxLocalityToNotPreemptAM() throws Exception {
+    takeAllResources("root.preemptable.child-1");
+    RMNode node1 = rmNodes.get(0);
+    setAllAMContainersOnNode(node1.getNodeID());
+    SchedulerNode node = scheduler.getNodeTracker().getNode(node1.getNodeID());
+    ApplicationAttemptId greedyAppAttemptId =
+            node.getCopiedListOfRunningContainers().get(0)
+                    .getApplicationAttemptId();
+
+    // Make the RACK_LOCAL and OFF_SWITCH requests big enough that they can't be
+    // satisfied. This forces the RR that we consider for preemption to be the
+    // NODE_LOCAL one.
+    ResourceRequest nodeRequest =
+            createResourceRequest(GB, node1.getHostName(), 1, 4, true);
+    ResourceRequest rackRequest =
+            createResourceRequest(GB * 10, node1.getRackName(), 1, 1, true);
+    ResourceRequest anyRequest =
+            createResourceRequest(GB * 10, ResourceRequest.ANY, 1, 1, true);
+
+    List<ResourceRequest> resourceRequests =
+            Arrays.asList(nodeRequest, rackRequest, anyRequest);
+
+    ApplicationAttemptId starvedAppAttemptId = createSchedulingRequest(
+            "root.preemptable.child-2", "default", resourceRequests);
+    starvingApp = scheduler.getSchedulerApp(starvedAppAttemptId);
+
+    // Move clock enough to identify starvation
+    clock.tickSec(1);
+    scheduler.update();
+
+    // Make sure 4 containers were preempted from the greedy app, but also that
+    // none were preempted on our all-AM node, even though the NODE_LOCAL RR
+    // asked for resources on it.
+
+    // TODO (YARN-7655) The starved app should be allocated 4 containers.
+    // It should be possible to modify the RRs such that this is true
+    // after YARN-7903.
+    verifyPreemption(0, 4);
+    for (RMContainer container : node.getCopiedListOfRunningContainers()) {
+      assert (container.isAMContainer());
+      assert (container.getApplicationAttemptId().equals(greedyAppAttemptId));
+    }
+  }
+
+  @Test
   public void testAppNotPreemptedBelowFairShare() throws Exception {
     takeAllResources("root.preemptable.child-1");
     tryPreemptMoreThanFairShare("root.preemptable.child-2");




[17/50] [abbrv] hadoop git commit: YARN-5848. Remove unnecessary public/crossdomain.xml from YARN UIv2 sub project. (Sunil G via wangda)

Posted by ae...@apache.org.
YARN-5848. Remove unnecessary public/crossdomain.xml from YARN UIv2 sub project. (Sunil G via wangda)

Change-Id: Ie295f88232192e6b520c335b0332383cc6a232c0


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/789a185c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/789a185c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/789a185c

Branch: refs/heads/HDFS-7240
Commit: 789a185c16351d2343e075413a50eb3e5849cc5f
Parents: e795833
Author: Wangda Tan <wa...@apache.org>
Authored: Mon Feb 12 10:27:15 2018 +0800
Committer: Wangda Tan <wa...@apache.org>
Committed: Mon Feb 12 10:27:15 2018 +0800

----------------------------------------------------------------------
 .../hadoop-yarn-ui/public/crossdomain.xml            | 15 ---------------
 1 file changed, 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/789a185c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/crossdomain.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/crossdomain.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/crossdomain.xml
deleted file mode 100644
index 0c16a7a..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/public/crossdomain.xml
+++ /dev/null
@@ -1,15 +0,0 @@
-<?xml version="1.0"?>
-<!DOCTYPE cross-domain-policy SYSTEM "http://www.adobe.com/xml/dtds/cross-domain-policy.dtd">
-<cross-domain-policy>
-  <!-- Read this: www.adobe.com/devnet/articles/crossdomain_policy_file_spec.html -->
-
-  <!-- Most restrictive policy: -->
-  <site-control permitted-cross-domain-policies="none"/>
-
-  <!-- Least restrictive policy: -->
-  <!--
-  <site-control permitted-cross-domain-policies="all"/>
-  <allow-access-from domain="*" to-ports="*" secure="false"/>
-  <allow-http-request-headers-from domain="*" headers="*" secure="false"/>
-  -->
-</cross-domain-policy>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[40/50] [abbrv] hadoop git commit: HADOOP-15090. Add ADL troubleshooting doc. Contributed by Steve Loughran.

Posted by ae...@apache.org.
HADOOP-15090. Add ADL troubleshooting doc.
Contributed by Steve Loughran.

(cherry picked from commit 58a2120e8a31307f19551f87be4e81d4fb626de1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9a373fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9a373fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9a373fb

Branch: refs/heads/HDFS-7240
Commit: c9a373fb14bbf826324c2547397f82b73bd466f4
Parents: 6ea7d78
Author: Steve Loughran <st...@apache.org>
Authored: Thu Feb 15 14:26:00 2018 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Thu Feb 15 14:26:00 2018 +0000

----------------------------------------------------------------------
 .../src/site/markdown/index.md                  |   4 +
 .../src/site/markdown/troubleshooting_adl.md    | 146 +++++++++++++++++++
 2 files changed, 150 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9a373fb/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
index ca79321..d2b6edf 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
@@ -22,6 +22,10 @@ The `hadoop-azure-datalake` module provides support for integration with the
 [Azure Data Lake Store](https://azure.microsoft.com/en-in/documentation/services/data-lake-store/).
 This support comes via the JAR file `azure-datalake-store.jar`.
 
+### Related Documents
+
+* [Troubleshooting](troubleshooting_adl.html).
+
 ## Features
 
 * Read and write data stored in an Azure Data Lake Storage account.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9a373fb/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md
new file mode 100644
index 0000000..80b2a6f
--- /dev/null
+++ b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/troubleshooting_adl.md
@@ -0,0 +1,146 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+# Troubleshooting ADL
+
+<!-- MACRO{toc|fromDepth=1|toDepth=3} -->
+
+
+## Error messages
+
+
+### Error fetching access token:
+
+You aren't authenticated.
+
+### Error fetching access token:  JsonParseException
+
+This means there was a problem talking to the OAuth endpoint.
+
+
+```
+Operation null failed with exception com.fasterxml.jackson.core.JsonParseException : Unexpected character ('<' (code 60)): expected a valid value (number, String, array, object, 'true', 'false' or 'null')
+  at [Source: sun.net.www.protocol.http.HttpURLConnection$HttpInputStream@211d30ed; line: 3, column: 2]
+  Last encountered exception thrown after 5 tries. [com.fasterxml.jackson.core.JsonParseException,com.fasterxml.jackson.core.JsonParseException,com.fasterxml.jackson.core.JsonParseException,com.fasterxml.jackson.core.JsonParseException,com.fasterxml.jackson.core.JsonParseException]
+  [ServerRequestId:null]
+  at com.microsoft.azure.datalake.store.ADLStoreClient.getExceptionFromResponse(ADLStoreClient.java:1147)
+  at com.microsoft.azure.datalake.store.ADLStoreClient.getDirectoryEntry(ADLStoreClient.java:725)
+  at org.apache.hadoop.fs.adl.AdlFileSystem.getFileStatus(AdlFileSystem.java:476)
+  at org.apache.hadoop.fs.FileSystem.exists(FileSystem.java:1713)
+  at org.apache.hadoop.fs.contract.ContractTestUtils.rm(ContractTestUtils.java:397)
+  at org.apache.hadoop.fs.contract.ContractTestUtils.cleanup(ContractTestUtils.java:374)
+  at org.apache.hadoop.fs.contract.AbstractFSContractTestBase.deleteTestDirInTeardown(AbstractFSContractTestBase.java:213)
+  at org.apache.hadoop.fs.contract.AbstractFSContractTestBase.teardown(AbstractFSContractTestBase.java:204)
+  at org.apache.hadoop.fs.contract.AbstractContractOpenTest.teardown(AbstractContractOpenTest.java:64)
+  at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
+  at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
+  at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
+  at java.lang.reflect.Method.invoke(Method.java:498)
+  at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
+  at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
+  at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
+  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
+  at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
+  at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
+```
+
+The endpoint for token refresh is wrong; the web site at the far end is returning HTML, which breaks the JSON parser.
+Fix: get the right endpoint from the web UI; make sure it ends in `oauth2/token`.
+
+If there is a proxy between the application and ADL, make sure that the JVM proxy
+settings are correct.
+
+### `UnknownHostException : yourcontainer.azuredatalakestore.net`
+
+The name of the ADL container is wrong, and does not resolve to any known container.
+
+
+```
+Operation MKDIRS failed with exception java.net.UnknownHostException : yourcontainer.azuredatalakestore.net
+Last encountered exception thrown after 5 tries. [java.net.UnknownHostException,java.net.UnknownHostException,java.net.UnknownHostException,java.net.UnknownHostException,java.net.UnknownHostException]
+  [ServerRequestId:null]
+  at com.microsoft.azure.datalake.store.ADLStoreClient.getExceptionFromResponse(ADLStoreClient.java:1147)
+  at com.microsoft.azure.datalake.store.ADLStoreClient.createDirectory(ADLStoreClient.java:582)
+  at org.apache.hadoop.fs.adl.AdlFileSystem.mkdirs(AdlFileSystem.java:598)
+  at org.apache.hadoop.fs.FileSystem.mkdirs(FileSystem.java:2305)
+  at org.apache.hadoop.fs.contract.AbstractFSContractTestBase.mkdirs(AbstractFSContractTestBase.java:338)
+  at org.apache.hadoop.fs.contract.AbstractFSContractTestBase.setup(AbstractFSContractTestBase.java:193)
+  at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
+  at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
+  at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
+  at java.lang.reflect.Method.invoke(Method.java:498)
+  at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
+  at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
+  at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
+  at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:24)
+  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
+  at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
+  at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
+Caused by: java.net.UnknownHostException: yourcontainer.azuredatalakestore.net
+  at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:184)
+  at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392)
+  at java.net.Socket.connect(Socket.java:589)
+  at sun.security.ssl.SSLSocketImpl.connect(SSLSocketImpl.java:668)
+  at sun.net.NetworkClient.doConnect(NetworkClient.java:175)
+  at sun.net.www.http.HttpClient.openServer(HttpClient.java:432)
+  at sun.net.www.http.HttpClient.openServer(HttpClient.java:527)
+  at sun.net.www.protocol.https.HttpsClient.<init>(HttpsClient.java:264)
+  at sun.net.www.protocol.https.HttpsClient.New(HttpsClient.java:367)
+  at sun.net.www.protocol.https.AbstractDelegateHttpsURLConnection.getNewHttpClient(AbstractDelegateHttpsURLConnection.java:191)
+  at sun.net.www.protocol.http.HttpURLConnection.plainConnect0(HttpURLConnection.java:1138)
+  at sun.net.www.protocol.http.HttpURLConnection.plainConnect(HttpURLConnection.java:1032)
+  at sun.net.www.protocol.https.AbstractDelegateHttpsURLConnection.connect(AbstractDelegateHttpsURLConnection.java:177)
+  at sun.net.www.protocol.http.HttpURLConnection.getOutputStream0(HttpURLConnection.java:1316)
+  at sun.net.www.protocol.http.HttpURLConnection.getOutputStream(HttpURLConnection.java:1291)
+  at sun.net.www.protocol.https.HttpsURLConnectionImpl.getOutputStream(HttpsURLConnectionImpl.java:250)
+  at com.microsoft.azure.datalake.store.HttpTransport.makeSingleCall(HttpTransport.java:273)
+  at com.microsoft.azure.datalake.store.HttpTransport.makeCall(HttpTransport.java:91)
+  at com.microsoft.azure.datalake.store.Core.mkdirs(Core.java:399)
+  at com.microsoft.azure.datalake.store.ADLStoreClient.createDirectory(ADLStoreClient.java:580)
+  ... 15 more
+```
+
+### ACL verification failed
+
+
+You are logged in but have no access to the ADL container.
+
+```
+[ERROR] testOpenReadZeroByteFile(org.apache.hadoop.fs.adl.live.TestAdlContractOpenLive)  Time elapsed: 3.392 s  <<< ERROR!
+org.apache.hadoop.security.AccessControlException: MKDIRS failed with error 0x83090aa2 (Forbidden. ACL verification failed. Either the resource does not exist or the user is not authorized to perform the requested operation.). [709ad9f6-725f-45a8-8231-e9327c52e79f][2017-11-28T07:06:30.3068084-08:00] [ServerRequestId:709ad9f6-725f-45a8-8231-e9327c52e79f]
+  at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
+  at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
+  at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
+  at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
+  at com.microsoft.azure.datalake.store.ADLStoreClient.getRemoteException(ADLStoreClient.java:1167)
+  at com.microsoft.azure.datalake.store.ADLStoreClient.getExceptionFromResponse(ADLStoreClient.java:1132)
+  at com.microsoft.azure.datalake.store.ADLStoreClient.createDirectory(ADLStoreClient.java:582)
+  at org.apache.hadoop.fs.adl.AdlFileSystem.mkdirs(AdlFileSystem.java:598)
+  at org.apache.hadoop.fs.FileSystem.mkdirs(FileSystem.java:2305)
+  at org.apache.hadoop.fs.contract.AbstractFSContractTestBase.mkdirs(AbstractFSContractTestBase.java:338)
+  at org.apache.hadoop.fs.contract.AbstractFSContractTestBase.setup(AbstractFSContractTestBase.java:193)
+  at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
+  at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
+  at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
+  at java.lang.reflect.Method.invoke(Method.java:498)
+  at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
+  at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
+  at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
+  at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:24)
+  at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
+  at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
+  at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
+```
+
+See "Adding the service principal to your ADL Account".


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[27/50] [abbrv] hadoop git commit: Revert "HADOOP-15195. With SELinux enabled, directories mounted with start-build-env.sh may not be accessible. Contributed by Grigori Rybkine"

Posted by ae...@apache.org.
Revert "HADOOP-15195. With SELinux enabled, directories mounted with start-build-env.sh may not be accessible. Contributed by Grigori Rybkine"

This reverts commit 5b88cb339898f82519223bcd07e1caedff02d051.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9cc6d1df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9cc6d1df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9cc6d1df

Branch: refs/heads/HDFS-7240
Commit: 9cc6d1dfb351f505aaa8f9f028068650b3b00d0d
Parents: 5b88cb3
Author: Chris Douglas <cd...@apache.org>
Authored: Mon Feb 12 21:06:10 2018 -0800
Committer: Chris Douglas <cd...@apache.org>
Committed: Mon Feb 12 21:06:10 2018 -0800

----------------------------------------------------------------------
 .../src/test/scripts/start-build-env.bats       | 102 -------------------
 start-build-env.sh                              |  32 +-----
 2 files changed, 3 insertions(+), 131 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9cc6d1df/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats b/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
deleted file mode 100644
index 0c32bcf..0000000
--- a/hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
+++ /dev/null
@@ -1,102 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-load hadoop-functions_test_helper
-
-# Mock docker command
-docker () {
-  if [ "$1" = "-v" ]; then
-    shift
-    echo Docker version ${DCKR_MOCK_VER:?}
-  elif [ "$1" = run ]; then
-    shift
-    until [ $# -eq 0 ]; do
-      if [ "$1" = -v ]; then
-        shift
-        echo "$1"|awk -F':' '{if (NF == 3 && $3 == "z")
-                  printf "Mounted %s with %s option.\n", $1, $3
-                              else if (NF == 2)
-                  printf "Mounted %s without %s option.\n", $1, "z"}'
-      fi
-      shift
-    done
-  fi
-}
-export -f docker
-export DCKR_MOCK_VER
-
-# Mock a SELinux enabled system
-enable_selinux () {
-  mkdir -p "${TMP}/bin"
-  echo true >"${TMP}/bin"/selinuxenabled
-  chmod a+x "${TMP}/bin"/selinuxenabled
-  if [ "${PATH#${TMP}/bin}" = "${PATH}" ]; then
-    PATH="${TMP}/bin":"$PATH"
-  fi
-}
-
-setup_user () {
-  if [ -z "$(printenv USER)" ]; then
-    if [ -z "$USER" ]; then
-      USER=${HOME##*/}
-    fi
-    export USER
-  fi
-}
-
-# Mock stat command as used in start-build-env.sh
-stat () {
-  if [ "$1" = --printf='%C' -a $# -eq 2 ]; then
-    printf 'mock_u:mock_r:mock_t:s0'
-  else
-    command stat "$@"
-  fi
-}
-export -f stat
-
-# Verify that host directories get mounted without z option
-# and INFO messages get printed out
-@test "start-build-env.sh (Docker without z mount option)" {
-  if [ "$(uname -s)" != "Linux" ]; then
-    skip "Not on Linux platform"
-  fi
-  enable_selinux
-  setup_user
-  DCKR_MOCK_VER=1.4
-  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
-  [ "$status" -eq 0 ]
-  [[ ${lines[0]} == "INFO: SELinux policy is enforced." ]]
-  [[ ${lines[1]} =~ \
-     "Mounted ".*" may not be accessible to the container." ]]
-  [[ ${lines[2]} == \
-     "INFO: If so, on the host, run the following command:" ]]
-  [[ ${lines[3]} =~ "# chcon -Rt svirt_sandbox_file_t " ]]
-  [[ ${lines[-2]} =~ "Mounted ".*" without z option." ]]
-  [[ ${lines[-1]} =~ "Mounted ".*" without z option." ]]
-}
-
-# Verify that host directories get mounted with z option
-@test "start-build-env.sh (Docker with z mount option)" {
-  if [ "$(uname -s)" != "Linux" ]; then
-    skip "Not on Linux platform"
-  fi
-  enable_selinux
-  setup_user
-  DCKR_MOCK_VER=1.7
-  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
-  [ "$status" -eq 0 ]
-  [[ ${lines[-2]} =~ "Mounted ".*" with z option." ]]
-  [[ ${lines[-1]} =~ "Mounted ".*" with z option." ]]
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9cc6d1df/start-build-env.sh
----------------------------------------------------------------------
diff --git a/start-build-env.sh b/start-build-env.sh
index 60efea5..5a18151 100755
--- a/start-build-env.sh
+++ b/start-build-env.sh
@@ -21,36 +21,10 @@ cd "$(dirname "$0")" # connect to root
 
 docker build -t hadoop-build dev-support/docker
 
-if [ "$(uname -s)" = "Linux" ]; then
+if [ "$(uname -s)" == "Linux" ]; then
   USER_NAME=${SUDO_USER:=$USER}
   USER_ID=$(id -u "${USER_NAME}")
   GROUP_ID=$(id -g "${USER_NAME}")
-  # man docker-run
-  # When using SELinux, mounted directories may not be accessible
-  # to the container. To work around this, with Docker prior to 1.7
-  # one needs to run the "chcon -Rt svirt_sandbox_file_t" command on
-  # the directories. With Docker 1.7 and later the z mount option
-  # does this automatically.
-  if command -v selinuxenabled >/dev/null && selinuxenabled; then
-    DCKR_VER=$(docker -v|awk '$1 == "Docker" && $2 == "version"\
-                         {split($3,ver,".");print ver[1]"."ver[2]}')
-    DCKR_MAJ=${DCKR_VER%.*}
-    DCKR_MIN=${DCKR_VER#*.}
-    if [ "${DCKR_MAJ}" -eq 1 ] && [ "${DCKR_MIN}" -ge 7 ] ||
-        [ "${DCKR_MAJ}" -gt 1 ]; then
-      V_OPTS=:z
-    else
-      for d in "${PWD}" "${HOME}/.m2"; do
-        ctx=$(stat --printf='%C' "$d"|cut -d':' -f3)
-        if [ "$ctx" != svirt_sandbox_file_t ] && [ "$ctx" != container_file_t ]; then
-          printf 'INFO: SELinux policy is enforced.\n'
-          printf '\tMounted %s may not be accessible to the container.\n' "$d"
-          printf 'INFO: If so, on the host, run the following command:\n'
-          printf '\t# chcon -Rt svirt_sandbox_file_t %s\n' "$d"
-        fi
-      done
-    fi
-  fi
 else # boot2docker uid and gid
   USER_NAME=$USER
   USER_ID=1000
@@ -71,8 +45,8 @@ UserSpecificDocker
 # system.  And this also is a significant speedup in subsequent
 # builds because the dependencies are downloaded only once.
 docker run --rm=true -t -i \
-  -v "${PWD}:/home/${USER_NAME}/hadoop${V_OPTS:-}" \
+  -v "${PWD}:/home/${USER_NAME}/hadoop" \
   -w "/home/${USER_NAME}/hadoop" \
-  -v "${HOME}/.m2:/home/${USER_NAME}/.m2${V_OPTS:-}" \
+  -v "${HOME}/.m2:/home/${USER_NAME}/.m2" \
   -u "${USER_NAME}" \
   "hadoop-build-${USER_ID}"


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[10/50] [abbrv] hadoop git commit: HDFS-13099. RBF: Use the ZooKeeper as the default State Store. Contributed by Yiqun Lin.

Posted by ae...@apache.org.
HDFS-13099. RBF: Use the ZooKeeper as the default State Store. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/543f3abb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/543f3abb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/543f3abb

Branch: refs/heads/HDFS-7240
Commit: 543f3abbee79d7ec70353f0cdda6397ee001324e
Parents: ddec08d
Author: Yiqun Lin <yq...@apache.org>
Authored: Fri Feb 9 13:57:42 2018 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Fri Feb 9 13:57:42 2018 +0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  4 ++--
 .../src/main/resources/hdfs-default.xml            | 10 ++++++++--
 .../src/site/markdown/HDFSRouterFederation.md      |  2 +-
 .../server/federation/RouterConfigBuilder.java     |  6 ++++++
 .../store/FederationStateStoreTestUtils.java       | 17 +++++++++++------
 5 files changed, 28 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/543f3abb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index e0b5b85..c0ad4ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -34,8 +34,8 @@ import org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCPerformance
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
-import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl;
 import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl;
 import org.apache.hadoop.http.HttpConfig;
 
 /** 
@@ -1275,7 +1275,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String FEDERATION_STORE_DRIVER_CLASS =
       FEDERATION_STORE_PREFIX + "driver.class";
   public static final Class<? extends StateStoreDriver>
-      FEDERATION_STORE_DRIVER_CLASS_DEFAULT = StateStoreFileImpl.class;
+      FEDERATION_STORE_DRIVER_CLASS_DEFAULT = StateStoreZooKeeperImpl.class;
 
   public static final String FEDERATION_STORE_CONNECTION_TEST_MS =
       FEDERATION_STORE_PREFIX + "connection.test";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/543f3abb/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 59df122..f6d232e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -5085,9 +5085,15 @@
 
   <property>
     <name>dfs.federation.router.store.driver.class</name>
-    <value>org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl</value>
+    <value>org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl</value>
     <description>
-      Class to implement the State Store. By default it uses the local disk.
+      Class to implement the State Store. Three implementations are currently
+      supported:
+      org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl,
+      org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileSystemImpl and
+      org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl.
+      They use a local file, a Hadoop filesystem and ZooKeeper as the backend, respectively.
+      ZooKeeper is the default.
     </description>
   </property>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/543f3abb/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
index 5649755..ebe94a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
@@ -325,7 +325,7 @@ The connection to the State Store and the internal caching at the Router.
 |:---- |:---- |:---- |
 | dfs.federation.router.store.enable | `true` | If `true`, the Router connects to the State Store. |
 | dfs.federation.router.store.serializer | `StateStoreSerializerPBImpl` | Class to serialize State Store records. |
-| dfs.federation.router.store.driver.class | `StateStoreZKImpl` | Class to implement the State Store. |
+| dfs.federation.router.store.driver.class | `StateStoreZooKeeperImpl` | Class to implement the State Store. |
 | dfs.federation.router.store.connection.test | 60000 | How often to check for the connection to the State Store in milliseconds. |
 | dfs.federation.router.cache.ttl | 60000 | How often to refresh the State Store caches in milliseconds. |
 | dfs.federation.router.store.membership.expiration | 300000 | Expiration time in milliseconds for a membership record. |
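
For illustration only (not part of this patch), a deployment that wants to keep the previous file-based behaviour can override the new ZooKeeper default in hdfs-site.xml, using the key and implementation class named above:

```xml
<!-- Hypothetical override: keep the local-file State Store instead of the new ZooKeeper default. -->
<property>
  <name>dfs.federation.router.store.driver.class</name>
  <value>org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl</value>
</property>
```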

http://git-wip-us.apache.org/repos/asf/hadoop/blob/543f3abb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/RouterConfigBuilder.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/RouterConfigBuilder.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/RouterConfigBuilder.java
index 3659bf9..b332f1f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/RouterConfigBuilder.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/RouterConfigBuilder.java
@@ -19,6 +19,8 @@ package org.apache.hadoop.hdfs.server.federation;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils;
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
 
 /**
  * Constructs a router configuration with individual features enabled/disabled.
@@ -119,6 +121,10 @@ public class RouterConfigBuilder {
   }
 
   public RouterConfigBuilder stateStore() {
+    // reset the State Store driver implementation class for testing
+    conf.setClass(DFSConfigKeys.FEDERATION_STORE_DRIVER_CLASS,
+        FederationStateStoreTestUtils.getTestDriverClass(),
+        StateStoreDriver.class);
     return this.stateStore(true);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/543f3abb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/FederationStateStoreTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/FederationStateStoreTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/FederationStateStoreTestUtils.java
index dbb8f3f..def3935 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/FederationStateStoreTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/FederationStateStoreTestUtils.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState;
 import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
 import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileBaseImpl;
+import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl;
 import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
 import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState;
 import org.apache.hadoop.hdfs.server.federation.store.records.MembershipStats;
@@ -50,17 +51,21 @@ import org.apache.hadoop.util.Time;
  */
 public final class FederationStateStoreTestUtils {
 
+  /** The State Store driver implementation class used for testing. */
+  private static final Class<? extends StateStoreDriver>
+      FEDERATION_STORE_DRIVER_CLASS_FOR_TEST = StateStoreFileImpl.class;
+
   private FederationStateStoreTestUtils() {
     // Utility Class
   }
 
   /**
-   * Get the default State Store driver implementation.
+   * Get the State Store driver implementation for testing.
    *
-   * @return Class of the default State Store driver implementation.
+   * @return Class of the State Store driver implementation.
    */
-  public static Class<? extends StateStoreDriver> getDefaultDriver() {
-    return DFSConfigKeys.FEDERATION_STORE_DRIVER_CLASS_DEFAULT;
+  public static Class<? extends StateStoreDriver> getTestDriverClass() {
+    return FEDERATION_STORE_DRIVER_CLASS_FOR_TEST;
   }
 
   /**
@@ -69,7 +74,7 @@ public final class FederationStateStoreTestUtils {
    * @return State Store configuration.
    */
   public static Configuration getStateStoreConfiguration() {
-    Class<? extends StateStoreDriver> clazz = getDefaultDriver();
+    Class<? extends StateStoreDriver> clazz = getTestDriverClass();
     return getStateStoreConfiguration(clazz);
   }
 
@@ -146,7 +151,7 @@ public final class FederationStateStoreTestUtils {
    * @throws IOException
    */
   public static void deleteStateStore() throws IOException {
-    Class<? extends StateStoreDriver> driverClass = getDefaultDriver();
+    Class<? extends StateStoreDriver> driverClass = getTestDriverClass();
     deleteStateStore(driverClass);
   }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org