Posted to common-commits@hadoop.apache.org by st...@apache.org on 2017/06/26 12:49:35 UTC

[01/50] hadoop git commit: HADOOP-14289. Move log4j APIs over to slf4j in hadoop-common.

Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13345 be922ba6a -> 2b3c4b840


HADOOP-14289. Move log4j APIs over to slf4j in hadoop-common.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e855cc4e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e855cc4e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e855cc4e

Branch: refs/heads/HADOOP-13345
Commit: e855cc4ed467d03f4f03e8b3a1ff27654b349a52
Parents: fb68980
Author: Akira Ajisaka <aa...@apache.org>
Authored: Fri Jun 16 13:17:10 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Fri Jun 16 13:17:10 2017 +0900

----------------------------------------------------------------------
 .../org/apache/hadoop/http/HttpRequestLog.java  | 23 +++++++++-----------
 1 file changed, 10 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e855cc4e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java
index a7c23b9..05573a8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java
@@ -24,16 +24,18 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogConfigurationException;
 import org.apache.commons.logging.LogFactory;
 import org.apache.log4j.Appender;
-import org.apache.log4j.Logger;
 import org.eclipse.jetty.server.NCSARequestLog;
 import org.eclipse.jetty.server.RequestLog;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * RequestLog object for use with Http
  */
 public class HttpRequestLog {
 
-  public static final Log LOG = LogFactory.getLog(HttpRequestLog.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(HttpRequestLog.class);
   private static final HashMap<String, String> serverToComponent;
 
   static {
@@ -65,20 +67,18 @@ public class HttpRequestLog {
     }
     if (isLog4JLogger) {
       Log4JLogger httpLog4JLog = (Log4JLogger)logger;
-      Logger httpLogger = httpLog4JLog.getLogger();
+      org.apache.log4j.Logger httpLogger = httpLog4JLog.getLogger();
       Appender appender = null;
 
       try {
         appender = httpLogger.getAppender(appenderName);
       } catch (LogConfigurationException e) {
-        LOG.warn("Http request log for " + loggerName
-            + " could not be created");
+        LOG.warn("Http request log for {} could not be created", loggerName);
         throw e;
       }
 
       if (appender == null) {
-        LOG.info("Http request log for " + loggerName
-            + " is not defined");
+        LOG.info("Http request log for {} is not defined", loggerName);
         return null;
       }
 
@@ -89,14 +89,11 @@ public class HttpRequestLog {
         requestLog.setFilename(requestLogAppender.getFilename());
         requestLog.setRetainDays(requestLogAppender.getRetainDays());
         return requestLog;
-      }
-      else {
-        LOG.warn("Jetty request log for " + loggerName
-            + " was of the wrong class");
+      } else {
+        LOG.warn("Jetty request log for {} was of the wrong class", loggerName);
         return null;
       }
-    }
-    else {
+    } else {
       LOG.warn("Jetty request log can only be enabled using Log4j");
       return null;
     }
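
A note for readers following the log4j-to-slf4j migration: the key change in this
hunk is the switch from string concatenation to slf4j's {} placeholders. Below is
a minimal, self-contained sketch of the two styles side by side. It is not Hadoop
code; the class name Slf4jStyleExample and the logger/variable names are
illustrative, and only the slf4j API (Logger, LoggerFactory) is assumed.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class Slf4jStyleExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(Slf4jStyleExample.class);

      public static void main(String[] args) {
        String loggerName = "http.requests.example";

        // Concatenation style (the old commons-logging/log4j habit): the
        // message string is built even when WARN is disabled.
        LOG.warn("Http request log for " + loggerName + " could not be created");

        // Parameterized style, as adopted by the patch: slf4j substitutes the
        // argument into the {} placeholder only if the message is emitted.
        LOG.warn("Http request log for {} could not be created", loggerName);
      }
    }

Besides skipping needless string building on disabled levels, the parameterized
form keeps the message template constant, which makes the logs easier to grep.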




[42/50] hadoop git commit: HADOOP-14568. GenericTestUtils#waitFor missing parameter verification. Contributed by Yiqun Lin

Posted by st...@apache.org.
HADOOP-14568. GenericTestUtils#waitFor missing parameter verification.
Contributed by Yiqun Lin


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e5db9af8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e5db9af8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e5db9af8

Branch: refs/heads/HADOOP-13345
Commit: e5db9af8a00ded73ee1c75bb4c0d5d50e15a305c
Parents: ca13b22
Author: Steve Loughran <st...@apache.org>
Authored: Fri Jun 23 14:39:58 2017 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Fri Jun 23 14:39:58 2017 +0100

----------------------------------------------------------------------
 .../apache/hadoop/test/GenericTestUtils.java    | 19 +++++++++---
 .../hadoop/test/TestGenericTestUtils.java       | 32 ++++++++++++++++++++
 2 files changed, 47 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5db9af8/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 7b94784..fcc4e3c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -59,6 +59,7 @@ import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
 import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
 import com.google.common.base.Supplier;
 import com.google.common.collect.Sets;
 
@@ -85,6 +86,14 @@ public abstract class GenericTestUtils {
    */
   public static final String DEFAULT_TEST_DATA_PATH = "target/test/data/";
 
+  /**
+   * Error string used in {@link GenericTestUtils#waitFor(Supplier, int, int)}.
+   */
+  public static final String ERROR_MISSING_ARGUMENT =
+      "Input supplier interface should be initialized";
+  public static final String ERROR_INVALID_ARGUMENT =
+      "Total wait time should be greater than check interval time";
+
   @SuppressWarnings("unchecked")
   public static void disableLog(Log log) {
     // We expect that commons-logging is a wrapper around Log4j.
@@ -258,10 +267,12 @@ public abstract class GenericTestUtils {
     }
   }  
 
-  public static void waitFor(Supplier<Boolean> check,
-      int checkEveryMillis, int waitForMillis)
-      throws TimeoutException, InterruptedException
-  {
+  public static void waitFor(Supplier<Boolean> check, int checkEveryMillis,
+      int waitForMillis) throws TimeoutException, InterruptedException {
+    Preconditions.checkNotNull(check, ERROR_MISSING_ARGUMENT);
+    Preconditions.checkArgument(waitForMillis > checkEveryMillis,
+        ERROR_INVALID_ARGUMENT);
+
     long st = Time.now();
     do {
       boolean result = check.get();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5db9af8/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
index 86df5d5..b3fc836 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
@@ -26,7 +26,10 @@ import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Supplier;
+
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 public class TestGenericTestUtils extends GenericTestUtils {
 
@@ -119,4 +122,33 @@ public class TestGenericTestUtils extends GenericTestUtils {
     assertTrue(logCapturer.getOutput().isEmpty());
   }
 
+  @Test
+  public void testWaitingForConditionWithInvalidParams() throws Throwable {
+    // test waitFor method with null supplier interface
+    try {
+      waitFor(null, 0, 0);
+    } catch (NullPointerException e) {
+      assertExceptionContains(GenericTestUtils.ERROR_MISSING_ARGUMENT, e);
+    }
+
+    Supplier<Boolean> simpleSupplier = new Supplier<Boolean>() {
+
+      @Override
+      public Boolean get() {
+        return true;
+      }
+    };
+
+    // test waitFor method with waitForMillis greater than checkEveryMillis
+    waitFor(simpleSupplier, 5, 10);
+    try {
+      // test waitFor method with waitForMillis smaller than checkEveryMillis
+      waitFor(simpleSupplier, 10, 5);
+      fail(
+          "Expected a failure when the param value of"
+          + " waitForMillis is smaller than checkEveryMillis.");
+    } catch (IllegalArgumentException e) {
+      assertExceptionContains(GenericTestUtils.ERROR_INVALID_ARGUMENT, e);
+    }
+  }
 }
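
As a standalone illustration of the guard this patch adds to waitFor, here is a
hedged sketch of the same polling-with-precondition-checks pattern outside of
Hadoop. The class and method names (PollUtil.waitFor) are hypothetical; it uses
java.util.function.Supplier instead of Guava's Supplier and plain exceptions
instead of Guava Preconditions, but the argument checks mirror the ones
introduced above.

    import java.util.concurrent.TimeoutException;
    import java.util.function.Supplier;

    public final class PollUtil {

      private PollUtil() {
      }

      public static void waitFor(Supplier<Boolean> check, int checkEveryMillis,
          int waitForMillis) throws TimeoutException, InterruptedException {
        // Fail fast on bad arguments instead of looping with a meaningless
        // configuration; this is what the Preconditions calls above achieve.
        if (check == null) {
          throw new NullPointerException(
              "Input supplier interface should be initialized");
        }
        if (waitForMillis <= checkEveryMillis) {
          throw new IllegalArgumentException(
              "Total wait time should be greater than check interval time");
        }

        long start = System.currentTimeMillis();
        do {
          if (Boolean.TRUE.equals(check.get())) {
            return;
          }
          Thread.sleep(checkEveryMillis);
        } while (System.currentTimeMillis() - start < waitForMillis);
        throw new TimeoutException(
            "Timed out after " + waitForMillis + " ms waiting for condition");
      }
    }

A caller would typically pass a short check interval and a much larger total
wait, for example PollUtil.waitFor(() -> queue.isEmpty(), 50, 10000).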




[25/50] hadoop git commit: HDFS-11647. Add -E option in hdfs "count" command to show erasure policy summarization. Contributed by luhuichun.

Posted by st...@apache.org.
HDFS-11647. Add -E option in hdfs "count" command to show erasure policy summarization. Contributed by luhuichun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/45ff4d38
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/45ff4d38
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/45ff4d38

Branch: refs/heads/HADOOP-13345
Commit: 45ff4d38e6175bc59b126633fc46927f8af9b641
Parents: 2b654a4
Author: Lei Xu <le...@apache.org>
Authored: Tue Jun 20 11:55:09 2017 -0700
Committer: Lei Xu <le...@apache.org>
Committed: Tue Jun 20 11:55:09 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/fs/ContentSummary.java    | 16 ++++++-
 .../java/org/apache/hadoop/fs/shell/Count.java  | 41 +++++++++++++----
 .../src/site/markdown/FileSystemShell.md        |  9 +++-
 .../java/org/apache/hadoop/cli/TestCLI.java     |  2 +-
 .../org/apache/hadoop/fs/shell/TestCount.java   |  5 +-
 .../src/test/resources/testConf.xml             |  2 +-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  6 ++-
 .../src/main/proto/hdfs.proto                   |  1 +
 .../ContentSummaryComputationContext.java       | 48 ++++++++++++++++++++
 .../hadoop/hdfs/server/namenode/INode.java      |  1 +
 .../test/resources/testErasureCodingConf.xml    | 41 +++++++++++++++++
 11 files changed, 155 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/45ff4d38/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
index 3e75951..cdbd10f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
@@ -39,6 +39,7 @@ public class ContentSummary extends QuotaUsage implements Writable{
   private long snapshotFileCount;
   private long snapshotDirectoryCount;
   private long snapshotSpaceConsumed;
+  private String erasureCodingPolicy;
 
   /** We don't use generics. Instead override spaceConsumed and other methods
       in order to keep backward compatibility. */
@@ -81,6 +82,11 @@ public class ContentSummary extends QuotaUsage implements Writable{
       return this;
     }
 
+    public Builder erasureCodingPolicy(String ecPolicy) {
+      this.erasureCodingPolicy = ecPolicy;
+      return this;
+    }
+
     @Override
     public Builder quota(long quota){
       super.quota(quota);
@@ -136,6 +142,7 @@ public class ContentSummary extends QuotaUsage implements Writable{
     private long snapshotFileCount;
     private long snapshotDirectoryCount;
     private long snapshotSpaceConsumed;
+    private String erasureCodingPolicy;
   }
 
   /** Constructor deprecated by ContentSummary.Builder*/
@@ -175,6 +182,7 @@ public class ContentSummary extends QuotaUsage implements Writable{
     this.snapshotFileCount = builder.snapshotFileCount;
     this.snapshotDirectoryCount = builder.snapshotDirectoryCount;
     this.snapshotSpaceConsumed = builder.snapshotSpaceConsumed;
+    this.erasureCodingPolicy = builder.erasureCodingPolicy;
   }
 
   /** @return the length */
@@ -202,6 +210,10 @@ public class ContentSummary extends QuotaUsage implements Writable{
     return snapshotSpaceConsumed;
   }
 
+  public String getErasureCodingPolicy() {
+    return erasureCodingPolicy;
+  }
+
   @Override
   @InterfaceAudience.Private
   public void write(DataOutput out) throws IOException {
@@ -237,6 +249,7 @@ public class ContentSummary extends QuotaUsage implements Writable{
           getSnapshotFileCount() == right.getSnapshotFileCount() &&
           getSnapshotDirectoryCount() == right.getSnapshotDirectoryCount() &&
           getSnapshotSpaceConsumed() == right.getSnapshotSpaceConsumed() &&
+          getErasureCodingPolicy().equals(right.getErasureCodingPolicy()) &&
           super.equals(to);
     } else {
       return super.equals(to);
@@ -247,7 +260,8 @@ public class ContentSummary extends QuotaUsage implements Writable{
   public int hashCode() {
     long result = getLength() ^ getFileCount() ^ getDirectoryCount()
         ^ getSnapshotLength() ^ getSnapshotFileCount()
-        ^ getSnapshotDirectoryCount() ^ getSnapshotSpaceConsumed();
+        ^ getSnapshotDirectoryCount() ^ getSnapshotSpaceConsumed()
+        ^ getErasureCodingPolicy().hashCode();
     return ((int) result) ^ super.hashCode();
   }
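
To make the new field concrete, here is a hedged sketch of building a
ContentSummary that carries an erasure coding policy and reading it back. The
counts and policy name are made up for illustration, and the Builder methods
other than erasureCodingPolicy() (length, fileCount, directoryCount) are
assumed to behave as in the existing Builder, since only erasureCodingPolicy()
is added by this patch.

    import org.apache.hadoop.fs.ContentSummary;

    public class EcContentSummaryExample {
      public static void main(String[] args) {
        ContentSummary summary = new ContentSummary.Builder()
            .length(1024L)                        // bytes under the path
            .fileCount(2L)
            .directoryCount(1L)
            .erasureCodingPolicy("RS-6-3-64k")    // the field added here
            .build();

        // For non-striped content the value would be "Replicated" instead.
        System.out.println("EC policy: " + summary.getErasureCodingPolicy());
      }
    }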
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45ff4d38/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java
index d15ae46..8f6fc4d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java
@@ -55,12 +55,14 @@ public class Count extends FsCommand {
   private static final String OPTION_EXCLUDE_SNAPSHOT = "x";
   //return the quota, namespace count and disk space usage.
   private static final String OPTION_QUOTA_AND_USAGE = "u";
+  private static final String OPTION_ECPOLICY = "e";
 
   public static final String NAME = "count";
   public static final String USAGE =
       "[-" + OPTION_QUOTA + "] [-" + OPTION_HUMAN + "] [-" + OPTION_HEADER
           + "] [-" + OPTION_TYPE + " [<storage type>]] [-" +
           OPTION_QUOTA_AND_USAGE + "] [-" + OPTION_EXCLUDE_SNAPSHOT
+          + "] [-" + OPTION_ECPOLICY
           + "] <path> ...";
   public static final String DESCRIPTION =
       "Count the number of directories, files and bytes under the paths\n" +
@@ -90,7 +92,8 @@ public class Count extends FsCommand {
           "It can also pass the value '', 'all' or 'ALL' to specify all " +
           "the storage types.\n" +
           "The -" + OPTION_QUOTA_AND_USAGE + " option shows the quota and \n" +
-          "the usage against the quota without the detailed content summary.";
+          "the usage against the quota without the detailed content summary."+
+          "The -"+ OPTION_ECPOLICY +" option shows the erasure coding policy.";
 
   private boolean showQuotas;
   private boolean humanReadable;
@@ -98,6 +101,7 @@ public class Count extends FsCommand {
   private List<StorageType> storageTypes = null;
   private boolean showQuotasAndUsageOnly;
   private boolean excludeSnapshots;
+  private boolean displayECPolicy;
 
   /** Constructor */
   public Count() {}
@@ -118,7 +122,8 @@ public class Count extends FsCommand {
   protected void processOptions(LinkedList<String> args) {
     CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE,
         OPTION_QUOTA, OPTION_HUMAN, OPTION_HEADER, OPTION_QUOTA_AND_USAGE,
-        OPTION_EXCLUDE_SNAPSHOT);
+        OPTION_EXCLUDE_SNAPSHOT,
+        OPTION_ECPOLICY);
     cf.addOptionWithValue(OPTION_TYPE);
     cf.parse(args);
     if (args.isEmpty()) { // default path is the current working directory
@@ -128,6 +133,7 @@ public class Count extends FsCommand {
     humanReadable = cf.getOpt(OPTION_HUMAN);
     showQuotasAndUsageOnly = cf.getOpt(OPTION_QUOTA_AND_USAGE);
     excludeSnapshots = cf.getOpt(OPTION_EXCLUDE_SNAPSHOT);
+    displayECPolicy = cf.getOpt(OPTION_ECPOLICY);
 
     if (showQuotas || showQuotasAndUsageOnly) {
       String types = cf.getOptValue(OPTION_TYPE);
@@ -146,15 +152,21 @@ public class Count extends FsCommand {
     }
 
     if (cf.getOpt(OPTION_HEADER)) {
+      StringBuilder headString = new StringBuilder();
       if (showQuotabyType) {
-        out.println(QuotaUsage.getStorageTypeHeader(storageTypes) + "PATHNAME");
+        headString.append(QuotaUsage.getStorageTypeHeader(storageTypes));
       } else {
         if (showQuotasAndUsageOnly) {
-          out.println(QuotaUsage.getHeader() + "PATHNAME");
+          headString.append(QuotaUsage.getHeader());
         } else {
-          out.println(ContentSummary.getHeader(showQuotas) + "PATHNAME");
+          headString.append(ContentSummary.getHeader(showQuotas));
         }
       }
+      if(displayECPolicy){
+        headString.append("ERASURECODING_POLICY ");
+      }
+      headString.append("PATHNAME");
+      out.println(headString.toString());
     }
   }
 
@@ -175,15 +187,26 @@ public class Count extends FsCommand {
 
   @Override
   protected void processPath(PathData src) throws IOException {
+    StringBuilder outputString = new StringBuilder();
     if (showQuotasAndUsageOnly || showQuotabyType) {
       QuotaUsage usage = src.fs.getQuotaUsage(src.path);
-      out.println(usage.toString(isHumanReadable(), showQuotabyType,
-          storageTypes) + src);
+      outputString.append(usage.toString(
+          isHumanReadable(), showQuotabyType, storageTypes));
     } else {
       ContentSummary summary = src.fs.getContentSummary(src.path);
-      out.println(summary.
-          toString(showQuotas, isHumanReadable(), excludeSnapshots) + src);
+      outputString.append(summary.toString(
+          showQuotas, isHumanReadable(), excludeSnapshots));
+    }
+    if(displayECPolicy){
+      ContentSummary summary = src.fs.getContentSummary(src.path);
+      if(!summary.getErasureCodingPolicy().equals("Replicated")){
+        outputString.append("EC:");
+      }
+      outputString.append(summary.getErasureCodingPolicy());
+      outputString.append(" ");
     }
+    outputString.append(src);
+    out.println(outputString.toString());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45ff4d38/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index e13b558..7786642 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -141,7 +141,7 @@ Similar to get command, except that the destination is restricted to a local fil
 count
 -----
 
-Usage: `hadoop fs -count [-q] [-h] [-v] [-x] [-t [<storage type>]] [-u] <paths> `
+Usage: `hadoop fs -count [-q] [-h] [-v] [-x] [-t [<storage type>]] [-u] [-e] <paths> `
 
 Count the number of directories, files and bytes under the paths that match the specified file pattern. Get the quota and the usage. The output columns with -count are: DIR\_COUNT, FILE\_COUNT, CONTENT\_SIZE, PATHNAME
 
@@ -159,6 +159,12 @@ The -v option displays a header line.
 
 The -x option excludes snapshots from the result calculation. Without the -x option (default), the result is always calculated from all INodes, including all snapshots under the given path. The -x option is ignored if -u or -q option is given.
 
+The -e option shows the erasure coding policy for each file.
+
+The output columns with -count -e are: DIR\_COUNT, FILE\_COUNT, CONTENT_SIZE, ERASURECODING\_POLICY, PATHNAME
+
+The ERASURECODING\_POLICY is the name of the policy for the file. If an erasure coding policy is set on that file, it will return the name of the policy. If no erasure coding policy is set, it will return \"Replicated\", which means the file uses the replication storage strategy.
+
 Example:
 
 * `hadoop fs -count hdfs://nn1.example.com/file1 hdfs://nn2.example.com/file2`
@@ -168,6 +174,7 @@ Example:
 * `hadoop fs -count -u hdfs://nn1.example.com/file1`
 * `hadoop fs -count -u -h hdfs://nn1.example.com/file1`
 * `hadoop fs -count -u -h -v hdfs://nn1.example.com/file1`
+* `hadoop fs -count -e hdfs://nn1.example.com/file1`
 
 Exit Code:
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45ff4d38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/TestCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/TestCLI.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/TestCLI.java
index e1514ff..977262f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/TestCLI.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/TestCLI.java
@@ -1,4 +1,4 @@
-/**
+ /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45ff4d38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
index 2a1c38c..a782958 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
@@ -447,7 +447,7 @@ public class TestCount {
     Count count = new Count();
     String actual = count.getUsage();
     String expected =
-        "-count [-q] [-h] [-v] [-t [<storage type>]] [-u] [-x] <path> ...";
+        "-count [-q] [-h] [-v] [-t [<storage type>]] [-u] [-x] [-e] <path> ...";
     assertEquals("Count.getUsage", expected, actual);
   }
 
@@ -478,7 +478,8 @@ public class TestCount {
         + "It can also pass the value '', 'all' or 'ALL' to specify all the "
         + "storage types.\n"
         + "The -u option shows the quota and \n"
-        + "the usage against the quota without the detailed content summary.";
+        + "the usage against the quota without the detailed content summary."
+        + "The -e option shows the erasure coding policy.";
 
     assertEquals("Count.getDescription", expected, actual);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45ff4d38/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
index 6644cd8..710f063 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
+++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
@@ -278,7 +278,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-count \[-q\] \[-h\] \[-v\] \[-t \[&lt;storage type&gt;\]\] \[-u\] \[-x\] &lt;path&gt; \.\.\. :( )*</expected-output>
+          <expected-output>^-count \[-q\] \[-h\] \[-v\] \[-t \[&lt;storage type&gt;\]\] \[-u\] \[-x\] \[-e\] &lt;path&gt; \.\.\. :( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45ff4d38/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index b356583..63d0025 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -1588,7 +1588,8 @@ public class PBHelperClient {
         snapshotSpaceConsumed(cs.getSnapshotSpaceConsumed()).
         quota(cs.getQuota()).
         spaceConsumed(cs.getSpaceConsumed()).
-        spaceQuota(cs.getSpaceQuota());
+        spaceQuota(cs.getSpaceQuota()).
+        erasureCodingPolicy(cs.getErasureCodingPolicy());
     if (cs.hasTypeQuotaInfos()) {
       addStorageTypes(cs.getTypeQuotaInfos(), builder);
     }
@@ -2281,7 +2282,8 @@ public class PBHelperClient {
         setSnapshotSpaceConsumed(cs.getSnapshotSpaceConsumed()).
         setQuota(cs.getQuota()).
         setSpaceConsumed(cs.getSpaceConsumed()).
-        setSpaceQuota(cs.getSpaceQuota());
+        setSpaceQuota(cs.getSpaceQuota()).
+        setErasureCodingPolicy(cs.getErasureCodingPolicy());
 
     if (cs.isTypeQuotaSet() || cs.isTypeConsumedAvailable()) {
       builder.setTypeQuotaInfos(getBuilder(cs));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45ff4d38/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
index 10ea5d7..b306fcf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
@@ -159,6 +159,7 @@ message ContentSummaryProto {
   optional uint64 snapshotFileCount = 9;
   optional uint64 snapshotDirectoryCount = 10;
   optional uint64 snapshotSpaceConsumed = 11;
+  optional string erasureCodingPolicy = 12;
 }
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45ff4d38/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index b35270d..8d5aa0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -21,6 +21,14 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.io.WritableUtils;
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_ERASURECODING_POLICY;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -36,6 +44,8 @@ public class ContentSummaryComputationContext {
   private long sleepMilliSec = 0;
   private int sleepNanoSec = 0;
 
+  public static final String REPLICATED = "Replicated";
+  public static final Log LOG = LogFactory.getLog(INode.class);
   /**
    * Constructor
    *
@@ -138,4 +148,42 @@ public class ContentSummaryComputationContext {
     return (bsps != null) ? bsps:
         fsn.getBlockManager().getStoragePolicySuite();
   }
+
+  /** Get the erasure coding policy. */
+  public String getErasureCodingPolicyName(INode inode) {
+    if (inode.isFile()) {
+      INodeFile iNodeFile = inode.asFile();
+      if (iNodeFile.isStriped()) {
+        byte ecPolicyId = iNodeFile.getErasureCodingPolicyID();
+        return fsn.getErasureCodingPolicyManager()
+            .getByID(ecPolicyId).getName();
+      } else {
+        return REPLICATED;
+      }
+    }
+    if (inode.isSymlink()) {
+      return "";
+    }
+    try {
+      final XAttrFeature xaf = inode.getXAttrFeature();
+      if (xaf != null) {
+        XAttr xattr = xaf.getXAttr(XATTR_ERASURECODING_POLICY);
+        if (xattr != null) {
+          ByteArrayInputStream bins =
+              new ByteArrayInputStream(xattr.getValue());
+          DataInputStream din = new DataInputStream(bins);
+          String ecPolicyName = WritableUtils.readString(din);
+          return dir.getFSNamesystem()
+              .getErasureCodingPolicyManager()
+              .getEnabledPolicyByName(ecPolicyName)
+              .getName();
+        }
+      }
+    } catch (IOException ioe) {
+      LOG.warn("Encountered error getting ec policy for "
+          + inode.getFullPathName(), ioe);
+      return "";
+    }
+    return "";
+  }
 }
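
The xattr branch of getErasureCodingPolicyName() above recovers the policy name
by reading a Writable string out of the raw xattr bytes. Below is a hedged,
self-contained sketch of that decode step; the class name EcXAttrDecodeExample
and the sample value are illustrative, and the write side here simply uses
WritableUtils.writeString as the symmetric counterpart of the readString call
in the patch.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.io.WritableUtils;

    public class EcXAttrDecodeExample {
      public static void main(String[] args) throws IOException {
        // Build a stand-in for XAttr#getValue(): a policy name encoded as a
        // Writable string.
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        WritableUtils.writeString(new DataOutputStream(bos), "RS-6-3-64k");
        byte[] xattrValue = bos.toByteArray();

        // Decode, mirroring the DataInputStream + readString sequence above.
        DataInputStream din =
            new DataInputStream(new ByteArrayInputStream(xattrValue));
        String ecPolicyName = WritableUtils.readString(din);
        System.out.println("Policy from xattr: " + ecPolicyName);
      }
    }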

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45ff4d38/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index c6258a1..1f982ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -445,6 +445,7 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
         snapshotFileCount(snapshotCounts.getFileCount()).
         snapshotDirectoryCount(snapshotCounts.getDirectoryCount()).
         snapshotSpaceConsumed(snapshotCounts.getStoragespace()).
+        erasureCodingPolicy(summary.getErasureCodingPolicyName(this)).
         build();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45ff4d38/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index 86db569..26acc1f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -570,5 +570,46 @@
         </comparator>
       </comparators>
     </test>
+
+    <test> <!-- TESTED -->
+      <description>count: file using absolute path with option -e to show erasurecoding policy of a directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir1</command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -path /dir1 -policy RS-6-3-64k</ec-admin-command>
+        <command>-fs NAMENODE -touchz /dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir1/file2</command>
+        <command>-fs NAMENODE -count -e -v /dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmdir /dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*2( |\t)*0 EC:[A-Za-z0-9-]{1,}( )*/dir1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: file using absolute path with option -e to show erasurecoding policy of a file and option -v to show head information</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -count -e -v /file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*DIR_COUNT   FILE_COUNT       CONTENT_SIZE( )*ERASURECODING_POLICY( )*PATHNAME</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 [A-Za-z0-9-]{1,}( )*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
   </tests>
 </configuration>




[10/50] hadoop git commit: HADOOP-14538. Fix TestFilterFileSystem and TestHarFileSystem failures after DistributedFileSystem.append API. Contributed by Lei (Eddy) Xu.

Posted by st...@apache.org.
HADOOP-14538. Fix TestFilterFileSystem and TestHarFileSystem failures after DistributedFileSystem.append API. Contributed by Lei (Eddy) Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ade5124
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ade5124
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ade5124

Branch: refs/heads/HADOOP-13345
Commit: 7ade5124b8b6c52a084ec187c531017eee0f1884
Parents: 75043d3
Author: Akira Ajisaka <aa...@apache.org>
Authored: Mon Jun 19 19:09:18 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Mon Jun 19 19:09:18 2017 +0900

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/fs/FilterFileSystem.java    | 5 +++++
 .../src/main/java/org/apache/hadoop/fs/HarFileSystem.java       | 5 +++++
 2 files changed, 10 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ade5124/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index e940065..954a041 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -670,4 +670,9 @@ public class FilterFileSystem extends FileSystem {
   public FSDataOutputStreamBuilder createFile(Path path) {
     return fs.createFile(path);
   }
+
+  @Override
+  public FSDataOutputStreamBuilder appendFile(Path path) {
+    return fs.appendFile(path);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ade5124/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
index c410e34..6a1e8bd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
@@ -1273,4 +1273,9 @@ public class HarFileSystem extends FileSystem {
   public FSDataOutputStreamBuilder createFile(Path path) {
     return fs.createFile(path);
   }
+
+  @Override
+  public FSDataOutputStreamBuilder appendFile(Path path) {
+    return fs.appendFile(path);
+  }
 }
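
Both overrides simply delegate the builder-based append to the wrapped
FileSystem, matching what was already done for createFile(). As a rough
illustration of what that API looks like to a caller, here is a hedged usage
sketch; the path is made up and it assumes an underlying file system that
supports append, so treat it as an outline rather than a tested snippet.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class AppendBuilderExample {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path path = new Path("/tmp/append-example.log");  // illustrative path

        // Builder-style append; the overrides above ensure that a
        // FilterFileSystem or HarFileSystem hands this off to the wrapped fs.
        try (FSDataOutputStream out = fs.appendFile(path).build()) {
          out.writeBytes("one more line\n");
        }
      }
    }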




[35/50] hadoop git commit: YARN-6127. Add support for work preserving NM restart when AMRMProxy is enabled. (Botong Huang via asuresh).

Posted by st...@apache.org.
YARN-6127. Add support for work preserving NM restart when AMRMProxy is enabled. (Botong Huang via asuresh).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/49aa60e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/49aa60e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/49aa60e5

Branch: refs/heads/HADOOP-13345
Commit: 49aa60e50d20f8c18ed6f00fa8966244536fe7da
Parents: 8dbd53e
Author: Arun Suresh <as...@apache.org>
Authored: Thu Jun 22 11:26:29 2017 -0700
Committer: Arun Suresh <as...@apache.org>
Committed: Thu Jun 22 11:27:13 2017 -0700

----------------------------------------------------------------------
 .../AMRMProxyApplicationContextImpl.java        |  17 +-
 .../nodemanager/amrmproxy/AMRMProxyService.java | 179 +++++++++++++++---
 .../amrmproxy/AMRMProxyTokenSecretManager.java  |  93 +++++++++-
 .../amrmproxy/AbstractRequestInterceptor.java   |  37 +++-
 .../amrmproxy/RequestInterceptor.java           |  20 +-
 .../containermanager/ContainerManagerImpl.java  |   4 +
 .../recovery/NMLeveldbStateStoreService.java    | 182 ++++++++++++++++++-
 .../recovery/NMNullStateStoreService.java       |  29 +++
 .../recovery/NMStateStoreService.java           |  86 +++++++++
 .../amrmproxy/BaseAMRMProxyTest.java            |  78 +++++---
 .../amrmproxy/TestAMRMProxyService.java         | 119 +++++++++++-
 .../TestAMRMProxyTokenSecretManager.java        |  62 ++++++-
 .../recovery/NMMemoryStateStoreService.java     |  65 ++++++-
 .../TestNMLeveldbStateStoreService.java         | 105 ++++++++++-
 .../hadoop/yarn/server/MiniYARNCluster.java     |   6 +-
 15 files changed, 1001 insertions(+), 81 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/49aa60e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyApplicationContextImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyApplicationContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyApplicationContextImpl.java
index 6d4fdfc..9938b37 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyApplicationContextImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyApplicationContextImpl.java
@@ -45,12 +45,13 @@ public class AMRMProxyApplicationContextImpl implements
 
   /**
    * Create an instance of the AMRMProxyApplicationContext.
-   * 
-   * @param nmContext
-   * @param conf
-   * @param applicationAttemptId
-   * @param user
-   * @param amrmToken
+   *
+   * @param nmContext NM context
+   * @param conf configuration
+   * @param applicationAttemptId attempt id
+   * @param user user name of the application
+   * @param amrmToken amrmToken issued by RM
+   * @param localToken amrmToken issued by AMRMProxy
    */
   public AMRMProxyApplicationContextImpl(Context nmContext,
       Configuration conf, ApplicationAttemptId applicationAttemptId,
@@ -86,6 +87,8 @@ public class AMRMProxyApplicationContextImpl implements
 
   /**
    * Sets the application's AMRMToken.
+   *
+   * @param amrmToken amrmToken issued by RM
    */
   public synchronized void setAMRMToken(
       Token<AMRMTokenIdentifier> amrmToken) {
@@ -99,6 +102,8 @@ public class AMRMProxyApplicationContextImpl implements
 
   /**
    * Sets the application's AMRMToken.
+   *
+   * @param localToken amrmToken issued by AMRMProxy
    */
   public synchronized void setLocalAMRMToken(
       Token<AMRMTokenIdentifier> localToken) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49aa60e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
index aeb3be8..d63b2cf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
@@ -64,7 +64,7 @@ import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
-
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredAMRMProxyState;
 import org.apache.hadoop.yarn.server.nodemanager.scheduler.DistributedScheduler;
 import org.apache.hadoop.yarn.server.security.MasterKeyData;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
@@ -86,6 +86,10 @@ public class AMRMProxyService extends AbstractService implements
     ApplicationMasterProtocol {
   private static final Logger LOG = LoggerFactory
       .getLogger(AMRMProxyService.class);
+
+  private static final String NMSS_USER_KEY = "user";
+  private static final String NMSS_AMRMTOKEN_KEY = "amrmtoken";
+
   private Server server;
   private final Context nmContext;
   private final AsyncDispatcher dispatcher;
@@ -95,9 +99,9 @@ public class AMRMProxyService extends AbstractService implements
 
   /**
    * Creates an instance of the service.
-   * 
-   * @param nmContext
-   * @param dispatcher
+   *
+   * @param nmContext NM context
+   * @param dispatcher NM dispatcher
    */
   public AMRMProxyService(Context nmContext, AsyncDispatcher dispatcher) {
     super(AMRMProxyService.class.getName());
@@ -113,6 +117,14 @@ public class AMRMProxyService extends AbstractService implements
   }
 
   @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    super.serviceInit(conf);
+    this.secretManager =
+        new AMRMProxyTokenSecretManager(this.nmContext.getNMStateStore());
+    this.secretManager.init(conf);
+  }
+
+  @Override
   protected void serviceStart() throws Exception {
     LOG.info("Starting AMRMProxyService");
     Configuration conf = getConfig();
@@ -134,7 +146,6 @@ public class AMRMProxyService extends AbstractService implements
             YarnConfiguration.AMRM_PROXY_CLIENT_THREAD_COUNT,
             YarnConfiguration.DEFAULT_AMRM_PROXY_CLIENT_THREAD_COUNT);
 
-    this.secretManager = new AMRMProxyTokenSecretManager(serverConf);
     this.secretManager.start();
 
     this.server =
@@ -161,6 +172,62 @@ public class AMRMProxyService extends AbstractService implements
   }
 
   /**
+   * Recover from NM state store. Called after serviceInit before serviceStart.
+   *
+   * @throws IOException if recover fails
+   */
+  public void recover() throws IOException {
+    LOG.info("Recovering AMRMProxyService");
+
+    RecoveredAMRMProxyState state =
+        this.nmContext.getNMStateStore().loadAMRMProxyState();
+
+    this.secretManager.recover(state);
+
+    LOG.info("Recovering {} running applications for AMRMProxy",
+        state.getAppContexts().size());
+    for (Map.Entry<ApplicationAttemptId, Map<String, byte[]>> entry : state
+        .getAppContexts().entrySet()) {
+      ApplicationAttemptId attemptId = entry.getKey();
+      LOG.info("Recovering app attempt {}", attemptId);
+
+      // Try recover for the running application attempt
+      try {
+        String user = null;
+        Token<AMRMTokenIdentifier> amrmToken = null;
+        for (Map.Entry<String, byte[]> contextEntry : entry.getValue()
+            .entrySet()) {
+          if (contextEntry.getKey().equals(NMSS_USER_KEY)) {
+            user = new String(contextEntry.getValue(), "UTF-8");
+          } else if (contextEntry.getKey().equals(NMSS_AMRMTOKEN_KEY)) {
+            amrmToken = new Token<>();
+            amrmToken.decodeFromUrlString(
+                new String(contextEntry.getValue(), "UTF-8"));
+          }
+        }
+
+        if (amrmToken == null) {
+          throw new IOException(
+              "No amrmToken found for app attempt " + attemptId);
+        }
+        if (user == null) {
+          throw new IOException("No user found for app attempt " + attemptId);
+        }
+
+        Token<AMRMTokenIdentifier> localToken =
+            this.secretManager.createAndGetAMRMToken(attemptId);
+
+        initializePipeline(attemptId, user, amrmToken, localToken,
+            entry.getValue(), true);
+      } catch (Exception e) {
+        LOG.error("Exception when recovering " + attemptId
+            + ", removing it from NMStateStore and moving on", e);
+        this.nmContext.getNMStateStore().removeAMRMProxyAppContext(attemptId);
+      }
+    }
+  }
+
+  /**
    * This is called by the AMs started on this node to register with the RM.
    * This method does the initial authorization and then forwards the request to
    * the application instance specific intercepter chain.
@@ -221,8 +288,8 @@ public class AMRMProxyService extends AbstractService implements
    * application request processing pipeline.
    *
    * @param request - encapsulates information for starting an AM
-   * @throws IOException
-   * @throws YarnException
+   * @throws IOException if fails
+   * @throws YarnException if fails
    */
   public void processApplicationStartRequest(StartContainerRequest request)
       throws IOException, YarnException {
@@ -257,22 +324,25 @@ public class AMRMProxyService extends AbstractService implements
     request.getContainerLaunchContext().setTokens(
         ByteBuffer.wrap(dob.getData(), 0, dob.getLength()));
 
-    initializePipeline(containerTokenIdentifierForKey.getContainerID()
-        .getApplicationAttemptId(),
-        containerTokenIdentifierForKey.getApplicationSubmitter(),
-        amrmToken, localToken);
+    initializePipeline(appAttemptId,
+        containerTokenIdentifierForKey.getApplicationSubmitter(), amrmToken,
+        localToken, null, false);
   }
 
   /**
    * Initializes the request intercepter pipeline for the specified application.
-   * 
-   * @param applicationAttemptId
-   * @param user
-   * @param amrmToken
+   *
+   * @param applicationAttemptId attempt id
+   * @param user user name
+   * @param amrmToken amrmToken issued by RM
+   * @param localToken amrmToken issued by AMRMProxy
+   * @param recoveredDataMap the recovered states for AMRMProxy from NMSS
+   * @param isRecovery whether this is to recover a previously existing pipeline
    */
   protected void initializePipeline(ApplicationAttemptId applicationAttemptId,
       String user, Token<AMRMTokenIdentifier> amrmToken,
-      Token<AMRMTokenIdentifier> localToken) {
+      Token<AMRMTokenIdentifier> localToken,
+      Map<String, byte[]> recoveredDataMap, boolean isRecovery) {
     RequestInterceptorChainWrapper chainWrapper = null;
     synchronized (applPipelineMap) {
       if (applPipelineMap
@@ -288,11 +358,23 @@ public class AMRMProxyService extends AbstractService implements
             && chainWrapperBackup.getApplicationAttemptId() != null
             && !chainWrapperBackup.getApplicationAttemptId()
                 .equals(applicationAttemptId)) {
+          // TODO: revisit in AMRMProxy HA in YARN-6128
           // Remove the existing pipeline
           LOG.info("Remove the previous pipeline for ApplicationId: "
               + applicationAttemptId.toString());
           RequestInterceptorChainWrapper pipeline =
               applPipelineMap.remove(applicationAttemptId.getApplicationId());
+
+          if (!isRecovery && this.nmContext.getNMStateStore() != null) {
+            try {
+              this.nmContext.getNMStateStore()
+                  .removeAMRMProxyAppContext(applicationAttemptId);
+            } catch (IOException e) {
+              LOG.error("Error removing AMRMProxy application context for "
+                  + applicationAttemptId, e);
+            }
+          }
+
           try {
             pipeline.getRootInterceptor().shutdown();
           } catch (Throwable ex) {
@@ -324,7 +406,27 @@ public class AMRMProxyService extends AbstractService implements
           this.createRequestInterceptorChain();
       interceptorChain.init(createApplicationMasterContext(this.nmContext,
           applicationAttemptId, user, amrmToken, localToken));
+      if (isRecovery) {
+        if (recoveredDataMap == null) {
+          throw new YarnRuntimeException(
+              "null recoveredDataMap received for recover");
+        }
+        interceptorChain.recover(recoveredDataMap);
+      }
       chainWrapper.init(interceptorChain, applicationAttemptId);
+
+      if (!isRecovery && this.nmContext.getNMStateStore() != null) {
+        try {
+          this.nmContext.getNMStateStore().storeAMRMProxyAppContextEntry(
+              applicationAttemptId, NMSS_USER_KEY, user.getBytes("UTF-8"));
+          this.nmContext.getNMStateStore().storeAMRMProxyAppContextEntry(
+              applicationAttemptId, NMSS_AMRMTOKEN_KEY,
+              amrmToken.encodeToUrlString().getBytes("UTF-8"));
+        } catch (IOException e) {
+          LOG.error("Error storing AMRMProxy application context entry for "
+              + applicationAttemptId, e);
+        }
+      }
     } catch (Exception e) {
       this.applPipelineMap.remove(applicationAttemptId.getApplicationId());
       throw e;
@@ -335,7 +437,7 @@ public class AMRMProxyService extends AbstractService implements
    * Shuts down the request processing pipeline for the specified application
    * attempt id.
    *
-   * @param applicationId
+   * @param applicationId application id
    */
   protected void stopApplication(ApplicationId applicationId) {
     Preconditions.checkArgument(applicationId != null,
@@ -362,6 +464,17 @@ public class AMRMProxyService extends AbstractService implements
             "Failed to shutdown the request processing pipeline for app:"
                 + applicationId, ex);
       }
+
+      // Remove the app context from NMSS after the interceptors are shutdown
+      if (this.nmContext.getNMStateStore() != null) {
+        try {
+          this.nmContext.getNMStateStore()
+              .removeAMRMProxyAppContext(pipeline.getApplicationAttemptId());
+        } catch (IOException e) {
+          LOG.error("Error removing AMRMProxy application context for "
+              + applicationId, e);
+        }
+      }
     }
   }
 
@@ -383,12 +496,24 @@ public class AMRMProxyService extends AbstractService implements
       // Do not propagate this info back to AM
       allocateResponse.setAMRMToken(null);
 
-      org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> newTokenId =
+      org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> newToken =
           new org.apache.hadoop.security.token.Token<AMRMTokenIdentifier>(
               token.getIdentifier().array(), token.getPassword().array(),
               new Text(token.getKind()), new Text(token.getService()));
 
-      context.setAMRMToken(newTokenId);
+      context.setAMRMToken(newToken);
+
+      // Update the AMRMToken in context map in NM state store
+      if (this.nmContext.getNMStateStore() != null) {
+        try {
+          this.nmContext.getNMStateStore().storeAMRMProxyAppContextEntry(
+              context.getApplicationAttemptId(), NMSS_AMRMTOKEN_KEY,
+              newToken.encodeToUrlString().getBytes("UTF-8"));
+        } catch (IOException e) {
+          LOG.error("Error storing AMRMProxy application context entry for "
+              + context.getApplicationAttemptId(), e);
+        }
+      }
     }
 
     // Check if the local AMRMToken is rolled up and update the context and
@@ -431,7 +556,7 @@ public class AMRMProxyService extends AbstractService implements
 
   /**
    * Gets the Request intercepter chains for all the applications.
-   * 
+   *
    * @return the request intercepter chains.
    */
   protected Map<ApplicationId, RequestInterceptorChainWrapper> getPipelines() {
@@ -489,7 +614,7 @@ public class AMRMProxyService extends AbstractService implements
   /**
    * Returns the comma separated intercepter class names from the configuration.
    *
-   * @param conf
+   * @param conf configuration
    * @return the intercepter class names as an instance of ArrayList
    */
   private List<String> getInterceptorClassNames(Configuration conf) {
@@ -518,7 +643,7 @@ public class AMRMProxyService extends AbstractService implements
    * processing pipeline.
    *
    * @return the the intercepter wrapper instance
-   * @throws YarnException
+   * @throws YarnException if fails
    */
   private RequestInterceptorChainWrapper authorizeAndGetInterceptorChain()
       throws YarnException {
@@ -612,8 +737,8 @@ public class AMRMProxyService extends AbstractService implements
     /**
      * Initializes the wrapper with the specified parameters.
      * 
-     * @param rootInterceptor
-     * @param applicationAttemptId
+     * @param rootInterceptor the root request intercepter
+     * @param applicationAttemptId attempt id
      */
     public synchronized void init(RequestInterceptor rootInterceptor,
         ApplicationAttemptId applicationAttemptId) {
@@ -623,7 +748,7 @@ public class AMRMProxyService extends AbstractService implements
 
     /**
      * Gets the root request intercepter.
-     * 
+     *
      * @return the root request intercepter
      */
     public synchronized RequestInterceptor getRootInterceptor() {
@@ -632,7 +757,7 @@ public class AMRMProxyService extends AbstractService implements
 
     /**
      * Gets the application attempt identifier.
-     * 
+     *
      * @return the application attempt identifier
      */
     public synchronized ApplicationAttemptId getApplicationAttemptId() {
@@ -641,7 +766,7 @@ public class AMRMProxyService extends AbstractService implements
 
     /**
      * Gets the application identifier.
-     * 
+     *
      * @return the application identifier
      */
     public synchronized ApplicationId getApplicationId() {
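
For reference, the AMRMToken entry stored in the allocate path above is the URL-encoded token string in UTF-8. A minimal sketch of the matching decode on the recovery path (not part of this hunk; the recoveredDataMap variable and the enclosing method, assumed to throw IOException, are illustrative only):

  // Sketch: rebuild the token that was stored under NMSS_AMRMTOKEN_KEY
  byte[] data = recoveredDataMap.get(NMSS_AMRMTOKEN_KEY);
  if (data != null) {
    org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> amrmToken =
        new org.apache.hadoop.security.token.Token<>();
    amrmToken.decodeFromUrlString(
        new String(data, java.nio.charset.StandardCharsets.UTF_8));
    // the recovered token can then be set back on the application context
  }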

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49aa60e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyTokenSecretManager.java
index d09ce41..aa3c70f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyTokenSecretManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyTokenSecretManager.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.amrmproxy;
 
+import java.io.IOException;
 import java.security.SecureRandom;
 import java.util.HashSet;
 import java.util.Set;
@@ -37,6 +38,9 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
+import org.apache.hadoop.yarn.server.api.records.MasterKey;
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredAMRMProxyState;
 import org.apache.hadoop.yarn.server.security.MasterKeyData;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -60,17 +64,24 @@ public class AMRMProxyTokenSecretManager extends
   private final Lock writeLock = readWriteLock.writeLock();
 
   private final Timer timer;
-  private final long rollingInterval;
-  private final long activationDelay;
+  private long rollingInterval;
+  private long activationDelay;
+
+  private NMStateStoreService nmStateStore;
 
   private final Set<ApplicationAttemptId> appAttemptSet =
       new HashSet<ApplicationAttemptId>();
 
   /**
    * Create an {@link AMRMProxyTokenSecretManager}.
+   * @param nmStateStoreService NM state store
    */
-  public AMRMProxyTokenSecretManager(Configuration conf) {
+  public AMRMProxyTokenSecretManager(NMStateStoreService nmStateStoreService) {
     this.timer = new Timer();
+    this.nmStateStore = nmStateStoreService;
+  }
+
+  public void init(Configuration conf) {
     this.rollingInterval =
         conf.getLong(
             YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,
@@ -94,6 +105,14 @@ public class AMRMProxyTokenSecretManager extends
   public void start() {
     if (this.currentMasterKey == null) {
       this.currentMasterKey = createNewMasterKey();
+      if (this.nmStateStore != null) {
+        try {
+          this.nmStateStore.storeAMRMProxyCurrentMasterKey(
+              this.currentMasterKey.getMasterKey());
+        } catch (IOException e) {
+          LOG.error("Unable to update current master key in state store", e);
+        }
+      }
     }
     this.timer.scheduleAtFixedRate(new MasterKeyRoller(), rollingInterval,
         rollingInterval);
@@ -103,6 +122,11 @@ public class AMRMProxyTokenSecretManager extends
     this.timer.cancel();
   }
 
+  @VisibleForTesting
+  public void setNMStateStoreService(NMStateStoreService nmStateStoreService) {
+    this.nmStateStore = nmStateStoreService;
+  }
+
   public void applicationMasterFinished(ApplicationAttemptId appAttemptId) {
     this.writeLock.lock();
     try {
@@ -122,11 +146,21 @@ public class AMRMProxyTokenSecretManager extends
   }
 
   @Private
-  void rollMasterKey() {
+  @VisibleForTesting
+  public void rollMasterKey() {
     this.writeLock.lock();
     try {
       LOG.info("Rolling master-key for amrm-tokens");
       this.nextMasterKey = createNewMasterKey();
+      if (this.nmStateStore != null) {
+        try {
+          this.nmStateStore
+              .storeAMRMProxyNextMasterKey(this.nextMasterKey.getMasterKey());
+        } catch (IOException e) {
+          LOG.error("Unable to update next master key in state store", e);
+        }
+      }
+
       this.timer.schedule(new NextKeyActivator(), this.activationDelay);
     } finally {
       this.writeLock.unlock();
@@ -140,6 +174,8 @@ public class AMRMProxyTokenSecretManager extends
     }
   }
 
+  @Private
+  @VisibleForTesting
   public void activateNextMasterKey() {
     this.writeLock.lock();
     try {
@@ -147,6 +183,15 @@ public class AMRMProxyTokenSecretManager extends
           + this.nextMasterKey.getMasterKey().getKeyId());
       this.currentMasterKey = this.nextMasterKey;
       this.nextMasterKey = null;
+      if (this.nmStateStore != null) {
+        try {
+          this.nmStateStore.storeAMRMProxyCurrentMasterKey(
+              this.currentMasterKey.getMasterKey());
+          this.nmStateStore.storeAMRMProxyNextMasterKey(null);
+        } catch (IOException e) {
+          LOG.error("Unable to update current master key in state store", e);
+        }
+      }
     } finally {
       this.writeLock.unlock();
     }
@@ -239,6 +284,17 @@ public class AMRMProxyTokenSecretManager extends
 
   @Private
   @VisibleForTesting
+  public MasterKeyData getCurrentMasterKeyData() {
+    this.readLock.lock();
+    try {
+      return this.currentMasterKey;
+    } finally {
+      this.readLock.unlock();
+    }
+  }
+
+  @Private
+  @VisibleForTesting
   public MasterKeyData getNextMasterKeyData() {
     this.readLock.lock();
     try {
@@ -262,4 +318,33 @@ public class AMRMProxyTokenSecretManager extends
       this.readLock.unlock();
     }
   }
+
+  /**
+   * Recover the secret manager state from the NM state store. Called after
+   * serviceInit and before serviceStart.
+   *
+   * @param state the state to recover from
+   */
+  public void recover(RecoveredAMRMProxyState state) {
+    if (state != null) {
+      // recover the current master key
+      MasterKey currentKey = state.getCurrentMasterKey();
+      if (currentKey != null) {
+        this.currentMasterKey = new MasterKeyData(currentKey,
+            createSecretKey(currentKey.getBytes().array()));
+      } else {
+        LOG.warn("No current master key recovered from NM StateStore"
+            + " for AMRMProxyTokenSecretManager");
+      }
+
+      // recover the next master key if not null
+      MasterKey nextKey = state.getNextMasterKey();
+      if (nextKey != null) {
+        this.nextMasterKey = new MasterKeyData(nextKey,
+            createSecretKey(nextKey.getBytes().array()));
+        this.timer.schedule(new NextKeyActivator(), this.activationDelay);
+      }
+    }
+  }
+
 }
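
With this change the secret manager takes the NM state store in its constructor and gains an explicit recover step between init and start. A minimal usage sketch, assuming an already started NMStateStoreService named stateStore (it mirrors what the updated tests further down in this patch do):

  AMRMProxyTokenSecretManager secretManager =
      new AMRMProxyTokenSecretManager(stateStore);
  secretManager.init(conf);
  // Restore the current/next master keys persisted before the restart;
  // loadAMRMProxyState() may throw IOException.
  secretManager.recover(stateStore.loadAMRMProxyState());
  secretManager.start();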

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49aa60e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AbstractRequestInterceptor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AbstractRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AbstractRequestInterceptor.java
index e6c9bbd..93b42cb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AbstractRequestInterceptor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AbstractRequestInterceptor.java
@@ -18,16 +18,17 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.amrmproxy;
 
-import org.apache.hadoop.conf.Configuration;
+import java.io.IOException;
+import java.util.Map;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.api.protocolrecords.DistributedSchedulingAllocateRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.DistributedSchedulingAllocateResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterDistributedSchedulingAMResponse;
-
-import java.io.IOException;
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
 
 /**
  * Implements the RequestInterceptor interface and provides common functionality
@@ -83,6 +84,16 @@ public abstract class AbstractRequestInterceptor implements
   }
 
   /**
+   * Recover {@link RequestInterceptor} state from store.
+   */
+  @Override
+  public void recover(Map<String, byte[]> recoveredDataMap) {
+    if (this.nextInterceptor != null) {
+      this.nextInterceptor.recover(recoveredDataMap);
+    }
+  }
+
+  /**
    * Disposes the {@link RequestInterceptor}.
    */
   @Override
@@ -113,8 +124,8 @@ public abstract class AbstractRequestInterceptor implements
    *
    * @param request ApplicationMaster allocate request
   * @return Distributed Scheduler Allocate Response
-   * @throws YarnException
-   * @throws IOException
+   * @throws YarnException if fails
+   * @throws IOException if fails
    */
   @Override
   public DistributedSchedulingAllocateResponse allocateForDistributedScheduling(
@@ -130,8 +141,8 @@ public abstract class AbstractRequestInterceptor implements
    *
    * @param request ApplicationMaster registration request
    * @return Distributed Scheduler Register Response
-   * @throws YarnException
-   * @throws IOException
+   * @throws YarnException if fails
+   * @throws IOException if fails
    */
   @Override
   public RegisterDistributedSchedulingAMResponse
@@ -141,4 +152,16 @@ public abstract class AbstractRequestInterceptor implements
     return (this.nextInterceptor != null) ? this.nextInterceptor
         .registerApplicationMasterForDistributedScheduling(request) : null;
   }
+
+  /**
+   * A helper method for getting NM state store.
+   *
+   * @return the NMSS instance
+   */
+  public NMStateStoreService getNMStateStore() {
+    if (this.appContext == null || this.appContext.getNMCotext() == null) {
+      return null;
+    }
+    return this.appContext.getNMCotext().getNMStateStore();
+  }
 }
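
A concrete intercepter can combine getNMStateStore() with the recover() hook to persist its own entries and restore them after an NM restart. The following is a rough sketch rather than code from this patch: the key name and payload are invented, imports of the amrmproxy classes are omitted, and the getApplicationContext() accessor of AbstractRequestInterceptor is assumed.

  import java.io.IOException;
  import java.util.Map;

  public abstract class ExampleStatefulInterceptor
      extends AbstractRequestInterceptor {

    // Hypothetical entry name, not defined by this patch
    private static final String NMSS_EXAMPLE_KEY = "ExampleInterceptorState";
    private byte[] state;

    @Override
    public void recover(Map<String, byte[]> recoveredDataMap) {
      super.recover(recoveredDataMap); // keep forwarding down the chain
      byte[] data = recoveredDataMap.get(NMSS_EXAMPLE_KEY);
      if (data != null) {
        this.state = data; // restore in-memory state after NM restart
      }
    }

    protected void saveState(byte[] data) throws IOException {
      this.state = data;
      if (getNMStateStore() != null) { // may be null, e.g. no NM context
        getNMStateStore().storeAMRMProxyAppContextEntry(
            getApplicationContext().getApplicationAttemptId(),
            NMSS_EXAMPLE_KEY, data);
      }
    }
  }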

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49aa60e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/RequestInterceptor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/RequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/RequestInterceptor.java
index 5995af1..950bdcc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/RequestInterceptor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/RequestInterceptor.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.amrmproxy;
 
+import java.util.Map;
+
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.yarn.server.api.DistributedSchedulingAMProtocol;
 
@@ -32,11 +34,25 @@ public interface RequestInterceptor extends DistributedSchedulingAMProtocol,
    * This method is called for initializing the intercepter. This is guaranteed
    * to be called only once in the lifetime of this instance.
    *
-   * @param ctx
+   * @param ctx AMRMProxy application context
    */
   void init(AMRMProxyApplicationContext ctx);
 
   /**
+   * Recover intercepter state when NM recovery is enabled. AMRMProxy will
+   * recover the data map into
+   * AMRMProxyApplicationContext.getRecoveredDataMap(). All intercepters should
+   * recover state from it.
+   *
+   * For example, registerRequest has to be saved by the last intercepter (i.e.
+   * the one that actually connects to RM), in order to re-register when RM
+   * fails over.
+   *
+   * @param recoveredDataMap states for all intercepters recovered from NMSS
+   */
+  void recover(Map<String, byte[]> recoveredDataMap);
+
+  /**
    * This method is called to release the resources held by the intercepter.
    * This will be called when the application pipeline is being destroyed. The
    * concrete implementations should dispose the resources and forward the
@@ -51,7 +67,7 @@ public interface RequestInterceptor extends DistributedSchedulingAMProtocol,
    * send the messages to the resource manager service and so the last
    * intercepter will not receive this method call.
    *
-   * @param nextInterceptor
+   * @param nextInterceptor the next intercepter to set
    */
   void setNextInterceptor(RequestInterceptor nextInterceptor);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49aa60e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index cbf617b..b1d634a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -341,6 +341,10 @@ public class ContainerManagerImpl extends CompositeService implements
       rsrcLocalizationSrvc.recoverLocalizedResources(
           stateStore.loadLocalizationState());
 
+      if (this.amrmProxyEnabled) {
+        this.getAMRMProxyService().recover();
+      }
+
       RecoveredApplicationsState appsState = stateStore.loadApplicationsState();
       for (ContainerManagerApplicationProto proto :
            appsState.getApplications()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49aa60e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
index d1b9d65..a0502df 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
@@ -24,12 +24,15 @@ import static org.fusesource.leveldbjni.JniDBFactory.bytes;
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Timer;
 import java.util.TimerTask;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -81,9 +84,8 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
 
   private static final String DB_NAME = "yarn-nm-state";
   private static final String DB_SCHEMA_VERSION_KEY = "nm-schema-version";
-  
-  private static final Version CURRENT_VERSION_INFO = Version
-      .newInstance(2, 0);
+
+  private static final Version CURRENT_VERSION_INFO = Version.newInstance(3, 0);
 
   private static final String DELETION_TASK_KEY_PREFIX =
       "DeletionService/deltask_";
@@ -122,6 +124,7 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
 
   private static final String CURRENT_MASTER_KEY_SUFFIX = "CurrentMasterKey";
   private static final String PREV_MASTER_KEY_SUFFIX = "PreviousMasterKey";
+  private static final String NEXT_MASTER_KEY_SUFFIX = "NextMasterKey";
   private static final String NM_TOKENS_KEY_PREFIX = "NMTokens/";
   private static final String NM_TOKENS_CURRENT_MASTER_KEY =
       NM_TOKENS_KEY_PREFIX + CURRENT_MASTER_KEY_SUFFIX;
@@ -136,6 +139,8 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
 
   private static final String LOG_DELETER_KEY_PREFIX = "LogDeleters/";
 
+  private static final String AMRMPROXY_KEY_PREFIX = "AMRMProxy/";
+
   private static final byte[] EMPTY_VALUE = new byte[0];
 
   private DB db;
@@ -1126,6 +1131,177 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   }
 
   @Override
+  public RecoveredAMRMProxyState loadAMRMProxyState() throws IOException {
+    RecoveredAMRMProxyState result = new RecoveredAMRMProxyState();
+    Set<String> unknownKeys = new HashSet<>();
+    LeveldbIterator iter = null;
+    try {
+      iter = new LeveldbIterator(db);
+      iter.seek(bytes(AMRMPROXY_KEY_PREFIX));
+      while (iter.hasNext()) {
+        Entry<byte[], byte[]> entry = iter.peekNext();
+        String key = asString(entry.getKey());
+        if (!key.startsWith(AMRMPROXY_KEY_PREFIX)) {
+          break;
+        }
+
+        String suffix = key.substring(AMRMPROXY_KEY_PREFIX.length());
+        if (suffix.equals(CURRENT_MASTER_KEY_SUFFIX)) {
+          iter.next();
+          result.setCurrentMasterKey(parseMasterKey(entry.getValue()));
+          LOG.info("Recovered for AMRMProxy: current master key id "
+              + result.getCurrentMasterKey().getKeyId());
+
+        } else if (suffix.equals(NEXT_MASTER_KEY_SUFFIX)) {
+          iter.next();
+          result.setNextMasterKey(parseMasterKey(entry.getValue()));
+          LOG.info("Recovered for AMRMProxy: next master key id "
+              + result.getNextMasterKey().getKeyId());
+
+        } else { // Load AMRMProxy application context map for an app attempt
+          // Parse appAttemptId, also handle the unknown keys
+          int idEndPos;
+          ApplicationAttemptId attemptId;
+          try {
+            idEndPos = key.indexOf('/', AMRMPROXY_KEY_PREFIX.length());
+            if (idEndPos < 0) {
+              throw new IOException(
+                  "Unable to determine attemptId in key: " + key);
+            }
+            attemptId = ApplicationAttemptId.fromString(
+                key.substring(AMRMPROXY_KEY_PREFIX.length(), idEndPos));
+          } catch (Exception e) {
+            // Try to move on, for backward/forward compatibility
+            LOG.warn("Unknown key " + key + ", remove and move on", e);
+            // Do this because iter.remove() is not supported here
+            unknownKeys.add(key);
+            continue;
+          }
+          // Parse the context map for the appAttemptId
+          Map<String, byte[]> appContext =
+              loadAMRMProxyAppContextMap(iter, key.substring(0, idEndPos + 1));
+          result.getAppContexts().put(attemptId, appContext);
+
+          LOG.info("Recovered for AMRMProxy: " + attemptId + ", map size "
+              + appContext.size());
+        }
+      }
+    } catch (DBException e) {
+      throw new IOException(e);
+    } finally {
+      if (iter != null) {
+        iter.close();
+      }
+    }
+
+    // Delete all unknown keys
+    try {
+      for (String key : unknownKeys) {
+        db.delete(bytes(key));
+      }
+    } catch (DBException e) {
+      throw new IOException(e);
+    }
+
+    return result;
+  }
+
+  private Map<String, byte[]> loadAMRMProxyAppContextMap(LeveldbIterator iter,
+      String keyPrefix) throws IOException {
+    Map<String, byte[]> appContextMap = new HashMap<>();
+    while (iter.hasNext()) {
+      Entry<byte[], byte[]> entry = iter.peekNext();
+      String key = asString(entry.getKey());
+      if (!key.startsWith(keyPrefix)) {
+        break;
+      }
+      iter.next();
+      String suffix = key.substring(keyPrefix.length());
+      byte[] data = entry.getValue();
+      appContextMap.put(suffix, Arrays.copyOf(data, data.length));
+    }
+    return appContextMap;
+  }
+
+  @Override
+  public void storeAMRMProxyCurrentMasterKey(MasterKey key) throws IOException {
+    storeMasterKey(AMRMPROXY_KEY_PREFIX + CURRENT_MASTER_KEY_SUFFIX, key);
+  }
+
+  @Override
+  public void storeAMRMProxyNextMasterKey(MasterKey key) throws IOException {
+    String dbkey = AMRMPROXY_KEY_PREFIX + NEXT_MASTER_KEY_SUFFIX;
+    if (key == null) {
+      // When key is null, delete the entry instead
+      try {
+        db.delete(bytes(dbkey));
+      } catch (DBException e) {
+        throw new IOException(e);
+      }
+      return;
+    }
+    storeMasterKey(dbkey, key);
+  }
+
+  @Override
+  public void storeAMRMProxyAppContextEntry(ApplicationAttemptId attempt,
+      String key, byte[] data) throws IOException {
+    String fullkey = AMRMPROXY_KEY_PREFIX + attempt + "/" + key;
+    try {
+      db.put(bytes(fullkey), data);
+    } catch (DBException e) {
+      throw new IOException(e);
+    }
+  }
+
+  @Override
+  public void removeAMRMProxyAppContextEntry(ApplicationAttemptId attempt,
+      String key) throws IOException {
+    String fullkey = AMRMPROXY_KEY_PREFIX + attempt + "/" + key;
+    try {
+      db.delete(bytes(fullkey));
+    } catch (DBException e) {
+      throw new IOException(e);
+    }
+  }
+
+  @Override
+  public void removeAMRMProxyAppContext(ApplicationAttemptId attempt)
+      throws IOException {
+    Set<String> candidates = new HashSet<>();
+    String keyPrefix = AMRMPROXY_KEY_PREFIX + attempt + "/";
+    LeveldbIterator iter = null;
+    try {
+      iter = new LeveldbIterator(db);
+      iter.seek(bytes(keyPrefix));
+      while (iter.hasNext()) {
+        Entry<byte[], byte[]> entry = iter.next();
+        String key = asString(entry.getKey());
+        if (!key.startsWith(keyPrefix)) {
+          break;
+        }
+        // Do this because iter.remove() is not supported here
+        candidates.add(key);
+      }
+    } catch (DBException e) {
+      throw new IOException(e);
+    } finally {
+      if (iter != null) {
+        iter.close();
+      }
+    }
+
+    // Delete all candidate keys
+    try {
+      for (String key : candidates) {
+        db.delete(bytes(key));
+      }
+    } catch (DBException e) {
+      throw new IOException(e);
+    }
+  }
+
+  @Override
   protected void initStorage(Configuration conf)
       throws IOException {
     db = openDatabase(conf);
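
With <attemptId> and <entryKey> as placeholders, the leveldb key layout written by the methods above is:

  AMRMProxy/CurrentMasterKey
  AMRMProxy/NextMasterKey
  AMRMProxy/<attemptId>/<entryKey>   (one row per storeAMRMProxyAppContextEntry call)

loadAMRMProxyState() seeks to the "AMRMProxy/" prefix and stops at the first key outside it; keys under the prefix that cannot be parsed into an attempt id are collected and deleted afterwards, since LeveldbIterator does not support remove().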

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49aa60e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
index 4bcdf5c..96c3f9e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java
@@ -229,6 +229,35 @@ public class NMNullStateStoreService extends NMStateStoreService {
   }
 
   @Override
+  public RecoveredAMRMProxyState loadAMRMProxyState() throws IOException {
+    throw new UnsupportedOperationException(
+        "Recovery not supported by this state store");
+  }
+
+  @Override
+  public void storeAMRMProxyCurrentMasterKey(MasterKey key) throws IOException {
+  }
+
+  @Override
+  public void storeAMRMProxyNextMasterKey(MasterKey key) throws IOException {
+  }
+
+  @Override
+  public void storeAMRMProxyAppContextEntry(ApplicationAttemptId attempt,
+      String key, byte[] data) throws IOException {
+  }
+
+  @Override
+  public void removeAMRMProxyAppContextEntry(ApplicationAttemptId attempt,
+      String key) throws IOException {
+  }
+
+  @Override
+  public void removeAMRMProxyAppContext(ApplicationAttemptId attempt)
+      throws IOException {
+  }
+
+  @Override
   protected void initStorage(Configuration conf) throws IOException {
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49aa60e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
index 9dd1eb0..9f87279 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java
@@ -266,6 +266,41 @@ public abstract class NMStateStoreService extends AbstractService {
     }
   }
 
+  /**
+   * Recovered states for AMRMProxy.
+   */
+  public static class RecoveredAMRMProxyState {
+    private MasterKey currentMasterKey;
+    private MasterKey nextMasterKey;
+    // For each app, stores amrmToken, user name, as well as various AMRMProxy
+    // intercepter states
+    private Map<ApplicationAttemptId, Map<String, byte[]>> appContexts;
+
+    public RecoveredAMRMProxyState() {
+      appContexts = new HashMap<>();
+    }
+
+    public MasterKey getCurrentMasterKey() {
+      return currentMasterKey;
+    }
+
+    public MasterKey getNextMasterKey() {
+      return nextMasterKey;
+    }
+
+    public Map<ApplicationAttemptId, Map<String, byte[]>> getAppContexts() {
+      return appContexts;
+    }
+
+    public void setCurrentMasterKey(MasterKey currentKey) {
+      currentMasterKey = currentKey;
+    }
+
+    public void setNextMasterKey(MasterKey nextKey) {
+      nextMasterKey = nextKey;
+    }
+  }
+
   /** Initialize the state storage */
   @Override
   public void serviceInit(Configuration conf) throws IOException {
@@ -601,6 +636,57 @@ public abstract class NMStateStoreService extends AbstractService {
   public abstract void removeLogDeleter(ApplicationId appId)
       throws IOException;
 
+  /**
+   * Load the state of AMRMProxy.
+   * @return recovered state of AMRMProxy
+   * @throws IOException if fails
+   */
+  public abstract RecoveredAMRMProxyState loadAMRMProxyState()
+      throws IOException;
+
+  /**
+   * Record the current AMRMProxyTokenSecretManager master key.
+   * @param key the current master key
+   * @throws IOException if fails
+   */
+  public abstract void storeAMRMProxyCurrentMasterKey(MasterKey key)
+      throws IOException;
+
+  /**
+   * Record the next AMRMProxyTokenSecretManager master key.
+   * @param key the next master key
+   * @throws IOException if fails
+   */
+  public abstract void storeAMRMProxyNextMasterKey(MasterKey key)
+      throws IOException;
+
+  /**
+   * Add a context entry for an application attempt in AMRMProxyService.
+   * @param attempt app attempt ID
+   * @param key key string
+   * @param data state data to store
+   * @throws IOException if fails
+   */
+  public abstract void storeAMRMProxyAppContextEntry(
+      ApplicationAttemptId attempt, String key, byte[] data) throws IOException;
+
+  /**
+   * Remove a context entry for an application attempt in AMRMProxyService.
+   * @param attempt attempt ID
+   * @param key key string
+   * @throws IOException if fails
+   */
+  public abstract void removeAMRMProxyAppContextEntry(
+      ApplicationAttemptId attempt, String key) throws IOException;
+
+  /**
+   * Remove the entire context map for an application attempt in
+   * AMRMProxyService.
+   * @param attempt attempt ID
+   * @throws IOException if fails
+   */
+  public abstract void removeAMRMProxyAppContext(ApplicationAttemptId attempt)
+      throws IOException;
 
   protected abstract void initStorage(Configuration conf) throws IOException;
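
Taken together, a recovery pass over this state would look roughly like the sketch below. It only illustrates the calling side; the actual AMRMProxyService recovery and pipeline re-creation are not shown in this hunk, and the enclosing method is assumed to throw IOException.

  RecoveredAMRMProxyState state = stateStore.loadAMRMProxyState();
  secretManager.recover(state); // restore the master keys first
  for (Map.Entry<ApplicationAttemptId, Map<String, byte[]>> entry
      : state.getAppContexts().entrySet()) {
    ApplicationAttemptId attemptId = entry.getKey();
    Map<String, byte[]> recoveredDataMap = entry.getValue();
    // Re-create the intercepter pipeline for attemptId and pass the map to
    // RequestInterceptor#recover(recoveredDataMap) down the chain.
  }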
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49aa60e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
index 6f5009e..a24c83b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
@@ -39,6 +39,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
@@ -62,12 +63,13 @@ import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
 import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
+import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext;
 import org.apache.hadoop.yarn.server.nodemanager.NodeResourceMonitor;
 import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdater;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManager;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
-
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMMemoryStateStoreService;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
 import org.apache.hadoop.yarn.server.scheduler.OpportunisticContainerAllocator;
 import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager;
@@ -87,17 +89,15 @@ import org.junit.Before;
 public abstract class BaseAMRMProxyTest {
   private static final Log LOG = LogFactory
       .getLog(BaseAMRMProxyTest.class);
-  /**
-   * The AMRMProxyService instance that will be used by all the test cases
-   */
+  // The AMRMProxyService instance that will be used by all the test cases
   private MockAMRMProxyService amrmProxyService;
-  /**
-   * Thread pool used for asynchronous operations
-   */
+
+  // Thread pool used for asynchronous operations
   private static ExecutorService threadpool = Executors
       .newCachedThreadPool();
   private Configuration conf;
   private AsyncDispatcher dispatcher;
+  private Context nmContext;
 
   protected MockAMRMProxyService getAMRMProxyService() {
     Assert.assertNotNull(this.amrmProxyService);
@@ -105,32 +105,40 @@ public abstract class BaseAMRMProxyTest {
   }
 
   @Before
-  public void setUp() {
-    this.conf = new YarnConfiguration();
-    this.conf.setBoolean(YarnConfiguration.AMRM_PROXY_ENABLED, true);
+  public void setUp() throws IOException {
+    this.conf = createConfiguration();
+    this.dispatcher = new AsyncDispatcher();
+    this.dispatcher.init(this.conf);
+    this.dispatcher.start();
+    createAndStartAMRMProxyService(this.conf);
+  }
+
+  protected YarnConfiguration createConfiguration() {
+    YarnConfiguration config = new YarnConfiguration();
+    config.setBoolean(YarnConfiguration.AMRM_PROXY_ENABLED, true);
     String mockPassThroughInterceptorClass =
         PassThroughRequestInterceptor.class.getName();
 
     // Create a request intercepter pipeline for testing. The last one in the
     // chain will call the mock resource manager. The others in the chain will
     // simply forward it to the next one in the chain
-    this.conf.set(YarnConfiguration.AMRM_PROXY_INTERCEPTOR_CLASS_PIPELINE,
-        mockPassThroughInterceptorClass + ","
-            + mockPassThroughInterceptorClass + ","
-            + mockPassThroughInterceptorClass + ","
+    config.set(YarnConfiguration.AMRM_PROXY_INTERCEPTOR_CLASS_PIPELINE,
+        mockPassThroughInterceptorClass + "," + mockPassThroughInterceptorClass
+            + "," + mockPassThroughInterceptorClass + ","
             + MockRequestInterceptor.class.getName());
 
-    this.dispatcher = new AsyncDispatcher();
-    this.dispatcher.init(this.conf);
-    this.dispatcher.start();
-    createAndStartAMRMProxyService(this.conf);
+    config.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
+    return config;
   }
 
   @After
   public void tearDown() {
-    amrmProxyService.stop();
-    amrmProxyService = null;
+    this.amrmProxyService.stop();
+    this.amrmProxyService = null;
     this.dispatcher.stop();
+    if (this.nmContext.getNMStateStore() != null) {
+      this.nmContext.getNMStateStore().stop();
+    }
   }
 
   protected ExecutorService getThreadPool() {
@@ -141,17 +149,33 @@ public abstract class BaseAMRMProxyTest {
     return this.conf;
   }
 
-  protected void createAndStartAMRMProxyService(Configuration config) {
+  protected AsyncDispatcher getDispatcher() {
+    return this.dispatcher;
+  }
+
+  protected void createAndStartAMRMProxyService(Configuration config)
+      throws IOException {
     // Stop the existing instance first if not null
     if (this.amrmProxyService != null) {
       this.amrmProxyService.stop();
     }
+    if (this.nmContext == null) {
+      this.nmContext = createContext();
+    }
     this.amrmProxyService =
-        new MockAMRMProxyService(new NullContext(), dispatcher);
+        new MockAMRMProxyService(this.nmContext, this.dispatcher);
     this.amrmProxyService.init(config);
+    this.amrmProxyService.recover();
     this.amrmProxyService.start();
   }
 
+  protected Context createContext() {
+    NMMemoryStateStoreService stateStore = new NMMemoryStateStoreService();
+    stateStore.init(this.conf);
+    stateStore.start();
+    return new NMContext(null, null, null, null, stateStore, false, this.conf);
+  }
+
   /**
    * This helper method will invoke the specified function in parallel for each
    * end point in the specified list using a thread pool and return the
@@ -579,6 +603,13 @@ public abstract class BaseAMRMProxyTest {
       super(nmContext, dispatcher);
     }
 
+    @Override
+    protected void serviceStart() throws Exception {
+      // Override this method and do nothing, so that the base class does not
+      // start listening on its server end point
+      getSecretManager().start();
+    }
+
     /**
      * This method is used by the test code to initialize the pipeline. In the
      * actual service, the initialization is called by the
@@ -588,7 +619,8 @@ public abstract class BaseAMRMProxyTest {
      * @param user
      */
     public void initApp(ApplicationAttemptId applicationId, String user) {
-      super.initializePipeline(applicationId, user, null, null);
+      super.initializePipeline(applicationId, user,
+          new Token<AMRMTokenIdentifier>(), null, null, false);
     }
 
     public void stopApp(ApplicationId applicationId) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49aa60e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
index fa17f26..e734bdd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
@@ -26,9 +26,12 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -38,6 +41,7 @@ import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.server.nodemanager.amrmproxy.AMRMProxyService.RequestInterceptorChainWrapper;
 import org.apache.hadoop.yarn.util.Records;
 import org.junit.Assert;
@@ -48,6 +52,8 @@ public class TestAMRMProxyService extends BaseAMRMProxyTest {
   private static final Log LOG = LogFactory
       .getLog(TestAMRMProxyService.class);
 
+  private static MockResourceManagerFacade mockRM;
+
   /**
    * Test if the pipeline is created properly.
    */
@@ -99,9 +105,11 @@ public class TestAMRMProxyService extends BaseAMRMProxyTest {
 
   /**
    * Tests the case when interceptor pipeline initialization fails.
+   *
+   * @throws IOException
    */
   @Test
-  public void testInterceptorInitFailure() {
+  public void testInterceptorInitFailure() throws IOException {
     Configuration conf = this.getConf();
     // Override with a bad interceptor configuration
     conf.set(YarnConfiguration.AMRM_PROXY_INTERCEPTOR_CLASS_PIPELINE,
@@ -434,8 +442,8 @@ public class TestAMRMProxyService extends BaseAMRMProxyTest {
     // Second Attempt
 
     applicationAttemptId = ApplicationAttemptId.newInstance(appId, 2);
-    getAMRMProxyService().initializePipeline(applicationAttemptId, user, null,
-        null);
+    getAMRMProxyService().initializePipeline(applicationAttemptId, user,
+        new Token<AMRMTokenIdentifier>(), null, null, false);
 
     RequestInterceptorChainWrapper chain2 =
         getAMRMProxyService().getPipelines().get(appId);
@@ -559,4 +567,109 @@ public class TestAMRMProxyService extends BaseAMRMProxyTest {
     Assert.assertEquals(relList.size(),
         containersForReleasedContainerIds.size());
   }
+
+  /**
+   * Test AMRMProxy restart with recovery.
+   */
+  @Test
+  public void testRecovery() throws YarnException, Exception {
+
+    Configuration conf = createConfiguration();
+    // Use the MockRequestInterceptorAcrossRestart instead for the chain
+    conf.set(YarnConfiguration.AMRM_PROXY_INTERCEPTOR_CLASS_PIPELINE,
+        MockRequestInterceptorAcrossRestart.class.getName());
+
+    mockRM = new MockResourceManagerFacade(new YarnConfiguration(conf), 0);
+
+    createAndStartAMRMProxyService(conf);
+
+    int testAppId1 = 1;
+    RegisterApplicationMasterResponse registerResponse =
+        registerApplicationMaster(testAppId1);
+    Assert.assertNotNull(registerResponse);
+    Assert.assertEquals(Integer.toString(testAppId1),
+        registerResponse.getQueue());
+
+    int testAppId2 = 2;
+    registerResponse = registerApplicationMaster(testAppId2);
+    Assert.assertNotNull(registerResponse);
+    Assert.assertEquals(Integer.toString(testAppId2),
+        registerResponse.getQueue());
+
+    AllocateResponse allocateResponse = allocate(testAppId2);
+    Assert.assertNotNull(allocateResponse);
+
+    // At the time of the restart, app1 has only registered its AM, while app2
+    // has already done one allocate. Both applications should be recovered.
+    createAndStartAMRMProxyService(conf);
+    Assert.assertTrue(getAMRMProxyService().getPipelines().size() == 2);
+
+    allocateResponse = allocate(testAppId1);
+    Assert.assertNotNull(allocateResponse);
+
+    FinishApplicationMasterResponse finishResponse =
+        finishApplicationMaster(testAppId1, FinalApplicationStatus.SUCCEEDED);
+    Assert.assertNotNull(finishResponse);
+    Assert.assertEquals(true, finishResponse.getIsUnregistered());
+
+    allocateResponse = allocate(testAppId2);
+    Assert.assertNotNull(allocateResponse);
+
+    finishResponse =
+        finishApplicationMaster(testAppId2, FinalApplicationStatus.SUCCEEDED);
+
+    Assert.assertNotNull(finishResponse);
+    Assert.assertEquals(true, finishResponse.getIsUnregistered());
+
+    int testAppId3 = 3;
+    try {
+      // Try to finish an application master that is not registered.
+      finishApplicationMaster(testAppId3, FinalApplicationStatus.SUCCEEDED);
+      Assert
+          .fail("The Mock RM should complain about not knowing the third app");
+    } catch (Throwable ex) {
+    }
+
+    mockRM = null;
+  }
+
+  /**
+   * A mock intercepter implementation that uses the same mockRM instance across
+   * restart.
+   */
+  public static class MockRequestInterceptorAcrossRestart
+      extends AbstractRequestInterceptor {
+
+    public MockRequestInterceptorAcrossRestart() {
+    }
+
+    @Override
+    public void init(AMRMProxyApplicationContext appContext) {
+      super.init(appContext);
+      if (mockRM == null) {
+        throw new RuntimeException("mockRM not initialized yet");
+      }
+    }
+
+    @Override
+    public RegisterApplicationMasterResponse registerApplicationMaster(
+        RegisterApplicationMasterRequest request)
+        throws YarnException, IOException {
+      return mockRM.registerApplicationMaster(request);
+    }
+
+    @Override
+    public FinishApplicationMasterResponse finishApplicationMaster(
+        FinishApplicationMasterRequest request)
+        throws YarnException, IOException {
+      return mockRM.finishApplicationMaster(request);
+    }
+
+    @Override
+    public AllocateResponse allocate(AllocateRequest request)
+        throws YarnException, IOException {
+      return mockRM.allocate(request);
+    }
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49aa60e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyTokenSecretManager.java
index 927563e..db7167d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyTokenSecretManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyTokenSecretManager.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMMemoryStateStoreService;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -40,11 +41,19 @@ public class TestAMRMProxyTokenSecretManager {
 
   private YarnConfiguration conf;
   private AMRMProxyTokenSecretManager secretManager;
+  private NMMemoryStateStoreService stateStore;
 
   @Before
   public void setup() {
     conf = new YarnConfiguration();
-    secretManager = new AMRMProxyTokenSecretManager(conf);
+    conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
+
+    stateStore = new NMMemoryStateStoreService();
+    stateStore.init(conf);
+    stateStore.start();
+
+    secretManager = new AMRMProxyTokenSecretManager(stateStore);
+    secretManager.init(conf);
     secretManager.start();
   }
 
@@ -53,6 +62,9 @@ public class TestAMRMProxyTokenSecretManager {
     if (secretManager != null) {
       secretManager.stop();
     }
+    if (stateStore != null) {
+      stateStore.stop();
+    }
   }
 
   @Test
@@ -78,4 +90,52 @@ public class TestAMRMProxyTokenSecretManager {
     }
   }
 
+  @Test
+  public void testRecovery() throws IOException {
+    ApplicationId appId = ApplicationId.newInstance(1, 1);
+    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
+
+    Token<AMRMTokenIdentifier> localToken =
+        secretManager.createAndGetAMRMToken(attemptId);
+
+    AMRMTokenIdentifier identifier = secretManager.createIdentifier();
+    identifier.readFields(new DataInputStream(
+        new ByteArrayInputStream(localToken.getIdentifier())));
+
+    secretManager.retrievePassword(identifier);
+
+    // Generate next master key
+    secretManager.rollMasterKey();
+
+    // Restart and recover
+    secretManager.stop();
+    secretManager = new AMRMProxyTokenSecretManager(stateStore);
+    secretManager.init(conf);
+    secretManager.recover(stateStore.loadAMRMProxyState());
+    secretManager.start();
+    // Recover the app
+    secretManager.createAndGetAMRMToken(attemptId);
+
+    // Current master key should be recovered, and thus pass here
+    secretManager.retrievePassword(identifier);
+
+    // Roll key, current master key will be replaced
+    secretManager.activateNextMasterKey();
+
+    // Restart and recover
+    secretManager.stop();
+    secretManager = new AMRMProxyTokenSecretManager(stateStore);
+    secretManager.init(conf);
+    secretManager.recover(stateStore.loadAMRMProxyState());
+    secretManager.start();
+    // Recover the app
+    secretManager.createAndGetAMRMToken(attemptId);
+
+    try {
+      secretManager.retrievePassword(identifier);
+      Assert.fail("Expect InvalidToken exception because the "
+          + "old master key should have expired");
+    } catch (InvalidToken e) {
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49aa60e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java
index 15c0e84..0e03994 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java
@@ -20,11 +20,10 @@ package org.apache.hadoop.yarn.server.nodemanager.recovery;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -50,6 +49,7 @@ public class NMMemoryStateStoreService extends NMStateStoreService {
   private RecoveredNMTokensState nmTokenState;
   private RecoveredContainerTokensState containerTokenState;
   private Map<ApplicationId, LogDeleterProto> logDeleterState;
+  private RecoveredAMRMProxyState amrmProxyState;
 
   public NMMemoryStateStoreService() {
     super(NMMemoryStateStoreService.class.getName());
@@ -67,6 +67,7 @@ public class NMMemoryStateStoreService extends NMStateStoreService {
     trackerStates = new HashMap<TrackerKey, TrackerState>();
     deleteTasks = new HashMap<Integer, DeletionServiceDeleteTaskProto>();
     logDeleterState = new HashMap<ApplicationId, LogDeleterProto>();
+    amrmProxyState = new RecoveredAMRMProxyState();
   }
 
   @Override
@@ -417,6 +418,66 @@ public class NMMemoryStateStoreService extends NMStateStoreService {
     logDeleterState.remove(appId);
   }
 
+  @Override
+  public synchronized RecoveredAMRMProxyState loadAMRMProxyState()
+      throws IOException {
+    // return a copy so caller can't modify our state
+    RecoveredAMRMProxyState result = new RecoveredAMRMProxyState();
+    result.setCurrentMasterKey(amrmProxyState.getCurrentMasterKey());
+    result.setNextMasterKey(amrmProxyState.getNextMasterKey());
+    for (Map.Entry<ApplicationAttemptId, Map<String, byte[]>> entry :
+        amrmProxyState.getAppContexts().entrySet()) {
+      result.getAppContexts().put(entry.getKey(),
+          new HashMap<String, byte[]>(entry.getValue()));
+    }
+    return result;
+  }
+
+  @Override
+  public synchronized void storeAMRMProxyCurrentMasterKey(MasterKey key)
+      throws IOException {
+    MasterKeyPBImpl keypb = (MasterKeyPBImpl) key;
+    amrmProxyState.setCurrentMasterKey(new MasterKeyPBImpl(keypb.getProto()));
+  }
+
+  @Override
+  public synchronized void storeAMRMProxyNextMasterKey(MasterKey key)
+      throws IOException {
+    if (key == null) {
+      amrmProxyState.setNextMasterKey(null);
+      return;
+    }
+    MasterKeyPBImpl keypb = (MasterKeyPBImpl) key;
+    amrmProxyState.setNextMasterKey(new MasterKeyPBImpl(keypb.getProto()));
+  }
+
+  @Override
+  public synchronized void storeAMRMProxyAppContextEntry(
+      ApplicationAttemptId attempt, String key, byte[] data)
+      throws IOException {
+    Map<String, byte[]> entryMap = amrmProxyState.getAppContexts().get(attempt);
+    if (entryMap == null) {
+      entryMap = new HashMap<>();
+      amrmProxyState.getAppContexts().put(attempt, entryMap);
+    }
+    entryMap.put(key, Arrays.copyOf(data, data.length));
+  }
+
+  @Override
+  public synchronized void removeAMRMProxyAppContextEntry(
+      ApplicationAttemptId attempt, String key) throws IOException {
+    Map<String, byte[]> entryMap = amrmProxyState.getAppContexts().get(attempt);
+    if (entryMap != null) {
+      entryMap.remove(key);
+    }
+  }
+
+  @Override
+  public synchronized void removeAMRMProxyAppContext(
+      ApplicationAttemptId attempt) throws IOException {
+    amrmProxyState.getAppContexts().remove(attempt);
+  }
+
   private static class TrackerState {
     Map<Path, LocalResourceProto> inProgressMap =
         new HashMap<Path, LocalResourceProto>();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49aa60e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
index 537f849..0133156 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
@@ -20,10 +20,11 @@ package org.apache.hadoop.yarn.server.nodemanager.recovery;
 
 import static org.fusesource.leveldbjni.JniDBFactory.bytes;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
 import static org.mockito.Mockito.isNull;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.timeout;
@@ -33,6 +34,7 @@ import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -65,7 +67,9 @@ import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.Localize
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LogDeleterProto;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
+import org.apache.hadoop.yarn.server.nodemanager.amrmproxy.AMRMProxyTokenSecretManager;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.LocalResourceTrackerState;
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredAMRMProxyState;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredApplicationsState;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerState;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerStatus;
@@ -1016,6 +1020,105 @@ public class TestNMLeveldbStateStoreService {
     assertNull(stateStore.getDB().get(invalidKey));
   }
 
+  @Test
+  public void testAMRMProxyStorage() throws IOException {
+    RecoveredAMRMProxyState state = stateStore.loadAMRMProxyState();
+    assertEquals(state.getCurrentMasterKey(), null);
+    assertEquals(state.getNextMasterKey(), null);
+    assertEquals(state.getAppContexts().size(), 0);
+
+    ApplicationId appId1 = ApplicationId.newInstance(1, 1);
+    ApplicationId appId2 = ApplicationId.newInstance(1, 2);
+    ApplicationAttemptId attemptId1 =
+        ApplicationAttemptId.newInstance(appId1, 1);
+    ApplicationAttemptId attemptId2 =
+        ApplicationAttemptId.newInstance(appId2, 2);
+    String key1 = "key1";
+    String key2 = "key2";
+    byte[] data1 = "data1".getBytes();
+    byte[] data2 = "data2".getBytes();
+
+    AMRMProxyTokenSecretManager secretManager =
+        new AMRMProxyTokenSecretManager(stateStore);
+    secretManager.init(conf);
+    // Generate currentMasterKey
+    secretManager.start();
+
+    try {
+      // Add two applications, each with two data entries
+      stateStore.storeAMRMProxyAppContextEntry(attemptId1, key1, data1);
+      stateStore.storeAMRMProxyAppContextEntry(attemptId2, key1, data1);
+      stateStore.storeAMRMProxyAppContextEntry(attemptId1, key2, data2);
+      stateStore.storeAMRMProxyAppContextEntry(attemptId2, key2, data2);
+
+      // restart state store and verify recovered
+      restartStateStore();
+      secretManager.setNMStateStoreService(stateStore);
+      state = stateStore.loadAMRMProxyState();
+      assertEquals(state.getCurrentMasterKey(),
+          secretManager.getCurrentMasterKeyData().getMasterKey());
+      assertEquals(state.getNextMasterKey(), null);
+      assertEquals(state.getAppContexts().size(), 2);
+      // app1
+      Map<String, byte[]> map = state.getAppContexts().get(attemptId1);
+      assertNotEquals(map, null);
+      assertEquals(map.size(), 2);
+      assertTrue(Arrays.equals(map.get(key1), data1));
+      assertTrue(Arrays.equals(map.get(key2), data2));
+      // app2
+      map = state.getAppContexts().get(attemptId2);
+      assertNotEquals(map, null);
+      assertEquals(map.size(), 2);
+      assertTrue(Arrays.equals(map.get(key1), data1));
+      assertTrue(Arrays.equals(map.get(key2), data2));
+
+      // Generate next master key and remove one entry of app2
+      secretManager.rollMasterKey();
+      stateStore.removeAMRMProxyAppContextEntry(attemptId2, key1);
+
+      // restart state store and verify recovered
+      restartStateStore();
+      secretManager.setNMStateStoreService(stateStore);
+      state = stateStore.loadAMRMProxyState();
+      assertEquals(state.getCurrentMasterKey(),
+          secretManager.getCurrentMasterKeyData().getMasterKey());
+      assertEquals(state.getNextMasterKey(),
+          secretManager.getNextMasterKeyData().getMasterKey());
+      assertEquals(state.getAppContexts().size(), 2);
+      // app1
+      map = state.getAppContexts().get(attemptId1);
+      assertNotEquals(map, null);
+      assertEquals(map.size(), 2);
+      assertTrue(Arrays.equals(map.get(key1), data1));
+      assertTrue(Arrays.equals(map.get(key2), data2));
+      // app2
+      map = state.getAppContexts().get(attemptId2);
+      assertNotEquals(map, null);
+      assertEquals(map.size(), 1);
+      assertTrue(Arrays.equals(map.get(key2), data2));
+
+      // Activate next master key and remove all entries of app1
+      secretManager.activateNextMasterKey();
+      stateStore.removeAMRMProxyAppContext(attemptId1);
+
+      // restart state store and verify recovered
+      restartStateStore();
+      secretManager.setNMStateStoreService(stateStore);
+      state = stateStore.loadAMRMProxyState();
+      assertEquals(state.getCurrentMasterKey(),
+          secretManager.getCurrentMasterKeyData().getMasterKey());
+      assertEquals(state.getNextMasterKey(), null);
+      assertEquals(state.getAppContexts().size(), 1);
+      // app2 only
+      map = state.getAppContexts().get(attemptId2);
+      assertNotEquals(map, null);
+      assertEquals(map.size(), 1);
+      assertTrue(Arrays.equals(map.get(key2), data2));
+    } finally {
+      secretManager.stop();
+    }
+  }
+
   private static class NMTokenSecretManagerForTest extends
       BaseNMTokenSecretManager {
     public MasterKey generateKey() {

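The test above drives a full persist / restart / reload cycle. For orientation, the read side that an AMRMProxy recovery path would go through looks roughly like this (restoreMasterKeys() and rebuildPipeline() are hypothetical helpers; the accessors are the ones the test exercises):

    static void recover(NMStateStoreService store) throws IOException {
      RecoveredAMRMProxyState state = store.loadAMRMProxyState();
      // current key is null on a fresh store; next key is non-null only mid key-roll
      restoreMasterKeys(state.getCurrentMasterKey(), state.getNextMasterKey());
      for (Map.Entry<ApplicationAttemptId, Map<String, byte[]>> e
          : state.getAppContexts().entrySet()) {
        // each attempt carries the opaque key -> byte[] entries written for it
        rebuildPipeline(e.getKey(), e.getValue());
      }
    }
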
http://git-wip-us.apache.org/repos/asf/hadoop/blob/49aa60e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
index 77240c6..329d57e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
@@ -23,6 +23,7 @@ import java.io.IOException;
 import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.Collection;
+import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
@@ -938,9 +939,10 @@ public class MiniYARNCluster extends CompositeService {
     @Override
     protected void initializePipeline(ApplicationAttemptId applicationAttemptId,
         String user, Token<AMRMTokenIdentifier> amrmToken,
-        Token<AMRMTokenIdentifier> localToken) {
+        Token<AMRMTokenIdentifier> localToken,
+        Map<String, byte[]> recoveredDataMap, boolean isRecovery) {
       super.initializePipeline(applicationAttemptId, user, amrmToken,
-          localToken);
+          localToken, recoveredDataMap, isRecovery);
       RequestInterceptor rt = getPipelines()
           .get(applicationAttemptId.getApplicationId()).getRootInterceptor();
       // The DefaultRequestInterceptor will generally be the last


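Any AMRMProxyService subclass that overrides initializePipeline has to accept the two new parameters; a pass-through override in the spirit of the test cluster's would look roughly like this (sketch only):

    @Override
    protected void initializePipeline(ApplicationAttemptId attemptId, String user,
        Token<AMRMTokenIdentifier> amrmToken, Token<AMRMTokenIdentifier> localToken,
        Map<String, byte[]> recoveredDataMap, boolean isRecovery) {
      // recoveredDataMap holds whatever the interceptors persisted for this attempt;
      // isRecovery distinguishes an NM restart from a first-time launch.
      super.initializePipeline(attemptId, user, amrmToken, localToken,
          recoveredDataMap, isRecovery);
    }
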


[41/50] hadoop git commit: YARN-5892. Support user-specific minimum user limit percentage in Capacity Scheduler. Contributed by Eric Payne.

Posted by st...@apache.org.
YARN-5892. Support user-specific minimum user limit percentage in Capacity Scheduler. Contributed by Eric Payne.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ca13b224
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ca13b224
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ca13b224

Branch: refs/heads/HADOOP-13345
Commit: ca13b224b2feb9c44de861da9cbba8dd2a12cb35
Parents: 68dc7c2
Author: Sunil G <su...@apache.org>
Authored: Thu Jun 22 23:50:57 2017 -0700
Committer: Sunil G <su...@apache.org>
Committed: Thu Jun 22 23:50:57 2017 -0700

----------------------------------------------------------------------
 .../resource/DefaultResourceCalculator.java     |   6 +
 .../resource/DominantResourceCalculator.java    |   8 +
 .../yarn/util/resource/ResourceCalculator.java  |  23 +++
 .../hadoop/yarn/util/resource/Resources.java    |   5 +
 .../scheduler/capacity/AbstractCSQueue.java     |  23 +++
 .../scheduler/capacity/CSQueue.java             |   7 +
 .../CapacitySchedulerConfiguration.java         |  34 ++++
 .../scheduler/capacity/LeafQueue.java           |  28 ++-
 .../scheduler/capacity/UserInfo.java            |  15 +-
 .../scheduler/capacity/UsersManager.java        | 172 +++++++++++++++----
 .../scheduler/common/fica/FiCaSchedulerApp.java |   4 +-
 .../webapp/CapacitySchedulerPage.java           |   9 +-
 .../scheduler/capacity/TestLeafQueue.java       | 123 +++++++++++++
 .../src/site/markdown/CapacityScheduler.md      |   1 +
 14 files changed, 415 insertions(+), 43 deletions(-)
----------------------------------------------------------------------

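Configuration-wise the feature is a single per-user knob under each queue. A hedged sketch of setting it programmatically, mirroring the new TestLeafQueue case (queue path root.a and user name alice are placeholders):

    CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
    // minimum-user-limit-percent for root.a: an active user's limit never drops below 50%.
    conf.setUserLimit("root.a", 50);
    // Give alice 1.5x the user-limit share of an unweighted user in root.a; this is
    // the programmatic form of
    //   yarn.scheduler.capacity.root.a.user-settings.alice.weight = 1.5
    conf.setFloat("yarn.scheduler.capacity.root.a.user-settings.alice."
        + CapacitySchedulerConfiguration.USER_WEIGHT, 1.5f);
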

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca13b224/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
index 524a049..bdf60bd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
@@ -67,6 +67,12 @@ public class DefaultResourceCalculator extends ResourceCalculator {
   }
 
   @Override
+  public Resource divideAndCeil(Resource numerator, float denominator) {
+    return Resources.createResource(
+        divideAndCeil(numerator.getMemorySize(), denominator));
+  }
+
+  @Override
   public Resource normalize(Resource r, Resource minimumResource,
       Resource maximumResource, Resource stepFactor) {
     if (stepFactor.getMemorySize() == 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca13b224/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 69fe716..ea9b927 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -155,6 +155,14 @@ public class DominantResourceCalculator extends ResourceCalculator {
   }
 
   @Override
+  public Resource divideAndCeil(Resource numerator, float denominator) {
+    return Resources.createResource(
+        divideAndCeil(numerator.getMemorySize(), denominator),
+        divideAndCeil(numerator.getVirtualCores(), denominator)
+        );
+  }
+
+  @Override
   public Resource normalize(Resource r, Resource minimumResource,
                             Resource maximumResource, Resource stepFactor) {
     if (stepFactor.getMemorySize() == 0 || stepFactor.getVirtualCores() == 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca13b224/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
index d219fe1..398dac5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
@@ -65,6 +65,13 @@ public abstract class ResourceCalculator {
     }
     return (a + (b - 1)) / b;
   }
+
+  public static int divideAndCeil(int a, float b) {
+    if (b == 0) {
+      return 0;
+    }
+    return (int) Math.ceil(a / b);
+  }
   
   public static long divideAndCeil(long a, long b) {
     if (b == 0) {
@@ -73,6 +80,13 @@ public abstract class ResourceCalculator {
     return (a + (b - 1)) / b;
   }
 
+  public static long divideAndCeil(long a, float b) {
+    if (b == 0) {
+      return 0;
+    }
+    return (long) Math.ceil(a/b);
+  }
+
   public static int roundUp(int a, int b) {
     return divideAndCeil(a, b) * b;
   }
@@ -198,6 +212,15 @@ public abstract class ResourceCalculator {
    * @return resultant resource
    */
   public abstract Resource divideAndCeil(Resource numerator, int denominator);
+
+  /**
+   * Divide-and-ceil <code>numerator</code> by <code>denominator</code>.
+   *
+   * @param numerator numerator resource
+   * @param denominator denominator
+   * @return resultant resource
+   */
+  public abstract Resource divideAndCeil(Resource numerator, float denominator);
   
   /**
    * Check if a smaller resource can be contained by bigger resource.

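The float overloads exist so a share can be computed against a weighted (fractional) user count instead of an integer one; the Resource-typed versions apply the same scalar logic per dimension. A small illustration with invented numbers:

    // ceil(10 / 2.5) = 4: ten units of used resource spread over an effective
    // 2.5 users (weights 1.5 + 1.0) yields 4 units per unit of weight.
    long perUnitOfWeight = ResourceCalculator.divideAndCeil(10L, 2.5f);  // == 4
    // The pre-existing integer variant can only divide by a whole user count.
    long perWholeUser = ResourceCalculator.divideAndCeil(10L, 2L);       // == 5
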
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca13b224/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index b2bb099..a1d14fd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -279,6 +279,11 @@ public class Resources {
       ResourceCalculator resourceCalculator, Resource lhs, int rhs) {
     return resourceCalculator.divideAndCeil(lhs, rhs);
   }
+
+  public static Resource divideAndCeil(
+      ResourceCalculator resourceCalculator, Resource lhs, float rhs) {
+    return resourceCalculator.divideAndCeil(lhs, rhs);
+  }
   
   public static boolean equals(Resource lhs, Resource rhs) {
     return lhs.equals(rhs);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca13b224/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 6c141a1..b69ec96 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -111,6 +111,7 @@ public abstract class AbstractCSQueue implements CSQueue {
   protected ReentrantReadWriteLock.WriteLock writeLock;
 
   volatile Priority priority = Priority.newInstance(0);
+  private Map<String, Float> userWeights = new HashMap<String, Float>();
 
   public AbstractCSQueue(CapacitySchedulerContext cs,
       String queueName, CSQueue parent, CSQueue old) throws IOException {
@@ -332,11 +333,28 @@ public abstract class AbstractCSQueue implements CSQueue {
 
       this.priority = csContext.getConfiguration().getQueuePriority(
           getQueuePath());
+
+      this.userWeights = getUserWeightsFromHierarchy();
     } finally {
       writeLock.unlock();
     }
   }
 
+  private Map<String, Float> getUserWeightsFromHierarchy() throws IOException {
+    Map<String, Float> unionInheritedWeights = new HashMap<String, Float>();
+    CSQueue parentQ = getParent();
+    if (parentQ != null) {
+      // Inherit all of parent's user's weights
+      unionInheritedWeights.putAll(parentQ.getUserWeights());
+    }
+    // Insert this queue's user's weights, overriding parent's user's weights if
+    // there is overlap.
+    CapacitySchedulerConfiguration csConf = csContext.getConfiguration();
+    unionInheritedWeights.putAll(
+        csConf.getAllUserWeightsForQueue(getQueuePath()));
+    return unionInheritedWeights;
+  }
+
   private void initializeQueueState(QueueState previousState,
       QueueState configuredState, QueueState parentState) {
     // verify that we can not any value for State other than RUNNING/STOPPED
@@ -956,4 +974,9 @@ public abstract class AbstractCSQueue implements CSQueue {
   public Priority getPriority() {
     return this.priority;
   }
+
+  @Override
+  public Map<String, Float> getUserWeights() {
+    return userWeights;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca13b224/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
index c6726ec..3a17d1b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 import java.io.IOException;
 import java.util.Collection;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
@@ -350,4 +351,10 @@ public interface CSQueue extends SchedulerQueue<CSQueue> {
    * @return queue priority
    */
   Priority getPriority();
+
+  /**
+   * Get a map of usernames and weights
+   * @return map of usernames and corresponding weight
+   */
+  Map<String, Float> getUserWeights();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca13b224/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index c3c9585..90a7e65 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -108,6 +108,15 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
   public static final String USER_LIMIT_FACTOR = "user-limit-factor";
 
   @Private
+  public static final String USER_WEIGHT = "weight";
+
+  @Private
+  public static final String USER_SETTINGS = "user-settings";
+
+  @Private
+  public static final float DEFAULT_USER_WEIGHT = 1.0f;
+
+  @Private
   public static final String STATE = "state";
   
   @Private
@@ -1412,4 +1421,29 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
         QUEUE_PRIORITY_UTILIZATION_ORDERING_POLICY,
         UNDER_UTILIZED_PREEMPTION_MOVE_RESERVATION), allowMoveReservation);
   }
+
+  /**
+   * Get the weights of all users at this queue level from the configuration.
+   * Used in computing user-specific user limit, relative to other users.
+   * @param queuePath full queue path
+   * @return map of user weights, if they exist; otherwise, an empty map.
+   */
+  public Map<String, Float> getAllUserWeightsForQueue(String queuePath) {
+    Map <String, Float> userWeights = new HashMap <String, Float>();
+    String qPathPlusPrefix =
+        getQueuePrefix(queuePath).replaceAll("\\.", "\\\\.")
+        + USER_SETTINGS + "\\.";
+    String weightKeyRegex =
+        qPathPlusPrefix + "\\w+\\." + USER_WEIGHT;
+    Map<String, String> props = getValByRegex(weightKeyRegex);
+    for (Entry<String, String> e : props.entrySet()) {
+      String userName =
+          e.getKey().replaceFirst(qPathPlusPrefix, "")
+          .replaceFirst("\\." + USER_WEIGHT, "");
+      if (userName != null && !userName.isEmpty()) {
+        userWeights.put(userName, new Float(e.getValue()));
+      }
+    }
+    return userWeights;
+  }
 }

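getAllUserWeightsForQueue rebuilds the per-user map by regex-matching configuration keys under the queue's user-settings prefix, so only keys of the form <queue-prefix>.user-settings.<user>.weight are picked up. A sketch with placeholder users, assuming conf is the scheduler's CapacitySchedulerConfiguration:

    // Assumed entries, whether set in capacity-scheduler.xml or via conf.setFloat():
    //   yarn.scheduler.capacity.root.a.user-settings.alice.weight = 1.5
    //   yarn.scheduler.capacity.root.a.user-settings.bob.weight   = 0.5
    Map<String, Float> weights = conf.getAllUserWeightsForQueue("root.a");
    // weights is {alice=1.5, bob=0.5}; users with no entry end up at weight 1.0
    // (DEFAULT_USER_WEIGHT) when the queue resolves them.
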
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca13b224/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index eb2432e..013a5ac 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
 import java.io.IOException;
 import java.util.*;
+import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.commons.lang.StringUtils;
@@ -237,6 +238,20 @@ public class LeafQueue extends AbstractCSQueue {
       defaultAppPriorityPerQueue = Priority.newInstance(
           conf.getDefaultApplicationPriorityConfPerQueue(getQueuePath()));
 
+      // Validate leaf queue's user's weights.
+      int queueUL = Math.min(100, conf.getUserLimit(getQueuePath()));
+      for (Entry<String, Float> e : getUserWeights().entrySet()) {
+        float val = e.getValue().floatValue();
+        if (val < 0.0f || val > (100.0f / queueUL)) {
+          throw new IOException("Weight (" + val + ") for user \"" + e.getKey()
+              + "\" must be between 0 and" + " 100 / " + queueUL + " (= " +
+              100.0f/queueUL + ", the number of concurrent active users in "
+              + getQueuePath() + ")");
+        }
+      }
+
+      usersManager.updateUserWeights();
+
       LOG.info(
           "Initializing " + queueName + "\n" + "capacity = " + queueCapacities
               .getCapacity() + " [= (float) configuredCapacity / 100 ]" + "\n"
@@ -619,11 +634,16 @@ public class LeafQueue extends AbstractCSQueue {
 
   @VisibleForTesting
   public Resource getUserAMResourceLimit() {
-     return getUserAMResourceLimitPerPartition(RMNodeLabelsManager.NO_LABEL);
+    return getUserAMResourceLimitPerPartition(RMNodeLabelsManager.NO_LABEL,
+         null);
   }
 
   public Resource getUserAMResourceLimitPerPartition(
-      String nodePartition) {
+      String nodePartition, String userName) {
+    float userWeight = 1.0f;
+    if (userName != null && getUser(userName) != null) {
+      userWeight = getUser(userName).getWeight();
+    }
     try {
       readLock.lock();
       /*
@@ -634,6 +654,7 @@ public class LeafQueue extends AbstractCSQueue {
        */
       float effectiveUserLimit = Math.max(usersManager.getUserLimit() / 100.0f,
           1.0f / Math.max(getAbstractUsersManager().getNumActiveUsers(), 1));
+      effectiveUserLimit = Math.min(effectiveUserLimit * userWeight, 1.0f);
 
       Resource queuePartitionResource = Resources
           .multiplyAndNormalizeUp(resourceCalculator,
@@ -774,7 +795,8 @@ public class LeafQueue extends AbstractCSQueue {
 
         // Verify whether we already calculated user-am-limit for this label.
         if (userAMLimit == null) {
-          userAMLimit = getUserAMResourceLimitPerPartition(partitionName);
+          userAMLimit = getUserAMResourceLimitPerPartition(partitionName,
+              application.getUser());
           userAmPartitionLimit.put(partitionName, userAMLimit);
         }
 

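Two behaviours are worth calling out in the LeafQueue changes: reinitialization rejects any weight outside [0, 100 / minimum-user-limit-percent], and the per-user AM resource limit is scaled by the weight but never past the whole queue. With made-up numbers:

    minimum-user-limit-percent = 50   ->  allowed weight range is [0, 100/50] = [0, 2]
    one active user, weight 1.0       ->  effectiveUserLimit = max(0.50, 1/1) = 1.00
    two active users, weight 1.0      ->  effectiveUserLimit = max(0.50, 1/2) = 0.50
    two active users, weight 1.5      ->  min(0.50 * 1.5, 1.0) = 0.75 of the queue's
                                          AM resource limit for that user
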
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca13b224/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UserInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UserInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UserInfo.java
index ff9d304..a1a8ecf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UserInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UserInfo.java
@@ -37,11 +37,14 @@ public class UserInfo {
   protected ResourceInfo AMResourceUsed;
   protected ResourceInfo userResourceLimit;
   protected ResourcesInfo resources;
+  private float userWeight;
+  private boolean isActive;
 
   UserInfo() {}
 
   UserInfo(String username, Resource resUsed, int activeApps, int pendingApps,
-      Resource amResUsed, Resource resourceLimit, ResourceUsage resourceUsage) {
+      Resource amResUsed, Resource resourceLimit, ResourceUsage resourceUsage,
+      float weight, boolean isActive) {
     this.username = username;
     this.resourcesUsed = new ResourceInfo(resUsed);
     this.numActiveApplications = activeApps;
@@ -49,6 +52,8 @@ public class UserInfo {
     this.AMResourceUsed = new ResourceInfo(amResUsed);
     this.userResourceLimit = new ResourceInfo(resourceLimit);
     this.resources = new ResourcesInfo(resourceUsage);
+    this.userWeight = weight;
+    this.isActive = isActive;
   }
 
   public String getUsername() {
@@ -78,4 +83,12 @@ public class UserInfo {
   public ResourcesInfo getResourceUsageInfo() {
     return resources;
   }
+
+  public float getUserWeight() {
+    return userWeight;
+  }
+
+  public boolean getIsActive() {
+    return isActive;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca13b224/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
index 579c4c7..5f7d185 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
@@ -92,6 +92,9 @@ public class UsersManager implements AbstractUsersManager {
   Map<String, Map<SchedulingMode, Resource>> preComputedActiveUserLimit = new ConcurrentHashMap<>();
   Map<String, Map<SchedulingMode, Resource>> preComputedAllUserLimit = new ConcurrentHashMap<>();
 
+  private float activeUsersTimesWeights = 0.0f;
+  private float allUsersTimesWeights = 0.0f;
+
   /**
    * UsageRatios will store the total used resources ratio across all users of
    * the queue.
@@ -158,6 +161,7 @@ public class UsersManager implements AbstractUsersManager {
 
     private UsageRatios userUsageRatios = new UsageRatios();
     private WriteLock writeLock;
+    private float weight;
 
     public User(String name) {
       ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
@@ -262,6 +266,20 @@ public class UsersManager implements AbstractUsersManager {
     public void setResourceUsage(ResourceUsage resourceUsage) {
       this.userResourceUsage = resourceUsage;
     }
+
+    /**
+     * @return the weight
+     */
+    public float getWeight() {
+      return weight;
+    }
+
+    /**
+     * @param weight the weight to set
+     */
+    public void setWeight(float weight) {
+      this.weight = weight;
+    }
   } /* End of User class */
 
   /**
@@ -382,6 +400,8 @@ public class UsersManager implements AbstractUsersManager {
       // Remove user from active/non-active list as well.
       activeUsersSet.remove(userName);
       nonActiveUsersSet.remove(userName);
+      activeUsersTimesWeights = sumActiveUsersTimesWeights();
+      allUsersTimesWeights = sumAllUsersTimesWeights();
     } finally {
       writeLock.unlock();
     }
@@ -418,6 +438,8 @@ public class UsersManager implements AbstractUsersManager {
    */
   private void addUser(String userName, User user) {
     this.users.put(userName, user);
+    user.setWeight(getUserWeightFromQueue(userName));
+    allUsersTimesWeights = sumAllUsersTimesWeights();
   }
 
   /**
@@ -434,7 +456,8 @@ public class UsersManager implements AbstractUsersManager {
                 user.getActiveApplications(), user.getPendingApplications(),
                 Resources.clone(user.getConsumedAMResources()),
                 Resources.clone(user.getUserResourceLimit()),
-                user.getResourceUsage()));
+                user.getResourceUsage(), user.getWeight(),
+                activeUsersSet.contains(user.userName)));
       }
       return usersToReturn;
     } finally {
@@ -442,6 +465,11 @@ public class UsersManager implements AbstractUsersManager {
     }
   }
 
+  private float getUserWeightFromQueue(String userName) {
+    Float weight = lQueue.getUserWeights().get(userName);
+    return (weight == null) ? 1.0f : weight.floatValue();
+  }
+
   /**
    * Get computed user-limit for all ACTIVE users in this queue. If cached data
    * is invalidated due to resource change, this method also enforce to
@@ -480,13 +508,24 @@ public class UsersManager implements AbstractUsersManager {
       writeLock.unlock();
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("userLimit is fetched. userLimit = "
-          + userLimitPerSchedulingMode.get(schedulingMode) + ", schedulingMode="
-          + schedulingMode + ", partition=" + nodePartition);
+    Resource userLimitResource = userLimitPerSchedulingMode.get(schedulingMode);
+    User user = getUser(userName);
+    float weight = (user == null) ? 1.0f : user.getWeight();
+    Resource userSpecificUserLimit =
+        Resources.multiplyAndNormalizeDown(resourceCalculator,
+            userLimitResource, weight, lQueue.getMinimumAllocation());
+
+    if (user != null) {
+      user.setUserResourceLimit(userSpecificUserLimit);
     }
 
-    return userLimitPerSchedulingMode.get(schedulingMode);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("userLimit is fetched. userLimit=" + userLimitResource
+          + ", userSpecificUserLimit=" + userSpecificUserLimit
+          + ", schedulingMode=" + schedulingMode
+          + ", partition=" + nodePartition);
+    }
+    return userSpecificUserLimit;
   }
 
   /**
@@ -527,13 +566,21 @@ public class UsersManager implements AbstractUsersManager {
       writeLock.unlock();
     }
 
+    Resource userLimitResource = userLimitPerSchedulingMode.get(schedulingMode);
+    User user = getUser(userName);
+    float weight = (user == null) ? 1.0f : user.getWeight();
+    Resource userSpecificUserLimit =
+        Resources.multiplyAndNormalizeDown(resourceCalculator,
+            userLimitResource, weight, lQueue.getMinimumAllocation());
+
     if (LOG.isDebugEnabled()) {
-      LOG.debug("userLimit is fetched. userLimit = "
-          + userLimitPerSchedulingMode.get(schedulingMode) + ", schedulingMode="
-          + schedulingMode + ", partition=" + nodePartition);
+      LOG.debug("userLimit is fetched. userLimit=" + userLimitResource
+          + ", userSpecificUserLimit=" + userSpecificUserLimit
+          + ", schedulingMode=" + schedulingMode
+          + ", partition=" + nodePartition);
     }
 
-    return userLimitPerSchedulingMode.get(schedulingMode);
+    return userSpecificUserLimit;
   }
 
   /*
@@ -656,16 +703,19 @@ public class UsersManager implements AbstractUsersManager {
         queueCapacity, required);
 
     /*
-     * We want to base the userLimit calculation on max(queueCapacity,
-     * usedResources+required). However, we want usedResources to be based on
-     * the combined ratios of all the users in the queue so we use consumedRatio
-     * to calculate such. The calculation is dependent on how the
-     * resourceCalculator calculates the ratio between two Resources. DRF
-     * Example: If usedResources is greater than queueCapacity and users have
-     * the following [mem,cpu] usages: User1: [10%,20%] - Dominant resource is
-     * 20% User2: [30%,10%] - Dominant resource is 30% Then total consumedRatio
-     * is then 20+30=50%. Yes, this value can be larger than 100% but for the
-     * purposes of making sure all users are getting their fair share, it works.
+     * We want to base the userLimit calculation on
+     * max(queueCapacity, usedResources+required). However, we want
+     * usedResources to be based on the combined ratios of all the users in the
+     * queue so we use consumedRatio to calculate such.
+     * The calculation is dependent on how the resourceCalculator calculates the
+     * ratio between two Resources. DRF Example: If usedResources is greater
+     * than queueCapacity and users have the following [mem,cpu] usages:
+     *
+     * User1: [10%,20%] - Dominant resource is 20%
+     * User2: [30%,10%] - Dominant resource is 30%
+     * Then total consumedRatio is then 20+30=50%. Yes, this value can be
+     * larger than 100% but for the purposes of making sure all users are
+     * getting their fair share, it works.
      */
     Resource consumed = Resources.multiplyAndNormalizeUp(resourceCalculator,
         partitionResource, getUsageRatio(nodePartition),
@@ -680,23 +730,23 @@ public class UsersManager implements AbstractUsersManager {
      * capacity * user-limit-factor. Also, the queue's configured capacity
      * should be higher than queue-hard-limit * ulMin
      */
-    int usersCount = getNumActiveUsers();
+    float usersSummedByWeight = activeUsersTimesWeights;
     Resource resourceUsed = totalResUsageForActiveUsers.getUsed(nodePartition);
 
     // For non-activeUser calculation, consider all users count.
     if (!activeUser) {
       resourceUsed = currentCapacity;
-      usersCount = users.size();
+      usersSummedByWeight = allUsersTimesWeights;
     }
 
     /*
-     * User limit resource is determined by: max{currentCapacity / #activeUsers,
+     * User limit resource is determined by: max(currentCapacity / #activeUsers,
      * currentCapacity * user-limit-percentage%)
      */
     Resource userLimitResource = Resources.max(resourceCalculator,
         partitionResource,
         Resources.divideAndCeil(resourceCalculator, resourceUsed,
-            usersCount),
+            usersSummedByWeight),
         Resources.divideAndCeil(resourceCalculator,
             Resources.multiplyAndRoundDown(currentCapacity, getUserLimit()),
             100));
@@ -727,18 +777,26 @@ public class UsersManager implements AbstractUsersManager {
             lQueue.getMinimumAllocation());
 
     if (LOG.isDebugEnabled()) {
-      LOG.debug("User limit computation for " + userName + " in queue "
-          + lQueue.getQueueName() + " userLimitPercent=" + lQueue.getUserLimit()
-          + " userLimitFactor=" + lQueue.getUserLimitFactor() + " required: "
-          + required + " consumed: " + consumed + " user-limit-resource: "
-          + userLimitResource + " queueCapacity: " + queueCapacity
-          + " qconsumed: " + lQueue.getQueueResourceUsage().getUsed()
-          + " currentCapacity: " + currentCapacity + " activeUsers: "
-          + usersCount + " clusterCapacity: " + clusterResource
-          + " resourceByLabel: " + partitionResource + " usageratio: "
-          + getUsageRatio(nodePartition) + " Partition: " + nodePartition);
-    }
-    getUser(userName).setUserResourceLimit(userLimitResource);
+      LOG.debug("User limit computation for " + userName
+          + ",  in queue: " + lQueue.getQueueName()
+          + ",  userLimitPercent=" + lQueue.getUserLimit()
+          + ",  userLimitFactor=" + lQueue.getUserLimitFactor()
+          + ",  required=" + required
+          + ",  consumed=" + consumed
+          + ",  user-limit-resource=" + userLimitResource
+          + ",  queueCapacity=" + queueCapacity
+          + ",  qconsumed=" + lQueue.getQueueResourceUsage().getUsed()
+          + ",  currentCapacity=" + currentCapacity
+          + ",  activeUsers=" + usersSummedByWeight
+          + ",  clusterCapacity=" + clusterResource
+          + ",  resourceByLabel=" + partitionResource
+          + ",  usageratio=" + getUsageRatio(nodePartition)
+          + ",  Partition=" + nodePartition
+          + ",  resourceUsed=" + resourceUsed
+          + ",  maxUserLimit=" + maxUserLimit
+          + ",  userWeight=" + getUser(userName).getWeight()
+      );
+    }
     return userLimitResource;
   }
 
@@ -838,6 +896,32 @@ public class UsersManager implements AbstractUsersManager {
     return activeUsers.get();
   }
 
+  float sumActiveUsersTimesWeights() {
+    float count = 0.0f;
+    try {
+      this.readLock.lock();
+      for (String u : activeUsersSet) {
+        count += getUser(u).getWeight();
+      }
+      return count;
+    } finally {
+      this.readLock.unlock();
+    }
+  }
+
+  float sumAllUsersTimesWeights() {
+    float count = 0.0f;
+    try {
+      this.readLock.lock();
+      for (String u : users.keySet()) {
+        count += getUser(u).getWeight();
+      }
+      return count;
+    } finally {
+      this.readLock.unlock();
+    }
+  }
+
   private void updateActiveUsersResourceUsage(String userName) {
     try {
       this.writeLock.lock();
@@ -850,6 +934,7 @@ public class UsersManager implements AbstractUsersManager {
       if (nonActiveUsersSet.contains(userName)) {
         nonActiveUsersSet.remove(userName);
         activeUsersSet.add(userName);
+        activeUsersTimesWeights = sumActiveUsersTimesWeights();
 
         // Update total resource usage of active and non-active after user
         // is moved from non-active to active.
@@ -890,6 +975,7 @@ public class UsersManager implements AbstractUsersManager {
       if (activeUsersSet.contains(userName)) {
         activeUsersSet.remove(userName);
         nonActiveUsersSet.add(userName);
+        activeUsersTimesWeights = sumActiveUsersTimesWeights();
 
         // Update total resource usage of active and non-active after user is
         // moved from active to non-active.
@@ -990,4 +1076,18 @@ public class UsersManager implements AbstractUsersManager {
               + totalResUsageForNonActiveUsers.getAllUsed());
     }
   }
+
+  public void updateUserWeights() {
+    try {
+      this.writeLock.lock();
+      for (Map.Entry<String, User> ue : users.entrySet()) {
+        ue.getValue().setWeight(getUserWeightFromQueue(ue.getKey()));
+      }
+      activeUsersTimesWeights = sumActiveUsersTimesWeights();
+      allUsersTimesWeights = sumAllUsersTimesWeights();
+      userLimitNeedsRecompute();
+    } finally {
+      this.writeLock.unlock();
+    }
+  }
 }

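Putting the UsersManager pieces together: the queue-level limit is now computed per unit of weight (dividing by the sum of the active users' weights rather than by the active-user count) and then scaled per user by that user's weight, before the usual user-limit-factor cap and allocation-increment normalization. A worked example with invented numbers:

    active users: alice (weight 1.5), bob (weight 1.0)  ->  activeUsersTimesWeights = 2.5
    resources used by active users = 10GB, currentCapacity = 12GB, user-limit = 50%
    per-unit-of-weight limit = max( ceil(10GB / 2.5), ceil(12GB * 50 / 100) )
                             = max( 4GB, 6GB ) = 6GB
    alice's user limit       = 6GB * 1.5 = 9GB
    bob's user limit         = 6GB * 1.0 = 6GB
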
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca13b224/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index 331585e..ad4c8ce 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -881,8 +881,8 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
         .append(queue.getAMResourceLimitPerPartition(appAMNodePartitionName));
     diagnosticMessage.append("; ");
     diagnosticMessage.append("User AM Resource Limit of the queue = ");
-    diagnosticMessage.append(
-        queue.getUserAMResourceLimitPerPartition(appAMNodePartitionName));
+    diagnosticMessage.append(queue.getUserAMResourceLimitPerPartition(
+        appAMNodePartitionName, getUser()));
     diagnosticMessage.append("; ");
     diagnosticMessage.append("Queue AM Resource Usage = ");
     diagnosticMessage.append(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca13b224/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index b972428..292c5f3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -68,6 +68,7 @@ class CapacitySchedulerPage extends RmView {
       "left:0%;background:none;border:1px dashed #BFBFBF";
   static final String Q_OVER = "background:#FFA333";
   static final String Q_UNDER = "background:#5BD75B";
+  static final String ACTIVE_USER = "background:#FFFF00"; // Yellow highlight
 
   @RequestScoped
   static class CSQInfo {
@@ -209,6 +210,7 @@ class CapacitySchedulerPage extends RmView {
           html.table("#userinfo").thead().$class("ui-widget-header").tr().th()
               .$class("ui-state-default")._("User Name")._().th()
               .$class("ui-state-default")._("Max Resource")._().th()
+              .$class("ui-state-default")._("Weight")._().th()
               .$class("ui-state-default")._("Used Resource")._().th()
               .$class("ui-state-default")._("Max AM Resource")._().th()
               .$class("ui-state-default")._("Used AM Resource")._().th()
@@ -229,8 +231,11 @@ class CapacitySchedulerPage extends RmView {
         ResourceInfo amUsed = (resourceUsages.getAmUsed() == null)
             ? new ResourceInfo(Resources.none())
             : resourceUsages.getAmUsed();
-        tbody.tr().td(userInfo.getUsername())
+        String highlightIfAsking =
+            userInfo.getIsActive() ? ACTIVE_USER : null;
+        tbody.tr().$style(highlightIfAsking).td(userInfo.getUsername())
             .td(userInfo.getUserResourceLimit().toString())
+            .td(String.valueOf(userInfo.getUserWeight()))
             .td(resourcesUsed.toString())
             .td(resourceUsages.getAMLimit().toString())
             .td(amUsed.toString())
@@ -399,6 +404,8 @@ class CapacitySchedulerPage extends RmView {
               _("Used (over capacity)")._().
             span().$class("qlegend ui-corner-all ui-state-default").
               _("Max Capacity")._().
+            span().$class("qlegend ui-corner-all").$style(ACTIVE_USER).
+            _("Users Requesting Resources")._().
           _();
 
         float used = 0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca13b224/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index a9ed5a9..4417132 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -955,7 +955,130 @@ public class TestLeafQueue {
     // app_0 doesn't have outstanding resources, there's only one active user.
     assertEquals("There should only be 1 active user!", 
         1, a.getAbstractUsersManager().getNumActiveUsers());
+  }
+
+  @Test
+  public void testUserSpecificUserLimits() throws Exception {
+    // Mock the queue
+    LeafQueue a = stubLeafQueue((LeafQueue)queues.get(A));
+    // Set minimum-user-limit-percent for queue "a" in the configs.
+    csConf.setUserLimit(a.getQueuePath(), 50);
+    // Set weight for "user_0" to be 1.5 for the a queue in the configs.
+    csConf.setFloat("yarn.scheduler.capacity." + a.getQueuePath()
+        + ".user-settings.user_0." + CapacitySchedulerConfiguration.USER_WEIGHT,
+        1.5f);
+
+    when(csContext.getClusterResource())
+        .thenReturn(Resources.createResource(16 * GB, 32));
+    // Verify that configs were updated and parsed correctly.
+    Assert.assertNull(a.getUserWeights().get("user_0"));
+    a.reinitialize(a, csContext.getClusterResource());
+    assertEquals(1.5, a.getUserWeights().get("user_0").floatValue(), 0.0);
+
+    // set maxCapacity
+    a.setMaxCapacity(1.0f);
+
+    // Set minimum user-limit-percent
+    a.setUserLimit(50);
+    a.setUserLimitFactor(2);
+
+    // Users
+    final String user_0 = "user_0";
+    final String user_1 = "user_1";
+
+    // Set user_0's weight to 1.5 in the a queue's object.
+    a.getUsersManager().getUserAndAddIfAbsent(user_0).setWeight(1.5f);
+
+    // Submit applications
+    final ApplicationAttemptId appAttemptId_0 =
+        TestUtils.getMockApplicationAttemptId(0, 0);
+    FiCaSchedulerApp app_0 =
+        new FiCaSchedulerApp(appAttemptId_0, user_0, a,
+            a.getAbstractUsersManager(), spyRMContext);
+    a.submitApplicationAttempt(app_0, user_0);
+
+    final ApplicationAttemptId appAttemptId_1 =
+        TestUtils.getMockApplicationAttemptId(1, 0);
+    FiCaSchedulerApp app_1 =
+        new FiCaSchedulerApp(appAttemptId_1, user_1, a,
+            a.getAbstractUsersManager(), spyRMContext);
+    a.submitApplicationAttempt(app_1, user_1); // different user
+
+    // Setup some nodes
+    String host_0 = "127.0.0.1";
+    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8*GB);
+    String host_1 = "127.0.0.2";
+    FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8*GB);
+
+    final int numNodes = 2;
+    Resource clusterResource =
+        Resources.createResource(numNodes * (8*GB), numNodes * 16);
+    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
+
+    // Setup resource-requests
+    // app_0 asks for 3 4-GB containers
+    Priority priority = TestUtils.createMockPriority(1);
+    app_0.updateResourceRequests(Collections.singletonList(
+            TestUtils.createResourceRequest(ResourceRequest.ANY, 4*GB, 3, true,
+                priority, recordFactory)));
+
+    // app_1 asks for 2 1-GB containers
+    app_1.updateResourceRequests(Collections.singletonList(
+        TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 2, true,
+            priority, recordFactory)));
+
+    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(
+        app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(),
+        app_1);
+    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(),
+        node_0, node_1.getNodeID(), node_1);
+
+    /**
+     * Start testing...
+     */
+
+    // There're two active users
+    assertEquals(2, a.getAbstractUsersManager().getNumActiveUsers());
+
+    // 1 container to user_0. Since queue starts out empty, user limit would
+    // normally be calculated to be the minimum container size (1024MB).
+    // However, in this case, user_0 has a weight of 1.5, so the UL is 2048MB
+    // because 1024 * 1.5 rounded up to container size is 2048MB.
+    applyCSAssignment(clusterResource,
+        a.assignContainers(clusterResource, node_0,
+        new ResourceLimits(clusterResource),
+        SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
+    assertEquals(4*GB, a.getUsedResources().getMemorySize());
+    assertEquals(4*GB, app_0.getCurrentConsumption().getMemorySize());
+    assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize());
+
+    // At this point the queue-wide user limit is 3072MB, but since user_0 has a
+    // weight of 1.5, its user limit is 5120MB. So, even though user_0 already
+    // has 4096MB, it is under its user limit, so it gets another container.
+    applyCSAssignment(clusterResource,
+        a.assignContainers(clusterResource, node_0,
+        new ResourceLimits(clusterResource),
+        SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
+    assertEquals(8*GB, a.getUsedResources().getMemorySize());
+    assertEquals(8*GB, app_0.getCurrentConsumption().getMemorySize());
+    assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize());
+
+    // Queue-wide user limit at this point is 4096MB and user_0's user limit is
+    // 6144MB. user_0 has 8192MB.
+    // Now that user_0 is above its user limit, the next container should go to user_1.
+    applyCSAssignment(clusterResource,
+        a.assignContainers(clusterResource, node_1,
+        new ResourceLimits(clusterResource),
+        SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
+    assertEquals(9*GB, a.getUsedResources().getMemorySize());
+    assertEquals(8*GB, app_0.getCurrentConsumption().getMemorySize());
+    assertEquals(1*GB, app_1.getCurrentConsumption().getMemorySize());
+
+    assertEquals(4*GB,
+        app_0.getTotalPendingRequestsPerPartition().get("").getMemorySize());
 
+    assertEquals(1*GB,
+        app_1.getTotalPendingRequestsPerPartition().get("").getMemorySize());
   }
 
   @SuppressWarnings({ "unchecked", "rawtypes" })

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca13b224/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
index 737bdc2..f1d4535 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
@@ -124,6 +124,7 @@ Configuration
 | `yarn.scheduler.capacity.<queue-path>.user-limit-factor` | The multiple of the queue capacity which can be configured to allow a single user to acquire more resources. By default this is set to 1 which ensures that a single user can never take more than the queue's configured capacity irrespective of how idle the cluster is. Value is specified as a float. |
 | `yarn.scheduler.capacity.<queue-path>.maximum-allocation-mb` | The per queue maximum limit of memory to allocate to each container request at the Resource Manager. This setting overrides the cluster configuration `yarn.scheduler.maximum-allocation-mb`. This value must be smaller than or equal to the cluster maximum. |
 | `yarn.scheduler.capacity.<queue-path>.maximum-allocation-vcores` | The per queue maximum limit of virtual cores to allocate to each container request at the Resource Manager. This setting overrides the cluster configuration `yarn.scheduler.maximum-allocation-vcores`. This value must be smaller than or equal to the cluster maximum. |
+| `yarn.scheduler.capacity.<queue-path>.user-settings.<user-name>.weight` | This floating point value is used when calculating the user limit resource values for users in a queue. This value will weight each user more or less than the other users in the queue. For example, if user A should receive 50% more resources in a queue than users B and C, this property will be set to 1.5 for user A.  Users B and C will default to 1.0. |
 
   * Running and Pending Application Limits
   




[45/50] hadoop git commit: HADOOP-14547. [WASB] the configured retry policy is not used for all storage operations. Contributed by Thomas.

Posted by st...@apache.org.
HADOOP-14547. [WASB] the configured retry policy is not used for all storage operations.
Contributed by Thomas.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6bd73c6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6bd73c6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6bd73c6

Branch: refs/heads/HADOOP-13345
Commit: c6bd73c6c5760c3a52203e6a51628587ceec6896
Parents: 0b77262
Author: Steve Loughran <st...@apache.org>
Authored: Fri Jun 23 21:38:41 2017 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Fri Jun 23 21:38:41 2017 +0100

----------------------------------------------------------------------
 .../fs/azure/AzureNativeFileSystemStore.java    | 11 ++-----
 .../fs/azure/SecureStorageInterfaceImpl.java    | 30 +++++++++++++++-----
 .../hadoop/fs/azure/StorageInterfaceImpl.java   | 28 ++++++++++++++----
 3 files changed, 49 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6bd73c6/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 534919e..f76b44a 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -481,6 +481,9 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
       }
     }
 
+    // Configure Azure storage session.
+    configureAzureStorageSession();
+
     // Start an Azure storage session.
     //
     createAzureStorageSession();
@@ -792,9 +795,6 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
     // Accessing the storage server unauthenticated using
     // anonymous credentials.
     isAnonymousCredentials = true;
-
-    // Configure Azure storage session.
-    configureAzureStorageSession();
   }
 
   private void connectUsingCredentials(String accountName,
@@ -820,9 +820,6 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
 
     // Can only create container if using account key credentials
     canCreateOrModifyContainer = credentials instanceof StorageCredentialsAccountAndKey;
-
-    // Configure Azure storage session.
-    configureAzureStorageSession();
   }
 
   /**
@@ -848,8 +845,6 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
     rootDirectory = container.getDirectoryReference("");
 
     canCreateOrModifyContainer = true;
-
-    configureAzureStorageSession();
     tolerateOobAppends = false;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6bd73c6/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureStorageInterfaceImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureStorageInterfaceImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureStorageInterfaceImpl.java
index 650149a..810aacf 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureStorageInterfaceImpl.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureStorageInterfaceImpl.java
@@ -69,6 +69,8 @@ public class SecureStorageInterfaceImpl extends StorageInterface {
   public static final String SAS_ERROR_CODE = "SAS Error";
   private SASKeyGeneratorInterface sasKeyGenerator;
   private String storageAccount;
+  private RetryPolicyFactory retryPolicy;
+  private int timeoutIntervalInMs;
 
   public SecureStorageInterfaceImpl(boolean useLocalSASKeyMode,
       Configuration conf) throws SecureModeException {
@@ -90,10 +92,12 @@ public class SecureStorageInterfaceImpl extends StorageInterface {
 
   @Override
   public void setTimeoutInMs(int timeoutInMs) {
+    timeoutIntervalInMs = timeoutInMs;
   }
 
   @Override
   public void setRetryPolicyFactory(RetryPolicyFactory retryPolicyFactory) {
+    retryPolicy = retryPolicyFactory;
   }
 
   @Override
@@ -133,9 +137,15 @@ public class SecureStorageInterfaceImpl extends StorageInterface {
       throws URISyntaxException, StorageException {
 
     try {
-      return new SASCloudBlobContainerWrapperImpl(storageAccount,
-          new CloudBlobContainer(sasKeyGenerator.getContainerSASUri(
-              storageAccount, name)), sasKeyGenerator);
+      CloudBlobContainer container = new CloudBlobContainer(sasKeyGenerator.getContainerSASUri(
+          storageAccount, name));
+      if (retryPolicy != null) {
+        container.getServiceClient().getDefaultRequestOptions().setRetryPolicyFactory(retryPolicy);
+      }
+      if (timeoutIntervalInMs > 0) {
+        container.getServiceClient().getDefaultRequestOptions().setTimeoutIntervalInMs(timeoutIntervalInMs);
+      }
+      return new SASCloudBlobContainerWrapperImpl(storageAccount, container, sasKeyGenerator);
     } catch (SASKeyGenerationException sasEx) {
       String errorMsg = "Encountered SASKeyGeneration exception while "
           + "generating SAS Key for container : " + name
@@ -216,9 +226,12 @@ public class SecureStorageInterfaceImpl extends StorageInterface {
     public CloudBlobWrapper getBlockBlobReference(String relativePath)
         throws URISyntaxException, StorageException {
       try {
+        CloudBlockBlob blob = new CloudBlockBlob(sasKeyGenerator.getRelativeBlobSASUri(
+                storageAccount, getName(), relativePath));
+        blob.getServiceClient().setDefaultRequestOptions(
+                container.getServiceClient().getDefaultRequestOptions());
         return new SASCloudBlockBlobWrapperImpl(
-            new CloudBlockBlob(sasKeyGenerator.getRelativeBlobSASUri(
-                storageAccount, getName(), relativePath)));
+                blob);
       } catch (SASKeyGenerationException sasEx) {
         String errorMsg = "Encountered SASKeyGeneration exception while "
             + "generating SAS Key for relativePath : " + relativePath
@@ -232,9 +245,12 @@ public class SecureStorageInterfaceImpl extends StorageInterface {
     public CloudBlobWrapper getPageBlobReference(String relativePath)
         throws URISyntaxException, StorageException {
       try {
+        CloudPageBlob blob = new CloudPageBlob(sasKeyGenerator.getRelativeBlobSASUri(
+                storageAccount, getName(), relativePath));
+        blob.getServiceClient().setDefaultRequestOptions(
+                container.getServiceClient().getDefaultRequestOptions());
         return new SASCloudPageBlobWrapperImpl(
-            new CloudPageBlob(sasKeyGenerator.getRelativeBlobSASUri(
-                storageAccount, getName(), relativePath)));
+                blob);
       } catch (SASKeyGenerationException sasEx) {
         String errorMsg = "Encountered SASKeyGeneration exception while "
             + "generating SAS Key for relativePath : " + relativePath

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6bd73c6/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
index 367cd04..491a0d0 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
@@ -60,32 +60,50 @@ import com.microsoft.azure.storage.blob.PageRange;
 @InterfaceAudience.Private
 class StorageInterfaceImpl extends StorageInterface {
   private CloudBlobClient serviceClient;
+  private RetryPolicyFactory retryPolicyFactory;
+  private int timeoutIntervalInMs;
+
+  private void updateRetryPolicy() {
+    if (serviceClient != null && retryPolicyFactory != null) {
+      serviceClient.getDefaultRequestOptions().setRetryPolicyFactory(retryPolicyFactory);
+    }
+  }
+
+  private void updateTimeoutInMs() {
+    if (serviceClient != null && timeoutIntervalInMs > 0) {
+      serviceClient.getDefaultRequestOptions().setTimeoutIntervalInMs(timeoutIntervalInMs);
+    }
+  }
 
   @Override
   public void setRetryPolicyFactory(final RetryPolicyFactory retryPolicyFactory) {
-    serviceClient.getDefaultRequestOptions().setRetryPolicyFactory(
-            retryPolicyFactory);
+    this.retryPolicyFactory = retryPolicyFactory;
+    updateRetryPolicy();
   }
 
   @Override
   public void setTimeoutInMs(int timeoutInMs) {
-    serviceClient.getDefaultRequestOptions().setTimeoutIntervalInMs(
-            timeoutInMs);
+    timeoutIntervalInMs = timeoutInMs;
+    updateTimeoutInMs();
   }
 
   @Override
   public void createBlobClient(CloudStorageAccount account) {
     serviceClient = account.createCloudBlobClient();
+    updateRetryPolicy();
+    updateTimeoutInMs();
   }
 
   @Override
   public void createBlobClient(URI baseUri) {
-    serviceClient = new CloudBlobClient(baseUri);
+    createBlobClient(baseUri, (StorageCredentials)null);
   }
 
   @Override
   public void createBlobClient(URI baseUri, StorageCredentials credentials) {
     serviceClient = new CloudBlobClient(baseUri, credentials);
+    updateRetryPolicy();
+    updateTimeoutInMs();
   }
 
   @Override
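
Sketched from the caller's side, the intended effect is roughly the following; the retry values, URI and container name are examples only, RetryExponentialRetry comes from the Azure storage SDK, and StorageInterfaceImpl is package-private to hadoop-azure, so this is illustrative rather than copy-paste code:

    // accountUri and credentials are assumed to have been set up elsewhere.
    StorageInterface storage = new StorageInterfaceImpl();
    // Order no longer matters: the values are remembered and re-applied whenever
    // a blob client (or a SAS-based container/blob wrapper) is created.
    storage.setRetryPolicyFactory(new RetryExponentialRetry(3000, 5)); // deltaBackoff ms, max attempts
    storage.setTimeoutInMs(90 * 1000);
    storage.createBlobClient(accountUri, credentials);
    CloudBlobContainerWrapper container = storage.getContainerReference("data");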




[37/50] hadoop git commit: HDFS-12010. TestCopyPreserveFlag fails consistently because of mismatch in access time. Contributed by Mukul Kumar Singh.

Posted by st...@apache.org.
HDFS-12010. TestCopyPreserveFlag fails consistently because of mismatch in access time. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ceaac18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ceaac18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ceaac18

Branch: refs/heads/HADOOP-13345
Commit: 8ceaac1877556794bd8ae3573c632f5ca9414472
Parents: 6d116ff
Author: Arpit Agarwal <ar...@apache.org>
Authored: Thu Jun 22 15:42:50 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Thu Jun 22 15:42:50 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java    | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ceaac18/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
index 85e3dda..47dc601 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
@@ -76,10 +76,10 @@ public class TestCopyPreserveFlag {
         output.writeChar('\n');
     }
     output.close();
-    fs.setTimes(FROM, MODIFICATION_TIME, ACCESS_TIME);
     fs.setPermission(FROM, PERMISSIONS);
-    fs.setTimes(DIR_FROM, MODIFICATION_TIME, ACCESS_TIME);
+    fs.setTimes(FROM, MODIFICATION_TIME, ACCESS_TIME);
     fs.setPermission(DIR_FROM, PERMISSIONS);
+    fs.setTimes(DIR_FROM, MODIFICATION_TIME, ACCESS_TIME);
   }
 
   @After




[23/50] hadoop git commit: HADOOP-8143. Change distcp to have -pb on by default. Contributed by Mithun Radhakrishnan

Posted by st...@apache.org.
HADOOP-8143. Change distcp to have -pb on by default. Contributed by Mithun Radhakrishnan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd65eea7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd65eea7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd65eea7

Branch: refs/heads/HADOOP-13345
Commit: dd65eea74b1f9dde858ff34df8111e5340115511
Parents: 8c0769d
Author: Jason Lowe <jl...@yahoo-inc.com>
Authored: Tue Jun 20 09:53:47 2017 -0500
Committer: Jason Lowe <jl...@yahoo-inc.com>
Committed: Tue Jun 20 09:53:47 2017 -0500

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/tools/OptionsParser.java      | 3 +++
 .../src/test/java/org/apache/hadoop/tools/TestOptionsParser.java  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd65eea7/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
index 96fb1d9..2bfaccf 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
@@ -187,6 +187,9 @@ public class OptionsParser {
     if (command.hasOption(DistCpOptionSwitch.PRESERVE_STATUS.getSwitch())) {
       builder.preserve(
           getVal(command, DistCpOptionSwitch.PRESERVE_STATUS.getSwitch()));
+    } else {
+      // No "preserve" settings specified. Preserve block-size.
+      builder.preserve(DistCpOptions.FileAttribute.BLOCKSIZE);
     }
 
     if (command.hasOption(DistCpOptionSwitch.FILE_LIMIT.getSwitch())) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd65eea7/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
index e7fdc51..6928cdf 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
@@ -413,7 +413,7 @@ public class TestOptionsParser {
         "-f",
         "hdfs://localhost:9820/source/first",
         "hdfs://localhost:9820/target/"});
-    Assert.assertFalse(options.shouldPreserve(FileAttribute.BLOCKSIZE));
+    Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.USER));




[48/50] hadoop git commit: HDFS-11943. [Erasure coding] Warn log frequently prints to screen in doEncode/doDecode functions. Contributed by liaoyuxiangqin

Posted by st...@apache.org.
HDFS-11943. [Erasure coding] Warn log frequently prints to screen in doEncode/doDecode functions. Contributed by liaoyuxiangqin


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/379f19a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/379f19a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/379f19a2

Branch: refs/heads/HADOOP-13345
Commit: 379f19a2c768ac3cf668ad28ce6419ca56a01b07
Parents: a594f96
Author: Kai Zheng <ka...@intel.com>
Authored: Mon Jun 26 16:26:09 2017 +0800
Committer: Kai Zheng <ka...@intel.com>
Committed: Mon Jun 26 16:26:09 2017 +0800

----------------------------------------------------------------------
 .../io/erasurecode/rawcoder/AbstractNativeRawDecoder.java    | 8 +++++++-
 .../io/erasurecode/rawcoder/AbstractNativeRawEncoder.java    | 8 +++++++-
 2 files changed, 14 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/379f19a2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java
index 55edb64..d6aa8bd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.io.erasurecode.rawcoder;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
+import org.apache.hadoop.util.PerformanceAdvisory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -66,7 +67,7 @@ abstract class AbstractNativeRawDecoder extends RawErasureDecoder {
 
   @Override
   protected void doDecode(ByteArrayDecodingState decodingState) {
-    LOG.warn("convertToByteBufferState is invoked, " +
+    PerformanceAdvisory.LOG.debug("convertToByteBufferState is invoked, " +
         "not efficiently. Please use direct ByteBuffer inputs/outputs");
 
     ByteBufferDecodingState bbdState = decodingState.convertToByteBufferState();
@@ -78,6 +79,11 @@ abstract class AbstractNativeRawDecoder extends RawErasureDecoder {
     }
   }
 
+  @Override
+  public boolean preferDirectBuffer() {
+    return true;
+  }
+
   // To link with the underlying data structure in the native layer.
   // No get/set as only used by native codes.
   private long nativeCoder;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/379f19a2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawEncoder.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawEncoder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawEncoder.java
index b2c7016..21805fe 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawEncoder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawEncoder.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.io.erasurecode.rawcoder;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
+import org.apache.hadoop.util.PerformanceAdvisory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -63,7 +64,7 @@ abstract class AbstractNativeRawEncoder extends RawErasureEncoder {
 
   @Override
   protected void doEncode(ByteArrayEncodingState encodingState) {
-    LOG.warn("convertToByteBufferState is invoked, " +
+    PerformanceAdvisory.LOG.debug("convertToByteBufferState is invoked, " +
         "not efficiently. Please use direct ByteBuffer inputs/outputs");
 
     ByteBufferEncodingState bbeState = encodingState.convertToByteBufferState();
@@ -75,6 +76,11 @@ abstract class AbstractNativeRawEncoder extends RawErasureEncoder {
     }
   }
 
+  @Override
+  public boolean preferDirectBuffer() {
+    return true;
+  }
+
   // To link with the underlying data structure in the native layer.
   // No get/set as only used by native codes.
   private long nativeCoder;
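
For users of the raw coders, the practical takeaway is to honour preferDirectBuffer() so the native implementations stay on their efficient path; a rough sketch, assuming CodecUtil's createRawEncoder factory and with the codec name, schema and cell size picked arbitrarily:

    ErasureCoderOptions coderOptions = new ErasureCoderOptions(6, 3); // RS(6,3), for example
    RawErasureEncoder encoder = CodecUtil.createRawEncoder(conf, "rs", coderOptions);
    int cellSize = 64 * 1024;
    ByteBuffer[] inputs = new ByteBuffer[coderOptions.getNumDataUnits()];
    for (int i = 0; i < inputs.length; i++) {
      // Direct buffers avoid the convertToByteBufferState() fallback logged above.
      inputs[i] = encoder.preferDirectBuffer()
          ? ByteBuffer.allocateDirect(cellSize) : ByteBuffer.allocate(cellSize);
    }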




[34/50] hadoop git commit: HDFS-11067. DFS#listStatusIterator(..) should throw FileNotFoundException if the directory is deleted before fetching the next batch of entries. Contributed by Vinayakumar B.

Posted by st...@apache.org.
HDFS-11067. DFS#listStatusIterator(..) should throw FileNotFoundException if the directory is deleted before fetching the next batch of entries. Contributed by Vinayakumar B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8dbd53ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8dbd53ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8dbd53ef

Branch: refs/heads/HADOOP-13345
Commit: 8dbd53ef9f34e3e05b159e4f5378e9c2c52c59c5
Parents: b649519
Author: Vinayakumar B <vi...@apache.org>
Authored: Thu Jun 22 17:35:40 2017 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Thu Jun 22 17:37:08 2017 +0530

----------------------------------------------------------------------
 .../src/site/markdown/filesystem/filesystem.md  |  4 +++
 .../main/java/org/apache/hadoop/fs/Hdfs.java    |  2 +-
 .../hadoop/hdfs/DistributedFileSystem.java      |  2 +-
 .../org/apache/hadoop/hdfs/TestFileStatus.java  | 27 ++++++++++++++++++--
 4 files changed, 31 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8dbd53ef/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
index b464941..b56666c 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
@@ -1185,6 +1185,10 @@ on (possibly remote) filesystems. These filesystems are invariably accessed
 concurrently; the state of the filesystem MAY change between a `hasNext()`
 probe and the invocation of the `next()` call.
 
+During iteration through a `RemoteIterator`, if the directory is deleted on
+remote filesystem, then `hasNext()` or `next()` call may throw
+`FileNotFoundException`.
+
 Accordingly, a robust iteration through a `RemoteIterator` would catch and
 discard `NoSuchElementException` exceptions raised during the process, which
 could be done through the `while(true)` iteration example above, or

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8dbd53ef/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java
index 645f1ad..cd870ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java
@@ -232,7 +232,7 @@ public class Hdfs extends AbstractFileSystem {
         thisListing = dfs.listPaths(src, thisListing.getLastName(),
             needLocation);
         if (thisListing == null) {
-          return false; // the directory is deleted
+          throw new FileNotFoundException("File " + src + " does not exist.");
         }
         i = 0;
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8dbd53ef/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 3e09804..f8af4ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -1168,7 +1168,7 @@ public class DistributedFileSystem extends FileSystem {
             needLocation);
         statistics.incrementReadOps(1);
         if (thisListing == null) {
-          return false;
+          throw new FileNotFoundException("File " + p + " does not exist.");
         }
         i = 0;
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8dbd53ef/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
index c74bb63..31007dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
@@ -317,8 +317,31 @@ public class TestFileStatus {
     assertEquals(file3.toString(), itor.next().getPath().toString());
 
     assertFalse(itor.hasNext());
-      
 
-    fs.delete(dir, true);
+    itor = fs.listStatusIterator(dir);
+    assertEquals(dir3.toString(), itor.next().getPath().toString());
+    assertEquals(dir4.toString(), itor.next().getPath().toString());
+    fs.delete(dir.getParent(), true);
+    try {
+      itor.hasNext();
+      fail("FileNotFoundException expected");
+    } catch (FileNotFoundException fnfe) {
+    }
+
+    fs.mkdirs(file2);
+    fs.mkdirs(dir3);
+    fs.mkdirs(dir4);
+    fs.mkdirs(dir5);
+    itor = fs.listStatusIterator(dir);
+    int count = 0;
+    try {
+      fs.delete(dir.getParent(), true);
+      while (itor.next() != null) {
+        count++;
+      }
+      fail("FileNotFoundException expected");
+    } catch (FileNotFoundException fnfe) {
+    }
+    assertEquals(2, count);
   }
 }
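
For callers, a defensive iteration sketch in line with the new wording; the path is a placeholder and the catch block only illustrates the intent:

    RemoteIterator<FileStatus> it = fs.listStatusIterator(new Path("/data/incoming"));
    try {
      while (it.hasNext()) {
        FileStatus status = it.next();
        // ... consume the entry ...
      }
    } catch (FileNotFoundException e) {
      // The directory (or an ancestor) was deleted between batches; treat the
      // listing as incomplete rather than as "no more entries".
    }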




[21/50] hadoop git commit: HADOOP-14533. Size of args cannot be less than zero in TraceAdmin#run as it's a LinkedList. Contributed by Weisen Han.

Posted by st...@apache.org.
HADOOP-14533. Size of args cannot be less than zero in TraceAdmin#run as it's a LinkedList. Contributed by Weisen Han.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e9daa2e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e9daa2e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e9daa2e

Branch: refs/heads/HADOOP-13345
Commit: 2e9daa2e27536484846d9c5f020c6afaf27932ce
Parents: 099dfe9
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Tue Jun 20 20:44:31 2017 +0800
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Tue Jun 20 20:44:31 2017 +0800

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/tracing/TraceAdmin.java  | 2 +-
 .../test/java/org/apache/hadoop/tracing/TestTraceAdmin.java  | 8 ++++++++
 2 files changed, 9 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e9daa2e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java
index 038435c..766fb0a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java
@@ -166,7 +166,7 @@ public class TraceAdmin extends Configured implements Tool {
       System.err.println("You must specify a host with -host.");
       return 1;
     }
-    if (args.size() < 0) {
+    if (args.isEmpty()) {
       System.err.println("You must specify an operation.");
       return 1;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e9daa2e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java
index ad20310..5a4d2a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java
@@ -78,6 +78,14 @@ public class TestTraceAdmin extends SaslDataTransferTestCase {
   }
 
   @Test
+  public void testNoOperator() throws Exception {
+    TraceAdmin trace = new TraceAdmin();
+    trace.setConf(new Configuration());
+    Assert.assertEquals("ret:1, You must specify an operation." + NEWLINE,
+        runTraceCommand(trace, "-host", "127.0.0.1:12346"));
+  }
+
+  @Test
   public void testCreateAndDestroySpanReceiver() throws Exception {
     Configuration conf = new Configuration();
     conf = new Configuration();




[49/50] hadoop git commit: HADOOP-14461 Azure: handle failure gracefully in case of missing account access key. Contributed by Mingliang Liu.

Posted by st...@apache.org.
HADOOP-14461 Azure: handle failure gracefully in case of missing account access key.
Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/48f4a229
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/48f4a229
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/48f4a229

Branch: refs/heads/HADOOP-13345
Commit: 48f4a229a93b65df9e99df2505719dd3538f1188
Parents: 379f19a
Author: Steve Loughran <st...@apache.org>
Authored: Mon Jun 26 13:39:47 2017 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Mon Jun 26 13:39:47 2017 +0100

----------------------------------------------------------------------
 .../fs/azure/AzureNativeFileSystemStore.java    | 42 ++++++++++----------
 .../fs/azure/AzureBlobStorageTestAccount.java   | 37 ++++++++++-------
 .../TestAzureFileSystemErrorConditions.java     | 13 +++---
 ...TestFileSystemOperationExceptionMessage.java | 13 +++---
 4 files changed, 59 insertions(+), 46 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/48f4a229/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index f76b44a..3fa1a62 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -303,6 +303,14 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   private boolean useLocalSasKeyMode = false;
 
   private String delegationToken;
+
+  /** The error message template when container is not accessible. */
+  static final String NO_ACCESS_TO_CONTAINER_MSG = "No credentials found for "
+      + "account %s in the configuration, and its container %s is not "
+      + "accessible using anonymous credentials. Please check if the container "
+      + "exists first. If it is not publicly available, you have to provide "
+      + "account credentials.";
+
   /**
    * A test hook interface that can modify the operation context we use for
    * Azure Storage operations, e.g. to inject errors.
@@ -778,18 +786,17 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
     rootDirectory = container.getDirectoryReference("");
 
     // Check for container existence, and our ability to access it.
+    boolean canAccess;
     try {
-      if (!container.exists(getInstrumentedContext())) {
-        throw new AzureException("Container " + containerName + " in account "
-            + accountName + " not found, and we can't create"
-            + " it using anoynomous credentials, and no credentials found for them"
-            + " in the configuration.");
-      }
+      canAccess = container.exists(getInstrumentedContext());
     } catch (StorageException ex) {
-      throw new AzureException("Unable to access container " + containerName
-          + " in account " + accountName
-          + " using anonymous credentials, and no credentials found for them "
-          + " in the configuration.", ex);
+      LOG.error("Service returned StorageException when checking existence "
+          + "of container {} in account {}", containerName, accountName, ex);
+      canAccess = false;
+    }
+    if (!canAccess) {
+      throw new AzureException(String.format(NO_ACCESS_TO_CONTAINER_MSG,
+          accountName, containerName));
     }
 
     // Accessing the storage server unauthenticated using
@@ -999,22 +1006,17 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
       // Check whether the account is configured with an account key.
       propertyValue = getAccountKeyFromConfiguration(accountName,
           sessionConfiguration);
-      if (propertyValue != null) {
-
+      if (StringUtils.isNotEmpty(propertyValue)) {
         // Account key was found.
         // Create the Azure storage session using the account key and container.
         connectUsingConnectionStringCredentials(
             getAccountFromAuthority(sessionUri),
             getContainerFromAuthority(sessionUri), propertyValue);
-
-        // Return to caller
-        return;
+      } else {
+        LOG.debug("The account access key is not configured for {}. "
+            + "Now try anonymous access.", sessionUri);
+        connectUsingAnonymousCredentials(sessionUri);
       }
-
-      // The account access is not configured for this cluster. Try anonymous
-      // access.
-      connectUsingAnonymousCredentials(sessionUri);
-
     } catch (Exception e) {
       // Caught exception while attempting to initialize the Azure File
       // System store, re-throw the exception.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48f4a229/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
index 5f66fd2f..b6c252f 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AzureBlobStorageTestAccount.java
@@ -22,6 +22,9 @@ import com.microsoft.azure.storage.*;
 import com.microsoft.azure.storage.blob.*;
 import com.microsoft.azure.storage.core.Base64;
 import org.apache.commons.configuration2.SubsetConfiguration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.azure.metrics.AzureFileSystemInstrumentation;
@@ -46,6 +49,8 @@ import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_USE_LOCA
  * for instructions on how to connect to a real Azure Storage account.
  */
 public final class AzureBlobStorageTestAccount {
+  private static final Logger LOG = LoggerFactory.getLogger(
+      AzureBlobStorageTestAccount.class);
 
   private static final String ACCOUNT_KEY_PROPERTY_NAME = "fs.azure.account.key.";
   private static final String SAS_PROPERTY_NAME = "fs.azure.sas.";
@@ -299,10 +304,9 @@ public final class AzureBlobStorageTestAccount {
     Configuration conf = createTestConfiguration();
     if (!conf.getBoolean(USE_EMULATOR_PROPERTY_NAME, false)) {
       // Not configured to test against the storage emulator.
-      System.out
-        .println("Skipping emulator Azure test because configuration " +
-            "doesn't indicate that it's running." +
-            " Please see RunningLiveWasbTests.txt for guidance.");
+      LOG.warn("Skipping emulator Azure test because configuration doesn't "
+          + "indicate that it's running. Please see RunningLiveWasbTests.txt "
+          + "for guidance.");
       return null;
     }
     CloudStorageAccount account =
@@ -456,18 +460,22 @@ public final class AzureBlobStorageTestAccount {
       KeyProviderException {
     String accountKey = AzureNativeFileSystemStore
         .getAccountKeyFromConfiguration(accountName, conf);
-    StorageCredentials credentials;
-    if (accountKey == null && allowAnonymous) {
-      credentials = StorageCredentialsAnonymous.ANONYMOUS;
+    final StorageCredentials credentials;
+    if (accountKey == null) {
+      if (allowAnonymous) {
+        credentials = StorageCredentialsAnonymous.ANONYMOUS;
+      } else {
+        LOG.warn("Skipping live Azure test because of missing key for"
+            + " account '" + accountName + "'. "
+            + "Please see RunningLiveWasbTests.txt for guidance.");
+        return null;
+      }
     } else {
       credentials = new StorageCredentialsAccountAndKey(
           accountName.split("\\.")[0], accountKey);
     }
-    if (credentials == null) {
-      return null;
-    } else {
-      return new CloudStorageAccount(credentials);
-    }
+
+    return new CloudStorageAccount(credentials);
   }
 
   public static Configuration createTestConfiguration() {
@@ -493,9 +501,8 @@ public final class AzureBlobStorageTestAccount {
       throws URISyntaxException, KeyProviderException {
     String testAccountName = conf.get(TEST_ACCOUNT_NAME_PROPERTY_NAME);
     if (testAccountName == null) {
-      System.out
-        .println("Skipping live Azure test because of missing test account." +
-                 " Please see RunningLiveWasbTests.txt for guidance.");
+      LOG.warn("Skipping live Azure test because of missing test account. "
+          + "Please see RunningLiveWasbTests.txt for guidance.");
       return null;
     }
     return createStorageAccount(testAccountName, conf, false);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48f4a229/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java
index 810bcf7..c985224 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestAzureFileSystemErrorConditions.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.fs.azure;
 
+import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.NO_ACCESS_TO_CONTAINER_MSG;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -35,6 +36,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.TestHookOperationContext;
+import org.apache.hadoop.test.GenericTestUtils;
+
 import org.junit.Test;
 
 import com.microsoft.azure.storage.OperationContext;
@@ -64,18 +67,18 @@ public class TestAzureFileSystemErrorConditions {
    */
   @Test
   public void testAccessUnauthorizedPublicContainer() throws Exception {
+    final String container = "nonExistentContainer";
+    final String account = "hopefullyNonExistentAccount";
     Path noAccessPath = new Path(
-        "wasb://nonExistentContainer@hopefullyNonExistentAccount/someFile");
+        "wasb://" + container + "@" + account + "/someFile");
     NativeAzureFileSystem.suppressRetryPolicy();
     try {
       FileSystem.get(noAccessPath.toUri(), new Configuration())
         .open(noAccessPath);
       assertTrue("Should've thrown.", false);
     } catch (AzureException ex) {
-      assertTrue("Unexpected message in exception " + ex,
-          ex.getMessage().contains(
-          "Unable to access container nonExistentContainer in account" +
-          " hopefullyNonExistentAccount"));
+      GenericTestUtils.assertExceptionContains(
+          String.format(NO_ACCESS_TO_CONTAINER_MSG, account, container), ex);
     } finally {
       NativeAzureFileSystem.resumeRetryPolicy();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48f4a229/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionMessage.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionMessage.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionMessage.java
index 57920a4..e619817 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionMessage.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionMessage.java
@@ -21,9 +21,13 @@ import java.net.URI;
 import java.util.UUID;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.test.GenericTestUtils;
+
 import org.junit.Assert;
 import org.junit.Test;
 
+import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.NO_ACCESS_TO_CONTAINER_MSG;
+
 
 public class TestFileSystemOperationExceptionMessage extends
   NativeAzureFileSystemBaseTest {
@@ -41,11 +45,6 @@ public class TestFileSystemOperationExceptionMessage extends
     String wasbUri = String.format("wasb://%s@%s",
         testContainer, testStorageAccount);
 
-    String expectedErrorMessage =
-        String.format("Container %s in account %s not found, and we can't create it "
-            + "using anoynomous credentials, and no credentials found for "
-            + "them in the configuration.", testContainer, testStorageAccount);
-
     fs = new NativeAzureFileSystem();
     try {
       fs.initialize(new URI(wasbUri), conf);
@@ -63,7 +62,9 @@ public class TestFileSystemOperationExceptionMessage extends
             || exceptionMessage.length() == 0) {
           Assert.fail();}
         else {
-          Assert.assertTrue(exceptionMessage.equals(expectedErrorMessage));
+          GenericTestUtils.assertExceptionContains(String.format(
+              NO_ACCESS_TO_CONTAINER_MSG, testStorageAccount, testContainer),
+              ex);
         }
       } else {
         Assert.fail();




[20/50] hadoop git commit: YARN-6713. Fix dead link in the Javadoc of FairSchedulerEventLog.java. Contributed by Weiwei Yang.

Posted by st...@apache.org.
YARN-6713. Fix dead link in the Javadoc of FairSchedulerEventLog.java. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/099dfe92
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/099dfe92
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/099dfe92

Branch: refs/heads/HADOOP-13345
Commit: 099dfe92a9743bcb7e27506f8877b8b23b0ff95a
Parents: 2c76916
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Jun 20 17:20:27 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Jun 20 17:20:27 2017 +0900

----------------------------------------------------------------------
 .../resourcemanager/scheduler/fair/FairSchedulerEventLog.java    | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/099dfe92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerEventLog.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerEventLog.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerEventLog.java
index 05ba330..803ce78 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerEventLog.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerEventLog.java
@@ -48,8 +48,8 @@ import org.apache.log4j.spi.LoggingEvent;
  * JobTracker's main log.
  *
  * Constructing this class creates a disabled log. It must be initialized
- * using {@link FairSchedulerEventLog#init(Configuration, String)} to begin
- * writing to the file.
+ * using {@link FairSchedulerEventLog#init(FairSchedulerConfiguration)} to
+ * begin writing to the file.
  */
 @Private
 @Unstable




[07/50] hadoop git commit: HADOOP-14395. Provide Builder pattern for DistributedFileSystem.append. Contributed by Lei (Eddy) Xu.

Posted by st...@apache.org.
HADOOP-14395. Provide Builder pattern for DistributedFileSystem.append. Contributed by Lei (Eddy) Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6460df21
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6460df21
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6460df21

Branch: refs/heads/HADOOP-13345
Commit: 6460df21a09a7fcc29eceb8dc3859d6298da6882
Parents: 82bbcbf
Author: Lei Xu <le...@apache.org>
Authored: Fri Jun 16 17:24:00 2017 -0700
Committer: Lei Xu <le...@apache.org>
Committed: Fri Jun 16 17:24:00 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/FileSystem.java   | 10 +++++
 .../hadoop/hdfs/DistributedFileSystem.java      | 41 ++++++++++++++-----
 .../hadoop/hdfs/TestDistributedFileSystem.java  | 43 +++++++++++++++++++-
 3 files changed, 82 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6460df21/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index cc92f31..d7cd7dd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -4179,4 +4179,14 @@ public abstract class FileSystem extends Configured implements Closeable {
     return new FileSystemDataOutputStreamBuilder(this, path)
         .create().overwrite(true);
   }
+
+  /**
+   * Create a Builder to append a file.
+   * @param path file path.
+   * @return a {@link FSDataOutputStreamBuilder} to build file append request.
+   */
+  @InterfaceAudience.Private
+  protected FSDataOutputStreamBuilder appendFile(Path path) {
+    return new FileSystemDataOutputStreamBuilder(this, path).append();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6460df21/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 1fd8f79..1a9ae48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -30,6 +30,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -2734,7 +2735,7 @@ public class DistributedFileSystem extends FileSystem {
    */
   public static final class HdfsDataOutputStreamBuilder
       extends FSDataOutputStreamBuilder<
-      HdfsDataOutputStream, HdfsDataOutputStreamBuilder> {
+      FSDataOutputStream, HdfsDataOutputStreamBuilder> {
     private final DistributedFileSystem dfs;
     private InetSocketAddress[] favoredNodes = null;
     private String ecPolicyName = null;
@@ -2857,17 +2858,24 @@ public class DistributedFileSystem extends FileSystem {
      * @throws IOException on I/O errors.
      */
     @Override
-    public HdfsDataOutputStream build() throws IOException {
-      if (isRecursive()) {
-        return dfs.create(getPath(), getPermission(), getFlags(),
-            getBufferSize(), getReplication(), getBlockSize(),
-            getProgress(), getChecksumOpt(), getFavoredNodes(),
-            getEcPolicyName());
-      } else {
-        return dfs.createNonRecursive(getPath(), getPermission(), getFlags(),
-            getBufferSize(), getReplication(), getBlockSize(), getProgress(),
-            getChecksumOpt(), getFavoredNodes(), getEcPolicyName());
+    public FSDataOutputStream build() throws IOException {
+      if (getFlags().contains(CreateFlag.CREATE)) {
+        if (isRecursive()) {
+          return dfs.create(getPath(), getPermission(), getFlags(),
+              getBufferSize(), getReplication(), getBlockSize(),
+              getProgress(), getChecksumOpt(), getFavoredNodes(),
+              getEcPolicyName());
+        } else {
+          return dfs.createNonRecursive(getPath(), getPermission(), getFlags(),
+              getBufferSize(), getReplication(), getBlockSize(), getProgress(),
+              getChecksumOpt(), getFavoredNodes(), getEcPolicyName());
+        }
+      } else if (getFlags().contains(CreateFlag.APPEND)) {
+        return dfs.append(getPath(), getFlags(), getBufferSize(), getProgress(),
+            getFavoredNodes());
       }
+      throw new HadoopIllegalArgumentException(
+          "Must specify either create or append");
     }
   }
 
@@ -2896,4 +2904,15 @@ public class DistributedFileSystem extends FileSystem {
   public RemoteIterator<OpenFileEntry> listOpenFiles() throws IOException {
     return dfs.listOpenFiles();
   }
+
+  /**
+   * Create a {@link HdfsDataOutputStreamBuilder} to append a file on DFS.
+   *
+   * @param path file path.
+   * @return A {@link HdfsDataOutputStreamBuilder} for appending a file.
+   */
+  @Override
+  public HdfsDataOutputStreamBuilder appendFile(Path path) {
+    return new HdfsDataOutputStreamBuilder(this, path).append();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6460df21/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 9857735..4479419 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -50,6 +50,7 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.atomic.AtomicReference;
 
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -1445,7 +1446,7 @@ public class TestDistributedFileSystem {
   }
 
   @Test
-  public void testDFSDataOutputStreamBuilder() throws Exception {
+  public void testDFSDataOutputStreamBuilderForCreation() throws Exception {
     Configuration conf = getTestConfiguration();
     String testFile = "/testDFSDataOutputStreamBuilder";
     Path testFilePath = new Path(testFile);
@@ -1453,6 +1454,11 @@ public class TestDistributedFileSystem {
         .numDataNodes(1).build()) {
       DistributedFileSystem fs = cluster.getFileSystem();
 
+      // Before calling build(), no change was made in the file system
+      HdfsDataOutputStreamBuilder builder = fs.createFile(testFilePath)
+          .blockSize(4096).replication((short)1);
+      assertFalse(fs.exists(testFilePath));
+
       // Test create an empty file
       try (FSDataOutputStream out =
                fs.createFile(testFilePath).build()) {
@@ -1497,4 +1503,39 @@ public class TestDistributedFileSystem {
           fs.exists(new Path("/parent")));
     }
   }
+
+  @Test
+  public void testDFSDataOutputStreamBuilderForAppend() throws IOException {
+    Configuration conf = getTestConfiguration();
+    String testFile = "/testDFSDataOutputStreamBuilderForAppend";
+    Path path = new Path(testFile);
+    Random random = new Random();
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(1).build()) {
+      DistributedFileSystem fs = cluster.getFileSystem();
+
+      byte[] buf = new byte[16];
+      random.nextBytes(buf);
+
+      try (FSDataOutputStream out = fs.appendFile(path).build()) {
+        out.write(buf);
+        fail("should fail on appending to non-existent file");
+      } catch (IOException e) {
+        GenericTestUtils.assertExceptionContains("non-existent", e);
+      }
+
+      random.nextBytes(buf);
+      try (FSDataOutputStream out = fs.createFile(path).build()) {
+        out.write(buf);
+      }
+
+      random.nextBytes(buf);
+      try (FSDataOutputStream out = fs.appendFile(path).build()) {
+        out.write(buf);
+      }
+
+      FileStatus status = fs.getFileStatus(path);
+      assertEquals(16 * 2, status.getLen());
+    }
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[38/50] hadoop git commit: HADOOP-12940. Fix warnings from Spotbugs in hadoop-common.

Posted by st...@apache.org.
HADOOP-12940. Fix warnings from Spotbugs in hadoop-common.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/092ebdf8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/092ebdf8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/092ebdf8

Branch: refs/heads/HADOOP-13345
Commit: 092ebdf885468a2bf79cbfb168286b7cddc4a0db
Parents: 8ceaac1
Author: Akira Ajisaka <aa...@apache.org>
Authored: Fri Jun 23 10:28:58 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Fri Jun 23 10:28:58 2017 +0900

----------------------------------------------------------------------
 .../MultiSchemeAuthenticationHandler.java       | 11 +++++----
 .../dev-support/findbugsExcludeFile.xml         | 26 ++++++++++++++++++++
 .../java/org/apache/hadoop/fs/FileUtil.java     |  6 +++--
 .../apache/hadoop/fs/RawLocalFileSystem.java    | 17 +++++++------
 .../hadoop/fs/shell/CommandWithDestination.java |  2 +-
 .../org/apache/hadoop/io/DoubleWritable.java    |  4 +--
 .../org/apache/hadoop/io/FloatWritable.java     |  6 ++---
 .../main/java/org/apache/hadoop/io/IOUtils.java |  9 ++++---
 .../apache/hadoop/io/erasurecode/ECSchema.java  |  4 +--
 .../org/apache/hadoop/io/file/tfile/Utils.java  |  2 +-
 .../ZKDelegationTokenSecretManager.java         |  8 ++----
 .../org/apache/hadoop/util/SysInfoWindows.java  |  6 ++---
 .../java/org/apache/hadoop/minikdc/MiniKdc.java |  7 ++++--
 13 files changed, 70 insertions(+), 38 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/092ebdf8/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/MultiSchemeAuthenticationHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/MultiSchemeAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/MultiSchemeAuthenticationHandler.java
index aa49100..58a0adb 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/MultiSchemeAuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/MultiSchemeAuthenticationHandler.java
@@ -186,11 +186,12 @@ public class MultiSchemeAuthenticationHandler implements
     String authorization =
         request.getHeader(HttpConstants.AUTHORIZATION_HEADER);
     if (authorization != null) {
-      for (String scheme : schemeToAuthHandlerMapping.keySet()) {
-        if (AuthenticationHandlerUtil.matchAuthScheme(scheme, authorization)) {
-          AuthenticationHandler handler =
-              schemeToAuthHandlerMapping.get(scheme);
-          AuthenticationToken token = handler.authenticate(request, response);
+      for (Map.Entry<String, AuthenticationHandler> entry :
+          schemeToAuthHandlerMapping.entrySet()) {
+        if (AuthenticationHandlerUtil.matchAuthScheme(
+            entry.getKey(), authorization)) {
+          AuthenticationToken token =
+              entry.getValue().authenticate(request, response);
           logger.trace("Token generated with type {}", token.getType());
           return token;
         }
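
The pattern being replaced is the keySet-then-get iteration that Spotbugs
reports as an inefficient map walk (WMI_WRONG_MAP_ITERATOR), since every pass
pays for a second lookup. A compact before/after sketch using the same map as
above:

    // flagged: iterate keys, then look each value up again
    for (String scheme : schemeToAuthHandlerMapping.keySet()) {
      AuthenticationHandler handler = schemeToAuthHandlerMapping.get(scheme);
      // ... use scheme and handler
    }

    // preferred: iterate entries and read key and value together
    for (Map.Entry<String, AuthenticationHandler> e
        : schemeToAuthHandlerMapping.entrySet()) {
      AuthenticationHandler handler = e.getValue();
      // ... use e.getKey() and handler
    }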

http://git-wip-us.apache.org/repos/asf/hadoop/blob/092ebdf8/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
index cdd88f3..de76afb 100644
--- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
@@ -416,4 +416,30 @@
     <Method name="toString"/>
     <Bug pattern="DM_DEFAULT_ENCODING"/>
   </Match>
+
+  <!-- We need to make the methods public because PBHelperClient calls them. -->
+  <Match>
+    <Class name="org.apache.hadoop.crypto.CipherSuite"/>
+    <Method name="setUnknownValue"/>
+    <Bug pattern="ME_ENUM_FIELD_SETTER"/>
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.crypto.CryptoProtocolVersion"/>
+    <Method name="setUnknownValue"/>
+    <Bug pattern="ME_ENUM_FIELD_SETTER"/>
+  </Match>
+
+  <!-- We need to make the method public for testing. -->
+  <Match>
+    <Class name="org.apache.hadoop.metrics2.lib.DefaultMetricsSystem"/>
+    <Method name="setMiniClusterMode"/>
+    <Bug pattern="ME_ENUM_FIELD_SETTER"/>
+  </Match>
+
+  <!-- Experimental interface. Ignore. -->
+  <Match>
+    <Class name="org.apache.hadoop.metrics2.lib.DefaultMetricsFactory"/>
+    <Method name="setInstance"/>
+    <Bug pattern="ME_ENUM_FIELD_SETTER"/>
+  </Match>
 </FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/092ebdf8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 0b27a7e..b656a87 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -115,8 +115,10 @@ public class FileUtil {
     file.deleteOnExit();
     if (file.isDirectory()) {
       File[] files = file.listFiles();
-      for (File child : files) {
-        fullyDeleteOnExit(child);
+      if (files != null) {
+        for (File child : files) {
+          fullyDeleteOnExit(child);
+        }
       }
     }
   }
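
Both this fix and the RawLocalFileSystem change below guard the same trap:
java.io.File.listFiles() and File.list() return null, not an empty array, when
the path is not a directory or an I/O error occurs, so the unguarded loop is a
latent NullPointerException that Spotbugs flags.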

http://git-wip-us.apache.org/repos/asf/hadoop/blob/092ebdf8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index e4e0659..721d0c0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -384,13 +384,16 @@ public class RawLocalFileSystem extends FileSystem {
     // again.
     try {
       FileStatus sdst = this.getFileStatus(dst);
-      if (sdst.isDirectory() && dstFile.list().length == 0) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Deleting empty destination and renaming " + src + " to " +
-              dst);
-        }
-        if (this.delete(dst, false) && srcFile.renameTo(dstFile)) {
-          return true;
+      String[] dstFileList = dstFile.list();
+      if (dstFileList != null) {
+        if (sdst.isDirectory() && dstFileList.length == 0) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Deleting empty destination and renaming " + src +
+                " to " + dst);
+          }
+          if (this.delete(dst, false) && srcFile.renameTo(dstFile)) {
+            return true;
+          }
         }
       }
     } catch (FileNotFoundException ignored) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/092ebdf8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
index 578d22b..2a483c0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
@@ -501,7 +501,7 @@ abstract class CommandWithDestination extends FsCommand {
                         createFlags,
                         getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
                             IO_FILE_BUFFER_SIZE_DEFAULT),
-                        lazyPersist ? 1 : getDefaultReplication(item.path),
+                        (short) 1,
                         getDefaultBlockSize(),
                         null,
                         null);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/092ebdf8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DoubleWritable.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DoubleWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DoubleWritable.java
index 5cc326f..f45ed0a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DoubleWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DoubleWritable.java
@@ -75,7 +75,7 @@ public class DoubleWritable implements WritableComparable<DoubleWritable> {
   
   @Override
   public int compareTo(DoubleWritable o) {
-    return (value < o.value ? -1 : (value == o.value ? 0 : 1));
+    return Double.compare(value, o.value);
   }
   
   @Override
@@ -94,7 +94,7 @@ public class DoubleWritable implements WritableComparable<DoubleWritable> {
                        byte[] b2, int s2, int l2) {
       double thisValue = readDouble(b1, s1);
       double thatValue = readDouble(b2, s2);
-      return (thisValue < thatValue ? -1 : (thisValue == thatValue ? 0 : 1));
+      return Double.compare(thisValue, thatValue);
     }
   }
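
Beyond brevity, the hand-rolled ternary breaks the compareTo contract for NaN,
which is why the compare-method form is preferred (the FloatWritable change
below has the same motivation). A small illustration with hypothetical values:

    double a = Double.NaN;
    double b = 1.0;
    // old form: both orderings claim "greater", so sgn(cmp(a,b)) != -sgn(cmp(b,a))
    int oldAB = (a < b ? -1 : (a == b ? 0 : 1));   // 1
    int oldBA = (b < a ? -1 : (b == a ? 0 : 1));   // 1
    // Double.compare defines a total order and places NaN after all other values
    int newAB = Double.compare(a, b);              // 1
    int newBA = Double.compare(b, a);              // -1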
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/092ebdf8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FloatWritable.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FloatWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FloatWritable.java
index 21e4cc4..367fc94 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FloatWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FloatWritable.java
@@ -66,9 +66,7 @@ public class FloatWritable implements WritableComparable<FloatWritable> {
   /** Compares two FloatWritables. */
   @Override
   public int compareTo(FloatWritable o) {
-    float thisValue = this.value;
-    float thatValue = o.value;
-    return (thisValue<thatValue ? -1 : (thisValue==thatValue ? 0 : 1));
+    return Float.compare(value, o.value);
   }
 
   @Override
@@ -86,7 +84,7 @@ public class FloatWritable implements WritableComparable<FloatWritable> {
                        byte[] b2, int s2, int l2) {
       float thisValue = readFloat(b1, s1);
       float thatValue = readFloat(b2, s2);
-      return (thisValue<thatValue ? -1 : (thisValue==thatValue ? 0 : 1));
+      return Float.compare(thisValue, thatValue);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/092ebdf8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index 459014b..e24f196 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -371,9 +371,12 @@ public class IOUtils {
     try (DirectoryStream<Path> stream =
              Files.newDirectoryStream(dir.toPath())) {
       for (Path entry: stream) {
-        String fileName = entry.getFileName().toString();
-        if ((filter == null) || filter.accept(dir, fileName)) {
-          list.add(fileName);
+        Path fileName = entry.getFileName();
+        if (fileName != null) {
+          String fileNameStr = fileName.toString();
+          if ((filter == null) || filter.accept(dir, fileNameStr)) {
+            list.add(fileNameStr);
+          }
         }
       }
     } catch (DirectoryIteratorException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/092ebdf8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index e55fbdd..4d66019 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -189,8 +189,8 @@ public final class ECSchema {
     sb.append((extraOptions.isEmpty() ? "" : ", "));
 
     int i = 0;
-    for (String opt : extraOptions.keySet()) {
-      sb.append(opt + "=" + extraOptions.get(opt) +
+    for (Map.Entry<String, String> entry : extraOptions.entrySet()) {
+      sb.append(entry.getKey() + "=" + entry.getValue() +
           (++i < extraOptions.size() ? ", " : ""));
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/092ebdf8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Utils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Utils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Utils.java
index 5743c66..8cb6e0d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Utils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Utils.java
@@ -395,7 +395,7 @@ public final class Utils {
 
     @Override
     public int hashCode() {
-      return (major << 16 + minor);
+      return (major << 16) + minor;
     }
   }
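
This one is an operator-precedence bug rather than a style fix: "+" binds more
tightly than "<<", so the original expression parses as major << (16 + minor).
A quick check with hypothetical version numbers:

    int major = 2, minor = 3;
    int buggy = major << 16 + minor;    // parses as 2 << 19  ->  1048576
    int fixed = (major << 16) + minor;  // (2 << 16) + 3      ->  131075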
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/092ebdf8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
index 4a7ddb2..88bd29b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
@@ -881,11 +881,9 @@ public abstract class ZKDelegationTokenSecretManager<TokenIdent extends Abstract
     String nodeCreatePath =
         getNodePath(ZK_DTSM_TOKENS_ROOT, DELEGATION_TOKEN_PREFIX
             + ident.getSequenceNumber());
-    ByteArrayOutputStream tokenOs = new ByteArrayOutputStream();
-    DataOutputStream tokenOut = new DataOutputStream(tokenOs);
-    ByteArrayOutputStream seqOs = new ByteArrayOutputStream();
 
-    try {
+    try (ByteArrayOutputStream tokenOs = new ByteArrayOutputStream();
+         DataOutputStream tokenOut = new DataOutputStream(tokenOs)) {
       ident.write(tokenOut);
       tokenOut.writeLong(info.getRenewDate());
       tokenOut.writeInt(info.getPassword().length);
@@ -902,8 +900,6 @@ public abstract class ZKDelegationTokenSecretManager<TokenIdent extends Abstract
         zkClient.create().withMode(CreateMode.PERSISTENT)
             .forPath(nodeCreatePath, tokenOs.toByteArray());
       }
-    } finally {
-      seqOs.close();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/092ebdf8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
index 8b4ea54..bce2d6d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
@@ -169,7 +169,7 @@ public class SysInfoWindows extends SysInfo {
 
   /** {@inheritDoc} */
   @Override
-  public int getNumProcessors() {
+  public synchronized int getNumProcessors() {
     refreshIfNeeded();
     return numProcessors;
   }
@@ -196,7 +196,7 @@ public class SysInfoWindows extends SysInfo {
 
   /** {@inheritDoc} */
   @Override
-  public float getCpuUsagePercentage() {
+  public synchronized float getCpuUsagePercentage() {
     refreshIfNeeded();
     float ret = cpuUsage;
     if (ret != -1) {
@@ -207,7 +207,7 @@ public class SysInfoWindows extends SysInfo {
 
   /** {@inheritDoc} */
   @Override
-  public float getNumVCoresUsed() {
+  public synchronized float getNumVCoresUsed() {
     refreshIfNeeded();
     float ret = cpuUsage;
     if (ret != -1) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/092ebdf8/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java b/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
index fe194c0..6f91018 100644
--- a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
+++ b/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
@@ -365,8 +365,11 @@ public class MiniKdc {
         LOG.warn("WARNING: cannot delete file " + f.getAbsolutePath());
       }
     } else {
-      for (File c: f.listFiles()) {
-        delete(c);
+      File[] fileList = f.listFiles();
+      if (fileList != null) {
+        for (File c : fileList) {
+          delete(c);
+        }
       }
       if (! f.delete()) {
         LOG.warn("WARNING: cannot delete directory " + f.getAbsolutePath());




[27/50] hadoop git commit: HDFS-11992. Replace commons-logging APIs with slf4j in FsDatasetImpl. Contributed by hu xiaodong.

Posted by st...@apache.org.
HDFS-11992. Replace commons-logging APIs with slf4j in FsDatasetImpl. Contributed by hu xiaodong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1a598479
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1a598479
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1a598479

Branch: refs/heads/HADOOP-13345
Commit: 1a598479a9faec787706bcf924dfbd88a80e1b82
Parents: 5157f6c
Author: Akira Ajisaka <aa...@apache.org>
Authored: Wed Jun 21 11:19:48 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Wed Jun 21 11:19:48 2017 +0900

----------------------------------------------------------------------
 .../java/org/apache/hadoop/util/InstrumentedLock.java   | 12 ++++++------
 .../org/apache/hadoop/util/InstrumentedReadLock.java    |  6 +++---
 .../apache/hadoop/util/InstrumentedReadWriteLock.java   |  4 ++--
 .../org/apache/hadoop/util/InstrumentedWriteLock.java   |  6 +++---
 .../org/apache/hadoop/util/TestInstrumentedLock.java    |  6 +++---
 .../hadoop/util/TestInstrumentedReadWriteLock.java      |  7 ++++---
 .../server/datanode/fsdataset/impl/BlockPoolSlice.java  |  3 ++-
 .../server/datanode/fsdataset/impl/FsDatasetImpl.java   |  6 +++---
 8 files changed, 26 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a598479/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java
index 0520271..2c1f591 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java
@@ -25,9 +25,9 @@ import java.util.concurrent.locks.ReentrantLock;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.commons.logging.Log;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
 
 /**
  * This is a debugging class that can be used by callers to track
@@ -44,7 +44,7 @@ import com.google.common.annotations.VisibleForTesting;
 public class InstrumentedLock implements Lock {
 
   private final Lock lock;
-  private final Log logger;
+  private final Logger logger;
   private final String name;
   private final Timer clock;
 
@@ -70,20 +70,20 @@ public class InstrumentedLock implements Lock {
    * @param lockWarningThresholdMs the time threshold to view lock held
    *                               time as being "too long"
    */
-  public InstrumentedLock(String name, Log logger, long minLoggingGapMs,
-      long lockWarningThresholdMs) {
+  public InstrumentedLock(String name, Logger logger, long minLoggingGapMs,
+                          long lockWarningThresholdMs) {
     this(name, logger, new ReentrantLock(),
         minLoggingGapMs, lockWarningThresholdMs);
   }
 
-  public InstrumentedLock(String name, Log logger, Lock lock,
+  public InstrumentedLock(String name, Logger logger, Lock lock,
       long minLoggingGapMs, long lockWarningThresholdMs) {
     this(name, logger, lock,
         minLoggingGapMs, lockWarningThresholdMs, new Timer());
   }
 
   @VisibleForTesting
-  InstrumentedLock(String name, Log logger, Lock lock,
+  InstrumentedLock(String name, Logger logger, Lock lock,
       long minLoggingGapMs, long lockWarningThresholdMs, Timer clock) {
     this.name = name;
     this.lock = lock;
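
With the signature change, callers pass an slf4j Logger straight through; a
minimal construction sketch (the lock name and thresholds are illustrative,
not values from this patch):

    Logger log = LoggerFactory.getLogger(FsDatasetImpl.class);
    // warn when the lock is held longer than 10 s, at most once every 300 ms
    InstrumentedLock datasetLock =
        new InstrumentedLock("FsDatasetLock", log, 300, 10000);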

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a598479/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadLock.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadLock.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadLock.java
index 09fd43e..e115718 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadLock.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadLock.java
@@ -19,11 +19,11 @@ package org.apache.hadoop.util;
 
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
 
 /**
  * This is a wrap class of a <tt>ReadLock</tt>.
@@ -51,7 +51,7 @@ public class InstrumentedReadLock extends InstrumentedLock {
     };
   };
 
-  public InstrumentedReadLock(String name, Log logger,
+  public InstrumentedReadLock(String name, Logger logger,
       ReentrantReadWriteLock readWriteLock,
       long minLoggingGapMs, long lockWarningThresholdMs) {
     this(name, logger, readWriteLock, minLoggingGapMs, lockWarningThresholdMs,
@@ -59,7 +59,7 @@ public class InstrumentedReadLock extends InstrumentedLock {
   }
 
   @VisibleForTesting
-  InstrumentedReadLock(String name, Log logger,
+  InstrumentedReadLock(String name, Logger logger,
       ReentrantReadWriteLock readWriteLock,
       long minLoggingGapMs, long lockWarningThresholdMs, Timer clock) {
     super(name, logger, readWriteLock.readLock(), minLoggingGapMs,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a598479/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadWriteLock.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadWriteLock.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadWriteLock.java
index 62e6b09..a410524 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadWriteLock.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadWriteLock.java
@@ -21,9 +21,9 @@ import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.slf4j.Logger;
 
 /**
  * This is a wrap class of a {@link ReentrantReadWriteLock}.
@@ -37,7 +37,7 @@ public class InstrumentedReadWriteLock implements ReadWriteLock {
   private final Lock readLock;
   private final Lock writeLock;
 
-  InstrumentedReadWriteLock(boolean fair, String name, Log logger,
+  InstrumentedReadWriteLock(boolean fair, String name, Logger logger,
       long minLoggingGapMs, long lockWarningThresholdMs) {
     ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock(fair);
     readLock = new InstrumentedReadLock(name, logger, readWriteLock,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a598479/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedWriteLock.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedWriteLock.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedWriteLock.java
index 9208c1b..6842166 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedWriteLock.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedWriteLock.java
@@ -19,11 +19,11 @@ package org.apache.hadoop.util;
 
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
 
 /**
  * This is a wrap class of a <tt>WriteLock</tt>.
@@ -37,7 +37,7 @@ import com.google.common.annotations.VisibleForTesting;
 @InterfaceStability.Unstable
 public class InstrumentedWriteLock extends InstrumentedLock {
 
-  public InstrumentedWriteLock(String name, Log logger,
+  public InstrumentedWriteLock(String name, Logger logger,
       ReentrantReadWriteLock readWriteLock,
       long minLoggingGapMs, long lockWarningThresholdMs) {
     this(name, logger, readWriteLock, minLoggingGapMs, lockWarningThresholdMs,
@@ -45,7 +45,7 @@ public class InstrumentedWriteLock extends InstrumentedLock {
   }
 
   @VisibleForTesting
-  InstrumentedWriteLock(String name, Log logger,
+  InstrumentedWriteLock(String name, Logger logger,
       ReentrantReadWriteLock readWriteLock,
       long minLoggingGapMs, long lockWarningThresholdMs, Timer clock) {
     super(name, logger, readWriteLock.writeLock(), minLoggingGapMs,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a598479/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java
index d3f6912..44158ec 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java
@@ -21,8 +21,8 @@ import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.locks.Lock;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TestName;
@@ -34,7 +34,7 @@ import static org.junit.Assert.*;
  */
 public class TestInstrumentedLock {
 
-  static final Log LOG = LogFactory.getLog(TestInstrumentedLock.class);
+  static final Logger LOG = LoggerFactory.getLogger(TestInstrumentedLock.class);
 
   @Rule public TestName name = new TestName();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a598479/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java
index eeefa88..3e1a88b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java
@@ -24,18 +24,19 @@ import static org.junit.Assert.assertTrue;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TestName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A test class for InstrumentedReadLock and InstrumentedWriteLock.
  */
 public class TestInstrumentedReadWriteLock {
 
-  static final Log LOG = LogFactory.getLog(TestInstrumentedReadWriteLock.class);
+  static final Logger LOG = LoggerFactory.getLogger(
+          TestInstrumentedReadWriteLock.class);
 
   @Rule
   public TestName name = new TestName();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a598479/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index c17ef36..6b1ee3d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -740,7 +740,8 @@ class BlockPoolSlice {
         }
       }
     } catch (IOException e) {
-      FsDatasetImpl.LOG.warn(e);
+      FsDatasetImpl.LOG.warn("Getting exception while validating integrity " +
+              "and setting length for blockFile", e);
       return 0;
     }
   }
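
A note on the added message string: commons-logging's Log.warn(Object) accepts
a bare Throwable, but slf4j's Logger has no warn(Throwable) overload, so each
such call site now needs a message, with the exception passed as the trailing
argument so its stack trace is still logged.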

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a598479/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 11835a5..1e159cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -46,8 +46,6 @@ import javax.management.ObjectName;
 import javax.management.StandardMBean;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -119,6 +117,8 @@ import org.apache.hadoop.util.Timer;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**************************************************
  * FSDataset manages a set of data blocks.  Each block
@@ -127,7 +127,7 @@ import com.google.common.collect.Sets;
  ***************************************************/
 @InterfaceAudience.Private
 class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
-  static final Log LOG = LogFactory.getLog(FsDatasetImpl.class);
+  static final Logger LOG = LoggerFactory.getLogger(FsDatasetImpl.class);
   private final static boolean isNativeIOAvailable;
   private Timer timer;
   static {




[33/50] hadoop git commit: HADOOP-14542. Add IOUtils.cleanupWithLogger that accepts slf4j logger API. Contributed by Chen Liang.

Posted by st...@apache.org.
HADOOP-14542. Add IOUtils.cleanupWithLogger that accepts slf4j logger API. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6495190
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6495190
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6495190

Branch: refs/heads/HADOOP-13345
Commit: b64951905e64f6fed581c28634be6ed15c190633
Parents: 9ae9467
Author: Akira Ajisaka <aa...@apache.org>
Authored: Thu Jun 22 17:42:59 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Thu Jun 22 17:42:59 2017 +0900

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/io/IOUtils.java | 23 ++++++++++++++++++++
 1 file changed, 23 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6495190/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index ee7264b..459014b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.Shell;
+import org.slf4j.Logger;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@@ -261,6 +262,28 @@ public class IOUtils {
   }
 
   /**
+   * Close the Closeable objects and <b>ignore</b> any {@link Throwable} or
+   * null pointers. Must only be used for cleanup in exception handlers.
+   *
+   * @param logger the log to record problems to at debug level. Can be null.
+   * @param closeables the objects to close
+   */
+  public static void cleanupWithLogger(Logger logger,
+      java.io.Closeable... closeables) {
+    for (java.io.Closeable c : closeables) {
+      if (c != null) {
+        try {
+          c.close();
+        } catch (Throwable e) {
+          if (logger != null) {
+            logger.debug("Exception in closing {}", c, e);
+          }
+        }
+      }
+    }
+  }
+
+  /**
    * Closes the stream ignoring {@link Throwable}.
    * Must only be called in cleaning up from exception handlers.
    *
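
A minimal sketch of the intended call pattern for the new overload, in the
error path of a copy helper (the method, paths and LOG field are hypothetical;
LOG is assumed to be an org.slf4j.Logger):

    static void copyOnce(FileSystem fs, Path src, Path dst) throws IOException {
      FSDataInputStream in = null;
      FSDataOutputStream out = null;
      try {
        in = fs.open(src);
        out = fs.create(dst);
        IOUtils.copyBytes(in, out, 4096, false);  // keep ownership of the streams
        out.close();
        in.close();
      } catch (IOException e) {
        // close whatever was opened, logging close failures at debug level
        // instead of letting them mask the original exception
        IOUtils.cleanupWithLogger(LOG, in, out);
        throw e;
      }
    }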




[39/50] hadoop git commit: YARN-5006. ResourceManager quit due to ApplicationStateData exceed the limit size of znode in zk. Contributed by Bibin A Chundatt.

Posted by st...@apache.org.
YARN-5006. ResourceManager quit due to ApplicationStateData exceed the limit size of znode in zk. Contributed by Bibin A Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/740204b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/740204b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/740204b2

Branch: refs/heads/HADOOP-13345
Commit: 740204b2926f49ea70596c6059582ce409fbdd90
Parents: 092ebdf
Author: Naganarasimha <na...@apache.org>
Authored: Fri Jun 23 07:52:41 2017 +0530
Committer: Naganarasimha <na...@apache.org>
Committed: Fri Jun 23 07:52:41 2017 +0530

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     |  5 +++
 .../src/main/resources/yarn-default.xml         |  8 +++++
 .../resourcemanager/recovery/RMStateStore.java  | 13 ++++---
 .../recovery/StoreLimitException.java           | 33 +++++++++++++++++
 .../recovery/ZKRMStateStore.java                | 16 +++++++--
 .../resourcemanager/rmapp/RMAppEvent.java       | 25 +++++++++++++
 .../server/resourcemanager/rmapp/RMAppImpl.java |  6 ++--
 .../recovery/TestZKRMStateStore.java            | 37 ++++++++++++++++++++
 8 files changed, 135 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/740204b2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 5e4c826..ca71d35 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -561,6 +561,11 @@ public class YarnConfiguration extends Configuration {
   public static final String RM_ZK_NUM_RETRIES = RM_ZK_PREFIX + "num-retries";
   public static final int DEFAULT_ZK_RM_NUM_RETRIES = 1000;
 
+  /** Zookeeper znode limit */
+  public static final String RM_ZK_ZNODE_SIZE_LIMIT_BYTES =
+      RM_ZK_PREFIX + "max-znode-size.bytes";
+  public static final int DEFAULT_RM_ZK_ZNODE_SIZE_LIMIT_BYTES = 1024 * 1024;
+
   public static final String RM_ZK_RETRY_INTERVAL_MS =
       RM_ZK_PREFIX + "retry-interval-ms";
   public static final int DEFAULT_RM_ZK_RETRY_INTERVAL_MS = 1000;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/740204b2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index e687eef..d4b7bde 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -629,6 +629,14 @@
   </property>
 
   <property>
+    <description>Specifies the maximum size of the data that can be stored
+      in a znode. The value should be the same as or less than jute.maxbuffer
+      configured in ZooKeeper. The default is 1 MB.</description>
+    <name>yarn.resourcemanager.zk-max-znode-size.bytes</name>
+    <value>1048576</value>
+  </property>
+
+  <property>
     <description>Name of the cluster. In a HA setting,
       this is used to ensure the RM participates in leader
       election for this cluster and ensures it does not affect
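
For operators, a sketch of overriding the new limit in yarn-site.xml (the 2 MB
value here is purely illustrative; as the description above notes, ZooKeeper's
jute.maxbuffer must be at least as large):

    <property>
      <name>yarn.resourcemanager.zk-max-znode-size.bytes</name>
      <value>2097152</value>
    </property>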

http://git-wip-us.apache.org/repos/asf/hadoop/blob/740204b2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
index 975847c..e945b59 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
@@ -48,7 +48,6 @@ import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl;
 import org.apache.hadoop.yarn.conf.HAUtil;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;
@@ -217,14 +216,20 @@ public abstract class RMStateStore extends AbstractService {
       LOG.info("Storing info for app: " + appId);
       try {
         store.storeApplicationStateInternal(appId, appState);
-        store.notifyApplication(new RMAppEvent(appId,
-               RMAppEventType.APP_NEW_SAVED));
+        store.notifyApplication(
+            new RMAppEvent(appId, RMAppEventType.APP_NEW_SAVED));
       } catch (Exception e) {
         LOG.error("Error storing app: " + appId, e);
-        isFenced = store.notifyStoreOperationFailedInternal(e);
+        if (e instanceof StoreLimitException) {
+          store.notifyApplication(new RMAppEvent(appId,
+              RMAppEventType.APP_REJECTED, e.getMessage(), false));
+        } else {
+          isFenced = store.notifyStoreOperationFailedInternal(e);
+        }
       }
       return finalState(isFenced);
     };
+
   }
 
   private static class UpdateAppTransition implements

http://git-wip-us.apache.org/repos/asf/hadoop/blob/740204b2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/StoreLimitException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/StoreLimitException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/StoreLimitException.java
new file mode 100644
index 0000000..289ea44
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/StoreLimitException.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.recovery;
+
+import org.apache.hadoop.yarn.exceptions.YarnException;
+
+/**
+ * This exception is thrown when the application state data size exceeds the
+ * limit configured for the RM state store.
+ *
+ */
+public class StoreLimitException extends YarnException {
+  private static final long serialVersionUID = 1L;
+
+  public StoreLimitException(String message) {
+    super(message);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/740204b2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
index a9d0885..3b986d1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java
@@ -188,6 +188,7 @@ public class ZKRMStateStore extends RMStateStore {
   private String fencingNodePath;
   private Thread verifyActiveStatusThread;
   private int zkSessionTimeout;
+  private int zknodeLimit;
 
   /* ACL and auth info */
   private List<ACL> zkAcl;
@@ -283,6 +284,8 @@ public class ZKRMStateStore extends RMStateStore {
     fencingNodePath = getNodePath(zkRootNodePath, FENCING_LOCK);
     zkSessionTimeout = conf.getInt(YarnConfiguration.RM_ZK_TIMEOUT_MS,
         YarnConfiguration.DEFAULT_RM_ZK_TIMEOUT_MS);
+    zknodeLimit = conf.getInt(YarnConfiguration.RM_ZK_ZNODE_SIZE_LIMIT_BYTES,
+        YarnConfiguration.DEFAULT_RM_ZK_ZNODE_SIZE_LIMIT_BYTES);
 
     appIdNodeSplitIndex =
         conf.getInt(YarnConfiguration.ZK_APPID_NODE_SPLIT_INDEX,
@@ -746,8 +749,17 @@ public class ZKRMStateStore extends RMStateStore {
     }
 
     byte[] appStateData = appStateDataPB.getProto().toByteArray();
-    safeCreate(nodeCreatePath, appStateData, zkAcl,
-        CreateMode.PERSISTENT);
+    if (appStateData.length <= zknodeLimit) {
+      safeCreate(nodeCreatePath, appStateData, zkAcl, CreateMode.PERSISTENT);
+    } else {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Application state data size for " + appId + " is "
+            + appStateData.length);
+      }
+      throw new StoreLimitException("Application " + appId
+          + " exceeds the maximum allowed size for application data. "
+          + "See yarn.resourcemanager.zk-max-znode-size.bytes.");
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/740204b2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEvent.java
index 6496402..0c6139e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEvent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEvent.java
@@ -25,6 +25,7 @@ public class RMAppEvent extends AbstractEvent<RMAppEventType>{
 
   private final ApplicationId appId;
   private final String diagnosticMsg;
+  private boolean storeAppInfo;
 
   public RMAppEvent(ApplicationId appId, RMAppEventType type) {
     this(appId, type, "");
@@ -35,6 +36,21 @@ public class RMAppEvent extends AbstractEvent<RMAppEventType>{
     super(type);
     this.appId = appId;
     this.diagnosticMsg = diagnostic;
+    this.storeAppInfo = true;
+  }
+
+  /**
+   * Constructor to create RM Application Event type.
+   *
+   * @param appId application Id
+   * @param type RM Event type
+   * @param diagnostic Diagnostic message for event
+   * @param storeApp whether the application state should be saved to the store
+   */
+  public RMAppEvent(ApplicationId appId, RMAppEventType type, String diagnostic,
+      boolean storeApp) {
+    this(appId, type, diagnostic);
+    this.storeAppInfo = storeApp;
   }
 
   public ApplicationId getApplicationId() {
@@ -44,4 +60,13 @@ public class RMAppEvent extends AbstractEvent<RMAppEventType>{
   public String getDiagnosticMsg() {
     return this.diagnosticMsg;
   }
+
+  /**
+   * Whether the application should be saved to the state store.
+   *
+   * @return true if the application state should be saved to the store.
+   */
+  public boolean doStoreAppInfo() {
+    return storeAppInfo;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/740204b2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index dda9474..bf8fa4f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -1307,8 +1307,10 @@ public class RMAppImpl implements RMApp, Recoverable {
 
     @Override
     public void transition(RMAppImpl app, RMAppEvent event) {
-      app.rememberTargetTransitionsAndStoreState(event, transitionToDo,
-        targetedFinalState, stateToBeStored);
+      if (event.doStoreAppInfo()) {
+        app.rememberTargetTransitionsAndStoreState(event, transitionToDo,
+            targetedFinalState, stateToBeStored);
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/740204b2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
index 7c40ddf..fcd8647 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.DelegationKey;
 import org.apache.hadoop.service.Service;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
@@ -42,6 +43,7 @@ import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPB
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl;
 import org.apache.hadoop.yarn.conf.HAUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.Event;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
 import org.apache.hadoop.yarn.server.records.Version;
@@ -53,6 +55,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMSta
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AggregateAppResourceUsage;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
@@ -70,6 +74,7 @@ import org.junit.Before;
 import org.junit.Test;
 
 import com.google.common.base.Joiner;
+import com.google.common.base.Supplier;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 
@@ -249,6 +254,38 @@ public class TestZKRMStateStore extends RMStateStoreTestBase {
         zkTester.getRMStateStore()).testRetryingCreateRootDir();
   }
 
+  @Test
+  public void testZKNodeLimit() throws Exception {
+    TestZKRMStateStoreTester zkTester = new TestZKRMStateStoreTester();
+    long submitTime = System.currentTimeMillis();
+    long startTime = System.currentTimeMillis() + 1234;
+    Configuration conf = new YarnConfiguration();
+    conf.setInt(YarnConfiguration.RM_ZK_ZNODE_SIZE_LIMIT_BYTES, 1);
+    RMStateStore store = zkTester.getRMStateStore(conf);
+    TestAppRejDispatcher dispatcher = new TestAppRejDispatcher();
+    store.setRMDispatcher(dispatcher);
+    ApplicationId appId1 =
+        ApplicationId.fromString("application_1352994193343_0001");
+    storeApp(store, appId1, submitTime, startTime);
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return dispatcher.appRejectedEvent;
+      }
+    }, 100, 5000);
+  }
+
+  static class TestAppRejDispatcher extends TestDispatcher {
+    private volatile boolean appRejectedEvent;
+
+    @Override
+    public void handle(Event event) {
+      if (event instanceof RMAppEvent
+          && event.getType().equals(RMAppEventType.APP_REJECTED)) {
+        appRejectedEvent = true;
+      }
+    }
+  }
+
   @Test (timeout = 60000)
   public void testCheckMajorVersionChange() throws Exception {
     TestZKRMStateStoreTester zkTester = new TestZKRMStateStoreTester() {
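
For reference, the new testZKNodeLimit case above sets the znode size limit to a single byte so that storing any application is expected to be rejected, then polls for the asynchronous APP_REJECTED signal with GenericTestUtils.waitFor (check every 100 ms, give up after 5000 ms). A minimal, self-contained sketch of that polling idiom follows; the class name and the stand-in flag are illustrative only, not part of the patch:

    import java.util.concurrent.atomic.AtomicBoolean;
    import org.apache.hadoop.test.GenericTestUtils;

    public class WaitForSketch {
      public static void main(String[] args) throws Exception {
        // Stand-in for the dispatcher flag; in the real test it is flipped by
        // the RM dispatcher thread when the oversized app is rejected.
        AtomicBoolean rejected = new AtomicBoolean(false);
        new Thread(() -> rejected.set(true)).start();

        // Poll every 100 ms, fail with a TimeoutException after 5000 ms.
        GenericTestUtils.waitFor(rejected::get, 100, 5000);
      }
    }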



[12/50] hadoop git commit: YARN-6467. CSQueueMetrics needs to update the current metrics for default partition only. Contributed by Manikandan R.

Posted by st...@apache.org.
YARN-6467. CSQueueMetrics needs to update the current metrics for default partition only. Contributed by Manikandan R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9c2aa1b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9c2aa1b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9c2aa1b

Branch: refs/heads/HADOOP-13345
Commit: e9c2aa1bc383cb08784846534415bf17667d6e41
Parents: 3008045
Author: Naganarasimha <na...@apache.org>
Authored: Mon Jun 19 21:31:40 2017 +0530
Committer: Naganarasimha <na...@apache.org>
Committed: Mon Jun 19 21:31:40 2017 +0530

----------------------------------------------------------------------
 .../scheduler/AppSchedulingInfo.java            |  69 ++++---
 .../resourcemanager/scheduler/QueueMetrics.java | 194 ++++++++++++-------
 .../scheduler/SchedulerApplicationAttempt.java  |  12 +-
 .../scheduler/capacity/CSQueueMetrics.java      |  56 ++++--
 .../scheduler/capacity/CSQueueUtils.java        |  16 +-
 .../scheduler/capacity/LeafQueue.java           |  17 +-
 .../scheduler/common/fica/FiCaSchedulerApp.java |  33 ++--
 .../scheduler/fair/FSAppAttempt.java            |  28 +--
 .../invariants/TestMetricsInvariantChecker.java |   7 +-
 .../scheduler/TestQueueMetrics.java             |  61 ++++--
 .../TestSchedulerApplicationAttempt.java        |   7 +-
 .../capacity/TestCapacityScheduler.java         |   6 +-
 .../TestNodeLabelContainerAllocation.java       |  10 +-
 13 files changed, 310 insertions(+), 206 deletions(-)
----------------------------------------------------------------------
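
In short, the patch threads the requested node partition through every QueueMetrics update and only touches the partition-agnostic gauges (allocated, pending, available and reserved memory, vcores and containers) when the update targets the default, unlabelled partition, since QueueMetrics does not track per-label capacities. A minimal sketch of that guard idiom follows; the class and helper names are illustrative only, not part of the patch:

    import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;

    /** Illustration only: the "default partition only" guard applied by YARN-6467. */
    class DefaultPartitionGuardSketch {

      /** True when an update targets the default (unlabelled) partition. */
      static boolean isDefaultPartition(String partition) {
        return partition == null
            || partition.equals(RMNodeLabelsManager.NO_LABEL);
      }

      private long allocatedMB;

      /** Only default-partition allocations move the partition-agnostic gauge. */
      void allocate(String partition, long memMb) {
        if (isDefaultPartition(partition)) {
          allocatedMB += memMb;
        }
      }
    }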


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c2aa1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
index 4de5eac..851c9f5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
@@ -18,6 +18,19 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentSkipListSet;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -31,27 +44,12 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.LocalitySchedulingPlacementSet;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.ResourceRequestUpdateResult;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SchedulingPlacementSet;
-
 import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
-
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk;
 import org.apache.hadoop.yarn.util.resource.Resources;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentSkipListSet;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
 /**
  * This class keeps track of all the consumption of an application. This also
  * keeps track of current running/completed containers for the application.
@@ -260,10 +258,13 @@ public class AppSchedulingInfo {
 
     Resource lastRequestCapability =
         lastRequest != null ? lastRequest.getCapability() : Resources.none();
-    metrics.incrPendingResources(user,
+    metrics.incrPendingResources(request.getNodeLabelExpression(), user,
         request.getNumContainers(), request.getCapability());
-    metrics.decrPendingResources(user,
-        lastRequestContainers, lastRequestCapability);
+
+    if(lastRequest != null) {
+      metrics.decrPendingResources(lastRequest.getNodeLabelExpression(), user,
+          lastRequestContainers, lastRequestCapability);
+    }
 
     // update queue:
     Resource increasedResource =
@@ -419,7 +420,7 @@ public class AppSchedulingInfo {
       writeLock.lock();
 
       if (null != containerAllocated) {
-        updateMetricsForAllocatedContainer(type, containerAllocated);
+        updateMetricsForAllocatedContainer(type, node, containerAllocated);
       }
 
       return schedulerKeyToPlacementSets.get(schedulerKey).allocate(
@@ -443,10 +444,12 @@ public class AppSchedulingInfo {
       for (SchedulingPlacementSet ps : schedulerKeyToPlacementSets.values()) {
         PendingAsk ask = ps.getPendingAsk(ResourceRequest.ANY);
         if (ask.getCount() > 0) {
-          oldMetrics.decrPendingResources(user, ask.getCount(),
-              ask.getPerAllocationResource());
-          newMetrics.incrPendingResources(user, ask.getCount(),
-              ask.getPerAllocationResource());
+          oldMetrics.decrPendingResources(
+              ps.getPrimaryRequestedNodePartition(),
+              user, ask.getCount(), ask.getPerAllocationResource());
+          newMetrics.incrPendingResources(
+              ps.getPrimaryRequestedNodePartition(),
+              user, ask.getCount(), ask.getPerAllocationResource());
 
           Resource delta = Resources.multiply(ask.getPerAllocationResource(),
               ask.getCount());
@@ -476,8 +479,8 @@ public class AppSchedulingInfo {
       for (SchedulingPlacementSet ps : schedulerKeyToPlacementSets.values()) {
         PendingAsk ask = ps.getPendingAsk(ResourceRequest.ANY);
         if (ask.getCount() > 0) {
-          metrics.decrPendingResources(user, ask.getCount(),
-              ask.getPerAllocationResource());
+          metrics.decrPendingResources(ps.getPrimaryRequestedNodePartition(),
+              user, ask.getCount(), ask.getPerAllocationResource());
 
           // Update Queue
           queue.decPendingResource(
@@ -537,8 +540,8 @@ public class AppSchedulingInfo {
         return;
       }
 
-      metrics.allocateResources(user, 1, rmContainer.getAllocatedResource(),
-          false);
+      metrics.allocateResources(rmContainer.getNodeLabelExpression(),
+          user, 1, rmContainer.getAllocatedResource(), false);
     } finally {
       this.writeLock.unlock();
     }
@@ -562,8 +565,8 @@ public class AppSchedulingInfo {
     }
   }
 
-  private void updateMetricsForAllocatedContainer(
-    NodeType type, Container containerAllocated) {
+  private void updateMetricsForAllocatedContainer(NodeType type,
+      SchedulerNode node, Container containerAllocated) {
     QueueMetrics metrics = queue.getMetrics();
     if (pending) {
       // once an allocation is done we assume the application is
@@ -579,8 +582,10 @@ public class AppSchedulingInfo {
           + containerAllocated.getResource() + " type="
           + type);
     }
-    metrics.allocateResources(user, 1, containerAllocated.getResource(),
-        true);
+    if(node != null) {
+      metrics.allocateResources(node.getPartition(), user, 1,
+          containerAllocated.getResource(), true);
+    }
     metrics.incrNodeTypeAggregations(user, type);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c2aa1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
index 9a57876..eafe8ed 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.metrics2.lib.MutableRate;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.slf4j.Logger;
@@ -59,38 +60,45 @@ public class QueueMetrics implements MetricsSource {
   @Metric("# of apps completed") MutableCounterInt appsCompleted;
   @Metric("# of apps killed") MutableCounterInt appsKilled;
   @Metric("# of apps failed") MutableCounterInt appsFailed;
-
-  @Metric("Allocated memory in MB") MutableGaugeLong allocatedMB;
-  @Metric("Allocated CPU in virtual cores") MutableGaugeInt allocatedVCores;
-  @Metric("# of allocated containers") MutableGaugeInt allocatedContainers;
-  @Metric("Aggregate # of allocated containers") MutableCounterLong aggregateContainersAllocated;
   @Metric("Aggregate # of allocated node-local containers")
     MutableCounterLong aggregateNodeLocalContainersAllocated;
   @Metric("Aggregate # of allocated rack-local containers")
     MutableCounterLong aggregateRackLocalContainersAllocated;
   @Metric("Aggregate # of allocated off-switch containers")
     MutableCounterLong aggregateOffSwitchContainersAllocated;
-  @Metric("Aggregate # of released containers") MutableCounterLong aggregateContainersReleased;
   @Metric("Aggregate # of preempted containers") MutableCounterLong
       aggregateContainersPreempted;
+  @Metric("# of active users") MutableGaugeInt activeUsers;
+  @Metric("# of active applications") MutableGaugeInt activeApplications;
+  @Metric("App Attempt First Container Allocation Delay")
+    MutableRate appAttemptFirstContainerAllocationDelay;
+
+  //Metrics updated only for "default" partition
+  @Metric("Allocated memory in MB") MutableGaugeLong allocatedMB;
+  @Metric("Allocated CPU in virtual cores") MutableGaugeInt allocatedVCores;
+  @Metric("# of allocated containers") MutableGaugeInt allocatedContainers;
+  @Metric("Aggregate # of allocated containers")
+    MutableCounterLong aggregateContainersAllocated;
+  @Metric("Aggregate # of released containers")
+    MutableCounterLong aggregateContainersReleased;
   @Metric("Available memory in MB") MutableGaugeLong availableMB;
   @Metric("Available CPU in virtual cores") MutableGaugeInt availableVCores;
   @Metric("Pending memory allocation in MB") MutableGaugeLong pendingMB;
-  @Metric("Pending CPU allocation in virtual cores") MutableGaugeInt pendingVCores;
+  @Metric("Pending CPU allocation in virtual cores")
+    MutableGaugeInt pendingVCores;
   @Metric("# of pending containers") MutableGaugeInt pendingContainers;
   @Metric("# of reserved memory in MB") MutableGaugeLong reservedMB;
   @Metric("Reserved CPU in virtual cores") MutableGaugeInt reservedVCores;
   @Metric("# of reserved containers") MutableGaugeInt reservedContainers;
-  @Metric("# of active users") MutableGaugeInt activeUsers;
-  @Metric("# of active applications") MutableGaugeInt activeApplications;
-  @Metric("App Attempt First Container Allocation Delay") MutableRate appAttemptFirstContainerAllocationDelay;
+
   private final MutableGaugeInt[] runningTime;
   private TimeBucketMetrics<ApplicationId> runBuckets;
 
   static final Logger LOG = LoggerFactory.getLogger(QueueMetrics.class);
   static final MetricsInfo RECORD_INFO = info("QueueMetrics",
       "Metrics for the resource scheduler");
-  protected static final MetricsInfo QUEUE_INFO = info("Queue", "Metrics by queue");
+  protected static final MetricsInfo QUEUE_INFO =
+      info("Queue", "Metrics by queue");
   protected static final MetricsInfo USER_INFO =
       info("User", "Metrics by user");
   static final Splitter Q_SPLITTER =
@@ -334,41 +342,61 @@ public class QueueMetrics implements MetricsSource {
   /**
    * Set available resources. To be called by scheduler periodically as
    * resources become available.
+   * @param partition Node Partition
+   * @param limit resource limit
+   */
+  public void setAvailableResourcesToQueue(String partition, Resource limit) {
+    if(partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) {
+      availableMB.set(limit.getMemorySize());
+      availableVCores.set(limit.getVirtualCores());
+    }
+  }
+
+  /**
+   * Set available resources. To be called by scheduler periodically as
+   * resources become available.
    * @param limit resource limit
    */
   public void setAvailableResourcesToQueue(Resource limit) {
-    availableMB.set(limit.getMemorySize());
-    availableVCores.set(limit.getVirtualCores());
+    this.setAvailableResourcesToQueue(RMNodeLabelsManager.NO_LABEL, limit);
   }
 
   /**
    * Set available resources. To be called by scheduler periodically as
    * resources become available.
+   * @param partition Node Partition
    * @param user
    * @param limit resource limit
    */
-  public void setAvailableResourcesToUser(String user, Resource limit) {
-    QueueMetrics userMetrics = getUserMetrics(user);
-    if (userMetrics != null) {
-      userMetrics.setAvailableResourcesToQueue(limit);
+  public void setAvailableResourcesToUser(String partition,
+      String user, Resource limit) {
+    if(partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) {
+      QueueMetrics userMetrics = getUserMetrics(user);
+      if (userMetrics != null) {
+        userMetrics.setAvailableResourcesToQueue(partition, limit);
+      }
     }
   }
 
   /**
    * Increment pending resource metrics
+   * @param partition Node Partition
    * @param user
    * @param containers
    * @param res the TOTAL delta of resources note this is different from
    *            the other APIs which use per container resource
    */
-  public void incrPendingResources(String user, int containers, Resource res) {
-    _incrPendingResources(containers, res);
-    QueueMetrics userMetrics = getUserMetrics(user);
-    if (userMetrics != null) {
-      userMetrics.incrPendingResources(user, containers, res);
-    }
-    if (parent != null) {
-      parent.incrPendingResources(user, containers, res);
+  public void incrPendingResources(String partition, String user,
+      int containers, Resource res) {
+    if(partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) {
+      _incrPendingResources(containers, res);
+      QueueMetrics userMetrics = getUserMetrics(user);
+      if (userMetrics != null) {
+        userMetrics.incrPendingResources(partition, user, containers, res);
+      }
+      if (parent != null) {
+        parent.incrPendingResources(partition, user, containers, res);
+      }
     }
   }
 
@@ -378,14 +406,18 @@ public class QueueMetrics implements MetricsSource {
     pendingVCores.incr(res.getVirtualCores() * containers);
   }
 
-  public void decrPendingResources(String user, int containers, Resource res) {
-    _decrPendingResources(containers, res);
-    QueueMetrics userMetrics = getUserMetrics(user);
-    if (userMetrics != null) {
-      userMetrics.decrPendingResources(user, containers, res);
-    }
-    if (parent != null) {
-      parent.decrPendingResources(user, containers, res);
+
+  public void decrPendingResources(String partition, String user,
+      int containers, Resource res) {
+    if(partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) {
+      _decrPendingResources(containers, res);
+      QueueMetrics userMetrics = getUserMetrics(user);
+      if (userMetrics != null) {
+        userMetrics.decrPendingResources(partition, user, containers, res);
+      }
+      if (parent != null) {
+        parent.decrPendingResources(partition, user, containers, res);
+      }
     }
   }
 
@@ -414,58 +446,66 @@ public class QueueMetrics implements MetricsSource {
     }
   }
 
-  public void allocateResources(String user, int containers, Resource res,
-      boolean decrPending) {
-    allocatedContainers.incr(containers);
-    aggregateContainersAllocated.incr(containers);
+  public void allocateResources(String partition, String user,
+      int containers, Resource res, boolean decrPending) {
+    if(partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) {
+      allocatedContainers.incr(containers);
+      aggregateContainersAllocated.incr(containers);
 
-    allocatedMB.incr(res.getMemorySize() * containers);
-    allocatedVCores.incr(res.getVirtualCores() * containers);
-    if (decrPending) {
-      _decrPendingResources(containers, res);
-    }
-    QueueMetrics userMetrics = getUserMetrics(user);
-    if (userMetrics != null) {
-      userMetrics.allocateResources(user, containers, res, decrPending);
-    }
-    if (parent != null) {
-      parent.allocateResources(user, containers, res, decrPending);
+      allocatedMB.incr(res.getMemorySize() * containers);
+      allocatedVCores.incr(res.getVirtualCores() * containers);
+      if (decrPending) {
+        _decrPendingResources(containers, res);
+      }
+      QueueMetrics userMetrics = getUserMetrics(user);
+      if (userMetrics != null) {
+        userMetrics.allocateResources(partition, user,
+            containers, res, decrPending);
+      }
+      if (parent != null) {
+        parent.allocateResources(partition, user, containers, res, decrPending);
+      }
     }
   }
 
   /**
    * Allocate Resource for container size change.
-   *
+   * @param partition Node Partition
    * @param user
    * @param res
    */
-  public void allocateResources(String user, Resource res) {
-    allocatedMB.incr(res.getMemorySize());
-    allocatedVCores.incr(res.getVirtualCores());
+  public void allocateResources(String partition, String user, Resource res) {
+    if(partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) {
+      allocatedMB.incr(res.getMemorySize());
+      allocatedVCores.incr(res.getVirtualCores());
 
-    pendingMB.decr(res.getMemorySize());
-    pendingVCores.decr(res.getVirtualCores());
+      pendingMB.decr(res.getMemorySize());
+      pendingVCores.decr(res.getVirtualCores());
 
-    QueueMetrics userMetrics = getUserMetrics(user);
-    if (userMetrics != null) {
-      userMetrics.allocateResources(user, res);
-    }
-    if (parent != null) {
-      parent.allocateResources(user, res);
+      QueueMetrics userMetrics = getUserMetrics(user);
+      if (userMetrics != null) {
+        userMetrics.allocateResources(partition, user, res);
+      }
+      if (parent != null) {
+        parent.allocateResources(partition, user, res);
+      }
     }
   }
 
-  public void releaseResources(String user, int containers, Resource res) {
-    allocatedContainers.decr(containers);
-    aggregateContainersReleased.incr(containers);
-    allocatedMB.decr(res.getMemorySize() * containers);
-    allocatedVCores.decr(res.getVirtualCores() * containers);
-    QueueMetrics userMetrics = getUserMetrics(user);
-    if (userMetrics != null) {
-      userMetrics.releaseResources(user, containers, res);
-    }
-    if (parent != null) {
-      parent.releaseResources(user, containers, res);
+  public void releaseResources(String partition,
+      String user, int containers, Resource res) {
+    if(partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) {
+      allocatedContainers.decr(containers);
+      aggregateContainersReleased.incr(containers);
+      allocatedMB.decr(res.getMemorySize() * containers);
+      allocatedVCores.decr(res.getVirtualCores() * containers);
+      QueueMetrics userMetrics = getUserMetrics(user);
+      if (userMetrics != null) {
+        userMetrics.releaseResources(partition, user, containers, res);
+      }
+      if (parent != null) {
+        parent.releaseResources(partition, user, containers, res);
+      }
     }
   }
 
@@ -494,6 +534,12 @@ public class QueueMetrics implements MetricsSource {
     }
   }
 
+  public void reserveResource(String partition, String user, Resource res) {
+    if(partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) {
+      reserveResource(user, res);
+    }
+  }
+
   public void reserveResource(String user, Resource res) {
     reservedContainers.incr();
     reservedMB.incr(res.getMemorySize());
@@ -520,6 +566,12 @@ public class QueueMetrics implements MetricsSource {
     }
   }
 
+  public void unreserveResource(String partition, String user, Resource res) {
+    if(partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) {
+      unreserveResource(user, res);
+    }
+  }
+
   public void incrActiveUsers() {
     activeUsers.incr();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c2aa1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index 294897f..4b0bf91 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -1073,15 +1073,19 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
       for (RMContainer liveContainer : liveContainers.values()) {
         Resource resource = liveContainer.getContainer().getResource();
         ((RMContainerImpl) liveContainer).setQueueName(newQueueName);
-        oldMetrics.releaseResources(user, 1, resource);
-        newMetrics.allocateResources(user, 1, resource, false);
+        oldMetrics.releaseResources(liveContainer.getNodeLabelExpression(),
+            user, 1, resource);
+        newMetrics.allocateResources(liveContainer.getNodeLabelExpression(),
+            user, 1, resource, false);
       }
       for (Map<NodeId, RMContainer> map : reservedContainers.values()) {
         for (RMContainer reservedContainer : map.values()) {
           ((RMContainerImpl) reservedContainer).setQueueName(newQueueName);
           Resource resource = reservedContainer.getReservedResource();
-          oldMetrics.unreserveResource(user, resource);
-          newMetrics.reserveResource(user, resource);
+          oldMetrics.unreserveResource(
+              reservedContainer.getNodeLabelExpression(), user, resource);
+          newMetrics.reserveResource(
+              reservedContainer.getNodeLabelExpression(), user, resource);
         }
       }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c2aa1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java
index c4d1934..87fc234 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java
@@ -26,12 +26,14 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.MutableGaugeFloat;
 import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 
 @Metrics(context = "yarn")
 public class CSQueueMetrics extends QueueMetrics {
 
+  //Metrics updated only for "default" partition
   @Metric("AM memory limit in MB")
   MutableGaugeLong AMResourceLimitMB;
   @Metric("AM CPU limit in virtual cores")
@@ -66,33 +68,40 @@ public class CSQueueMetrics extends QueueMetrics {
     return usedAMResourceVCores.value();
   }
 
-  public void setAMResouceLimit(Resource res) {
-    AMResourceLimitMB.set(res.getMemorySize());
-    AMResourceLimitVCores.set(res.getVirtualCores());
+  public void setAMResouceLimit(String partition, Resource res) {
+    if(partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) {
+      AMResourceLimitMB.set(res.getMemorySize());
+      AMResourceLimitVCores.set(res.getVirtualCores());
+    }
   }
 
-  public void setAMResouceLimitForUser(String user, Resource res) {
+  public void setAMResouceLimitForUser(String partition,
+      String user, Resource res) {
     CSQueueMetrics userMetrics = (CSQueueMetrics) getUserMetrics(user);
     if (userMetrics != null) {
-      userMetrics.setAMResouceLimit(res);
+      userMetrics.setAMResouceLimit(partition, res);
     }
   }
 
-  public void incAMUsed(String user, Resource res) {
-    usedAMResourceMB.incr(res.getMemorySize());
-    usedAMResourceVCores.incr(res.getVirtualCores());
-    CSQueueMetrics userMetrics = (CSQueueMetrics) getUserMetrics(user);
-    if (userMetrics != null) {
-      userMetrics.incAMUsed(user, res);
+  public void incAMUsed(String partition, String user, Resource res) {
+    if(partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) {
+      usedAMResourceMB.incr(res.getMemorySize());
+      usedAMResourceVCores.incr(res.getVirtualCores());
+      CSQueueMetrics userMetrics = (CSQueueMetrics) getUserMetrics(user);
+      if (userMetrics != null) {
+        userMetrics.incAMUsed(partition, user, res);
+      }
     }
   }
 
-  public void decAMUsed(String user, Resource res) {
-    usedAMResourceMB.decr(res.getMemorySize());
-    usedAMResourceVCores.decr(res.getVirtualCores());
-    CSQueueMetrics userMetrics = (CSQueueMetrics) getUserMetrics(user);
-    if (userMetrics != null) {
-      userMetrics.decAMUsed(user, res);
+  public void decAMUsed(String partition, String user, Resource res) {
+    if(partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) {
+      usedAMResourceMB.decr(res.getMemorySize());
+      usedAMResourceVCores.decr(res.getVirtualCores());
+      CSQueueMetrics userMetrics = (CSQueueMetrics) getUserMetrics(user);
+      if (userMetrics != null) {
+        userMetrics.decAMUsed(partition, user, res);
+      }
     }
   }
 
@@ -100,16 +109,21 @@ public class CSQueueMetrics extends QueueMetrics {
     return usedCapacity.value();
   }
 
-  public void setUsedCapacity(float usedCapacity) {
-    this.usedCapacity.set(usedCapacity);
+  public void setUsedCapacity(String partition, float usedCap) {
+    if(partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) {
+      this.usedCapacity.set(usedCap);
+    }
   }
 
   public float getAbsoluteUsedCapacity() {
     return absoluteUsedCapacity.value();
   }
 
-  public void setAbsoluteUsedCapacity(Float absoluteUsedCapacity) {
-    this.absoluteUsedCapacity.set(absoluteUsedCapacity);
+  public void setAbsoluteUsedCapacity(String partition,
+      Float absoluteUsedCap) {
+    if(partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) {
+      this.absoluteUsedCapacity.set(absoluteUsedCap);
+    }
   }
 
   public synchronized static CSQueueMetrics forQueue(String queueName,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c2aa1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
index ba22541..e1014c1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
@@ -230,13 +230,13 @@ class CSQueueUtils {
 
     // QueueMetrics does not support per-label capacities,
     // so we report values only for the default partition.
-    if (nodePartition.equals(CommonNodeLabelsManager.NO_LABEL)) {
-      queueMetrics.setUsedCapacity(
-          queueCapacities.getUsedCapacity(RMNodeLabelsManager.NO_LABEL));
-      queueMetrics.setAbsoluteUsedCapacity(
-          queueCapacities.getAbsoluteUsedCapacity(
-              RMNodeLabelsManager.NO_LABEL));
-    }
+
+    queueMetrics.setUsedCapacity(nodePartition,
+        queueCapacities.getUsedCapacity(RMNodeLabelsManager.NO_LABEL));
+    queueMetrics.setAbsoluteUsedCapacity(nodePartition,
+        queueCapacities.getAbsoluteUsedCapacity(
+            RMNodeLabelsManager.NO_LABEL));
+
   }
 
   private static Resource getMaxAvailableResourceToQueue(
@@ -302,7 +302,7 @@ class CSQueueUtils {
 
     // Update queue metrics w.r.t node labels. In a generic way, we can
     // calculate available resource from all labels in cluster.
-    childQueue.getMetrics().setAvailableResourcesToQueue(
+    childQueue.getMetrics().setAvailableResourcesToQueue(nodePartition,
         getMaxAvailableResourceToQueue(rc, nlm, childQueue, cluster));
    }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c2aa1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 104e95e..eb2432e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -696,7 +696,7 @@ public class LeafQueue extends AbstractCSQueue {
           resourceCalculator, queuePartitionUsableResource, amResourcePercent,
           minimumAllocation);
 
-      metrics.setAMResouceLimit(amResouceLimit);
+      metrics.setAMResouceLimit(nodePartition, amResouceLimit);
       queueUsage.setAMLimit(nodePartition, amResouceLimit);
       return amResouceLimit;
     } finally {
@@ -811,9 +811,10 @@ public class LeafQueue extends AbstractCSQueue {
         user.getResourceUsage().incAMUsed(partitionName,
             application.getAMResource(partitionName));
         user.getResourceUsage().setAMLimit(partitionName, userAMLimit);
-        metrics.incAMUsed(application.getUser(),
+        metrics.incAMUsed(partitionName, application.getUser(),
             application.getAMResource(partitionName));
-        metrics.setAMResouceLimitForUser(application.getUser(), userAMLimit);
+        metrics.setAMResouceLimitForUser(partitionName,
+            application.getUser(), userAMLimit);
         fsApp.remove();
         LOG.info("Application " + applicationId + " from user: " + application
             .getUser() + " activated in queue: " + getQueueName());
@@ -894,7 +895,7 @@ public class LeafQueue extends AbstractCSQueue {
             application.getAMResource(partitionName));
         user.getResourceUsage().decAMUsed(partitionName,
             application.getAMResource(partitionName));
-        metrics.decAMUsed(application.getUser(),
+        metrics.decAMUsed(partitionName, application.getUser(),
             application.getAMResource(partitionName));
       }
       applicationAttemptMap.remove(application.getApplicationAttemptId());
@@ -1338,7 +1339,7 @@ public class LeafQueue extends AbstractCSQueue {
     
     application.setHeadroomProvider(headroomProvider);
 
-    metrics.setAvailableResourcesToUser(user, headroom);
+    metrics.setAvailableResourcesToUser(nodePartition, user, headroom);
     
     return userLimit;
   }
@@ -1583,7 +1584,8 @@ public class LeafQueue extends AbstractCSQueue {
       // Note this is a bit unconventional since it gets the object and modifies
       // it here, rather than using set routine
       Resources.subtractFrom(application.getHeadroom(), resource); // headroom
-      metrics.setAvailableResourcesToUser(userName, application.getHeadroom());
+      metrics.setAvailableResourcesToUser(nodePartition,
+          userName, application.getHeadroom());
 
       if (LOG.isDebugEnabled()) {
         LOG.debug(getQueueName() + " user=" + userName + " used="
@@ -1622,7 +1624,8 @@ public class LeafQueue extends AbstractCSQueue {
       User user = usersManager.updateUserResourceUsage(userName, resource,
           nodePartition, false);
 
-      metrics.setAvailableResourcesToUser(userName, application.getHeadroom());
+      metrics.setAvailableResourcesToUser(nodePartition,
+          userName, application.getHeadroom());
 
       if (LOG.isDebugEnabled()) {
         LOG.debug(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c2aa1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index 5c0b718..331585e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -18,7 +18,15 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica;
 
-import com.google.common.annotations.VisibleForTesting;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -65,28 +73,18 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Scheduli
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.allocator.AbstractContainerAllocator;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.allocator.ContainerAllocator;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ContainerAllocationProposal;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCommitRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.SchedulerContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSet;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SchedulingPlacementSet;
-
 import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
-
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
+import com.google.common.annotations.VisibleForTesting;
 
 /**
  * Represents an application attempt from the viewpoint of the FIFO or Capacity
@@ -198,7 +196,8 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
           "SchedulerApp", getApplicationId(), containerId, containerResource);
 
       // Update usage metrics
-      queue.getMetrics().releaseResources(getUser(), 1, containerResource);
+      queue.getMetrics().releaseResources(partition,
+          getUser(), 1, containerResource);
       attemptResourceUsage.decUsed(partition, containerResource);
 
       // Clear resource utilization metrics cache.
@@ -572,8 +571,8 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
         node.unreserveResource(this);
 
         // Update reserved metrics
-        queue.getMetrics().unreserveResource(getUser(),
-            rmContainer.getReservedResource());
+        queue.getMetrics().unreserveResource(node.getPartition(),
+            getUser(), rmContainer.getReservedResource());
         queue.decReservedResource(node.getPartition(),
             rmContainer.getReservedResource());
         return true;
@@ -782,7 +781,7 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
     // Update reserved metrics if this is the first reservation
     // rmContainer will be moved to reserved in the super.reserve
     if (!reReservation) {
-      queue.getMetrics().reserveResource(
+      queue.getMetrics().reserveResource(node.getPartition(),
           getUser(), container.getResource());
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c2aa1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index a5772ba..a678bb9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -18,6 +18,16 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
+import java.text.DecimalFormat;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -51,16 +61,6 @@ import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
-import java.text.DecimalFormat;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
 /**
  * Represents an application attempt from the viewpoint of the Fair Scheduler.
  */
@@ -169,7 +169,9 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
           "SchedulerApp", getApplicationId(), containerId, containerResource);
 
       // Update usage metrics
-      queue.getMetrics().releaseResources(getUser(), 1, containerResource);
+      queue.getMetrics().releaseResources(
+          rmContainer.getNodeLabelExpression(),
+          getUser(), 1, containerResource);
       this.attemptResourceUsage.decUsed(containerResource);
 
       // Clear resource utilization metrics cache.
@@ -653,7 +655,7 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
         reservedContainer =
             createContainer(node, perAllocationResource,
               schedulerKey);
-        getMetrics().reserveResource(getUser(),
+        getMetrics().reserveResource(node.getPartition(), getUser(),
             reservedContainer.getResource());
         RMContainer rmContainer =
                 super.reserve(node, schedulerKey, null, reservedContainer);
@@ -712,7 +714,7 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
     unreserveInternal(schedulerKey, node);
     node.unreserveResource(this);
     clearReservation(node);
-    getMetrics().unreserveResource(
+    getMetrics().unreserveResource(node.getPartition(),
         getUser(), rmContainer.getContainer().getResource());
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c2aa1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
index 35cf1e4..5fd4151 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.log4j.Logger;
 import org.junit.Before;
@@ -59,7 +60,8 @@ public class TestMetricsInvariantChecker {
 
     QueueMetrics qm =
         QueueMetrics.forQueue(metricsSystem, "root", null, false, conf);
-    qm.setAvailableResourcesToQueue(Resource.newInstance(1, 1));
+    qm.setAvailableResourcesToQueue(RMNodeLabelsManager.NO_LABEL,
+        Resource.newInstance(1, 1));
 
     int numIterations = 1000;
     long start = System.currentTimeMillis();
@@ -79,7 +81,8 @@ public class TestMetricsInvariantChecker {
     // create a "wrong" condition in which the invariants are not respected
     QueueMetrics qm =
         QueueMetrics.forQueue(metricsSystem, "root", null, false, conf);
-    qm.setAvailableResourcesToQueue(Resource.newInstance(-1, -1));
+    qm.setAvailableResourcesToQueue(RMNodeLabelsManager.NO_LABEL,
+        Resource.newInstance(-1, -1));
 
     // test with throwing exception turned on
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c2aa1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java
index cb1104b..13144e9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
@@ -72,8 +73,10 @@ public class TestQueueMetrics {
     metrics.submitAppAttempt(user);
     checkApps(queueSource, 1, 1, 0, 0, 0, 0, true);
 
-    metrics.setAvailableResourcesToQueue(Resources.createResource(100*GB, 100));
-    metrics.incrPendingResources(user, 5, Resources.createResource(3*GB, 3));
+    metrics.setAvailableResourcesToQueue(RMNodeLabelsManager.NO_LABEL,
+        Resources.createResource(100*GB, 100));
+    metrics.incrPendingResources(RMNodeLabelsManager.NO_LABEL,
+        user, 5, Resources.createResource(3*GB, 3));
     // Available resources is set externally, as it depends on dynamic
     // configurable cluster/queue resources
     checkResources(queueSource, 0, 0, 0, 0, 0, 100*GB, 100, 15*GB, 15, 5, 0, 0, 0);
@@ -81,17 +84,21 @@ public class TestQueueMetrics {
     metrics.runAppAttempt(app.getApplicationId(), user);
     checkApps(queueSource, 1, 0, 1, 0, 0, 0, true);
 
-    metrics.allocateResources(user, 3, Resources.createResource(2*GB, 2), true);
+    metrics.allocateResources(RMNodeLabelsManager.NO_LABEL,
+        user, 3, Resources.createResource(2*GB, 2), true);
     checkResources(queueSource, 6*GB, 6, 3, 3, 0, 100*GB, 100, 9*GB, 9, 2, 0, 0, 0);
 
-    metrics.releaseResources(user, 1, Resources.createResource(2*GB, 2));
+    metrics.releaseResources(RMNodeLabelsManager.NO_LABEL,
+        user, 1, Resources.createResource(2*GB, 2));
     checkResources(queueSource, 4*GB, 4, 2, 3, 1, 100*GB, 100, 9*GB, 9, 2, 0, 0, 0);
 
-    metrics.incrPendingResources(user, 0, Resources.createResource(2 * GB, 2));
+    metrics.incrPendingResources(RMNodeLabelsManager.NO_LABEL,
+        user, 0, Resources.createResource(2 * GB, 2));
     checkResources(queueSource, 4 * GB, 4, 2, 3, 1, 100 * GB, 100, 9 * GB, 9, 2,
         0, 0, 0);
 
-    metrics.decrPendingResources(user, 0, Resources.createResource(2 * GB, 2));
+    metrics.decrPendingResources(RMNodeLabelsManager.NO_LABEL,
+        user, 0, Resources.createResource(2 * GB, 2));
     checkResources(queueSource, 4 * GB, 4, 2, 3, 1, 100 * GB, 100, 9 * GB, 9, 2,
         0, 0, 0);
 
@@ -177,9 +184,12 @@ public class TestQueueMetrics {
     checkApps(queueSource, 1, 1, 0, 0, 0, 0, true);
     checkApps(userSource, 1, 1, 0, 0, 0, 0, true);
 
-    metrics.setAvailableResourcesToQueue(Resources.createResource(100*GB, 100));
-    metrics.setAvailableResourcesToUser(user, Resources.createResource(10*GB, 10));
-    metrics.incrPendingResources(user, 5, Resources.createResource(3*GB, 3));
+    metrics.setAvailableResourcesToQueue(RMNodeLabelsManager.NO_LABEL,
+        Resources.createResource(100*GB, 100));
+    metrics.setAvailableResourcesToUser(RMNodeLabelsManager.NO_LABEL,
+        user, Resources.createResource(10*GB, 10));
+    metrics.incrPendingResources(RMNodeLabelsManager.NO_LABEL,
+        user, 5, Resources.createResource(3*GB, 3));
     // Available resources is set externally, as it depends on dynamic
     // configurable cluster/queue resources
     checkResources(queueSource, 0, 0, 0, 0, 0,  100*GB, 100, 15*GB, 15, 5, 0, 0, 0);
@@ -189,11 +199,13 @@ public class TestQueueMetrics {
     checkApps(queueSource, 1, 0, 1, 0, 0, 0, true);
     checkApps(userSource, 1, 0, 1, 0, 0, 0, true);
 
-    metrics.allocateResources(user, 3, Resources.createResource(2*GB, 2), true);
+    metrics.allocateResources(RMNodeLabelsManager.NO_LABEL,
+        user, 3, Resources.createResource(2*GB, 2), true);
     checkResources(queueSource, 6*GB, 6, 3, 3, 0, 100*GB, 100, 9*GB, 9, 2, 0, 0, 0);
     checkResources(userSource, 6*GB, 6, 3, 3, 0, 10*GB, 10, 9*GB, 9, 2, 0, 0, 0);
 
-    metrics.releaseResources(user, 1, Resources.createResource(2*GB, 2));
+    metrics.releaseResources(RMNodeLabelsManager.NO_LABEL,
+        user, 1, Resources.createResource(2*GB, 2));
     checkResources(queueSource, 4*GB, 4, 2, 3, 1, 100*GB, 100, 9*GB, 9, 2, 0, 0, 0);
     checkResources(userSource, 4*GB, 4, 2, 3, 1, 10*GB, 10, 9*GB, 9, 2, 0, 0, 0);
 
@@ -283,11 +295,16 @@ public class TestQueueMetrics {
     checkApps(userSource, 1, 1, 0, 0, 0, 0, true);
     checkApps(parentUserSource, 1, 1, 0, 0, 0, 0, true);
 
-    parentMetrics.setAvailableResourcesToQueue(Resources.createResource(100*GB, 100));
-    metrics.setAvailableResourcesToQueue(Resources.createResource(100*GB, 100));
-    parentMetrics.setAvailableResourcesToUser(user, Resources.createResource(10*GB, 10));
-    metrics.setAvailableResourcesToUser(user, Resources.createResource(10*GB, 10));
-    metrics.incrPendingResources(user, 5, Resources.createResource(3*GB, 3));
+    parentMetrics.setAvailableResourcesToQueue(RMNodeLabelsManager.NO_LABEL,
+        Resources.createResource(100*GB, 100));
+    metrics.setAvailableResourcesToQueue(RMNodeLabelsManager.NO_LABEL,
+        Resources.createResource(100*GB, 100));
+    parentMetrics.setAvailableResourcesToUser(RMNodeLabelsManager.NO_LABEL,
+        user, Resources.createResource(10*GB, 10));
+    metrics.setAvailableResourcesToUser(RMNodeLabelsManager.NO_LABEL,
+        user, Resources.createResource(10*GB, 10));
+    metrics.incrPendingResources(RMNodeLabelsManager.NO_LABEL,
+        user, 5, Resources.createResource(3*GB, 3));
     checkResources(queueSource, 0, 0, 0, 0, 0, 100*GB, 100, 15*GB, 15, 5, 0, 0, 0);
     checkResources(parentQueueSource, 0, 0, 0, 0, 0, 100*GB, 100, 15*GB, 15, 5, 0, 0, 0);
     checkResources(userSource, 0, 0, 0, 0, 0, 10*GB, 10, 15*GB, 15, 5, 0, 0, 0);
@@ -297,8 +314,10 @@ public class TestQueueMetrics {
     checkApps(queueSource, 1, 0, 1, 0, 0, 0, true);
     checkApps(userSource, 1, 0, 1, 0, 0, 0, true);
 
-    metrics.allocateResources(user, 3, Resources.createResource(2*GB, 2), true);
-    metrics.reserveResource(user, Resources.createResource(3*GB, 3));
+    metrics.allocateResources(RMNodeLabelsManager.NO_LABEL,
+        user, 3, Resources.createResource(2*GB, 2), true);
+    metrics.reserveResource(RMNodeLabelsManager.NO_LABEL,
+        user, Resources.createResource(3*GB, 3));
     // Available resources is set externally, as it depends on dynamic
     // configurable cluster/queue resources
     checkResources(queueSource, 6*GB, 6, 3, 3, 0, 100*GB, 100, 9*GB, 9, 2, 3*GB, 3, 1);
@@ -306,8 +325,10 @@ public class TestQueueMetrics {
     checkResources(userSource, 6*GB, 6, 3, 3, 0, 10*GB, 10, 9*GB, 9, 2, 3*GB, 3, 1);
     checkResources(parentUserSource, 6*GB, 6, 3, 3, 0, 10*GB, 10, 9*GB, 9, 2, 3*GB, 3, 1);
 
-    metrics.releaseResources(user, 1, Resources.createResource(2*GB, 2));
-    metrics.unreserveResource(user, Resources.createResource(3*GB, 3));
+    metrics.releaseResources(RMNodeLabelsManager.NO_LABEL,
+        user, 1, Resources.createResource(2*GB, 2));
+    metrics.unreserveResource(RMNodeLabelsManager.NO_LABEL,
+          user, Resources.createResource(3*GB, 3));
     checkResources(queueSource, 4*GB, 4, 2, 3, 1, 100*GB, 100, 9*GB, 9, 2, 0, 0, 0);
     checkResources(parentQueueSource, 4*GB, 4, 2, 3, 1, 100*GB, 100, 9*GB, 9, 2, 0, 0, 0);
     checkResources(userSource, 4*GB, 4, 2, 3, 1, 10*GB, 10, 9*GB, 9, 2, 0, 0, 0);
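
For readers skimming the diff: every QueueMetrics mutation now names the node partition explicitly, with RMNodeLabelsManager.NO_LABEL standing in for the default partition. A minimal sketch of the updated call pattern follows; it is not part of the commit, and the metrics instance, user name, and resource sizes are placeholder assumptions, while the method signatures match those exercised in the test above.

    // Illustrative sketch only; "metrics" and "user1" are placeholders.
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
    import org.apache.hadoop.yarn.util.resource.Resources;

    class NoLabelQueueMetricsSketch {
      static void recordLifecycle(QueueMetrics metrics) {
        Resource res = Resources.createResource(2 * 1024, 2); // 2 GB, 2 vcores
        // Pending -> allocated -> released, all against the default (empty) partition.
        metrics.incrPendingResources(RMNodeLabelsManager.NO_LABEL, "user1", 1, res);
        metrics.allocateResources(RMNodeLabelsManager.NO_LABEL, "user1", 1, res, true);
        metrics.releaseResources(RMNodeLabelsManager.NO_LABEL, "user1", 1, res);
      }
    }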

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c2aa1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
index 06253ff..fa16eff 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils.toSchedulerKey;
 import static org.junit.Assert.assertEquals;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -41,14 +42,11 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
-
 import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.junit.After;
 import org.junit.Test;
 
-import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils.toSchedulerKey;
-
 public class TestSchedulerApplicationAttempt {
 
   private static final NodeId nodeId = NodeId.newInstance("somehost", 5);
@@ -103,7 +101,8 @@ public class TestSchedulerApplicationAttempt {
     Map<NodeId, RMContainer> reservations = new HashMap<NodeId, RMContainer>();
     reservations.put(node.getNodeID(), container2);
     app.reservedContainers.put(toSchedulerKey(prio1), reservations);
-    oldMetrics.reserveResource(user, reservedResource);
+    oldMetrics.reserveResource(container2.getNodeLabelExpression(),
+        user, reservedResource);
     
     checkQueueMetrics(oldMetrics, 1, 1, 1536, 2, 2048, 3, 3072, 4);
     checkQueueMetrics(newMetrics, 0, 0, 0, 0, 0, 0, 0, 0);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c2aa1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index bf1f6eb..3c6e6df 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -2384,8 +2384,10 @@ public class TestCapacityScheduler {
     sch.getApplicationAttempt(appAttemptId).getLiveContainersMap()
         .put(newContainerId, rmContainer);
     QueueMetrics queueA1M = queueA1.getMetrics();
-    queueA1M.incrPendingResources("user1", 1, resource);
-    queueA1M.allocateResources("user1", resource);
+    queueA1M.incrPendingResources(rmContainer.getNodeLabelExpression(),
+        "user1", 1, resource);
+    queueA1M.allocateResources(rmContainer.getNodeLabelExpression(),
+        "user1", resource);
     // remove attempt
     sch.handle(new AppAttemptRemovedSchedulerEvent(appAttemptId,
         RMAppAttemptState.KILLED, true));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9c2aa1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
index fc1d284..740ef33 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
@@ -1958,8 +1958,8 @@ public class TestNodeLabelContainerAllocation {
         reportNm2.getAvailableResource().getMemorySize());
 
     LeafQueue leafQueue = (LeafQueue) cs.getQueue("a");
-    assertEquals(0 * GB, leafQueue.getMetrics().getAvailableMB());
-    assertEquals(5 * GB, leafQueue.getMetrics().getAllocatedMB());
+    assertEquals(5 * GB, leafQueue.getMetrics().getAvailableMB());
+    assertEquals(0 * GB, leafQueue.getMetrics().getAllocatedMB());
 
     // Kill all apps in queue a
     cs.killAllAppsInQueue("a");
@@ -2061,8 +2061,8 @@ public class TestNodeLabelContainerAllocation {
     double delta = 0.0001;
     // 3GB is used from label x quota. 1.5 GB is remaining from default label.
     // 2GB is remaining from label x.
-    assertEquals(3.5 * GB, leafQueue.getMetrics().getAvailableMB(), delta);
-    assertEquals(4 * GB, leafQueue.getMetrics().getAllocatedMB());
+    assertEquals(6.5 * GB, leafQueue.getMetrics().getAvailableMB(), delta);
+    assertEquals(1 * GB, leafQueue.getMetrics().getAllocatedMB());
 
     // app1 asks for 1 default partition container
     am1.allocate("*", 1 * GB, 5, new ArrayList<ContainerId>());
@@ -2079,7 +2079,7 @@ public class TestNodeLabelContainerAllocation {
     // 3GB is used from label x quota. 2GB used from default label.
     // So total 2.5 GB is remaining.
     assertEquals(2.5 * GB, leafQueue.getMetrics().getAvailableMB(), delta);
-    assertEquals(5 * GB, leafQueue.getMetrics().getAllocatedMB());
+    assertEquals(2 * GB, leafQueue.getMetrics().getAllocatedMB());
 
     rm1.close();
   }




[19/50] hadoop git commit: HDFS-11345. Document the configuration key for FSNamesystem lock fairness. Contributed by Erik Krogen.

Posted by st...@apache.org.
HDFS-11345. Document the configuration key for FSNamesystem lock fairness. Contributed by Erik Krogen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2c769167
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2c769167
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2c769167

Branch: refs/heads/HADOOP-13345
Commit: 2c769167dbdb66c52d2ba7b7193a686444085570
Parents: 15964ef
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Jun 20 16:35:54 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Jun 20 16:35:54 2017 +0900

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java   |  4 ++++
 .../hadoop/hdfs/server/namenode/FSNamesystemLock.java     |  5 ++++-
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml       | 10 ++++++++++
 .../hadoop/hdfs/server/namenode/TestFSNamesystemLock.java |  7 ++++---
 4 files changed, 22 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c769167/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index eaaff60..e7aba4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -423,6 +423,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final long
       DFS_NAMENODE_MAX_LOCK_HOLD_TO_RELEASE_LEASE_MS_DEFAULT = 25;
 
+  public static final String DFS_NAMENODE_FSLOCK_FAIR_KEY =
+      "dfs.namenode.fslock.fair";
+  public static final boolean DFS_NAMENODE_FSLOCK_FAIR_DEFAULT = true;
+
   public static final String  DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY =
       "dfs.namenode.lock.detailed-metrics.enabled";
   public static final boolean DFS_NAMENODE_LOCK_DETAILED_METRICS_DEFAULT =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c769167/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
index be33c4b..8c60faa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
@@ -32,6 +32,8 @@ import org.apache.hadoop.util.Timer;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_FSLOCK_FAIR_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_FSLOCK_FAIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LOCK_DETAILED_METRICS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
@@ -113,7 +115,8 @@ class FSNamesystemLock {
   @VisibleForTesting
   FSNamesystemLock(Configuration conf,
       MutableRatesWithAggregation detailedHoldTimeMetrics, Timer timer) {
-    boolean fair = conf.getBoolean("dfs.namenode.fslock.fair", true);
+    boolean fair = conf.getBoolean(DFS_NAMENODE_FSLOCK_FAIR_KEY,
+        DFS_NAMENODE_FSLOCK_FAIR_DEFAULT);
     FSNamesystem.LOG.info("fsLock is fair: " + fair);
     this.coarseLock = new ReentrantReadWriteLock(fair);
     this.timer = timer;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c769167/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index dbf78fc..0f72041 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2760,6 +2760,16 @@
 </property>
 
 <property>
+  <name>dfs.namenode.fslock.fair</name>
+  <value>true</value>
+  <description>If this is true, the FS Namesystem lock will be used in Fair mode,
+    which will help to prevent writer threads from being starved, but can provide
+    lower lock throughput. See java.util.concurrent.locks.ReentrantReadWriteLock
+    for more information on fair/non-fair locks.
+  </description>
+</property>
+
+<property>
   <name>dfs.namenode.startup.delay.block.deletion.sec</name>
   <value>0</value>
   <description>The delay in seconds at which we will pause the blocks deletion

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c769167/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
index 1af8bf7..7f9746d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
@@ -40,6 +40,7 @@ import java.util.concurrent.TimeoutException;
 import java.util.regex.Pattern;
 
 import static org.junit.Assert.*;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_FSLOCK_FAIR_KEY;
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
 
@@ -53,11 +54,11 @@ public class TestFSNamesystemLock {
   public void testFsLockFairness() throws IOException, InterruptedException{
     Configuration conf = new Configuration();
 
-    conf.setBoolean("dfs.namenode.fslock.fair", true);
+    conf.setBoolean(DFS_NAMENODE_FSLOCK_FAIR_KEY, true);
     FSNamesystemLock fsnLock = new FSNamesystemLock(conf, null);
     assertTrue(fsnLock.coarseLock.isFair());
 
-    conf.setBoolean("dfs.namenode.fslock.fair", false);
+    conf.setBoolean(DFS_NAMENODE_FSLOCK_FAIR_KEY, false);
     fsnLock = new FSNamesystemLock(conf, null);
     assertFalse(fsnLock.coarseLock.isFair());
   }
@@ -103,7 +104,7 @@ public class TestFSNamesystemLock {
     final int threadCount = 3;
     final CountDownLatch latch = new CountDownLatch(threadCount);
     final Configuration conf = new Configuration();
-    conf.setBoolean("dfs.namenode.fslock.fair", true);
+    conf.setBoolean(DFS_NAMENODE_FSLOCK_FAIR_KEY, true);
     final FSNamesystemLock rwLock = new FSNamesystemLock(conf, null);
     rwLock.writeLock();
     ExecutorService helper = Executors.newFixedThreadPool(threadCount);
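
The commit replaces the hard-coded "dfs.namenode.fslock.fair" string with DFS_NAMENODE_FSLOCK_FAIR_KEY and documents it in hdfs-default.xml. A minimal sketch, mirroring the test changes above, of toggling fairness through the new constant; the wrapper class and main method are illustrative only.

    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_FSLOCK_FAIR_KEY;

    import org.apache.hadoop.conf.Configuration;

    class FsLockFairnessSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Default is true (fair): prevents writer starvation at some cost in lock
        // throughput. Setting false opts into the non-fair ReentrantReadWriteLock.
        conf.setBoolean(DFS_NAMENODE_FSLOCK_FAIR_KEY, false);
      }
    }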




[31/50] hadoop git commit: Confusion/name conflict between NameNodeActivity#BlockReportNumOps and RpcDetailedActivity#BlockReportNumOps. Contributed by Erik Krogen.

Posted by st...@apache.org.
Confusion/name conflict between NameNodeActivity#BlockReportNumOps and RpcDetailedActivity#BlockReportNumOps. Contributed by Erik Krogen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c22cf004
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c22cf004
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c22cf004

Branch: refs/heads/HADOOP-13345
Commit: c22cf004425daa9c350df5e365b0db85b1628b40
Parents: e806c6e
Author: Zhe Zhang <zh...@apache.org>
Authored: Wed Jun 21 16:34:19 2017 -0700
Committer: Zhe Zhang <zh...@apache.org>
Committed: Wed Jun 21 16:34:19 2017 -0700

----------------------------------------------------------------------
 .../server/blockmanagement/BlockManager.java    |  2 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  1 -
 .../namenode/metrics/NameNodeMetrics.java       | 25 ++++++++------------
 .../org/apache/hadoop/hdfs/TestSafeMode.java    |  4 ++--
 .../namenode/metrics/TestNameNodeMetrics.java   |  4 ++--
 5 files changed, 15 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c22cf004/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index acd4c47..a0c4698 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2436,7 +2436,7 @@ public class BlockManager implements BlockStatsMXBean {
     // Log the block report processing stats from Namenode perspective
     final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
     if (metrics != null) {
-      metrics.addBlockReport((int) (endTime - startTime));
+      metrics.addStorageBlockReport((int) (endTime - startTime));
     }
     blockLog.info("BLOCK* processReport 0x{}: from storage {} node {}, " +
         "blocks: {}, hasStaleStorage: {}, processing time: {} msecs, " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c22cf004/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 599178a..fd5a05b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -1486,7 +1486,6 @@ public class NameNodeRpcServer implements NamenodeProtocols {
               blocks, context);
         }
       });
-      metrics.incrStorageBlockReportOps();
     }
     bm.removeBRLeaseIfNeeded(nodeReg, context);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c22cf004/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
index c4cfa6a..cb81f5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
@@ -74,8 +74,6 @@ public class NameNodeMetrics {
   MutableCounterLong snapshotDiffReportOps;
   @Metric("Number of blockReceivedAndDeleted calls")
   MutableCounterLong blockReceivedAndDeletedOps;
-  @Metric("Number of blockReports from individual storages")
-  MutableCounterLong storageBlockReportOps;
   @Metric("Number of blockReports and blockReceivedAndDeleted queued")
   MutableGaugeInt blockOpsQueued;
   @Metric("Number of blockReports and blockReceivedAndDeleted batch processed")
@@ -111,8 +109,9 @@ public class NameNodeMetrics {
   final MutableQuantiles[] syncsQuantiles;
   @Metric("Journal transactions batched in sync")
   MutableCounterLong transactionsBatchedInSync;
-  @Metric("Block report") MutableRate blockReport;
-  final MutableQuantiles[] blockReportQuantiles;
+  @Metric("Number of blockReports from individual storages")
+  MutableRate storageBlockReport;
+  final MutableQuantiles[] storageBlockReportQuantiles;
   @Metric("Cache report") MutableRate cacheReport;
   final MutableQuantiles[] cacheReportQuantiles;
   @Metric("Generate EDEK time") private MutableRate generateEDEKTime;
@@ -143,7 +142,7 @@ public class NameNodeMetrics {
     
     final int len = intervals.length;
     syncsQuantiles = new MutableQuantiles[len];
-    blockReportQuantiles = new MutableQuantiles[len];
+    storageBlockReportQuantiles = new MutableQuantiles[len];
     cacheReportQuantiles = new MutableQuantiles[len];
     generateEDEKTimeQuantiles = new MutableQuantiles[len];
     warmUpEDEKTimeQuantiles = new MutableQuantiles[len];
@@ -154,9 +153,9 @@ public class NameNodeMetrics {
       syncsQuantiles[i] = registry.newQuantiles(
           "syncs" + interval + "s",
           "Journal syncs", "ops", "latency", interval);
-      blockReportQuantiles[i] = registry.newQuantiles(
-          "blockReport" + interval + "s", 
-          "Block report", "ops", "latency", interval);
+      storageBlockReportQuantiles[i] = registry.newQuantiles(
+          "storageBlockReport" + interval + "s",
+          "Storage block report", "ops", "latency", interval);
       cacheReportQuantiles[i] = registry.newQuantiles(
           "cacheReport" + interval + "s",
           "Cache report", "ops", "latency", interval);
@@ -284,10 +283,6 @@ public class NameNodeMetrics {
   public void incrBlockReceivedAndDeletedOps() {
     blockReceivedAndDeletedOps.incr();
   }
-  
-  public void incrStorageBlockReportOps() {
-    storageBlockReportOps.incr();
-  }
 
   public void setBlockOpsQueued(int size) {
     blockOpsQueued.set(size);
@@ -316,9 +311,9 @@ public class NameNodeMetrics {
     fsImageLoadTime.set((int) elapsed);
   }
 
-  public void addBlockReport(long latency) {
-    blockReport.add(latency);
-    for (MutableQuantiles q : blockReportQuantiles) {
+  public void addStorageBlockReport(long latency) {
+    storageBlockReport.add(latency);
+    for (MutableQuantiles q : storageBlockReportQuantiles) {
       q.add(latency);
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c22cf004/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
index 62dac74..f03b440 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
@@ -216,8 +216,8 @@ public class TestSafeMode {
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
       @Override
       public Boolean get() {
-        return getLongCounter("StorageBlockReportOps", getMetrics(NN_METRICS)) ==
-            cluster.getStoragesPerDatanode();
+        return getLongCounter("StorageBlockReportNumOps",
+            getMetrics(NN_METRICS)) == cluster.getStoragesPerDatanode();
       }
     }, 10, 10000);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c22cf004/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index c84f8e8..f598d8c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -781,7 +781,7 @@ public class TestNameNodeMetrics {
     // We have one sync when the cluster starts up, just opening the journal
     assertCounter("SyncsNumOps", 3L, rb);
     // Each datanode reports in when the cluster comes up
-    assertCounter("BlockReportNumOps",
+    assertCounter("StorageBlockReportNumOps",
                   (long) DATANODE_COUNT * cluster.getStoragesPerDatanode(), rb);
     
     // Sleep for an interval+slop to let the percentiles rollover
@@ -789,7 +789,7 @@ public class TestNameNodeMetrics {
     
     // Check that the percentiles were updated
     assertQuantileGauges("Syncs1s", rb);
-    assertQuantileGauges("BlockReport1s", rb);
+    assertQuantileGauges("StorageBlockReport1s", rb);
   }
 
   /**
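
One point worth spelling out: a MutableRate field is exported with NumOps and AvgTime suffixes, and quantile metrics are exported with a capitalized first letter, which is why renaming the field to storageBlockReport changes the asserted names to StorageBlockReportNumOps and StorageBlockReport1s. A small hedged sketch of checking the renamed metric; the record name and expected count are assumptions drawn from the test above.

    import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
    import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
    import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

    import org.apache.hadoop.metrics2.MetricsRecordBuilder;

    class StorageBlockReportMetricSketch {
      static void verify(long expectedReports) {
        // "NameNodeActivity" is the NameNode metrics record (NN_METRICS in the test above).
        MetricsRecordBuilder rb = getMetrics("NameNodeActivity");
        assertCounter("StorageBlockReportNumOps", expectedReports, rb);
        assertQuantileGauges("StorageBlockReport1s", rb);
      }
    }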




[03/50] hadoop git commit: HADOOP-14524. Make CryptoCodec Closeable so it can be cleaned up proactively.

Posted by st...@apache.org.
HADOOP-14524. Make CryptoCodec Closeable so it can be cleaned up proactively.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4ebc23ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4ebc23ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4ebc23ba

Branch: refs/heads/HADOOP-13345
Commit: 4ebc23ba7b16c7b9acf38b5a864682a6c8890690
Parents: 942ecbb
Author: Xiao Chen <xi...@apache.org>
Authored: Thu Jun 15 14:25:52 2017 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Fri Jun 16 09:49:33 2017 -0700

----------------------------------------------------------------------
 .../apache/hadoop/crypto/AesCtrCryptoCodec.java |  6 ++++
 .../org/apache/hadoop/crypto/CryptoCodec.java   |  3 +-
 .../apache/hadoop/crypto/CryptoInputStream.java |  1 +
 .../hadoop/crypto/CryptoOutputStream.java       |  1 +
 .../hadoop/crypto/OpensslAesCtrCryptoCodec.java | 13 +++++++-
 .../crypto/key/KeyProviderCryptoExtension.java  | 27 +++++++++++------
 ...yptoStreamsWithOpensslAesCtrCryptoCodec.java | 32 ++++++++++++++++++--
 .../datatransfer/sasl/DataTransferSaslUtil.java |  1 +
 .../apache/hadoop/mapreduce/CryptoUtils.java    | 14 +++++++--
 9 files changed, 82 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ebc23ba/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java
index 5e286b9..3e52560 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java
@@ -22,6 +22,8 @@ import org.apache.hadoop.classification.InterfaceStability;
 
 import com.google.common.base.Preconditions;
 
+import java.io.IOException;
+
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public abstract class AesCtrCryptoCodec extends CryptoCodec {
@@ -61,4 +63,8 @@ public abstract class AesCtrCryptoCodec extends CryptoCodec {
       IV[i] = (byte) sum;
     }
   }
+
+  @Override
+  public void close() throws IOException {
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ebc23ba/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java
index 493e23d..d9c16bb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.crypto;
 
+import java.io.Closeable;
 import java.security.GeneralSecurityException;
 import java.util.List;
 
@@ -42,7 +43,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public abstract class CryptoCodec implements Configurable {
+public abstract class CryptoCodec implements Configurable, Closeable {
   public static Logger LOG = LoggerFactory.getLogger(CryptoCodec.class);
   
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ebc23ba/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
index b7ded92..0be6e34 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
@@ -315,6 +315,7 @@ public class CryptoInputStream extends FilterInputStream implements
     
     super.close();
     freeBuffers();
+    codec.close();
     closed = true;
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ebc23ba/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
index d2f146a..9fb0ff6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
@@ -239,6 +239,7 @@ public class CryptoOutputStream extends FilterOutputStream implements
       flush();
       if (closeOutputStream) {
         super.close();
+        codec.close();
       }
       freeBuffers();
     } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ebc23ba/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java
index d0a12e9..d08e588 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.crypto;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_IMPL_KEY;
 
+import java.io.Closeable;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.security.GeneralSecurityException;
@@ -89,7 +90,17 @@ public class OpensslAesCtrCryptoCodec extends AesCtrCryptoCodec {
   public void generateSecureRandom(byte[] bytes) {
     random.nextBytes(bytes);
   }
-  
+
+  @Override
+  public void close() throws IOException {
+    try {
+      Closeable r = (Closeable) this.random;
+      r.close();
+    } catch (ClassCastException e) {
+    }
+    super.close();
+  }
+
   private static class OpensslAesCtrCipher implements Encryptor, Decryptor {
     private final OpensslCipher cipher;
     private final int mode;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ebc23ba/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
index 9ae98b4..ea5ff28 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
@@ -274,12 +274,16 @@ public class KeyProviderCryptoExtension extends
       // Generate random bytes for new key and IV
 
       CryptoCodec cc = CryptoCodec.getInstance(keyProvider.getConf());
-      final byte[] newKey = new byte[encryptionKey.getMaterial().length];
-      cc.generateSecureRandom(newKey);
-      final byte[] iv = new byte[cc.getCipherSuite().getAlgorithmBlockSize()];
-      cc.generateSecureRandom(iv);
-      Encryptor encryptor = cc.createEncryptor();
-      return generateEncryptedKey(encryptor, encryptionKey, newKey, iv);
+      try {
+        final byte[] newKey = new byte[encryptionKey.getMaterial().length];
+        cc.generateSecureRandom(newKey);
+        final byte[] iv = new byte[cc.getCipherSuite().getAlgorithmBlockSize()];
+        cc.generateSecureRandom(iv);
+        Encryptor encryptor = cc.createEncryptor();
+        return generateEncryptedKey(encryptor, encryptionKey, newKey, iv);
+      } finally {
+        cc.close();
+      }
     }
 
     private EncryptedKeyVersion generateEncryptedKey(final Encryptor encryptor,
@@ -322,9 +326,13 @@ public class KeyProviderCryptoExtension extends
 
       final KeyVersion dek = decryptEncryptedKey(ekv);
       final CryptoCodec cc = CryptoCodec.getInstance(keyProvider.getConf());
-      final Encryptor encryptor = cc.createEncryptor();
-      return generateEncryptedKey(encryptor, ekNow, dek.getMaterial(),
-          ekv.getEncryptedKeyIv());
+      try {
+        final Encryptor encryptor = cc.createEncryptor();
+        return generateEncryptedKey(encryptor, ekNow, dek.getMaterial(),
+            ekv.getEncryptedKeyIv());
+      } finally {
+        cc.close();
+      }
     }
 
     @Override
@@ -364,6 +372,7 @@ public class KeyProviderCryptoExtension extends
       bbOut.flip();
       byte[] decryptedKey = new byte[keyLen];
       bbOut.get(decryptedKey);
+      cc.close();
       return new KeyVersion(encryptionKey.getName(), EK, decryptedKey);
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ebc23ba/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java
index cc02f48..241e876 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java
@@ -18,12 +18,17 @@
 package org.apache.hadoop.crypto;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.random.OsSecureRandom;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.BeforeClass;
+import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_AES_CTR_NOPADDING_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 
 public class TestCryptoStreamsWithOpensslAesCtrCryptoCodec 
     extends TestCryptoStreams {
@@ -32,8 +37,7 @@ public class TestCryptoStreamsWithOpensslAesCtrCryptoCodec
   public static void init() throws Exception {
     GenericTestUtils.assumeInNativeProfile();
     Configuration conf = new Configuration();
-    conf.set(
-        CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_AES_CTR_NOPADDING_KEY,
+    conf.set(HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_AES_CTR_NOPADDING_KEY,
         OpensslAesCtrCryptoCodec.class.getName());
     codec = CryptoCodec.getInstance(conf);
     assertNotNull("Unable to instantiate codec " +
@@ -42,4 +46,28 @@ public class TestCryptoStreamsWithOpensslAesCtrCryptoCodec
     assertEquals(OpensslAesCtrCryptoCodec.class.getCanonicalName(),
         codec.getClass().getCanonicalName());
   }
+
+  @Test
+  public void testCodecClosesRandom() throws Exception {
+    GenericTestUtils.assumeInNativeProfile();
+    Configuration conf = new Configuration();
+    conf.set(HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_AES_CTR_NOPADDING_KEY,
+        OpensslAesCtrCryptoCodec.class.getName());
+    conf.set(
+        CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_IMPL_KEY,
+        OsSecureRandom.class.getName());
+    CryptoCodec codecWithRandom = CryptoCodec.getInstance(conf);
+    assertNotNull(
+        "Unable to instantiate codec " + OpensslAesCtrCryptoCodec.class
+            .getName() + ", is the required " + "version of OpenSSL installed?",
+        codecWithRandom);
+    OsSecureRandom random =
+        (OsSecureRandom) Whitebox.getInternalState(codecWithRandom, "random");
+    // trigger the OsSecureRandom to create an internal FileInputStream
+    random.nextBytes(new byte[10]);
+    assertNotNull(Whitebox.getInternalState(random, "stream"));
+    // verify closing the codec closes the codec's random's stream.
+    codecWithRandom.close();
+    assertNull(Whitebox.getInternalState(random, "stream"));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ebc23ba/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
index 5e07550..f4651eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
@@ -286,6 +286,7 @@ public final class DataTransferSaslUtil {
           codec.generateSecureRandom(inIv);
           codec.generateSecureRandom(outKey);
           codec.generateSecureRandom(outIv);
+          codec.close();
           return new CipherOption(suite, inKey, inIv, outKey, outIv);
         }
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ebc23ba/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CryptoUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CryptoUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CryptoUtils.java
index c05b6b0..00119cd 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CryptoUtils.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CryptoUtils.java
@@ -66,16 +66,24 @@ public class CryptoUtils {
     if (isEncryptedSpillEnabled(conf)) {
       byte[] iv = new byte[cryptoCodec.getCipherSuite().getAlgorithmBlockSize()];
       cryptoCodec.generateSecureRandom(iv);
+      cryptoCodec.close();
       return iv;
     } else {
       return null;
     }
   }
 
-  public static int cryptoPadding(Configuration conf) {
+  public static int cryptoPadding(Configuration conf) throws IOException {
     // Sizeof(IV) + long(start-offset)
-    return isEncryptedSpillEnabled(conf) ? CryptoCodec.getInstance(conf)
-        .getCipherSuite().getAlgorithmBlockSize() + 8 : 0;
+    if (!isEncryptedSpillEnabled(conf)) {
+      return 0;
+    }
+    final CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf);
+    try {
+      return cryptoCodec.getCipherSuite().getAlgorithmBlockSize() + 8;
+    } finally {
+      cryptoCodec.close();
+    }
   }
 
   private static byte[] getEncryptionKey() throws IOException {
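
Because CryptoCodec now implements Closeable, callers can release it (and, for OpensslAesCtrCryptoCodec, the underlying OsSecureRandom stream) deterministically. The diffs above use try/finally; an equivalent hedged sketch with try-with-resources, where the configuration and the use made of the codec are placeholders:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.CryptoCodec;

    class CryptoCodecCloseSketch {
      static byte[] newIv(Configuration conf) throws IOException {
        // Closeable means try-with-resources now closes the codec for us.
        try (CryptoCodec codec = CryptoCodec.getInstance(conf)) {
          byte[] iv = new byte[codec.getCipherSuite().getAlgorithmBlockSize()];
          codec.generateSecureRandom(iv);
          return iv;
        }
      }
    }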




[47/50] hadoop git commit: HADOOP-14549. Use GenericTestUtils.setLogLevel when available in hadoop-tools. Contributed by wenxin he.

Posted by st...@apache.org.
HADOOP-14549. Use GenericTestUtils.setLogLevel when available in hadoop-tools. Contributed by wenxin he.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a594f96f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a594f96f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a594f96f

Branch: refs/heads/HADOOP-13345
Commit: a594f96fb1155d94fe08ed8f85dca789a9a4c790
Parents: 0111711
Author: Akira Ajisaka <aa...@apache.org>
Authored: Mon Jun 26 16:41:00 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Mon Jun 26 16:41:00 2017 +0900

----------------------------------------------------------------------
 .../org/apache/hadoop/test/GenericTestUtils.java |  5 +++++
 .../apache/hadoop/tools/TestHadoopArchives.java  | 12 ++++++------
 .../java/org/apache/hadoop/tools/TestDistCh.java | 12 ++++++------
 .../mapred/gridmix/TestGridmixSubmission.java    | 10 +++++-----
 .../hadoop/mapred/gridmix/TestLoadJob.java       | 19 ++++++++++---------
 .../hadoop/mapred/gridmix/TestSleepJob.java      | 14 +++++++-------
 6 files changed, 39 insertions(+), 33 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a594f96f/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index fcc4e3c..00dc7f2 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -134,6 +134,11 @@ public abstract class GenericTestUtils {
     setLogLevel(toLog4j(logger), level);
   }
 
+  public static void setLogLevel(org.slf4j.Logger logger,
+                                 org.slf4j.event.Level level) {
+    setLogLevel(toLog4j(logger), Level.toLevel(level.toString()));
+  }
+
   /**
    * Extracts the name of the method where the invocation has happened
    * @return String name of the invoking method

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a594f96f/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
index e9ecf04..c63bc42 100644
--- a/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
+++ b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
@@ -28,8 +28,6 @@ import java.util.Collections;
 import java.util.List;
 import java.util.StringTokenizer;
 
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -43,15 +41,18 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.JarFinder;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
-import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import static org.junit.Assert.*;
+import static org.slf4j.LoggerFactory.getLogger;
+
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.event.Level;
 
 /**
  * test {@link HadoopArchives}
@@ -62,9 +63,8 @@ public class TestHadoopArchives {
       .getJar(HadoopArchives.class);
 
   {
-    ((Log4JLogger) LogFactory.getLog(org.apache.hadoop.security.Groups.class))
-        .getLogger().setLevel(Level.ERROR);
-
+    GenericTestUtils.setLogLevel(
+        getLogger(org.apache.hadoop.security.Groups.class), Level.ERROR);
   }
 
   private static final String inputDir = "input";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a594f96f/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestDistCh.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestDistCh.java b/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestDistCh.java
index 510d9ac..3704c5b 100644
--- a/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestDistCh.java
+++ b/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestDistCh.java
@@ -24,8 +24,6 @@ import java.io.PrintStream;
 import java.util.Arrays;
 import java.util.Random;
 
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -40,14 +38,16 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.mapred.MiniMRClientClusterFactory;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
-import org.apache.log4j.Level;
+import org.slf4j.event.Level;
+
+import static org.slf4j.LoggerFactory.getLogger;
 
 public class TestDistCh extends junit.framework.TestCase {
   {
-    ((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.StateChange")
-        ).getLogger().setLevel(Level.ERROR);
+    GenericTestUtils.setLogLevel(
+        getLogger("org.apache.hadoop.hdfs.StateChange"), Level.ERROR);
     GenericTestUtils.setLogLevel(DataNode.LOG, Level.ERROR);
-    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ERROR);
+    GenericTestUtils.setLogLevel(getLogger(FSNamesystem.class), Level.ERROR);
   }
 
   static final Long RANDOM_NUMBER_GENERATOR_SEED = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a594f96f/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java
index d4bfddc..f849843 100644
--- a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java
+++ b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestGridmixSubmission.java
@@ -17,19 +17,18 @@
  */
 package org.apache.hadoop.mapred.gridmix;
 
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.tools.rumen.JobStory;
 import org.apache.hadoop.tools.rumen.JobStoryProducer;
 import org.apache.hadoop.util.ExitUtil;
-import org.apache.log4j.Level;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.event.Level;
 
 
 import java.io.ByteArrayOutputStream;
@@ -41,6 +40,7 @@ import java.util.zip.GZIPInputStream;
 
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.junit.Assert.*;
+import static org.slf4j.LoggerFactory.getLogger;
 
 public class TestGridmixSubmission extends CommonJobTest {
   private static File inSpace = new File("src" + File.separator + "test"
@@ -48,8 +48,8 @@ public class TestGridmixSubmission extends CommonJobTest {
 
 
   static {
-    ((Log4JLogger) LogFactory.getLog("org.apache.hadoop.mapred.gridmix"))
-            .getLogger().setLevel(Level.DEBUG);
+    GenericTestUtils.setLogLevel(
+        getLogger("org.apache.hadoop.mapred.gridmix"), Level.DEBUG);
   }
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a594f96f/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestLoadJob.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestLoadJob.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestLoadJob.java
index 69c3a79..3ed3d4e 100644
--- a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestLoadJob.java
+++ b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestLoadJob.java
@@ -17,28 +17,29 @@
  */
 package org.apache.hadoop.mapred.gridmix;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.log4j.Level;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.event.Level;
 
 import java.io.IOException;
 
+import static org.slf4j.LoggerFactory.getLogger;
+
 /*
  Test LoadJob Gridmix sends data to job and after that
  */
 public class TestLoadJob extends CommonJobTest {
 
-  public static final Log LOG = LogFactory.getLog(Gridmix.class);
+  public static final Logger LOG = getLogger(Gridmix.class);
 
   static {
-    ((Log4JLogger) LogFactory.getLog("org.apache.hadoop.mapred.gridmix"))
-            .getLogger().setLevel(Level.DEBUG);
-    ((Log4JLogger) LogFactory.getLog(StressJobFactory.class)).getLogger()
-            .setLevel(Level.DEBUG);
+    GenericTestUtils.setLogLevel(
+        getLogger("org.apache.hadoop.mapred.gridmix"), Level.DEBUG);
+    GenericTestUtils.setLogLevel(
+        getLogger(StressJobFactory.class), Level.DEBUG);
   }
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a594f96f/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestSleepJob.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestSleepJob.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestSleepJob.java
index 580910c..5a648de 100644
--- a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestSleepJob.java
+++ b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestSleepJob.java
@@ -17,32 +17,32 @@
  */
 package org.apache.hadoop.mapred.gridmix;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.tools.rumen.JobStory;
-import org.apache.log4j.Level;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.event.Level;
 
 import java.io.IOException;
 import java.util.List;
 
 import static org.junit.Assert.*;
+import static org.slf4j.LoggerFactory.getLogger;
 
 public class TestSleepJob extends CommonJobTest {
 
-  public static final Log LOG = LogFactory.getLog(Gridmix.class);
+  public static final Logger LOG = getLogger(Gridmix.class);
 
   static {
-    ((Log4JLogger) LogFactory.getLog("org.apache.hadoop.mapred.gridmix"))
-            .getLogger().setLevel(Level.DEBUG);
+    GenericTestUtils.setLogLevel(
+        getLogger("org.apache.hadoop.mapred.gridmix"), Level.DEBUG);
   }
 
   static GridmixJobSubmissionPolicy policy = GridmixJobSubmissionPolicy.REPLAY;
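
The new GenericTestUtils.setLogLevel overload takes an slf4j Logger and slf4j Level directly, which is what lets the Log4JLogger casts above disappear. A minimal sketch of the call pattern; the logger name is just one example taken from the diffs.

    import org.apache.hadoop.test.GenericTestUtils;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;
    import org.slf4j.event.Level;

    class SetLogLevelSketch {
      static {
        Logger gridmixLog = LoggerFactory.getLogger("org.apache.hadoop.mapred.gridmix");
        // Delegates to the log4j-based overload via Level.toLevel(level.toString()).
        GenericTestUtils.setLogLevel(gridmixLog, Level.DEBUG);
      }
    }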




[15/50] hadoop git commit: HDFS-11995. HDFS Architecture documentation incorrectly describes writing to a local temporary file. Contributed by Nandakumar.

Posted by st...@apache.org.
HDFS-11995. HDFS Architecture documentation incorrectly describes writing to a local temporary file. Contributed by Nandakumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d954a647
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d954a647
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d954a647

Branch: refs/heads/HADOOP-13345
Commit: d954a64730c00346476322743462cde857164177
Parents: 73fb750
Author: Masatake Iwasaki <iw...@apache.org>
Authored: Tue Jun 20 08:07:42 2017 +0900
Committer: Masatake Iwasaki <iw...@apache.org>
Committed: Tue Jun 20 08:07:42 2017 +0900

----------------------------------------------------------------------
 .../hadoop-hdfs/src/site/markdown/HdfsDesign.md | 33 +++-----------------
 1 file changed, 4 insertions(+), 29 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d954a647/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
index 4bf1897..76cd2bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
@@ -201,38 +201,13 @@ A typical block size used by HDFS is 128 MB.
 Thus, an HDFS file is chopped up into 128 MB chunks, and if possible,
 each chunk will reside on a different DataNode.
 
-### Staging
-
-A client request to create a file does not reach the NameNode immediately.
-In fact, initially the HDFS client caches the file data into a local buffer.
-Application writes are transparently redirected to this local buffer.
-When the local file accumulates data worth over one chunk size, the client contacts the NameNode.
-The NameNode inserts the file name into the file system hierarchy and allocates a data block for it.
-The NameNode responds to the client request with the identity of the DataNode and the destination data block.
-Then the client flushes the chunk of data from the local buffer to the specified DataNode.
-When a file is closed, the remaining un-flushed data in the local buffer is transferred to the DataNode.
-The client then tells the NameNode that the file is closed. At this point,
-the NameNode commits the file creation operation into a persistent store.
-If the NameNode dies before the file is closed, the file is lost.
-
-The above approach has been adopted after careful consideration of target applications that run on HDFS.
-These applications need streaming writes to files.
-If a client writes to a remote file directly without any client side buffering,
-the network speed and the congestion in the network impacts throughput considerably.
-This approach is not without precedent.
-Earlier distributed file systems, e.g. AFS, have used client side caching to improve performance.
-A POSIX requirement has been relaxed to achieve higher performance of data uploads.
-
 ### Replication Pipelining
 
-When a client is writing data to an HDFS file,
-its data is first written to a local buffer as explained in the previous section.
-Suppose the HDFS file has a replication factor of three.
-When the local buffer accumulates a chunk of user data,
-the client retrieves a list of DataNodes from the NameNode.
+When a client is writing data to an HDFS file with a replication factor of three,
+the NameNode retrieves a list of DataNodes using a replication target choosing algorithm.
 This list contains the DataNodes that will host a replica of that block.
-The client then flushes the data chunk to the first DataNode.
-The first DataNode starts receiving the data in small portions,
+The client then writes to the first DataNode.
+The first DataNode starts receiving the data in portions,
 writes each portion to its local repository and transfers that portion to the second DataNode in the list.
 The second DataNode, in turn starts receiving each portion of the data block,
 writes that portion to its repository and then flushes that portion to the third DataNode.
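
The rewritten paragraph above describes portion-by-portion pipelining between replicas. As a rough illustration only (made-up names, not HDFS code), each node persists a portion locally and then forwards it to the next node in the pipeline:

    import java.util.ArrayList;
    import java.util.List;

    public class PipelineSketch {
      interface Node { void receive(byte[] portion); }

      static class DataNodeSketch implements Node {
        private final Node next;                    // null for the last replica
        private final List<byte[]> localRepo = new ArrayList<>();

        DataNodeSketch(Node next) { this.next = next; }

        @Override
        public void receive(byte[] portion) {
          localRepo.add(portion.clone());           // write the portion locally
          if (next != null) {
            next.receive(portion);                  // then flush it to the next replica
          }
        }
      }

      public static void main(String[] args) {
        // replication factor three: client -> dn1 -> dn2 -> dn3
        Node dn3 = new DataNodeSketch(null);
        Node dn2 = new DataNodeSketch(dn3);
        Node dn1 = new DataNodeSketch(dn2);
        dn1.receive("portion-1".getBytes());
        dn1.receive("portion-2".getBytes());
      }
    }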




[13/50] hadoop git commit: YARN-6680. Avoid locking overhead for NO_LABEL lookups. Contributed by Daryn Sharp

Posted by st...@apache.org.
YARN-6680. Avoid locking overhead for NO_LABEL lookups. Contributed by Daryn Sharp


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee89ac84
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee89ac84
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee89ac84

Branch: refs/heads/HADOOP-13345
Commit: ee89ac84e68d3e181b75c63f74a0444f9d28146f
Parents: e9c2aa1
Author: Naganarasimha <na...@apache.org>
Authored: Mon Jun 19 22:42:45 2017 +0530
Committer: Naganarasimha <na...@apache.org>
Committed: Mon Jun 19 22:42:45 2017 +0530

----------------------------------------------------------------------
 .../yarn/nodelabels/CommonNodeLabelsManager.java      |  8 +++++++-
 .../nodelabels/RMNodeLabelsManager.java               |  8 ++++++--
 .../resourcemanager/scheduler/ResourceUsage.java      | 14 +++++++-------
 .../server/resourcemanager/webapp/TestRMWebApp.java   |  8 ++++++--
 4 files changed, 26 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee89ac84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
index 60ade2d..66e945f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
@@ -99,6 +99,8 @@ public class CommonNodeLabelsManager extends AbstractService {
   protected ConcurrentMap<String, Host> nodeCollections =
       new ConcurrentHashMap<String, Host>();
 
+  protected RMNodeLabel noNodeLabel;
+
   protected final ReadLock readLock;
   protected final WriteLock writeLock;
 
@@ -225,7 +227,8 @@ public class CommonNodeLabelsManager extends AbstractService {
     isCentralizedNodeLabelConfiguration  =
         YarnConfiguration.isCentralizedNodeLabelConfiguration(conf);
 
-    labelCollections.put(NO_LABEL, new RMNodeLabel(NO_LABEL));
+    noNodeLabel = new RMNodeLabel(NO_LABEL);
+    labelCollections.put(NO_LABEL, noNodeLabel);
   }
 
   /**
@@ -947,6 +950,9 @@ public class CommonNodeLabelsManager extends AbstractService {
   }
 
   public boolean isExclusiveNodeLabel(String nodeLabel) throws IOException {
+    if (nodeLabel.equals(NO_LABEL)) {
+      return noNodeLabel.getIsExclusive();
+    }
     try {
       readLock.lock();
       RMNodeLabel label = labelCollections.get(nodeLabel);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee89ac84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
index 79b25ed..507f696 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java
@@ -519,12 +519,16 @@ public class RMNodeLabelsManager extends CommonNodeLabelsManager {
   
   public Resource getResourceByLabel(String label, Resource clusterResource) {
     label = normalizeLabel(label);
+    if (label.equals(NO_LABEL)) {
+      return noNodeLabel.getResource();
+    }
     try {
       readLock.lock();
-      if (null == labelCollections.get(label)) {
+      RMNodeLabel nodeLabel = labelCollections.get(label);
+      if (nodeLabel == null) {
         return Resources.none();
       }
-      return labelCollections.get(label).getResource();
+      return nodeLabel.getResource();
     } finally {
       readLock.unlock();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee89ac84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
index 2857379..6f0c7d2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceUsage.java
@@ -28,7 +28,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
 
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
-import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
 /**
@@ -46,6 +45,7 @@ public class ResourceUsage {
   private Map<String, UsageByLabel> usages;
   // short for no-label :)
   private static final String NL = CommonNodeLabelsManager.NO_LABEL;
+  private final UsageByLabel usageNoLabel;
 
   public ResourceUsage() {
     ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
@@ -53,7 +53,8 @@ public class ResourceUsage {
     writeLock = lock.writeLock();
 
     usages = new HashMap<String, UsageByLabel>();
-    usages.put(NL, new UsageByLabel(NL));
+    usageNoLabel = new UsageByLabel(NL);
+    usages.put(NL, usageNoLabel);
   }
 
   // Usage enum here to make implement cleaner
@@ -323,10 +324,9 @@ public class ResourceUsage {
   }
 
   private Resource _get(String label, ResourceType type) {
-    if (label == null) {
-      label = RMNodeLabelsManager.NO_LABEL;
+    if (label == null || label.equals(NL)) {
+      return normalize(usageNoLabel.resArr[type.idx]);
     }
-    
     try {
       readLock.lock();
       UsageByLabel usage = usages.get(label);
@@ -362,8 +362,8 @@ public class ResourceUsage {
   }
 
   private UsageByLabel getAndAddIfMissing(String label) {
-    if (label == null) {
-      label = RMNodeLabelsManager.NO_LABEL;
+    if (label == null || label.equals(NL)) {
+      return usageNoLabel;
     }
     if (!usages.containsKey(label)) {
       UsageByLabel u = new UsageByLabel(label);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee89ac84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
index 03fc206..8a65040 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager.MockAsm;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
@@ -229,12 +230,15 @@ public class TestRMWebApp {
     setupQueueConfiguration(conf);
 
     CapacityScheduler cs = new CapacityScheduler();
-    cs.setConf(new YarnConfiguration());
+    YarnConfiguration yarnConf = new YarnConfiguration();
+    cs.setConf(yarnConf);
     RMContext rmContext = new RMContextImpl(null, null, null, null, null,
         null, new RMContainerTokenSecretManager(conf),
         new NMTokenSecretManagerInRM(conf),
         new ClientToAMTokenSecretManagerInRM(), null);
-    rmContext.setNodeLabelManager(new NullRMNodeLabelsManager());
+    RMNodeLabelsManager labelManager = new NullRMNodeLabelsManager();
+    labelManager.init(yarnConf);
+    rmContext.setNodeLabelManager(labelManager);
     cs.setRMContext(rmContext);
     cs.init(conf);
     return cs;
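
The RMNodeLabelsManager and ResourceUsage hunks in this commit share one idea: keep a direct reference to the hot NO_LABEL entry so its lookups skip the read lock. A generic sketch of that fast-path pattern (assumed names, not the YARN classes above):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class NoLabelFastPathSketch {
      private static final String NO_LABEL = "";

      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private final Map<String, String> labelCollections = new HashMap<>();
      // hot default entry, held in its own field so reads need no lock
      private final String noLabelEntry = "default-partition";

      String get(String label) {
        if (label == null || label.equals(NO_LABEL)) {
          return noLabelEntry;                      // fast path: no locking overhead
        }
        lock.readLock().lock();                     // slow path for real labels
        try {
          return labelCollections.get(label);
        } finally {
          lock.readLock().unlock();
        }
      }
    }

The fast path works because the default entry is created once at init time and never replaced, so a plain reference read is safe without the lock.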




[16/50] hadoop git commit: HDFS-11890. Handle NPE in BlockRecoveryWorker when DN is getting shut down. Contributed by Surendra Singh Lilhore.

Posted by st...@apache.org.
HDFS-11890. Handle NPE in BlockRecoveryWorker when DN is getting shut down. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/099cbb42
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/099cbb42
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/099cbb42

Branch: refs/heads/HADOOP-13345
Commit: 099cbb427ad535c3369d9ac3fda6463502fc1c54
Parents: d954a64
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Tue Jun 20 11:03:56 2017 +0800
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Tue Jun 20 11:03:56 2017 +0800

----------------------------------------------------------------------
 .../hdfs/server/datanode/BlockRecoveryWorker.java     | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/099cbb42/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
index 792b6af..15c4af1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
@@ -127,8 +127,7 @@ public class BlockRecoveryWorker {
       // - Original state is RWR or better
       for(DatanodeID id : locs) {
         try {
-          DatanodeID bpReg = new DatanodeID(
-              datanode.getBPOfferService(bpid).bpRegistration);
+          DatanodeID bpReg = getDatanodeID(bpid);
           InterDatanodeProtocol proxyDN = bpReg.equals(id)?
               datanode: DataNode.createInterDataNodeProtocolProxy(id, conf,
               dnConf.socketTimeout, dnConf.connectToDnViaHostname);
@@ -398,8 +397,7 @@ public class BlockRecoveryWorker {
       for (int i = 0; i < locs.length; i++) {
         DatanodeID id = locs[i];
         try {
-          DatanodeID bpReg = new DatanodeID(
-              datanode.getBPOfferService(bpid).bpRegistration);
+          DatanodeID bpReg = getDatanodeID(bpid);
           InterDatanodeProtocol proxyDN = bpReg.equals(id) ?
               datanode : DataNode.createInterDataNodeProtocolProxy(id, conf,
               dnConf.socketTimeout, dnConf.connectToDnViaHostname);
@@ -532,6 +530,14 @@ public class BlockRecoveryWorker {
     }
   }
 
+  private DatanodeID getDatanodeID(String bpid) throws IOException {
+    BPOfferService bpos = datanode.getBPOfferService(bpid);
+    if (bpos == null) {
+      throw new IOException("No block pool offer service for bpid=" + bpid);
+    }
+    return new DatanodeID(bpos.bpRegistration);
+  }
+
   private static void logRecoverBlock(String who, RecoveringBlock rb) {
     ExtendedBlock block = rb.getBlock();
     DatanodeInfo[] targets = rb.getLocations();




[28/50] hadoop git commit: HDFS-11933. Arguments check for ErasureCodingPolicy->composePolicyName. Contributed by Lu Fei

Posted by st...@apache.org.
HDFS-11933. Arguments check for ErasureCodingPolicy->composePolicyName. Contributed by Lu Fei


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a010b330
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a010b330
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a010b330

Branch: refs/heads/HADOOP-13345
Commit: a010b330e7797d2ecb5b01ec69ea40438579602f
Parents: 1a59847
Author: Kai Zheng <ka...@intel.com>
Authored: Wed Jun 21 13:46:18 2017 +0800
Committer: Kai Zheng <ka...@intel.com>
Committed: Wed Jun 21 13:46:18 2017 +0800

----------------------------------------------------------------------
 .../hdfs/protocol/ErasureCodingPolicy.java      |  2 ++
 .../hdfs/protocol/TestErasureCodingPolicy.java  | 22 ++++++++++++++++++++
 2 files changed, 24 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a010b330/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
index b63d2c0..368a2f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
@@ -58,6 +58,8 @@ public final class ErasureCodingPolicy {
   }
 
   public static String composePolicyName(ECSchema schema, int cellSize) {
+    Preconditions.checkNotNull(schema);
+    Preconditions.checkArgument(cellSize > 0, "cellSize must be positive");
     Preconditions.checkArgument(cellSize % 1024 == 0,
         "cellSize must be 1024 aligned");
     return schema.getCodecName().toUpperCase() + "-" +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a010b330/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestErasureCodingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestErasureCodingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestErasureCodingPolicy.java
index 17fb01c..f1674af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestErasureCodingPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestErasureCodingPolicy.java
@@ -51,6 +51,28 @@ public class TestErasureCodingPolicy {
     } catch (IllegalArgumentException e) {
       GenericTestUtils.assertExceptionContains("cellSize", e);
     }
+    try {
+      new ErasureCodingPolicy(null, 1024, (byte) -1);
+      fail("Instantiated invalid ErasureCodingPolicy");
+    } catch (NullPointerException e) {
+    }
+    try {
+      new ErasureCodingPolicy(SCHEMA_1, -1, (byte) -1);
+      fail("Instantiated invalid ErasureCodingPolicy");
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains("cellSize", e);
+    }
+    try {
+      new ErasureCodingPolicy(null, 1024);
+      fail("Instantiated invalid ErasureCodingPolicy");
+    } catch (NullPointerException e) {
+    }
+    try {
+      new ErasureCodingPolicy(SCHEMA_1, -1);
+      fail("Instantiated invalid ErasureCodingPolicy");
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains("cellSize", e);
+    }
   }
 
   @Test
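
The production-side change adds fail-fast argument checks before the existing alignment check. A minimal sketch of that validation pattern with Guava Preconditions (assumed names; the real composePolicyName builds the name from the full schema, this only shows the checks):

    import com.google.common.base.Preconditions;

    public class PolicyNameSketch {
      static String composeName(String codecName, int cellSize) {
        Preconditions.checkNotNull(codecName, "codec name must not be null");
        Preconditions.checkArgument(cellSize > 0, "cellSize must be positive");
        Preconditions.checkArgument(cellSize % 1024 == 0,
            "cellSize must be 1024 aligned");
        return codecName.toUpperCase() + "-" + (cellSize / 1024) + "k";
      }

      public static void main(String[] args) {
        System.out.println(composeName("rs", 65536));      // RS-64k
        // composeName(null, 65536)  -> NullPointerException
        // composeName("rs", -1)     -> IllegalArgumentException: cellSize must be positive
        // composeName("rs", 1000)   -> IllegalArgumentException: cellSize must be 1024 aligned
      }
    }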




[06/50] hadoop git commit: MAPREDUCE-6897. Add unit test to ensure the job end notification gets sent even when the appMaster's stop throws a YarnRuntimeException. Contributed by Gergely Novák

Posted by st...@apache.org.
MAPREDUCE-6897. Add unit test to ensure the job end notification gets sent even when the appMaster's stop throws a YarnRuntimeException. Contributed by Gergely Novák


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82bbcbf3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82bbcbf3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82bbcbf3

Branch: refs/heads/HADOOP-13345
Commit: 82bbcbf37f8137112a6270932b2ad7572785c387
Parents: 9ef2743
Author: Ravi Prakash <ra...@apache.org>
Authored: Fri Jun 16 14:25:12 2017 -0700
Committer: Ravi Prakash <ra...@apache.org>
Committed: Fri Jun 16 14:25:44 2017 -0700

----------------------------------------------------------------------
 .../mapreduce/v2/app/TestJobEndNotifier.java    | 25 +++++++++++++++++---
 1 file changed, 22 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82bbcbf3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java
index ecfa43c..d122a9b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.mapreduce.v2.app;
 
 import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
@@ -30,6 +31,7 @@ import java.io.PrintStream;
 import java.net.Proxy;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.nio.channels.ClosedChannelException;
 
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
@@ -53,6 +55,7 @@ import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
 import org.apache.hadoop.mapreduce.v2.app.rm.RMCommunicator;
 import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -197,8 +200,8 @@ public class TestJobEndNotifier extends JobEndNotifier {
 
   }
 
-  @Test
-  public void testNotificationOnLastRetryNormalShutdown() throws Exception {
+  private void testNotificationOnLastRetry(boolean withRuntimeException)
+      throws Exception {
     HttpServer2 server = startHttpServer();
     // Act like it is the second attempt. Default max attempts is 2
     MRApp app = spy(new MRAppWithCustomContainerAllocator(
@@ -210,17 +213,33 @@ public class TestJobEndNotifier extends JobEndNotifier {
     JobImpl job = (JobImpl)app.submit(conf);
     app.waitForInternalState(job, JobStateInternal.SUCCEEDED);
     // Unregistration succeeds: successfullyUnregistered is set
+    if (withRuntimeException) {
+      YarnRuntimeException runtimeException = new YarnRuntimeException(
+          new ClosedChannelException());
+      doThrow(runtimeException).when(app).stop();
+    }
     app.shutDownJob();
     Assert.assertTrue(app.isLastAMRetry());
     Assert.assertEquals(1, JobEndServlet.calledTimes);
     Assert.assertEquals("jobid=" + job.getID() + "&status=SUCCEEDED",
         JobEndServlet.requestUri.getQuery());
     Assert.assertEquals(JobState.SUCCEEDED.toString(),
-      JobEndServlet.foundJobState);
+        JobEndServlet.foundJobState);
     server.stop();
   }
 
   @Test
+  public void testNotificationOnLastRetryNormalShutdown() throws Exception {
+    testNotificationOnLastRetry(false);
+  }
+
+  @Test
+  public void testNotificationOnLastRetryShutdownWithRuntimeException()
+      throws Exception {
+    testNotificationOnLastRetry(true);
+  }
+
+  @Test
   public void testAbsentNotificationOnNotLastRetryUnregistrationFailure()
       throws Exception {
     HttpServer2 server = startHttpServer();
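
The key testing device above is stubbing a spied object so one method throws. A self-contained sketch of that Mockito pattern (hypothetical Service class, not MRApp code):

    import static org.mockito.Mockito.doThrow;
    import static org.mockito.Mockito.spy;

    public class StopFailureSketch {
      static class Service {
        void stop() { /* normally shuts down cleanly */ }
      }

      public static void main(String[] args) {
        Service service = spy(new Service());
        // make stop() fail, as the test does with YarnRuntimeException on app.stop()
        doThrow(new RuntimeException("simulated stop failure")).when(service).stop();

        boolean notificationSent = false;
        try {
          service.stop();
        } catch (RuntimeException expected) {
          // swallowed: the caller is expected to keep going
        } finally {
          notificationSent = true;  // the job end notification must still happen
        }
        System.out.println("notification sent: " + notificationSent);
      }
    }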




[40/50] hadoop git commit: HDFS-12009. Accept human-friendly units in dfsadmin -setBalancerBandwidth and -setQuota. Contributed by Andrew Wang.

Posted by st...@apache.org.
HDFS-12009. Accept human-friendly units in dfsadmin -setBalancerBandwidth and -setQuota. Contributed by Andrew Wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68dc7c24
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68dc7c24
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68dc7c24

Branch: refs/heads/HADOOP-13345
Commit: 68dc7c24050951f4709f7fe5363e11a003cf53eb
Parents: 740204b
Author: Xiao Chen <xi...@apache.org>
Authored: Thu Jun 22 19:56:43 2017 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Thu Jun 22 19:57:54 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |  5 +--
 .../java/org/apache/hadoop/hdfs/TestQuota.java  | 13 +++++--
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 36 ++++++++++++++++++++
 3 files changed, 50 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68dc7c24/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 70509d4..b771ff0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -195,7 +195,8 @@ public class DFSAdmin extends FsShell {
       super(conf);
       CommandFormat c = new CommandFormat(2, Integer.MAX_VALUE);
       List<String> parameters = c.parse(args, pos);
-      this.quota = Long.parseLong(parameters.remove(0));
+      this.quota =
+          StringUtils.TraditionalBinaryPrefix.string2long(parameters.remove(0));
       this.args = parameters.toArray(new String[parameters.size()]);
     }
     
@@ -938,7 +939,7 @@ public class DFSAdmin extends FsShell {
     int exitCode = -1;
 
     try {
-      bandwidth = Long.parseLong(argv[idx]);
+      bandwidth = StringUtils.TraditionalBinaryPrefix.string2long(argv[idx]);
     } catch (NumberFormatException nfe) {
       System.err.println("NumberFormatException: " + nfe.getMessage());
       System.err.println("Usage: hdfs dfsadmin"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68dc7c24/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index 4bfb62c..dd1d538 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -184,11 +184,20 @@ public class TestQuota {
     final short replication = 5;
     final long spaceQuota = fileLen * replication * 15 / 8;
 
-    // 1: create a directory test and set its quota to be 3
+    // 1: create a test directory
     final Path parent = new Path(dir, "test");
     assertTrue(dfs.mkdirs(parent));
-    String[] args = new String[]{"-setQuota", "3", parent.toString()};
+
+    // Try setting name quota with suffixes
+    String[] args;
+    args = new String[]{"-setQuota", "3K", parent.toString()};
+    runCommand(admin, args, false);
+    args = new String[]{"-setQuota", "3m", parent.toString()};
     runCommand(admin, args, false);
+    // Set the final name quota to 3
+    args = new String[]{"-setQuota", "3", parent.toString()};
+    runCommand(admin, args, false);
+
 
     //try setting space quota with a 'binary prefix'
     runCommand(admin, false, "-setSpaceQuota", "2t", parent.toString());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68dc7c24/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 2ef45e7..ed43dc0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -696,4 +696,40 @@ public class TestDFSAdmin {
         client.getDatanodeStorageReport(DatanodeReportType.DEAD).length);
     assertEquals(numCorruptBlocks, client.getCorruptBlocksCount());
   }
+
+  @Test
+  public void testSetBalancerBandwidth() throws Exception {
+    redirectStream();
+
+    final DFSAdmin dfsAdmin = new DFSAdmin(conf);
+    String outStr;
+
+    // Test basic case: 10000
+    assertEquals(0, ToolRunner.run(dfsAdmin,
+        new String[]{"-setBalancerBandwidth", "10000"}));
+    outStr = scanIntoString(out);
+    assertTrue("Did not set bandwidth!", outStr.contains("Balancer " +
+        "bandwidth is set to 10000"));
+
+    // Test parsing with units
+    resetStream();
+    assertEquals(0, ToolRunner.run(dfsAdmin,
+        new String[]{"-setBalancerBandwidth", "10m"}));
+    outStr = scanIntoString(out);
+    assertTrue("Did not set bandwidth!", outStr.contains("Balancer " +
+        "bandwidth is set to 10485760"));
+
+    resetStream();
+    assertEquals(0, ToolRunner.run(dfsAdmin,
+        new String[]{"-setBalancerBandwidth", "10k"}));
+    outStr = scanIntoString(out);
+    assertTrue("Did not set bandwidth!", outStr.contains("Balancer " +
+        "bandwidth is set to 10240"));
+
+    // Test negative numbers
+    assertEquals(-1, ToolRunner.run(dfsAdmin,
+        new String[]{"-setBalancerBandwidth", "-10000"}));
+    assertEquals(-1, ToolRunner.run(dfsAdmin,
+        new String[]{"-setBalancerBandwidth", "-10m"}));
+  }
 }
\ No newline at end of file
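
A short sketch of the unit parsing this change switches to; the k/m values match the expectations asserted in TestDFSAdmin above, and the 2t value follows the same binary-prefix rule used by the space-quota test:

    import org.apache.hadoop.util.StringUtils;

    public class BinaryPrefixSketch {
      public static void main(String[] args) {
        // plain numbers still parse unchanged
        System.out.println(StringUtils.TraditionalBinaryPrefix.string2long("10000")); // 10000
        // binary suffixes: k = 1024, m = 1024^2, t = 1024^4
        System.out.println(StringUtils.TraditionalBinaryPrefix.string2long("10k"));   // 10240
        System.out.println(StringUtils.TraditionalBinaryPrefix.string2long("10m"));   // 10485760
        System.out.println(StringUtils.TraditionalBinaryPrefix.string2long("2t"));    // 2199023255552
      }
    }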




[24/50] hadoop git commit: HDFS-11978. Remove invalid '-usage' command of 'ec' and add missing commands 'addPolicies' 'listCodecs' in doc. Contributed by wenxin he.

Posted by st...@apache.org.
HDFS-11978. Remove invalid '-usage' command of 'ec' and add missing commands 'addPolicies' 'listCodecs' in doc. Contributed by wenxin he.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b654a49
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b654a49
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b654a49

Branch: refs/heads/HADOOP-13345
Commit: 2b654a493cb88798bc5572b868ee1ffb411a07cb
Parents: dd65eea
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Tue Jun 20 09:36:55 2017 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Tue Jun 20 09:36:55 2017 -0700

----------------------------------------------------------------------
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md               | 5 ++++-
 .../hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md          | 3 +--
 2 files changed, 5 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b654a49/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index be0f89e..38bdc40 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -444,7 +444,8 @@ Usage:
          [-getPolicy -path <path>]
          [-unsetPolicy -path <path>]
          [-listPolicies]
-         [-usage [cmd ...]]
+         [-addPolicies -policyFile <file>]
+         [-listCodecs]
          [-help [cmd ...]]
 
 | COMMAND\_OPTION | Description |
@@ -453,6 +454,8 @@ Usage:
 |-getPolicy| Get ErasureCoding policy information about a specified path|
 |-unsetPolicy| Unset an ErasureCoding policy set by a previous call to "setPolicy" on a directory |
 |-listPolicies| Lists all supported ErasureCoding policies|
+|-addPolicies| Add a list of erasure coding policies|
+|-listCodecs| Get the list of supported erasure coding codecs and coders in system|
 
 Runs the ErasureCoding CLI. See [HDFS ErasureCoding](./HDFSErasureCoding.html#Administrative_commands) for more information on this command.
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b654a49/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index dcb56f8..51f54a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -156,7 +156,6 @@ Deployment
          [-listPolicies]
          [-addPolicies -policyFile <file>]
          [-listCodecs]
-         [-usage [cmd ...]]
          [-help [cmd ...]]
 
 Below are the details about each command.
@@ -187,4 +186,4 @@ Below are the details about each command.
 
  *  `[-listCodecs]`
 
-     Get the list of supported erasure coding codecs and coders in system. A coder is an implementation of a codec. A codec can have different implementations, thus different coders. The coders for a codec are listed in a fall back order.
\ No newline at end of file
+     Get the list of supported erasure coding codecs and coders in system. A coder is an implementation of a codec. A codec can have different implementations, thus different coders. The coders for a codec are listed in a fall back order.




[26/50] hadoop git commit: HADOOP-14429. FTPFileSystem#getFsAction always returns FsAction.NONE. (Hongyuan Li via Yongjun Zhang)

Posted by st...@apache.org.
HADOOP-14429. FTPFileSystem#getFsAction always returns FsAction.NONE. (Hongyuan Li via Yongjun Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5157f6c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5157f6c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5157f6c4

Branch: refs/heads/HADOOP-13345
Commit: 5157f6c46ec342fb650b3c5853061ed1e4a182b6
Parents: 45ff4d3
Author: Yongjun Zhang <yz...@cloudera.com>
Authored: Tue Jun 20 16:51:13 2017 -0700
Committer: Yongjun Zhang <yz...@cloudera.com>
Committed: Tue Jun 20 16:51:13 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/fs/ftp/FTPFileSystem.java |  9 ++--
 .../apache/hadoop/fs/ftp/TestFTPFileSystem.java | 55 ++++++++++++++++++++
 2 files changed, 60 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5157f6c4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
index 6ce39c1..5f4c8552 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
@@ -415,16 +415,17 @@ public class FTPFileSystem extends FileSystem {
     return client.removeDirectory(pathName);
   }
 
-  private FsAction getFsAction(int accessGroup, FTPFile ftpFile) {
+  @VisibleForTesting
+  FsAction getFsAction(int accessGroup, FTPFile ftpFile) {
     FsAction action = FsAction.NONE;
     if (ftpFile.hasPermission(accessGroup, FTPFile.READ_PERMISSION)) {
-      action.or(FsAction.READ);
+      action = action.or(FsAction.READ);
     }
     if (ftpFile.hasPermission(accessGroup, FTPFile.WRITE_PERMISSION)) {
-      action.or(FsAction.WRITE);
+      action = action.or(FsAction.WRITE);
     }
     if (ftpFile.hasPermission(accessGroup, FTPFile.EXECUTE_PERMISSION)) {
-      action.or(FsAction.EXECUTE);
+      action = action.or(FsAction.EXECUTE);
     }
     return action;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5157f6c4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java
index 0604604..aab52ae 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java
@@ -17,14 +17,18 @@
  */
 package org.apache.hadoop.fs.ftp;
 
+import com.google.common.base.Preconditions;
 import org.apache.commons.net.ftp.FTP;
 
 import org.apache.commons.net.ftp.FTPClient;
+import org.apache.commons.net.ftp.FTPFile;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
 
+
 import static org.junit.Assert.assertEquals;
 
 /**
@@ -82,4 +86,55 @@ public class TestFTPFileSystem {
         client.getDataConnectionMode());
 
   }
+
+  @Test
+  public void testGetFsAction(){
+    FTPFileSystem ftp = new FTPFileSystem();
+    int[] accesses = new int[] {FTPFile.USER_ACCESS, FTPFile.GROUP_ACCESS,
+        FTPFile.WORLD_ACCESS};
+    FsAction[] actions = FsAction.values();
+    for(int i = 0; i < accesses.length; i++){
+      for(int j = 0; j < actions.length; j++){
+        enhancedAssertEquals(actions[j], ftp.getFsAction(accesses[i],
+            getFTPFileOf(accesses[i], actions[j])));
+      }
+    }
+  }
+
+  private void enhancedAssertEquals(FsAction actionA, FsAction actionB){
+    String notNullErrorMessage = "FsAction cannot be null here.";
+    Preconditions.checkNotNull(actionA, notNullErrorMessage);
+    Preconditions.checkNotNull(actionB, notNullErrorMessage);
+    String errorMessageFormat = "expect FsAction is %s, whereas it is %s now.";
+    String notEqualErrorMessage = String.format(errorMessageFormat,
+        actionA.name(), actionB.name());
+    assertEquals(notEqualErrorMessage, actionA, actionB);
+  }
+
+  private FTPFile getFTPFileOf(int access, FsAction action) {
+    boolean check = access == FTPFile.USER_ACCESS ||
+                      access == FTPFile.GROUP_ACCESS ||
+                      access == FTPFile.WORLD_ACCESS;
+    String errorFormat = "access must be in [%d,%d,%d], but it is %d now.";
+    String errorMessage = String.format(errorFormat, FTPFile.USER_ACCESS,
+         FTPFile.GROUP_ACCESS, FTPFile.WORLD_ACCESS, access);
+    Preconditions.checkArgument(check, errorMessage);
+    Preconditions.checkNotNull(action);
+    FTPFile ftpFile = new FTPFile();
+
+    if(action.implies(FsAction.READ)){
+      ftpFile.setPermission(access, FTPFile.READ_PERMISSION, true);
+    }
+
+    if(action.implies(FsAction.WRITE)){
+      ftpFile.setPermission(access, FTPFile.WRITE_PERMISSION, true);
+    }
+
+    if(action.implies(FsAction.EXECUTE)){
+      ftpFile.setPermission(access, FTPFile.EXECUTE_PERMISSION, true);
+    }
+
+    return ftpFile;
+  }
+
 }
\ No newline at end of file
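
The one-line fix above matters because FsAction is an enum: or() returns a new value rather than mutating the receiver, so discarding the return value silently drops the permission. A minimal illustration:

    import org.apache.hadoop.fs.permission.FsAction;

    public class FsActionOrSketch {
      public static void main(String[] args) {
        FsAction action = FsAction.NONE;

        action.or(FsAction.READ);             // buggy: result discarded
        System.out.println(action.name());    // still NONE

        action = action.or(FsAction.READ);    // fixed: reassign the result
        System.out.println(action.name());    // READ
      }
    }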




[44/50] hadoop git commit: YARN-5876. TestResourceTrackerService#testGracefulDecommissionWithApp fails intermittently on trunk. (Robert Kanter via Yufei Gu)

Posted by st...@apache.org.
YARN-5876. TestResourceTrackerService#testGracefulDecommissionWithApp fails intermittently on trunk. (Robert Kanter via Yufei Gu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b772628
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b772628
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b772628

Branch: refs/heads/HADOOP-13345
Commit: 0b77262890d76b0a3a35fa64befc8a406bc70b27
Parents: abdea26
Author: Yufei Gu <yu...@apache.org>
Authored: Fri Jun 23 13:26:03 2017 -0700
Committer: Yufei Gu <yu...@apache.org>
Committed: Fri Jun 23 13:26:03 2017 -0700

----------------------------------------------------------------------
 .../yarn/server/resourcemanager/MockRM.java     | 27 +++++++++++++++-----
 1 file changed, 20 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b772628/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index 2ff4fb2..23009db 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -120,7 +120,7 @@ public class MockRM extends ResourceManager {
   private static final int SECOND = 1000;
   private static final int TIMEOUT_MS_FOR_ATTEMPT = 40 * SECOND;
   private static final int TIMEOUT_MS_FOR_APP_REMOVED = 40 * SECOND;
-  private static final int TIMEOUT_MS_FOR_CONTAINER_AND_NODE = 10 * SECOND;
+  private static final int TIMEOUT_MS_FOR_CONTAINER_AND_NODE = 20 * SECOND;
   private static final int WAIT_MS_PER_LOOP = 10;
 
   private final boolean useNullRMNodeLabelsManager;
@@ -853,9 +853,17 @@ public class MockRM extends ResourceManager {
     drainEventsImplicitly();
   }
 
+  private RMNode getRMNode(NodeId nodeId) {
+    RMNode node = getRMContext().getRMNodes().get(nodeId);
+    if (node == null) {
+      node = getRMContext().getInactiveRMNodes().get(nodeId);
+    }
+    return node;
+  }
+
   /**
    * Wait until a node has reached a specified state.
-   * The timeout is 10 seconds.
+   * The timeout is 20 seconds.
    * @param nodeId the id of a node
    * @param finalState the node state waited
    * @throws InterruptedException
@@ -864,12 +872,17 @@ public class MockRM extends ResourceManager {
   public void waitForState(NodeId nodeId, NodeState finalState)
       throws InterruptedException {
     drainEventsImplicitly();
-    RMNode node = getRMContext().getRMNodes().get(nodeId);
-    if (node == null) {
-      node = getRMContext().getInactiveRMNodes().get(nodeId);
-    }
-    Assert.assertNotNull("node shouldn't be null", node);
     int timeWaiting = 0;
+    RMNode node = getRMNode(nodeId);
+    while (node == null) {
+      if (timeWaiting >= TIMEOUT_MS_FOR_CONTAINER_AND_NODE) {
+        break;
+      }
+      node = getRMNode(nodeId);
+      Thread.sleep(WAIT_MS_PER_LOOP);
+      timeWaiting += WAIT_MS_PER_LOOP;
+    }
+    Assert.assertNotNull("node shouldn't be null (timedout)", node);
     while (!finalState.equals(node.getState())) {
       if (timeWaiting >= TIMEOUT_MS_FOR_CONTAINER_AND_NODE) {
         break;
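
The fix replaces a single lookup-plus-assert with a bounded polling loop, since the node may not have registered yet when waitForState is first called. A generic version of that wait pattern (assumed names, not MockRM code):

    import java.util.function.Supplier;

    public class WaitSketch {
      static <T> T waitForNonNull(Supplier<T> probe, long timeoutMs, long intervalMs)
          throws InterruptedException {
        long waited = 0;
        T value = probe.get();
        while (value == null && waited < timeoutMs) {
          Thread.sleep(intervalMs);          // back off briefly between probes
          waited += intervalMs;
          value = probe.get();
        }
        return value;                        // may still be null if the deadline passed
      }

      public static void main(String[] args) throws InterruptedException {
        long start = System.currentTimeMillis();
        String result = waitForNonNull(
            () -> System.currentTimeMillis() - start > 50 ? "node-registered" : null,
            20000, 10);
        System.out.println(result);
      }
    }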




[17/50] hadoop git commit: HADOOP-14296. Move logging APIs over to slf4j in hadoop-tools.

Posted by st...@apache.org.
HADOOP-14296. Move logging APIs over to slf4j in hadoop-tools.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33695406
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33695406
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33695406

Branch: refs/heads/HADOOP-13345
Commit: 3369540653a41dd0194b65f5ef1d53225fb97ba8
Parents: 099cbb4
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Jun 20 13:18:26 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Tue Jun 20 13:18:26 2017 +0900

----------------------------------------------------------------------
 .../TestFileSystemOperationsWithThreads.java    |  8 +--
 .../TestNativeAzureFileSystemClientLogging.java | 12 ++--
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   | 48 ++++++--------
 .../hadoop/yarn/sls/appmaster/AMSimulator.java  | 16 ++---
 .../yarn/sls/appmaster/MRAMSimulator.java       | 68 +++++++++-----------
 .../yarn/sls/nodemanager/NMSimulator.java       | 30 ++++-----
 .../yarn/sls/scheduler/SchedulerMetrics.java    |  8 ++-
 7 files changed, 88 insertions(+), 102 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33695406/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java
index 64316f6..343391f 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsWithThreads.java
@@ -27,18 +27,18 @@ import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.apache.log4j.Logger;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Tests the Native Azure file system (WASB) using parallel threads for rename and delete operations.
@@ -68,8 +68,8 @@ public class TestFileSystemOperationsWithThreads extends AbstractWasbTestBase {
     fs.initialize(uri, conf);
 
     // Capture logs
-    logs = LogCapturer.captureLogs(new Log4JLogger(Logger
-        .getRootLogger()));
+    logs = LogCapturer.captureLogs(
+        LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME));
   }
 
   /*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33695406/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
index c9cce75..caecfde 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
@@ -24,12 +24,12 @@ import static org.junit.Assert.assertTrue;
 import java.net.URI;
 import java.util.StringTokenizer;
 
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.apache.log4j.Logger;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Test to validate Azure storage client side logging. Tests works only when
@@ -97,8 +97,8 @@ public class TestNativeAzureFileSystemClientLogging
   @Test
   public void testLoggingEnabled() throws Exception {
 
-    LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger
-        .getRootLogger()));
+    LogCapturer logs = LogCapturer.captureLogs(
+        LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME));
 
     // Update configuration based on the Test.
     updateFileSystemConfiguration(true);
@@ -111,8 +111,8 @@ public class TestNativeAzureFileSystemClientLogging
   @Test
   public void testLoggingDisabled() throws Exception {
 
-    LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger
-        .getRootLogger()));
+    LogCapturer logs = LogCapturer.captureLogs(
+        LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME));
 
     // Update configuration based on the Test.
     updateFileSystemConfiguration(false);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33695406/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
index a77d401..02da056 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
@@ -22,7 +22,6 @@ import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.Reader;
-import java.text.MessageFormat;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -85,7 +84,8 @@ import org.apache.hadoop.yarn.sls.synthetic.SynthTraceJobProducer;
 import org.apache.hadoop.yarn.sls.utils.SLSUtils;
 import org.apache.hadoop.yarn.util.UTCClock;
 import org.apache.hadoop.yarn.util.resource.Resources;
-import org.apache.log4j.Logger;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @Private
 @Unstable
@@ -120,7 +120,7 @@ public class SLSRunner extends Configured implements Tool {
           new HashMap<String, Object>();
 
   // logger
-  public final static Logger LOG = Logger.getLogger(SLSRunner.class);
+  public final static Logger LOG = LoggerFactory.getLogger(SLSRunner.class);
 
   private final static int DEFAULT_MAPPER_PRIORITY = 20;
   private final static int DEFAULT_REDUCER_PRIORITY = 10;
@@ -322,14 +322,12 @@ public class SLSRunner extends Configured implements Tool {
       if (numRunningNodes == numNMs) {
         break;
       }
-      LOG.info(MessageFormat.format(
-          "SLSRunner is waiting for all "
-              + "nodes RUNNING. {0} of {1} NMs initialized.",
-          numRunningNodes, numNMs));
+      LOG.info("SLSRunner is waiting for all nodes RUNNING."
+          + " {} of {} NMs initialized.", numRunningNodes, numNMs);
       Thread.sleep(1000);
     }
-    LOG.info(MessageFormat.format("SLSRunner takes {0} ms to launch all nodes.",
-        (System.currentTimeMillis() - startTimeMS)));
+    LOG.info("SLSRunner takes {} ms to launch all nodes.",
+        System.currentTimeMillis() - startTimeMS);
   }
 
   @SuppressWarnings("unchecked")
@@ -375,7 +373,7 @@ public class SLSRunner extends Configured implements Tool {
         try {
           createAMForJob(jobIter.next());
         } catch (Exception e) {
-          LOG.error("Failed to create an AM: " + e.getMessage());
+          LOG.error("Failed to create an AM: {}", e.getMessage());
         }
       }
     }
@@ -497,7 +495,7 @@ public class SLSRunner extends Configured implements Tool {
         try {
           createAMForJob(job, baselineTimeMS);
         } catch (Exception e) {
-          LOG.error("Failed to create an AM: " + e.getMessage());
+          LOG.error("Failed to create an AM: {}", e.getMessage());
         }
 
         job = reader.getNext();
@@ -519,7 +517,7 @@ public class SLSRunner extends Configured implements Tool {
     jobStartTimeMS -= baselineTimeMs;
     jobFinishTimeMS -= baselineTimeMs;
     if (jobStartTimeMS < 0) {
-      LOG.warn("Warning: reset job " + oldJobId + " start time to 0.");
+      LOG.warn("Warning: reset job {} start time to 0.", oldJobId);
       jobFinishTimeMS = jobFinishTimeMS - jobStartTimeMS;
       jobStartTimeMS = 0;
     }
@@ -610,7 +608,7 @@ public class SLSRunner extends Configured implements Tool {
         jobStartTimeMS -= baselineTimeMS;
         jobFinishTimeMS -= baselineTimeMS;
         if (jobStartTimeMS < 0) {
-          LOG.warn("Warning: reset job " + oldJobId + " start time to 0.");
+          LOG.warn("Warning: reset job {} start time to 0.", oldJobId);
           jobFinishTimeMS = jobFinishTimeMS - jobStartTimeMS;
           jobStartTimeMS = 0;
         }
@@ -726,16 +724,14 @@ public class SLSRunner extends Configured implements Tool {
     if (printSimulation) {
       // node
       LOG.info("------------------------------------");
-      LOG.info(MessageFormat.format(
-          "# nodes = {0}, # racks = {1}, capacity "
-              + "of each node {2} MB memory and {3} vcores.",
-          numNMs, numRacks, nmMemoryMB, nmVCores));
+      LOG.info("# nodes = {}, # racks = {}, capacity " +
+              "of each node {} MB memory and {} vcores.",
+              numNMs, numRacks, nmMemoryMB, nmVCores);
       LOG.info("------------------------------------");
       // job
-      LOG.info(MessageFormat.format(
-          "# applications = {0}, # total "
-              + "tasks = {1}, average # tasks per application = {2}",
-          numAMs, numTasks, (int) (Math.ceil((numTasks + 0.0) / numAMs))));
+      LOG.info("# applications = {}, # total " +
+              "tasks = {}, average # tasks per application = {}",
+              numAMs, numTasks, (int)(Math.ceil((numTasks + 0.0) / numAMs)));
       LOG.info("JobId\tQueue\tAMType\tDuration\t#Tasks");
       for (Map.Entry<String, AMSimulator> entry : amMap.entrySet()) {
         AMSimulator am = entry.getValue();
@@ -744,15 +740,13 @@ public class SLSRunner extends Configured implements Tool {
       }
       LOG.info("------------------------------------");
       // queue
-      LOG.info(MessageFormat.format(
-          "number of queues = {0}  average " + "number of apps = {1}",
+      LOG.info("number of queues = {}  average number of apps = {}",
           queueAppNumMap.size(),
-          (int) (Math.ceil((numAMs + 0.0) / queueAppNumMap.size()))));
+          (int)(Math.ceil((numAMs + 0.0) / queueAppNumMap.size())));
       LOG.info("------------------------------------");
       // runtime
-      LOG.info(
-          MessageFormat.format("estimated simulation time is {0}" + " seconds",
-              (long) (Math.ceil(maxRuntime / 1000.0))));
+      LOG.info("estimated simulation time is {} seconds",
+          (long)(Math.ceil(maxRuntime / 1000.0)));
       LOG.info("------------------------------------");
     }
     // package these information in the simulateInfoMap used by other places
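
The pattern throughout this patch is the same: MessageFormat.format calls with {0}/{1} placeholders become SLF4J parameterized {} logging, which drops the java.text import and defers message construction until the log level is actually enabled. A minimal standalone sketch of the two styles (the class and variable names here are illustrative, not part of the patch):

    import java.text.MessageFormat;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggingStyles {
      private static final Logger LOG =
          LoggerFactory.getLogger(LoggingStyles.class);

      public static void main(String[] args) {
        int running = 3;
        int total = 10;

        // Old style: the message is built eagerly, even when INFO is disabled.
        LOG.info(MessageFormat.format(
            "SLSRunner is waiting for all nodes RUNNING. {0} of {1} NMs initialized.",
            running, total));

        // New style: SLF4J substitutes {} lazily, so there is no formatting
        // cost when the level is off and the java.text import goes away.
        LOG.info("SLSRunner is waiting for all nodes RUNNING."
            + " {} of {} NMs initialized.", running, total);
      }
    }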

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33695406/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
index 70c5579..7ce3ef0 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
@@ -22,7 +22,6 @@ import java.io.IOException;
 import java.lang.reflect.UndeclaredThrowableException;
 import java.nio.ByteBuffer;
 import java.security.PrivilegedExceptionAction;
-import java.text.MessageFormat;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -68,13 +67,13 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.sls.scheduler.SchedulerMetrics;
 import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.resource.Resources;
-import org.apache.log4j.Logger;
-
 import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator;
 import org.apache.hadoop.yarn.sls.scheduler.SchedulerWrapper;
 import org.apache.hadoop.yarn.sls.SLSRunner;
 import org.apache.hadoop.yarn.sls.scheduler.TaskRunner;
 import org.apache.hadoop.yarn.sls.utils.SLSUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @Private
 @Unstable
@@ -115,7 +114,7 @@ public abstract class AMSimulator extends TaskRunner.Task {
   volatile boolean isAMContainerRunning = false;
   volatile Container amContainer;
   
-  protected final Logger LOG = Logger.getLogger(AMSimulator.class);
+  private static final Logger LOG = LoggerFactory.getLogger(AMSimulator.class);
 
   // resource for AM container
   private final static int MR_AM_CONTAINER_RESOURCE_MEMORY_MB = 1024;
@@ -216,7 +215,7 @@ public abstract class AMSimulator extends TaskRunner.Task {
 
   @Override
   public void lastStep() throws Exception {
-    LOG.info(MessageFormat.format("Application {0} is shutting down.", appId));
+    LOG.info("Application {} is shutting down.", appId);
     // unregister tracking
     if (isTracked) {
       untrackApp();
@@ -224,7 +223,7 @@ public abstract class AMSimulator extends TaskRunner.Task {
 
     // Finish AM container
     if (amContainer != null) {
-      LOG.info("AM container = " + amContainer.getId() + " reported to finish");
+      LOG.info("AM container = {} reported to finish", amContainer.getId());
       se.getNmMap().get(amContainer.getNodeId()).cleanupContainer(
           amContainer.getId());
     } else {
@@ -343,7 +342,7 @@ public abstract class AMSimulator extends TaskRunner.Task {
         return null;
       }
     });
-    LOG.info(MessageFormat.format("Submit a new application {0}", appId));
+    LOG.info("Submit a new application {}", appId);
   }
 
   private void registerAM()
@@ -370,8 +369,7 @@ public abstract class AMSimulator extends TaskRunner.Task {
       }
     });
 
-    LOG.info(MessageFormat.format(
-            "Register the application master for application {0}", appId));
+    LOG.info("Register the application master for application {}", appId);
   }
 
   private void trackApp() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33695406/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
index de6d19d..7ac30ab 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.yarn.sls.appmaster;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
-import java.text.MessageFormat;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.LinkedList;
@@ -42,10 +41,10 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
-
 import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator;
 import org.apache.hadoop.yarn.sls.SLSRunner;
-import org.apache.log4j.Logger;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @Private
 @Unstable
@@ -111,7 +110,8 @@ public class MRAMSimulator extends AMSimulator {
   // finished
   private boolean isFinished = false;
 
-  public final Logger LOG = Logger.getLogger(MRAMSimulator.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(MRAMSimulator.class);
 
   @SuppressWarnings("checkstyle:parameternumber")
   public void init(int id, int heartbeatInterval,
@@ -135,9 +135,8 @@ public class MRAMSimulator extends AMSimulator {
       }
     }
 
-    LOG.info(MessageFormat
-        .format("Added new job with {0} mapper and {1} reducers",
-            allMaps.size(), allReduces.size()));
+    LOG.info("Added new job with {} mapper and {} reducers",
+        allMaps.size(), allReduces.size());
 
     mapTotal = allMaps.size();
     reduceTotal = allReduces.size();
@@ -165,22 +164,21 @@ public class MRAMSimulator extends AMSimulator {
           ContainerId containerId = cs.getContainerId();
           if (cs.getExitStatus() == ContainerExitStatus.SUCCESS) {
             if (assignedMaps.containsKey(containerId)) {
-              LOG.debug(MessageFormat.format("Application {0} has one" +
-                      "mapper finished ({1}).", appId, containerId));
+              LOG.debug("Application {} has one mapper finished ({}).",
+                  appId, containerId);
               assignedMaps.remove(containerId);
               mapFinished ++;
               finishedContainers ++;
             } else if (assignedReduces.containsKey(containerId)) {
-              LOG.debug(MessageFormat.format("Application {0} has one" +
-                      "reducer finished ({1}).", appId, containerId));
+              LOG.debug("Application {} has one reducer finished ({}).",
+                  appId, containerId);
               assignedReduces.remove(containerId);
               reduceFinished ++;
               finishedContainers ++;
             } else if (amContainer.getId().equals(containerId)){
               // am container released event
               isFinished = true;
-              LOG.info(MessageFormat.format("Application {0} goes to " +
-                      "finish.", appId));
+              LOG.info("Application {} goes to finish.", appId);
             }
 
             if (mapFinished >= mapTotal && reduceFinished >= reduceTotal) {
@@ -189,16 +187,16 @@ public class MRAMSimulator extends AMSimulator {
           } else {
             // container to be killed
             if (assignedMaps.containsKey(containerId)) {
-              LOG.debug(MessageFormat.format("Application {0} has one " +
-                      "mapper killed ({1}).", appId, containerId));
+              LOG.debug("Application {} has one mapper killed ({}).",
+                  appId, containerId);
               pendingFailedMaps.add(assignedMaps.remove(containerId));
             } else if (assignedReduces.containsKey(containerId)) {
-              LOG.debug(MessageFormat.format("Application {0} has one " +
-                      "reducer killed ({1}).", appId, containerId));
+              LOG.debug("Application {} has one reducer killed ({}).",
+                  appId, containerId);
               pendingFailedReduces.add(assignedReduces.remove(containerId));
             } else if (amContainer.getId().equals(containerId)){
-              LOG.info(MessageFormat.format("Application {0}'s AM is " +
-                      "going to be killed. Waiting for rescheduling...", appId));
+              LOG.info("Application {}'s AM is " +
+                  "going to be killed. Waiting for rescheduling...", appId);
             }
           }
         }
@@ -209,8 +207,8 @@ public class MRAMSimulator extends AMSimulator {
               (mapFinished >= mapTotal) &&
               (reduceFinished >= reduceTotal)) {
         isAMContainerRunning = false;
-        LOG.debug(MessageFormat.format("Application {0} sends out event " +
-                "to clean up its AM container.", appId));
+        LOG.debug("Application {} sends out event to clean up"
+            + " its AM container.", appId);
         isFinished = true;
         break;
       }
@@ -219,15 +217,15 @@ public class MRAMSimulator extends AMSimulator {
       for (Container container : response.getAllocatedContainers()) {
         if (! scheduledMaps.isEmpty()) {
           ContainerSimulator cs = scheduledMaps.remove();
-          LOG.debug(MessageFormat.format("Application {0} starts a " +
-                  "launch a mapper ({1}).", appId, container.getId()));
+          LOG.debug("Application {} starts to launch a mapper ({}).",
+              appId, container.getId());
           assignedMaps.put(container.getId(), cs);
           se.getNmMap().get(container.getNodeId())
                   .addNewContainer(container, cs.getLifeTime());
         } else if (! this.scheduledReduces.isEmpty()) {
           ContainerSimulator cs = scheduledReduces.remove();
-          LOG.debug(MessageFormat.format("Application {0} starts a " +
-                  "launch a reducer ({1}).", appId, container.getId()));
+          LOG.debug("Application {} starts to launch a reducer ({}).",
+              appId, container.getId());
           assignedReduces.put(container.getId(), cs);
           se.getNmMap().get(container.getNodeId())
                   .addNewContainer(container, cs.getLifeTime());
@@ -289,17 +287,15 @@ public class MRAMSimulator extends AMSimulator {
       if (!pendingMaps.isEmpty()) {
         ask = packageRequests(mergeLists(pendingMaps, scheduledMaps),
             PRIORITY_MAP);
-        LOG.debug(MessageFormat
-            .format("Application {0} sends out " + "request for {1} mappers.",
-                appId, pendingMaps.size()));
+        LOG.debug("Application {} sends out request for {} mappers.",
+            appId, pendingMaps.size());
         scheduledMaps.addAll(pendingMaps);
         pendingMaps.clear();
       } else if (!pendingFailedMaps.isEmpty()) {
         ask = packageRequests(mergeLists(pendingFailedMaps, scheduledMaps),
             PRIORITY_MAP);
-        LOG.debug(MessageFormat.format(
-            "Application {0} sends out " + "requests for {1} failed mappers.",
-            appId, pendingFailedMaps.size()));
+        LOG.debug("Application {} sends out requests for {} failed mappers.",
+            appId, pendingFailedMaps.size());
         scheduledMaps.addAll(pendingFailedMaps);
         pendingFailedMaps.clear();
       }
@@ -308,17 +304,15 @@ public class MRAMSimulator extends AMSimulator {
       if (!pendingReduces.isEmpty()) {
         ask = packageRequests(mergeLists(pendingReduces, scheduledReduces),
             PRIORITY_REDUCE);
-        LOG.debug(MessageFormat
-            .format("Application {0} sends out " + "requests for {1} reducers.",
-                appId, pendingReduces.size()));
+        LOG.debug("Application {} sends out requests for {} reducers.",
+                appId, pendingReduces.size());
         scheduledReduces.addAll(pendingReduces);
         pendingReduces.clear();
       } else if (!pendingFailedReduces.isEmpty()) {
         ask = packageRequests(mergeLists(pendingFailedReduces, scheduledReduces),
             PRIORITY_REDUCE);
-        LOG.debug(MessageFormat.format(
-            "Application {0} sends out " + "request for {1} failed reducers.",
-            appId, pendingFailedReduces.size()));
+        LOG.debug("Application {} sends out request for {} failed reducers.",
+            appId, pendingFailedReduces.size());
         scheduledReduces.addAll(pendingFailedReduces);
         pendingFailedReduces.clear();
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33695406/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java
index 0947ba8..9197b1e 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.sls.nodemanager;
 
 import java.io.IOException;
-import java.text.MessageFormat;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -51,11 +50,11 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.Records;
-import org.apache.log4j.Logger;
-
 import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator;
 import org.apache.hadoop.yarn.sls.scheduler.TaskRunner;
 import org.apache.hadoop.yarn.sls.utils.SLSUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @Private
 @Unstable
@@ -74,7 +73,7 @@ public class NMSimulator extends TaskRunner.Task {
   private ResourceManager rm;
   // heart beat response id
   private int RESPONSE_ID = 1;
-  private final static Logger LOG = Logger.getLogger(NMSimulator.class);
+  private final static Logger LOG = LoggerFactory.getLogger(NMSimulator.class);
   
   public void init(String nodeIdStr, int memory, int cores,
           int dispatchTime, int heartBeatInterval, ResourceManager rm)
@@ -120,8 +119,7 @@ public class NMSimulator extends TaskRunner.Task {
       while ((cs = containerQueue.poll()) != null) {
         runningContainers.remove(cs.getId());
         completedContainerList.add(cs.getId());
-        LOG.debug(MessageFormat.format("Container {0} has completed",
-                cs.getId()));
+        LOG.debug("Container {} has completed", cs.getId());
       }
     }
     
@@ -148,14 +146,14 @@ public class NMSimulator extends TaskRunner.Task {
             synchronized(amContainerList) {
               amContainerList.remove(containerId);
             }
-            LOG.debug(MessageFormat.format("NodeManager {0} releases " +
-                "an AM ({1}).", node.getNodeID(), containerId));
+            LOG.debug("NodeManager {} releases an AM ({}).",
+                node.getNodeID(), containerId);
           } else {
             cs = runningContainers.remove(containerId);
             containerQueue.remove(cs);
             releasedContainerList.add(containerId);
-            LOG.debug(MessageFormat.format("NodeManager {0} releases a " +
-                "container ({1}).", node.getNodeID(), containerId));
+            LOG.debug("NodeManager {} releases a container ({}).",
+                node.getNodeID(), containerId);
           }
         }
       }
@@ -189,8 +187,8 @@ public class NMSimulator extends TaskRunner.Task {
     // add complete containers
     synchronized(completedContainerList) {
       for (ContainerId cId : completedContainerList) {
-        LOG.debug(MessageFormat.format("NodeManager {0} completed" +
-                " container ({1}).", node.getNodeID(), cId));
+        LOG.debug("NodeManager {} completed container ({}).",
+            node.getNodeID(), cId);
         csList.add(newContainerStatus(
                 cId, ContainerState.COMPLETE, ContainerExitStatus.SUCCESS));
       }
@@ -199,8 +197,8 @@ public class NMSimulator extends TaskRunner.Task {
     // released containers
     synchronized(releasedContainerList) {
       for (ContainerId cId : releasedContainerList) {
-        LOG.debug(MessageFormat.format("NodeManager {0} released container" +
-                " ({1}).", node.getNodeID(), cId));
+        LOG.debug("NodeManager {} released container ({}).",
+            node.getNodeID(), cId);
         csList.add(newContainerStatus(
                 cId, ContainerState.COMPLETE, ContainerExitStatus.ABORTED));
       }
@@ -227,8 +225,8 @@ public class NMSimulator extends TaskRunner.Task {
    * launch a new container with the given life time
    */
   public void addNewContainer(Container container, long lifeTimeMS) {
-    LOG.debug(MessageFormat.format("NodeManager {0} launches a new " +
-            "container ({1}).", node.getNodeID(), container.getId()));
+    LOG.debug("NodeManager {} launches a new container ({}).",
+        node.getNodeID(), container.getId());
     if (lifeTimeMS != -1) {
       // normal container
       ContainerSimulator cs = new ContainerSimulator(container.getId(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33695406/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SchedulerMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SchedulerMetrics.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SchedulerMetrics.java
index a8792e8..cabc94a 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SchedulerMetrics.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SchedulerMetrics.java
@@ -60,14 +60,16 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedule
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.sls.conf.SLSConfiguration;
 import org.apache.hadoop.yarn.sls.web.SLSWebApp;
-import org.apache.log4j.Logger;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @Private
 @Unstable
 public abstract class SchedulerMetrics {
   private static final String EOL = System.getProperty("line.separator");
   private static final int SAMPLING_SIZE = 60;
-  private static final Logger LOG = Logger.getLogger(SchedulerMetrics.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SchedulerMetrics.class);
 
   protected ResourceScheduler scheduler;
   protected Set<String> trackedQueues;
@@ -420,7 +422,7 @@ public abstract class SchedulerMetrics {
         SLSConfiguration.METRICS_RECORD_INTERVAL_MS_DEFAULT);
     File dir = new File(metricsOutputDir + "/metrics");
     if(!dir.exists() && !dir.mkdirs()) {
-      LOG.error("Cannot create directory " + dir.getAbsoluteFile());
+      LOG.error("Cannot create directory {}", dir.getAbsoluteFile());
     }
     final CsvReporter reporter = CsvReporter.forRegistry(metrics)
         .formatFor(Locale.US)




[11/50] hadoop git commit: HADOOP-14540. Replace MRv1 specific terms in HostsFileReader. Contributed by hu xiaodong.

Posted by st...@apache.org.
HADOOP-14540. Replace MRv1 specific terms in HostsFileReader. Contributed by hu xiaodong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30080459
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30080459
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30080459

Branch: refs/heads/HADOOP-13345
Commit: 300804595e5b8fd447ecb530fcd0ae5b0e59bb63
Parents: 7ade512
Author: Akira Ajisaka <aa...@apache.org>
Authored: Mon Jun 19 19:39:36 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Mon Jun 19 19:39:36 2017 +0900

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/util/HostsFileReader.java    | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30080459/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
index 2913b87..340f792 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
@@ -42,8 +42,8 @@ import org.w3c.dom.Node;
 import org.w3c.dom.NodeList;
 import org.xml.sax.SAXException;
 
-// Keeps track of which datanodes/tasktrackers are allowed to connect to the 
-// namenode/jobtracker.
+// Keeps track of which datanodes/nodemanagers are allowed to connect to the
+// namenode/resourcemanager.
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Unstable
 public class HostsFileReader {




[08/50] hadoop git commit: YARN-6517. Fix warnings from Spotbugs in hadoop-yarn-common(addendum). Contributed by Weiwei Yang.

Posted by st...@apache.org.
YARN-6517. Fix warnings from Spotbugs in hadoop-yarn-common(addendum). Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7582deda
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7582deda
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7582deda

Branch: refs/heads/HADOOP-13345
Commit: 7582dedad1c73eabdc3eeece0a3a860e7bb33c1a
Parents: 6460df2
Author: Naganarasimha <na...@apache.org>
Authored: Sun Jun 18 19:53:42 2017 +0530
Committer: Naganarasimha <na...@apache.org>
Committed: Sun Jun 18 19:53:42 2017 +0530

----------------------------------------------------------------------
 .../hadoop/yarn/logaggregation/AggregatedLogFormat.java  | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7582deda/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
index e5cb255..d806b12 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
@@ -310,12 +310,17 @@ public class AggregatedLogFormat {
     }
 
     private Set<File> getPendingLogFilesToUpload(File containerLogDir) {
-      if(containerLogDir == null ||
-          containerLogDir.listFiles() == null) {
+      if(containerLogDir == null) {
         return new HashSet<>(0);
       }
+
+      File[] filesList = containerLogDir.listFiles();
+      if (filesList == null) {
+        return new HashSet<>(0);
+      }
+
       Set<File> candidates =
-          new HashSet<File>(Arrays.asList(containerLogDir.listFiles()));
+          new HashSet<File>(Arrays.asList(filesList));
       for (File logFile : candidates) {
         this.allExistingFileMeta.add(getLogFileMetaData(logFile));
       }
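
The addendum caches the File.listFiles() result so it is null-checked once and then reused; the original code listed the directory twice, which wasted an extra I/O call and could still hand null to Arrays.asList if the directory vanished between the two calls. A self-contained sketch of the corrected pattern, with a hypothetical class name:

    import java.io.File;
    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class PendingFiles {
      static Set<File> pendingFiles(File dir) {
        if (dir == null) {
          return new HashSet<>(0);
        }
        // listFiles() returns null on I/O error or when dir is not a
        // directory; call it once, check the result, then reuse the array.
        File[] files = dir.listFiles();
        if (files == null) {
          return new HashSet<>(0);
        }
        return new HashSet<>(Arrays.asList(files));
      }

      public static void main(String[] args) {
        System.out.println(pendingFiles(new File(".")).size());
      }
    }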




[04/50] hadoop git commit: MAPREDUCE-6898. TestKill.testKillTask is flaky. Contributed by Peter Bacsko

Posted by st...@apache.org.
MAPREDUCE-6898. TestKill.testKillTask is flaky. Contributed by Peter Bacsko


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e78542f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e78542f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e78542f4

Branch: refs/heads/HADOOP-13345
Commit: e78542f4bea90e8545e9e06af9c07e89d2be43d9
Parents: 4ebc23b
Author: Jason Lowe <jl...@yahoo-inc.com>
Authored: Fri Jun 16 14:24:54 2017 -0500
Committer: Jason Lowe <jl...@yahoo-inc.com>
Committed: Fri Jun 16 14:24:54 2017 -0500

----------------------------------------------------------------------
 .../hadoop/mapreduce/v2/app/TestKill.java       | 36 +++++++++++++++++---
 1 file changed, 31 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e78542f4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
index f681cf8..176ac01 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
@@ -21,11 +21,15 @@ package org.apache.hadoop.mapreduce.v2.app;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.service.Service;
 import org.junit.Assert;
-
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.jobhistory.EventType;
+import org.apache.hadoop.mapreduce.jobhistory.TaskStartedEvent;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
@@ -100,12 +104,34 @@ public class TestKill {
   @Test
   public void testKillTask() throws Exception {
     final CountDownLatch latch = new CountDownLatch(1);
-    MRApp app = new BlockingMRApp(2, 0, latch);
+    final CountDownLatch jobRunning = new CountDownLatch(1);
+
+    final Dispatcher dispatcher = new AsyncDispatcher() {
+      protected void dispatch(Event event) {
+        // We have to wait until the internal state is RUNNING and not SETUP
+        // because it can cause a transition failure. If the dispatchable
+        // event is TASK_STARTED, we can continue because the job must be in
+        // RUNNING at that point.
+        if (event.getType() == EventType.TASK_STARTED) {
+          jobRunning.countDown();
+        }
+        super.dispatch(event);
+      }
+    };
+
+    MRApp app = new BlockingMRApp(2, 0, latch) {
+      @Override
+      public Dispatcher createDispatcher() {
+        return dispatcher;
+      }
+    };
+
     //this will start the job but job won't complete as Task is blocked
     Job job = app.submit(new Configuration());
-    
-    //wait and vailidate for Job to become RUNNING
-    app.waitForInternalState((JobImpl) job, JobStateInternal.RUNNING);
+
+    // wait until the job transitions to JobInternalState.RUNNING
+    jobRunning.await(10, TimeUnit.SECONDS);
+
     Map<TaskId,Task> tasks = job.getTasks();
     Assert.assertEquals("No of tasks is not correct", 2, 
         tasks.size());
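
The fix replaces polling for JobStateInternal.RUNNING with a CountDownLatch that the overridden dispatcher releases once a TASK_STARTED event is seen, so the kill can no longer race with the SETUP-to-RUNNING transition. A rough sketch of that latch-on-event idiom outside the MRApp machinery (all names below are stand-ins):

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;

    public class LatchOnEvent {
      enum EventType { SETUP, TASK_STARTED }

      public static void main(String[] args) throws InterruptedException {
        final CountDownLatch taskStarted = new CountDownLatch(1);

        // Stand-in for the overridden dispatch(): release the latch once the
        // event we care about has been delivered.
        Thread dispatcher = new Thread(() -> {
          for (EventType e : new EventType[] {EventType.SETUP, EventType.TASK_STARTED}) {
            if (e == EventType.TASK_STARTED) {
              taskStarted.countDown();
            }
          }
        });
        dispatcher.start();

        // The test blocks here (bounded by a timeout) instead of polling
        // internal state, so it cannot observe the job while still in SETUP.
        boolean reached = taskStarted.await(10, TimeUnit.SECONDS);
        System.out.println("TASK_STARTED observed: " + reached);
      }
    }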




[43/50] hadoop git commit: HDFS-12024. Fix typos in FsDatasetImpl.java. Contributed by Yasen liu.

Posted by st...@apache.org.
HDFS-12024. Fix typos in FsDatasetImpl.java. Contributed by Yasen liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/abdea262
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/abdea262
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/abdea262

Branch: refs/heads/HADOOP-13345
Commit: abdea26280136587a47aea075ada6122d40d706e
Parents: e5db9af
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Fri Jun 23 22:56:28 2017 +0800
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Fri Jun 23 22:56:28 2017 +0800

----------------------------------------------------------------------
 .../hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java       | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/abdea262/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 1e159cd..208d554 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -498,7 +498,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
   /**
    * Removes a set of volumes from FsDataset.
-   * @param storageLocationsToRemove a set of
+   * @param storageLocsToRemove a set of
    * {@link StorageLocation}s for each volume.
    * @param clearFailure set true to clear failure information.
    */
@@ -2392,7 +2392,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
         datanode.reportBadBlocks(new ExtendedBlock(bpid, corruptBlock),
             memBlockInfo.getVolume());
       } catch (IOException e) {
-        LOG.warn("Failed to repot bad block " + corruptBlock, e);
+        LOG.warn("Failed to report bad block " + corruptBlock, e);
       }
     }
   }




[02/50] hadoop git commit: HADOOP-14486 TestSFTPFileSystem#testGetAccessTime test failure using openJDK 1.8.0. Contributed by Hongyuan Li.

Posted by st...@apache.org.
HADOOP-14486 TestSFTPFileSystem#testGetAccessTime test failure using openJDK 1.8.0.
Contributed by Hongyuan Li.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/942ecbbc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/942ecbbc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/942ecbbc

Branch: refs/heads/HADOOP-13345
Commit: 942ecbbc9876427051965ddd167df5cc1cc253e3
Parents: e855cc4
Author: Steve Loughran <st...@apache.org>
Authored: Fri Jun 16 09:45:22 2017 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Fri Jun 16 09:45:22 2017 +0100

----------------------------------------------------------------------
 .../src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/942ecbbc/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
index 3d57dab..e8eedd9 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
@@ -315,6 +315,7 @@ public class TestSFTPFileSystem {
     java.nio.file.Path path = (local).pathToFile(file).toPath();
     long accessTime1 = Files.readAttributes(path, BasicFileAttributes.class)
         .lastAccessTime().toMillis();
+    accessTime1 = (accessTime1 / 1000) * 1000;
     long accessTime2 = sftpFs.getFileStatus(file).getAccessTime();
     assertEquals(accessTime1, accessTime2);
   }
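
The one-line fix truncates the locally observed access time to whole seconds before comparing, since SFTP reports atime with second precision while Files.readAttributes returns milliseconds; any non-zero sub-second component therefore made the assertion fail on JDKs that populate it. A tiny sketch of the truncation, with made-up values:

    public class TruncateToSeconds {
      public static void main(String[] args) {
        long localAccessTimeMs = 1497600000789L;  // e.g. from Files.readAttributes(...)
        long sftpAccessTimeMs = 1497600000000L;   // SFTP carries seconds, scaled to ms

        // Drop the sub-second part so the two clocks are comparable.
        long truncated = (localAccessTimeMs / 1000) * 1000;  // 1497600000000
        System.out.println(truncated == sftpAccessTimeMs);   // true
      }
    }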




[29/50] hadoop git commit: HDFS-11606. Add CLI cmd to remove an erasure code policy. Contributed by Tim Yao

Posted by st...@apache.org.
HDFS-11606. Add CLI cmd to remove an erasure code policy. Contributed by Tim Yao


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5db3f984
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5db3f984
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5db3f984

Branch: refs/heads/HADOOP-13345
Commit: 5db3f9846882c51991d16853a5c431664f3f801f
Parents: a010b33
Author: Kai Zheng <ka...@intel.com>
Authored: Wed Jun 21 14:01:55 2017 +0800
Committer: Kai Zheng <ka...@intel.com>
Committed: Wed Jun 21 14:01:55 2017 +0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  6 +++
 .../hadoop/hdfs/DistributedFileSystem.java      | 11 ++++
 .../apache/hadoop/hdfs/client/HdfsAdmin.java    | 29 ++++++++++-
 .../hadoop/hdfs/protocol/ClientProtocol.java    |  8 +++
 .../ClientNamenodeProtocolTranslatorPB.java     | 15 ++++++
 .../src/main/proto/ClientNamenodeProtocol.proto |  2 +
 .../src/main/proto/erasurecoding.proto          |  7 +++
 ...tNamenodeProtocolServerSideTranslatorPB.java | 14 +++++
 .../namenode/ErasureCodingPolicyManager.java    | 25 +++++++++
 .../server/namenode/FSDirErasureCodingOp.java   | 13 +++++
 .../hdfs/server/namenode/FSNamesystem.java      | 22 ++++++++
 .../hdfs/server/namenode/NameNodeRpcServer.java |  7 +++
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   | 54 ++++++++++++++++++--
 .../src/site/markdown/HDFSErasureCoding.md      |  4 ++
 .../hadoop/hdfs/TestDistributedFileSystem.java  | 29 +++++++++++
 15 files changed, 242 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5db3f984/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 51f04e0..d114f0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2777,6 +2777,12 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     return namenode.addErasureCodingPolicies(policies);
   }
 
+  public void removeErasureCodingPolicy(String ecPolicyName)
+      throws IOException {
+    checkOpen();
+    namenode.removeErasureCodingPolicy(ecPolicyName);
+  }
+
   public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
     checkOpen();
     return new DFSInotifyEventInputStream(namenode, tracer);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5db3f984/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 1a9ae48..3e09804 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -2608,6 +2608,17 @@ public class DistributedFileSystem extends FileSystem {
   }
 
   /**
+   * Remove erasure coding policy.
+   *
+   * @param ecPolicyName The name of the policy to be removed.
+   * @throws IOException
+   */
+  public void removeErasureCodingPolicy(String ecPolicyName)
+      throws IOException {
+    dfs.removeErasureCodingPolicy(ecPolicyName);
+  }
+
+  /**
    * Unset the erasure coding policy from the source path.
    *
    * @param path     The directory to unset the policy

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5db3f984/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
index 21de0ab..701bf0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
@@ -41,15 +41,16 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 
 /**
  * The public API for performing administrative functions on HDFS. Those writing
@@ -521,6 +522,32 @@ public class HdfsAdmin {
     dfs.unsetErasureCodingPolicy(path);
   }
 
+  /**
+   * Add Erasure coding policies to HDFS. For each policy input, schema and
+   * cellSize are musts, name and id are ignored. They will be automatically
+   * created and assigned by Namenode once the policy is successfully added, and
+   * will be returned in the response.
+   *
+   * @param policies The user defined ec policy list to add.
+   * @return Return the response list of adding operations.
+   * @throws IOException
+   */
+  public AddECPolicyResponse[] addErasureCodingPolicies(
+      ErasureCodingPolicy[] policies)  throws IOException {
+    return dfs.addErasureCodingPolicies(policies);
+  }
+
+  /**
+   * Remove erasure coding policy.
+   *
+   * @param ecPolicyName The name of the policy to be removed.
+   * @throws IOException
+   */
+  public void removeErasureCodingPolicy(String ecPolicyName)
+      throws IOException {
+    dfs.removeErasureCodingPolicy(ecPolicyName);
+  }
+
   private void provisionEZTrash(Path path) throws IOException {
     // make sure the path is an EZ
     EncryptionZone ez = dfs.getEZForPath(path);
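
Together with addErasureCodingPolicies, HdfsAdmin now exposes the full add/remove lifecycle for user-defined policies. A hedged usage sketch follows; the cluster URI, schema parameters and the policy name passed to the remove call are illustrative, and only the HdfsAdmin method signatures come from this patch:

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;
    import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.io.erasurecode.ECSchema;

    public class EcPolicyAdmin {
      public static void main(String[] args) throws Exception {
        HdfsAdmin admin =
            new HdfsAdmin(new URI("hdfs://namenode:8020"), new Configuration());

        // Name and id are ignored on add; the NameNode assigns them and
        // reports the outcome in the response.
        ErasureCodingPolicy policy =
            new ErasureCodingPolicy(new ECSchema("rs", 3, 2), 1024 * 1024);
        for (AddECPolicyResponse r : admin.addErasureCodingPolicies(
            new ErasureCodingPolicy[] {policy})) {
          System.out.println(r);
        }

        // Remove a user-defined policy by name; system policies are rejected.
        admin.removeErasureCodingPolicy("RS-3-2-1024k");
      }
    }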

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5db3f984/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index e132e04..313f322 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1563,6 +1563,14 @@ public interface ClientProtocol {
       ErasureCodingPolicy[] policies) throws IOException;
 
   /**
+   * Remove erasure coding policy.
+   * @param ecPolicyName The name of the policy to be removed.
+   * @throws IOException
+   */
+  @AtMostOnce
+  void removeErasureCodingPolicy(String ecPolicyName) throws IOException;
+
+  /**
    * Get the erasure coding policies loaded in Namenode.
    *
    * @throws IOException

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5db3f984/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 0d517f80..d913f81 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -185,6 +185,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodin
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyRequestProto;
@@ -1694,6 +1695,20 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
 
   @Override
+  public void removeErasureCodingPolicy(String ecPolicyName)
+      throws IOException {
+    RemoveErasureCodingPolicyRequestProto.Builder builder =
+        RemoveErasureCodingPolicyRequestProto.newBuilder();
+    builder.setEcPolicyName(ecPolicyName);
+    RemoveErasureCodingPolicyRequestProto req = builder.build();
+    try {
+      rpcProxy.removeErasureCodingPolicy(null, req);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
   public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
     try {
       GetErasureCodingPoliciesResponseProto response = rpcProxy

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5db3f984/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index c56c0b1..d3e15d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -955,6 +955,8 @@ service ClientNamenodeProtocol {
       returns(GetErasureCodingPoliciesResponseProto);
   rpc addErasureCodingPolicies(AddErasureCodingPoliciesRequestProto)
       returns(AddErasureCodingPoliciesResponseProto);
+  rpc removeErasureCodingPolicy(RemoveErasureCodingPolicyRequestProto)
+      returns(RemoveErasureCodingPolicyResponseProto);
   rpc getErasureCodingPolicy(GetErasureCodingPolicyRequestProto)
       returns(GetErasureCodingPolicyResponseProto);
   rpc getErasureCodingCodecs(GetErasureCodingCodecsRequestProto)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5db3f984/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
index ea878ec..ed0fd81 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
@@ -61,6 +61,13 @@ message AddErasureCodingPoliciesResponseProto {
   repeated AddECPolicyResponseProto responses = 1;
 }
 
+message RemoveErasureCodingPolicyRequestProto {
+  required string ecPolicyName = 1;
+}
+
+message RemoveErasureCodingPolicyResponseProto {
+}
+
 message UnsetErasureCodingPolicyRequestProto {
   required string src = 1;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5db3f984/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 7135ff1..16adc70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -228,6 +228,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodin
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyRequestProto;
@@ -1694,6 +1696,18 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }
 
   @Override
+  public RemoveErasureCodingPolicyResponseProto removeErasureCodingPolicy(
+      RpcController controller, RemoveErasureCodingPolicyRequestProto request)
+      throws ServiceException {
+    try {
+      server.removeErasureCodingPolicy(request.getEcPolicyName());
+      return RemoveErasureCodingPolicyResponseProto.newBuilder().build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
   public GetErasureCodingPolicyResponseProto getErasureCodingPolicy(RpcController controller,
       GetErasureCodingPolicyRequestProto request) throws ServiceException {
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5db3f984/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index 0feb79c..d22e47e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.stream.Collectors;
@@ -68,6 +69,11 @@ public final class ErasureCodingPolicyManager {
   private Map<Byte, ErasureCodingPolicy> userPoliciesByID;
 
   /**
+   * All removed policies sorted by name.
+   */
+  private Map<String, ErasureCodingPolicy> removedPoliciesByName;
+
+  /**
    * All enabled policies maintained in NN memory for fast querying,
    * identified and sorted by its name.
    */
@@ -91,6 +97,7 @@ public final class ErasureCodingPolicyManager {
         DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_DEFAULT);
     this.userPoliciesByID = new TreeMap<>();
     this.userPoliciesByName = new TreeMap<>();
+    this.removedPoliciesByName = new TreeMap<>();
     this.enabledPoliciesByName = new TreeMap<>();
     for (String policyName : policyNames) {
       if (policyName.trim().isEmpty()) {
@@ -243,4 +250,22 @@ public final class ErasureCodingPolicyManager {
         .max(Byte::compareTo).orElse(USER_DEFINED_POLICY_START_ID);
     return (byte) (currentId + 1);
   }
+
+  public synchronized void removePolicy(String name) {
+    if (SystemErasureCodingPolicies.getByName(name) != null) {
+      throw new IllegalArgumentException("System erasure coding policy " +
+          name + " cannot be removed");
+    }
+    ErasureCodingPolicy policy = userPoliciesByName.get(name);
+    if (policy == null) {
+      throw new IllegalArgumentException("The policy name " +
+          name + " does not exist");
+    }
+    enabledPoliciesByName.remove(name);
+    removedPoliciesByName.put(name, policy);
+  }
+
+  public List<ErasureCodingPolicy> getRemovedPolicies() {
+    return removedPoliciesByName.values().stream().collect(Collectors.toList());
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5db3f984/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
index bedbe7d..a040262 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -218,6 +218,19 @@ final class FSDirErasureCodingOp {
     return fsn.getErasureCodingPolicyManager().addPolicy(policy);
   }
 
+  /**
+   * Remove an erasure coding policy.
+   *
+   * @param fsn namespace
+   * @param ecPolicyName the name of the policy to be removed
+   * @throws IOException
+   */
+  static void removeErasureCodePolicy(final FSNamesystem fsn,
+      String ecPolicyName) throws IOException {
+    Preconditions.checkNotNull(ecPolicyName);
+    fsn.getErasureCodingPolicyManager().removePolicy(ecPolicyName);
+  }
+
   private static List<XAttr> removeErasureCodingPolicyXAttr(
       final FSNamesystem fsn, final INodesInPath srcIIP) throws IOException {
     FSDirectory fsd = fsn.getFSDirectory();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5db3f984/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 3f7f1ca..f7acb55 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7087,6 +7087,28 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 
   /**
+   * Remove an erasure coding policy.
+   * @param ecPolicyName the name of the policy to be removed
+   * @throws IOException
+   */
+  void removeErasureCodingPolicy(String ecPolicyName) throws IOException {
+    final String operationName = "removeErasureCodingPolicy";
+    checkOperation(OperationCategory.WRITE);
+    boolean success = false;
+    writeLock();
+    try {
+      FSDirErasureCodingOp.removeErasureCodePolicy(this, ecPolicyName);
+      success = true;
+    } finally {
+      writeUnlock(operationName);
+      if (success) {
+        getEditLog().logSync();
+      }
+      logAuditEvent(success, operationName, null, null, null);
+    }
+  }
+
+  /**
    * Unset an erasure coding policy from the given path.
    * @param srcArg  The path of the target directory.
    * @throws AccessControlException  if the caller is not the superuser.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5db3f984/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index e11a546..599178a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -2301,6 +2301,13 @@ public class NameNodeRpcServer implements NamenodeProtocols {
     return namesystem.addECPolicies(policies);
   }
 
+  @Override
+  public void removeErasureCodingPolicy(String ecPolicyName)
+      throws IOException {
+    checkNNStartup();
+    namesystem.removeErasureCodingPolicy(ecPolicyName);
+  }
+
   @Override // ReconfigurationProtocol
   public void startReconfiguration() throws IOException {
     checkNNStartup();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5db3f984/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
index 2041a05..a983056 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
@@ -199,7 +199,7 @@ public class ECAdmin extends Configured implements Tool {
     }
   }
 
-  /** Command to get the erasure coding policy for a file or directory */
+  /** Command to get the erasure coding policy for a file or directory. */
   private static class GetECPolicyCommand implements AdminHelper.Command {
     @Override
     public String getName() {
@@ -254,7 +254,54 @@ public class ECAdmin extends Configured implements Tool {
     }
   }
 
-  /** Command to set the erasure coding policy to a file/directory */
+  /** Command to remove an erasure coding policy. */
+  private static class RemoveECPolicyCommand implements AdminHelper.Command {
+    @Override
+    public String getName() {
+      return "-removePolicy";
+    }
+
+    @Override
+    public String getShortUsage() {
+      return "[" + getName() + " -policy <policy>]\n";
+    }
+
+    @Override
+    public String getLongUsage() {
+      TableListing listing = AdminHelper.getOptionDescriptionListing();
+      listing.addRow("<policy>", "The name of the erasure coding policy");
+      return getShortUsage() + "\n" +
+          "Remove an erasure coding policy.\n" +
+          listing.toString();
+    }
+
+    @Override
+    public int run(Configuration conf, List<String> args) throws IOException {
+      final String ecPolicyName = StringUtils.popOptionWithArgument(
+          "-policy", args);
+      if (ecPolicyName == null) {
+        System.err.println("Please specify the policy name.\nUsage: " +
+            getLongUsage());
+        return 1;
+      }
+      if (args.size() > 0) {
+        System.err.println(getName() + ": Too many arguments");
+        return 1;
+      }
+      final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+      try {
+        dfs.removeErasureCodingPolicy(ecPolicyName);
+        System.out.println("Erasure coding policy " + ecPolicyName +
+            " is removed");
+      } catch (IOException e) {
+        System.err.println(AdminHelper.prettifyException(e));
+        return 2;
+      }
+      return 0;
+    }
+  }
+
+  /** Command to set the erasure coding policy to a file/directory. */
   private static class SetECPolicyCommand implements AdminHelper.Command {
     @Override
     public String getName() {
@@ -313,7 +360,7 @@ public class ECAdmin extends Configured implements Tool {
     }
   }
 
-  /** Command to unset the erasure coding policy set for a file/directory */
+  /** Command to unset the erasure coding policy set for a file/directory. */
   private static class UnsetECPolicyCommand
       implements AdminHelper.Command {
 
@@ -421,6 +468,7 @@ public class ECAdmin extends Configured implements Tool {
       new ListECPoliciesCommand(),
       new AddECPoliciesCommand(),
       new GetECPolicyCommand(),
+      new RemoveECPolicyCommand(),
       new SetECPolicyCommand(),
       new UnsetECPolicyCommand(),
       new ListECCodecsCommand()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5db3f984/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 51f54a2..4d0b2e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -187,3 +187,7 @@ Below are the details about each command.
  *  `[-listCodecs]`
 
      Get the list of supported erasure coding codecs and coders in system. A coder is an implementation of a codec. A codec can have different implementations, thus different coders. The coders for a codec are listed in a fall back order.
+
+*  `[-removePolicy -policy <policyName>]`
+
+     Remove a user-defined erasure coding policy. System erasure coding policies cannot be removed.
\ No newline at end of file
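
End to end, the removal path documented above can be driven either from the shell (hdfs ec -removePolicy -policy <policyName>) or through the DistributedFileSystem#removeErasureCodingPolicy call exercised by the new test below. A minimal client-side sketch, assuming fs.defaultFS points at an HDFS cluster and that a user-defined policy named RS-3-2-128k was added earlier (the policy name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class RemoveEcPolicyExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumes fs.defaultFS resolves to HDFS, so the cast below is safe.
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        // Only user-defined policies can be removed; system policies are rejected.
        dfs.removeErasureCodingPolicy("RS-3-2-128k");
        dfs.close();
      }
    }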

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5db3f984/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 4479419..1d24f52 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -78,6 +78,7 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
 import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
 import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
@@ -85,7 +86,9 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.ScriptBasedMapping;
@@ -1538,4 +1541,30 @@ public class TestDistributedFileSystem {
       assertEquals(16 * 2, status.getLen());
     }
   }
+
+  @Test
+  public void testRemoveErasureCodingPolicy() throws Exception {
+    Configuration conf = getTestConfiguration();
+    MiniDFSCluster cluster = null;
+
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      DistributedFileSystem fs = cluster.getFileSystem();
+      ECSchema toAddSchema = new ECSchema("rs", 3, 2);
+      ErasureCodingPolicy toAddPolicy =
+          new ErasureCodingPolicy(toAddSchema, 128 * 1024, (byte) 254);
+      String policyName = toAddPolicy.getName();
+      ErasureCodingPolicy[] policies = new ErasureCodingPolicy[]{toAddPolicy};
+      fs.addErasureCodingPolicies(policies);
+      assertEquals(policyName, ErasureCodingPolicyManager.getInstance().
+          getByName(policyName).getName());
+      fs.removeErasureCodingPolicy(policyName);
+      assertEquals(policyName, ErasureCodingPolicyManager.getInstance().
+          getRemovedPolicies().get(0).getName());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }




[09/50] hadoop git commit: HADOOP-14424. Add CRC32C performance test. Contributed by LiXin Ge.

Posted by st...@apache.org.
HADOOP-14424. Add CRC32C performance test. Contributed by LiXin Ge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75043d34
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75043d34
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75043d34

Branch: refs/heads/HADOOP-13345
Commit: 75043d342935828cfe8bdabfab8784c83f797887
Parents: 7582ded
Author: Masatake Iwasaki <iw...@apache.org>
Authored: Mon Jun 19 09:16:45 2017 +0900
Committer: Masatake Iwasaki <iw...@apache.org>
Committed: Mon Jun 19 09:16:45 2017 +0900

----------------------------------------------------------------------
 .../hadoop/util/Crc32PerformanceTest.java       | 253 +++++++++++++------
 1 file changed, 170 insertions(+), 83 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75043d34/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/Crc32PerformanceTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/Crc32PerformanceTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/Crc32PerformanceTest.java
index d8963df..34dfc3a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/Crc32PerformanceTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/Crc32PerformanceTest.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.fs.ChecksumException;
 import org.apache.log4j.Level;
 
 /**
- * Performance tests to compare performance of Crc32 implementations
+ * Performance tests to compare performance of Crc32|Crc32C implementations
  * This can be run from the command line with:
  *
  *   java -cp path/to/test/classes:path/to/common/classes \
@@ -43,17 +43,21 @@ import org.apache.log4j.Level;
  *
  *  hadoop org.apache.hadoop.util.Crc32PerformanceTest
  *
+ * If any argument is provided, this test will run with non-direct buffers.
+ *
  * The output is in JIRA table format.
  */
 public class Crc32PerformanceTest {
   static final int MB = 1024 * 1024;
 
-  static interface Crc32 {
+  interface Crc32 {
 
-    public void verifyChunked(ByteBuffer data, int bytesPerCrc, ByteBuffer crcs,
+    void verifyChunked(ByteBuffer data, int bytesPerCrc, ByteBuffer crcs,
         String filename, long basePos) throws ChecksumException;
 
-    static final class Native implements Crc32 {
+    DataChecksum.Type crcType();
+
+    final class Native implements Crc32 {
       @Override
       public void verifyChunked(ByteBuffer data, int bytesPerSum,
           ByteBuffer sums, String fileName, long basePos)
@@ -61,40 +65,91 @@ public class Crc32PerformanceTest {
         NativeCrc32.verifyChunkedSums(bytesPerSum, DataChecksum.Type.CRC32.id,
             sums, data, fileName, basePos);
       }
+
+      @Override
+      public DataChecksum.Type crcType() {
+        return DataChecksum.Type.CRC32;
+      }
     }
 
+    final class NativeC implements Crc32 {
+      @Override
+      public void verifyChunked(ByteBuffer data, int bytesPerSum,
+          ByteBuffer sums, String fileName, long basePos)
+              throws ChecksumException {
+
+        if (data.isDirect()) {
+          NativeCrc32.verifyChunkedSums(bytesPerSum,
+                  DataChecksum.Type.CRC32C.id, sums, data, fileName, basePos);
+        } else {
+          final int dataOffset = data.arrayOffset() + data.position();
+          final int crcsOffset = sums.arrayOffset() + sums.position();
+          NativeCrc32.verifyChunkedSumsByteArray(bytesPerSum,
+              DataChecksum.Type.CRC32C.id, sums.array(), crcsOffset,
+              data.array(), dataOffset, data.remaining(), fileName, basePos);
+        }
+      }
 
-    static abstract class AbstractCrc32<T extends Checksum> implements Crc32 {
+      @Override
+      public DataChecksum.Type crcType() {
+        return DataChecksum.Type.CRC32C;
+      }
+    }
+
+    abstract class AbstractCrc32<T extends Checksum> implements Crc32 {
       abstract T newAlgorithm();
 
       @Override
       public void verifyChunked(ByteBuffer data, int bytesPerCrc,
-          ByteBuffer crcs, String filename, long basePos)
+          ByteBuffer sums, String filename, long basePos)
               throws ChecksumException {
         final Checksum algorithm = newAlgorithm();
-        if (data.hasArray() && crcs.hasArray()) {
-          DataChecksum.verifyChunked(DataChecksum.Type.CRC32, algorithm,
-              data.array(), data.position(), data.remaining(), bytesPerCrc,
-              crcs.array(), crcs.position(), filename, basePos);
+        final DataChecksum.Type type = crcType();
+        if (data.hasArray() && sums.hasArray()) {
+          DataChecksum.verifyChunked(type, algorithm, data.array(),
+              data.position(), data.remaining(), bytesPerCrc, sums.array(),
+              sums.position(), filename, basePos);
         } else {
-          DataChecksum.verifyChunked(DataChecksum.Type.CRC32, algorithm,
-              data, bytesPerCrc, crcs, filename, basePos);
+          DataChecksum.verifyChunked(type, algorithm, data, bytesPerCrc,
+              sums, filename, basePos);
         }
       }
     }
 
-    static final class Zip extends AbstractCrc32<CRC32> {
+    final class Zip extends AbstractCrc32<CRC32> {
       @Override
       public CRC32 newAlgorithm() {
         return new CRC32();
       }
+
+      @Override
+      public DataChecksum.Type crcType() {
+        return DataChecksum.Type.CRC32;
+      }
     }
 
-    static final class PureJava extends AbstractCrc32<PureJavaCrc32> {
+    final class PureJava extends AbstractCrc32<PureJavaCrc32> {
       @Override
       public PureJavaCrc32 newAlgorithm() {
         return new PureJavaCrc32();
       }
+
+      @Override
+      public DataChecksum.Type crcType() {
+        return DataChecksum.Type.CRC32;
+      }
+    }
+
+    final class PureJavaC extends AbstractCrc32<PureJavaCrc32C> {
+      @Override
+      public PureJavaCrc32C newAlgorithm() {
+        return new PureJavaCrc32C();
+      }
+
+      @Override
+      public DataChecksum.Type crcType() {
+        return DataChecksum.Type.CRC32C;
+      }
     }
   }
 
@@ -114,9 +169,13 @@ public class Crc32PerformanceTest {
 
     crcs.add(Crc32.Zip.class);
     crcs.add(Crc32.PureJava.class);
+    crcs.add(Crc32.PureJavaC.class);
 
-    if (direct && NativeCrc32.isAvailable()) {
-      crcs.add(Crc32.Native.class);
+    if (NativeCrc32.isAvailable()) {
+      if (direct) {
+        crcs.add(Crc32.Native.class);
+      }
+      crcs.add(Crc32.NativeC.class);
       ((Log4JLogger)LogFactory.getLog(NativeCodeLoader.class))
           .getLogger().setLevel(Level.ALL);
     }
@@ -131,13 +190,18 @@ public class Crc32PerformanceTest {
     out.printf("Elapsed %.1fs\n", secondsElapsed(startTime));
   }
 
-  public static void main(String args[]) throws Exception {
-    new Crc32PerformanceTest(64, 5, true).run();
+  public static void main(String[] args) throws Exception {
+    boolean isdirect = true;
+
+    if (args.length > 0) {
+      isdirect = false;
+    }
+    new Crc32PerformanceTest(64, 5, isdirect).run();
   }
 
-  private static void printCell(String s, int width, PrintStream out) {
+  private static void printCell(String s, int width, PrintStream outCrc) {
     final int w = s.length() > width? s.length(): width;
-    out.printf(" %" + w + "s |", s);
+    outCrc.printf(" %" + w + "s |", s);
   }
 
   private ByteBuffer allocateByteBuffer(int length) {
@@ -155,16 +219,29 @@ public class Crc32PerformanceTest {
     return dataBufs;
   }
 
-  private ByteBuffer computeCrc(ByteBuffer dataBufs, int bytePerCrc) {
+  private ByteBuffer computeCrc(ByteBuffer dataBufs, int bytePerCrc,
+      DataChecksum.Type type) {
+    final int size = 4 * (dataBufs.remaining() - 1) / bytePerCrc + 1;
+    final ByteBuffer crcBufs = allocateByteBuffer(size);
+    final DataChecksum checksum = DataChecksum.newDataChecksum(
+        type, bytePerCrc);
+    checksum.calculateChunkedSums(dataBufs, crcBufs);
+    return crcBufs;
+  }
+
+  private ByteBuffer computeCrc(Class<? extends Crc32> clazz,
+      ByteBuffer dataBufs, int bytePerCrc) throws Exception {
+    final Constructor<? extends Crc32> ctor = clazz.getConstructor();
+    final Crc32 crc = ctor.newInstance();
     final int size = 4 * (dataBufs.remaining() - 1) / bytePerCrc + 1;
     final ByteBuffer crcBufs = allocateByteBuffer(size);
     final DataChecksum checksum = DataChecksum.newDataChecksum(
-        DataChecksum.Type.CRC32, bytePerCrc);
+        crc.crcType(), bytePerCrc);
     checksum.calculateChunkedSums(dataBufs, crcBufs);
     return crcBufs;
   }
 
-  private void doBench(final List<Class<? extends Crc32>> crcs)
+  private void doBench(final List<Class<? extends Crc32>> crcTargets)
       throws Exception {
     final ByteBuffer[] dataBufs = new ByteBuffer[16];
     for(int i = 0; i < dataBufs.length; i++) {
@@ -176,9 +253,9 @@ public class Crc32PerformanceTest {
     out.printf(" (bpc: byte-per-crc in MB/sec; #T: #Theads)\n");
 
     // Warm up implementations to get jit going.
-    final ByteBuffer[] crc32 = {computeCrc(dataBufs[0], 32)};
-    final ByteBuffer[] crc512 = {computeCrc(dataBufs[0], 512)};
-    for (Class<? extends Crc32> c : crcs) {
+    for (Class<? extends Crc32> c : crcTargets) {
+      final ByteBuffer[] crc32 = {computeCrc(c, dataBufs[0], 32)};
+      final ByteBuffer[] crc512 = {computeCrc(c, dataBufs[0], 512)};
       doBench(c, 1, dataBufs, crc32, 32);
       doBench(c, 1, dataBufs, crc512, 512);
     }
@@ -189,57 +266,69 @@ public class Crc32PerformanceTest {
     }
   }
 
-  private void doBench(final List<Class<? extends Crc32>> crcs,
-      final ByteBuffer[] dataBufs, final int bytePerCrc, final PrintStream out)
+  private void doBench(final List<Class<? extends Crc32>> crcTargets,
+      final ByteBuffer[] dataBufs, final int bytePerCrc,
+      final PrintStream outCrc)
           throws Exception {
     final ByteBuffer[] crcBufs = new ByteBuffer[dataBufs.length];
-    for(int i = 0; i < crcBufs.length; i++) {
-      crcBufs[i] = computeCrc(dataBufs[i], bytePerCrc);
+    final ByteBuffer[] crcBufsC = new ByteBuffer[dataBufs.length];
+    for(int i = 0; i < dataBufs.length; i++) {
+      crcBufs[i] = computeCrc(dataBufs[i], bytePerCrc,
+          DataChecksum.Type.CRC32);
+      crcBufsC[i] = computeCrc(dataBufs[i], bytePerCrc,
+          DataChecksum.Type.CRC32C);
     }
 
     final String numBytesStr = " bpc ";
     final String numThreadsStr = "#T";
     final String diffStr = "% diff";
 
-    out.print('|');
-    printCell(numBytesStr, 0, out);
-    printCell(numThreadsStr, 0, out);
-    for (int i = 0; i < crcs.size(); i++) {
-      final Class<? extends Crc32> c = crcs.get(i);
-      out.print('|');
-      printCell(c.getSimpleName(), 8, out);
-      for(int j = 0; j < i; j++) {
-        printCell(diffStr, diffStr.length(), out);
+    outCrc.print('|');
+    printCell(numBytesStr, 0, outCrc);
+    printCell(numThreadsStr, 0, outCrc);
+    for (int i = 0; i < crcTargets.size(); i++) {
+      final Class<? extends Crc32> c = crcTargets.get(i);
+      outCrc.print('|');
+      printCell(c.getSimpleName(), 8, outCrc);
+      if (i > 0) {
+        printCell(diffStr, diffStr.length(), outCrc);
       }
     }
-    out.printf("\n");
+    outCrc.printf("\n");
 
     for(int numThreads = 1; numThreads <= dataBufs.length; numThreads <<= 1) {
-      out.printf("|");
-      printCell(String.valueOf(bytePerCrc), numBytesStr.length(), out);
-      printCell(String.valueOf(numThreads), numThreadsStr.length(), out);
+      outCrc.printf("|");
+      printCell(String.valueOf(bytePerCrc), numBytesStr.length(), outCrc);
+      printCell(String.valueOf(numThreads), numThreadsStr.length(), outCrc);
 
       final List<BenchResult> previous = new ArrayList<BenchResult>();
-      for(Class<? extends Crc32> c : crcs) {
+      for(Class<? extends Crc32> c : crcTargets) {
         System.gc();
 
-        final BenchResult result = doBench(c, numThreads, dataBufs, crcBufs,
-            bytePerCrc);
+        final BenchResult result;
+        final Constructor<? extends Crc32> ctor = c.getConstructor();
+        final Crc32 crc = ctor.newInstance();
+        if (crc.crcType() == DataChecksum.Type.CRC32) {
+          result = doBench(c, numThreads, dataBufs, crcBufs, bytePerCrc);
+        } else {
+          result = doBench(c, numThreads, dataBufs, crcBufsC, bytePerCrc);
+        }
         printCell(String.format("%9.1f", result.mbps),
-            c.getSimpleName().length() + 1, out);
+                c.getSimpleName().length() + 1, outCrc);
 
-        //compare result with previous
-        for(BenchResult p : previous) {
+        // compare this result with the immediately preceding one.
+        final int size = previous.size();
+        if (size > 0) {
+          BenchResult p = previous.get(size - 1);
           final double diff = (result.mbps - p.mbps) / p.mbps * 100;
-          printCell(String.format("%5.1f%%", diff), diffStr.length(), out);
+          printCell(String.format("%5.1f%%", diff), diffStr.length(), outCrc);
         }
         previous.add(result);
       }
-      out.printf("\n");
+      outCrc.printf("\n");
     }
   }
 
-
   private BenchResult doBench(Class<? extends Crc32> clazz,
       final int numThreads, final ByteBuffer[] dataBufs,
       final ByteBuffer[] crcBufs, final int bytePerCrc)
@@ -248,36 +337,34 @@ public class Crc32PerformanceTest {
     final Thread[] threads = new Thread[numThreads];
     final BenchResult[] results = new BenchResult[threads.length];
 
-    {
-      final Constructor<? extends Crc32> ctor = clazz.getConstructor();
-
-      for(int i = 0; i < threads.length; i++) {
-        final Crc32 crc = ctor.newInstance();
-        final long byteProcessed = dataBufs[i].remaining() * trials;
-        final int index = i;
-        threads[i] = new Thread() {
-          @Override
-          public void run() {
-            final long startTime = System.nanoTime();
-            for (int i = 0; i < trials; i++) {
-              dataBufs[index].mark();
-              crcBufs[index].mark();
-              try {
-                crc.verifyChunked(dataBufs[index], bytePerCrc, crcBufs[index],
-                    crc.getClass().getSimpleName(), dataBufs[index].position());
-              } catch (Throwable t) {
-                results[index] = new BenchResult(t);
-                return;
-              } finally {
-                dataBufs[index].reset();
-                crcBufs[index].reset();
-              }
+    final Constructor<? extends Crc32> ctor = clazz.getConstructor();
+
+    for(int i = 0; i < threads.length; i++) {
+      final Crc32 crc = ctor.newInstance();
+      final long byteProcessed = dataBufs[i].remaining() * trials;
+      final int index = i;
+      threads[i] = new Thread() {
+        @Override
+        public void run() {
+          final long startTime = System.nanoTime();
+          for (int i = 0; i < trials; i++) {
+            dataBufs[index].mark();
+            crcBufs[index].mark();
+            try {
+              crc.verifyChunked(dataBufs[index], bytePerCrc, crcBufs[index],
+                  crc.getClass().getSimpleName(), dataBufs[index].position());
+            } catch (Throwable t) {
+              results[index] = new BenchResult(t);
+              return;
+            } finally {
+              dataBufs[index].reset();
+              crcBufs[index].reset();
             }
-            final double secsElapsed = secondsElapsed(startTime);
-            results[index] = new BenchResult(byteProcessed/secsElapsed/MB);
           }
-        };
-      }
+          final double secsElapsed = secondsElapsed(startTime);
+          results[index] = new BenchResult(byteProcessed/secsElapsed/MB);
+        }
+      };
     }
 
     for(Thread t : threads) {
@@ -295,7 +382,7 @@ public class Crc32PerformanceTest {
   }
 
   private static class BenchResult {
-    /** Speed (MB per second) */
+    /** Speed (MB per second). */
     final double mbps;
     final Throwable thrown;
 
@@ -321,7 +408,7 @@ public class Crc32PerformanceTest {
     return (System.nanoTime() - startTime) / 1000000000.0d;
   }
 
-  static void printSystemProperties(PrintStream out) {
+  static void printSystemProperties(PrintStream outCrc) {
     final String[] names = {
         "java.version",
         "java.runtime.name",
@@ -344,7 +431,7 @@ public class Crc32PerformanceTest {
 
     final Properties p = System.getProperties();
     for(String n : names) {
-      out.printf("%" + max + "s = %s\n", n, p.getProperty(n));
+      outCrc.printf("%" + max + "s = %s\n", n, p.getProperty(n));
     }
   }
 }
\ No newline at end of file
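
For orientation, a single verification round of this benchmark boils down to the standalone sketch below, written against the DataChecksum API (the 4 MB buffer and 512-byte chunk size are arbitrary choices; the checksum-buffer sizing mirrors computeCrc() above). Running the test itself with any extra argument, for example "hadoop org.apache.hadoop.util.Crc32PerformanceTest heap", switches it to non-direct buffers as per the new main() logic.

    import java.nio.ByteBuffer;

    import org.apache.hadoop.util.DataChecksum;

    public class Crc32cSketch {
      public static void main(String[] args) throws Exception {
        final int bytesPerCrc = 512;
        // Non-direct (heap) buffer; Crc32PerformanceTest uses direct buffers by default.
        final ByteBuffer data = ByteBuffer.allocate(4 * 1024 * 1024);
        // One 4-byte CRC per bytesPerCrc chunk of data.
        final ByteBuffer sums =
            ByteBuffer.allocate(4 * (data.remaining() - 1) / bytesPerCrc + 1);
        final DataChecksum checksum =
            DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, bytesPerCrc);
        checksum.calculateChunkedSums(data, sums);
        // Throws ChecksumException on mismatch; a clean return means the data verified.
        checksum.verifyChunkedSums(data, sums, "in-memory buffer", 0);
      }
    }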




[14/50] hadoop git commit: HDFS-11916. Extend TestErasureCodingPolicies/TestErasureCodingPolicyWithSnapshot with a random EC policy. Contributed by Takanobu Asanuma.

Posted by st...@apache.org.
HDFS-11916. Extend TestErasureCodingPolicies/TestErasureCodingPolicyWithSnapshot with a random EC policy. Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73fb7501
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73fb7501
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73fb7501

Branch: refs/heads/HADOOP-13345
Commit: 73fb75017e238e72c3162914f0db66e50139e199
Parents: ee89ac8
Author: Lei Xu <le...@apache.org>
Authored: Mon Jun 19 10:25:20 2017 -0700
Committer: Lei Xu <le...@apache.org>
Committed: Mon Jun 19 10:25:20 2017 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/TestErasureCodingPolicies.java  | 60 ++++++++++----------
 ...ErasureCodingPoliciesWithRandomECPolicy.java | 48 ++++++++++++++++
 .../TestErasureCodingPolicyWithSnapshot.java    | 50 ++++++++--------
 ...ingPolicyWithSnapshotWithRandomECPolicy.java | 49 ++++++++++++++++
 4 files changed, 155 insertions(+), 52 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73fb7501/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
index 4a4bed5..f90a2f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
@@ -61,15 +61,19 @@ public class TestErasureCodingPolicies {
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
   private static final int BLOCK_SIZE = 1024;
-  private static final ErasureCodingPolicy EC_POLICY =
-      StripedFileTestUtil.getDefaultECPolicy();
+  private ErasureCodingPolicy ecPolicy;
   private FSNamesystem namesystem;
 
+  public ErasureCodingPolicy getEcPolicy() {
+    return StripedFileTestUtil.getDefaultECPolicy();
+  }
+
   @Rule
   public Timeout timeout = new Timeout(60 * 1000);
 
   @Before
   public void setupCluster() throws IOException {
+    ecPolicy = getEcPolicy();
     conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     DFSTestUtil.enableAllECPolicies(conf);
@@ -100,8 +104,7 @@ public class TestErasureCodingPolicies {
     DFSTestUtil.createFile(fs, replicatedFile, 0, (short) 3, 0L);
 
     // set ec policy on dir
-    fs.setErasureCodingPolicy(dir,
-        StripedFileTestUtil.getDefaultECPolicy().getName());
+    fs.setErasureCodingPolicy(dir, ecPolicy.getName());
     // create a file which should be using ec
     final Path ecSubDir = new Path(dir, "ecSubDir");
     final Path ecFile = new Path(ecSubDir, "ecFile");
@@ -153,7 +156,7 @@ public class TestErasureCodingPolicies {
     fs.mkdir(testDir, FsPermission.getDirDefault());
 
     /* Normal creation of an erasure coding directory */
-    fs.setErasureCodingPolicy(testDir, EC_POLICY.getName());
+    fs.setErasureCodingPolicy(testDir, ecPolicy.getName());
 
     /* Verify files under the directory are striped */
     final Path ECFilePath = new Path(testDir, "foo");
@@ -169,7 +172,7 @@ public class TestErasureCodingPolicies {
     fs.mkdir(notEmpty, FsPermission.getDirDefault());
     final Path oldFile = new Path(notEmpty, "old");
     fs.create(oldFile);
-    fs.setErasureCodingPolicy(notEmpty, EC_POLICY.getName());
+    fs.setErasureCodingPolicy(notEmpty, ecPolicy.getName());
     final Path newFile = new Path(notEmpty, "new");
     fs.create(newFile);
     INode oldInode = namesystem.getFSDirectory().getINode(oldFile.toString());
@@ -181,10 +184,10 @@ public class TestErasureCodingPolicies {
     final Path dir1 = new Path("/dir1");
     final Path dir2 = new Path(dir1, "dir2");
     fs.mkdir(dir1, FsPermission.getDirDefault());
-    fs.setErasureCodingPolicy(dir1, EC_POLICY.getName());
+    fs.setErasureCodingPolicy(dir1, ecPolicy.getName());
     fs.mkdir(dir2, FsPermission.getDirDefault());
     try {
-      fs.setErasureCodingPolicy(dir2, EC_POLICY.getName());
+      fs.setErasureCodingPolicy(dir2, ecPolicy.getName());
     } catch (IOException e) {
       fail("Nested erasure coding policies are supported");
     }
@@ -193,7 +196,7 @@ public class TestErasureCodingPolicies {
     final Path fPath = new Path("/file");
     fs.create(fPath);
     try {
-      fs.setErasureCodingPolicy(fPath, EC_POLICY.getName());
+      fs.setErasureCodingPolicy(fPath, ecPolicy.getName());
       fail("Erasure coding policy on file");
     } catch (IOException e) {
       assertExceptionContains("erasure coding policy for a file", e);
@@ -213,11 +216,11 @@ public class TestErasureCodingPolicies {
     // Already set directory-level policies should still be in effect
     Path disabledPolicy = new Path(dir1, "afterDisabled");
     Assert.assertEquals("Dir does not have policy set",
-        EC_POLICY,
+        ecPolicy,
         fs.getErasureCodingPolicy(dir1));
     fs.create(disabledPolicy).close();
     Assert.assertEquals("File did not inherit dir's policy",
-        EC_POLICY,
+        ecPolicy,
         fs.getErasureCodingPolicy(disabledPolicy));
 
     // Also check loading disabled EC policies from fsimage
@@ -227,10 +230,10 @@ public class TestErasureCodingPolicies {
     cluster.restartNameNodes();
 
     Assert.assertEquals("Dir does not have policy set",
-        EC_POLICY,
+        ecPolicy,
         fs.getErasureCodingPolicy(dir1));
     Assert.assertEquals("File does not have policy set",
-        EC_POLICY,
+        ecPolicy,
         fs.getErasureCodingPolicy(disabledPolicy));
   }
 
@@ -240,8 +243,8 @@ public class TestErasureCodingPolicies {
     final Path dstECDir = new Path("/dstEC");
     fs.mkdir(srcECDir, FsPermission.getDirDefault());
     fs.mkdir(dstECDir, FsPermission.getDirDefault());
-    fs.setErasureCodingPolicy(srcECDir, EC_POLICY.getName());
-    fs.setErasureCodingPolicy(dstECDir, EC_POLICY.getName());
+    fs.setErasureCodingPolicy(srcECDir, ecPolicy.getName());
+    fs.setErasureCodingPolicy(dstECDir, ecPolicy.getName());
     final Path srcFile = new Path(srcECDir, "foo");
     fs.create(srcFile);
 
@@ -275,8 +278,7 @@ public class TestErasureCodingPolicies {
   public void testReplication() throws IOException {
     final Path testDir = new Path("/ec");
     fs.mkdir(testDir, FsPermission.getDirDefault());
-    fs.setErasureCodingPolicy(testDir,
-        StripedFileTestUtil.getDefaultECPolicy().getName());
+    fs.setErasureCodingPolicy(testDir, ecPolicy.getName());
     final Path fooFile = new Path(testDir, "foo");
     // create ec file with replication=0
     fs.create(fooFile, FsPermission.getFileDefault(), true,
@@ -330,10 +332,10 @@ public class TestErasureCodingPolicies {
   private void verifyErasureCodingInfo(
       String src, ErasureCodingPolicy usingECPolicy) throws IOException {
     HdfsFileStatus hdfsFileStatus = fs.getClient().getFileInfo(src);
-    ErasureCodingPolicy ecPolicy = hdfsFileStatus.getErasureCodingPolicy();
-    assertNotNull(ecPolicy);
+    ErasureCodingPolicy actualPolicy = hdfsFileStatus.getErasureCodingPolicy();
+    assertNotNull(actualPolicy);
     assertEquals("Actually used ecPolicy should be equal with target ecPolicy",
-        usingECPolicy, ecPolicy);
+        usingECPolicy, actualPolicy);
   }
 
   @Test
@@ -342,13 +344,13 @@ public class TestErasureCodingPolicies {
     ECSchema rsSchema = new ECSchema("rs", 4, 2);
     String policyName = "RS-4-2-128k";
     int cellSize = 128 * 1024;
-    ErasureCodingPolicy ecPolicy =
+    ErasureCodingPolicy invalidPolicy =
         new ErasureCodingPolicy(policyName, rsSchema, cellSize, (byte) -1);
     String src = "/ecDir4-2";
     final Path ecDir = new Path(src);
     try {
       fs.mkdir(ecDir, FsPermission.getDirDefault());
-      fs.getClient().setErasureCodingPolicy(src, ecPolicy.getName());
+      fs.getClient().setErasureCodingPolicy(src, invalidPolicy.getName());
       fail("HadoopIllegalArgumentException should be thrown for"
           + "setting an invalid erasure coding policy");
     } catch (Exception e) {
@@ -429,8 +431,7 @@ public class TestErasureCodingPolicies {
     Path ecfile = new Path(ecdir, "ecfile");
     fs.setPermission(new Path("/"), new FsPermission((short)0777));
     userfs.mkdirs(ecdir);
-    final String ecPolicyName = StripedFileTestUtil.getDefaultECPolicy()
-        .getName();
+    final String ecPolicyName = ecPolicy.getName();
     useradmin.setErasureCodingPolicy(ecdir, ecPolicyName);
     assertEquals("Policy not present on dir",
         ecPolicyName,
@@ -537,12 +538,12 @@ public class TestErasureCodingPolicies {
     final Path filePath1 = new Path(dirPath, "file1");
 
     fs.mkdirs(dirPath);
-    fs.setErasureCodingPolicy(dirPath, EC_POLICY.getName());
+    fs.setErasureCodingPolicy(dirPath, ecPolicy.getName());
 
     // null EC policy name value means inheriting parent directory's policy
     fs.createFile(filePath0).build().close();
     ErasureCodingPolicy ecPolicyOnFile = fs.getErasureCodingPolicy(filePath0);
-    assertEquals(EC_POLICY, ecPolicyOnFile);
+    assertEquals(ecPolicy, ecPolicyOnFile);
 
     // Test illegal EC policy name
     final String illegalPolicyName = "RS-DEFAULT-1-2-64k";
@@ -560,7 +561,8 @@ public class TestErasureCodingPolicies {
     final ErasureCodingPolicy ecPolicyOnDir =
         SystemErasureCodingPolicies.getByID(
             SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
-    ecPolicyOnFile = EC_POLICY;
+    ecPolicyOnFile = SystemErasureCodingPolicies.getByID(
+        SystemErasureCodingPolicies.RS_6_3_POLICY_ID);
     fs.setErasureCodingPolicy(dirPath, ecPolicyOnDir.getName());
     fs.createFile(filePath0).ecPolicyName(ecPolicyOnFile.getName())
         .build().close();
@@ -578,11 +580,11 @@ public class TestErasureCodingPolicies {
     final Path filePath = new Path(dirPath, "file");
 
     fs.mkdirs(dirPath);
-    fs.setErasureCodingPolicy(dirPath, EC_POLICY.getName());
+    fs.setErasureCodingPolicy(dirPath, ecPolicy.getName());
 
     final String ecPolicyName = "RS-10-4-64k";
     fs.createFile(filePath).build().close();
-    assertEquals(EC_POLICY, fs.getErasureCodingPolicy(filePath));
+    assertEquals(ecPolicy, fs.getErasureCodingPolicy(filePath));
     fs.delete(filePath, true);
 
     fs.createFile(filePath)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73fb7501/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPoliciesWithRandomECPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPoliciesWithRandomECPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPoliciesWithRandomECPolicy.java
new file mode 100644
index 0000000..9072b86
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPoliciesWithRandomECPolicy.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This test extends TestErasureCodingPolicies to use a random (non-default) EC
+ * policy.
+ */
+public class TestErasureCodingPoliciesWithRandomECPolicy extends
+    TestErasureCodingPolicies {
+  private static final Logger LOG = LoggerFactory.getLogger(
+      TestErasureCodingPoliciesWithRandomECPolicy.class);
+
+  private ErasureCodingPolicy ecPolicy;
+
+  public TestErasureCodingPoliciesWithRandomECPolicy() {
+    // If you want to debug this test with a specific ec policy, please use
+    // SystemErasureCodingPolicies class.
+    // e.g. ecPolicy = SystemErasureCodingPolicies.getByID(RS_3_2_POLICY_ID);
+    ecPolicy = StripedFileTestUtil.getRandomNonDefaultECPolicy();
+    LOG.info("run {} with {}.", TestErasureCodingPoliciesWithRandomECPolicy
+        .class.getSuperclass().getSimpleName(), ecPolicy.getName());
+  }
+
+  @Override
+  public ErasureCodingPolicy getEcPolicy() {
+    return ecPolicy;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73fb7501/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
index 5549bf9..fbeada6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
@@ -40,17 +40,21 @@ public class TestErasureCodingPolicyWithSnapshot {
   private Configuration conf;
 
   private final static int SUCCESS = 0;
-  private final ErasureCodingPolicy sysDefaultPolicy =
-      StripedFileTestUtil.getDefaultECPolicy();
-  private final short groupSize = (short) (
-      sysDefaultPolicy.getNumDataUnits() +
-          sysDefaultPolicy.getNumParityUnits());
+  private ErasureCodingPolicy ecPolicy;
+  private short groupSize;
+
+  public ErasureCodingPolicy getEcPolicy() {
+    return StripedFileTestUtil.getDefaultECPolicy();
+  }
 
   @Before
   public void setupCluster() throws IOException {
+    ecPolicy = getEcPolicy();
+    groupSize = (short) (ecPolicy.getNumDataUnits()
+        + ecPolicy.getNumParityUnits());
     conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
-        sysDefaultPolicy.getName());
+        ecPolicy.getName());
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
@@ -77,12 +81,12 @@ public class TestErasureCodingPolicyWithSnapshot {
     fs.mkdirs(ecDir);
     fs.allowSnapshot(ecDirParent);
     // set erasure coding policy
-    fs.setErasureCodingPolicy(ecDir, sysDefaultPolicy.getName());
+    fs.setErasureCodingPolicy(ecDir, ecPolicy.getName());
     DFSTestUtil.createFile(fs, ecFile, len, (short) 1, 0xFEED);
     String contents = DFSTestUtil.readFile(fs, ecFile);
     final Path snap1 = fs.createSnapshot(ecDirParent, "snap1");
     final Path snap1ECDir = new Path(snap1, ecDir.getName());
-    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
+    assertEquals("Got unexpected erasure coding policy", ecPolicy,
         fs.getErasureCodingPolicy(snap1ECDir));
 
     // Now delete the dir which has erasure coding policy. Re-create the dir again, and
@@ -95,18 +99,18 @@ public class TestErasureCodingPolicyWithSnapshot {
         fs.getErasureCodingPolicy(snap2ECDir));
 
     // Make dir again with system default ec policy
-    fs.setErasureCodingPolicy(ecDir, sysDefaultPolicy.getName());
+    fs.setErasureCodingPolicy(ecDir, ecPolicy.getName());
     final Path snap3 = fs.createSnapshot(ecDirParent, "snap3");
     final Path snap3ECDir = new Path(snap3, ecDir.getName());
     // Check that snap3's ECPolicy has the correct settings
     ErasureCodingPolicy ezSnap3 = fs.getErasureCodingPolicy(snap3ECDir);
-    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
+    assertEquals("Got unexpected erasure coding policy", ecPolicy,
         ezSnap3);
 
     // Check that older snapshots still have the old ECPolicy settings
-    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
+    assertEquals("Got unexpected erasure coding policy", ecPolicy,
         fs.getErasureCodingPolicy(snap1ECDir));
-    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
+    assertEquals("Got unexpected erasure coding policy", ecPolicy,
         fs.getErasureCodingPolicy(snap2ECDir));
 
     // Verify contents of the snapshotted file
@@ -118,12 +122,12 @@ public class TestErasureCodingPolicyWithSnapshot {
     // Now delete the snapshots out of order and verify the EC policy
     // correctness
     fs.deleteSnapshot(ecDirParent, snap2.getName());
-    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
+    assertEquals("Got unexpected erasure coding policy", ecPolicy,
         fs.getErasureCodingPolicy(snap1ECDir));
-    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
+    assertEquals("Got unexpected erasure coding policy", ecPolicy,
         fs.getErasureCodingPolicy(snap3ECDir));
     fs.deleteSnapshot(ecDirParent, snap1.getName());
-    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
+    assertEquals("Got unexpected erasure coding policy", ecPolicy,
         fs.getErasureCodingPolicy(snap3ECDir));
   }
 
@@ -136,9 +140,9 @@ public class TestErasureCodingPolicyWithSnapshot {
     fs.mkdirs(ecDir);
     fs.allowSnapshot(ecDir);
 
-    fs.setErasureCodingPolicy(ecDir, sysDefaultPolicy.getName());
+    fs.setErasureCodingPolicy(ecDir, ecPolicy.getName());
     final Path snap1 = fs.createSnapshot(ecDir, "snap1");
-    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
+    assertEquals("Got unexpected erasure coding policy", ecPolicy,
         fs.getErasureCodingPolicy(snap1));
   }
 
@@ -152,10 +156,10 @@ public class TestErasureCodingPolicyWithSnapshot {
     fs.allowSnapshot(ecDir);
 
     // set erasure coding policy
-    fs.setErasureCodingPolicy(ecDir, sysDefaultPolicy.getName());
+    fs.setErasureCodingPolicy(ecDir, ecPolicy.getName());
     final Path snap1 = fs.createSnapshot(ecDir, "snap1");
     ErasureCodingPolicy ecSnap = fs.getErasureCodingPolicy(snap1);
-    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
+    assertEquals("Got unexpected erasure coding policy", ecPolicy,
         ecSnap);
 
     // save namespace, restart namenode, and check ec policy correctness.
@@ -165,7 +169,7 @@ public class TestErasureCodingPolicyWithSnapshot {
     cluster.restartNameNode(true);
 
     ErasureCodingPolicy ecSnap1 = fs.getErasureCodingPolicy(snap1);
-    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
+    assertEquals("Got unexpected erasure coding policy", ecPolicy,
         ecSnap1);
     assertEquals("Got unexpected ecSchema", ecSnap.getSchema(),
         ecSnap1.getSchema());
@@ -184,7 +188,7 @@ public class TestErasureCodingPolicyWithSnapshot {
     fs.allowSnapshot(ecDir);
 
     // set erasure coding policy
-    fs.setErasureCodingPolicy(ecDir, sysDefaultPolicy.getName());
+    fs.setErasureCodingPolicy(ecDir, ecPolicy.getName());
     DFSTestUtil.createFile(fs, ecFile, len, (short) 1, 0xFEED);
     final Path snap1 = fs.createSnapshot(ecDir, "snap1");
 
@@ -197,7 +201,7 @@ public class TestErasureCodingPolicyWithSnapshot {
 
     assertNull("Got unexpected erasure coding policy",
         fs.getErasureCodingPolicy(snap1CopyECDir));
-    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
+    assertEquals("Got unexpected erasure coding policy", ecPolicy,
         fs.getErasureCodingPolicy(snap1));
   }
 
@@ -212,7 +216,7 @@ public class TestErasureCodingPolicyWithSnapshot {
     fs.mkdirs(ecDir);
 
     // Set erasure coding policy
-    fs.setErasureCodingPolicy(ecDir, sysDefaultPolicy.getName());
+    fs.setErasureCodingPolicy(ecDir, ecPolicy.getName());
     DFSTestUtil.createFile(fs, ecFile, len, (short) 1, 0xFEED);
 
     // Verify FileStatus for normal and EC files

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73fb7501/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshotWithRandomECPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshotWithRandomECPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshotWithRandomECPolicy.java
new file mode 100644
index 0000000..8007ea3
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshotWithRandomECPolicy.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This test extends TestErasureCodingPolicyWithSnapshot to use a random
+ * (non-default) EC policy.
+ */
+public class TestErasureCodingPolicyWithSnapshotWithRandomECPolicy extends
+    TestErasureCodingPolicyWithSnapshot {
+  private static final Logger LOG = LoggerFactory.getLogger(
+      TestErasureCodingPolicyWithSnapshotWithRandomECPolicy.class);
+
+  private ErasureCodingPolicy ecPolicy;
+
+  public TestErasureCodingPolicyWithSnapshotWithRandomECPolicy() {
+    // If you want to debug this test with a specific ec policy, please use
+    // SystemErasureCodingPolicies class.
+    // e.g. ecPolicy = SystemErasureCodingPolicies.getByID(RS_3_2_POLICY_ID);
+    ecPolicy = StripedFileTestUtil.getRandomNonDefaultECPolicy();
+    LOG.info("run {} with {}.",
+        TestErasureCodingPolicyWithSnapshotWithRandomECPolicy.class
+            .getSuperclass().getSimpleName(), ecPolicy.getName());
+  }
+
+  @Override
+  public ErasureCodingPolicy getEcPolicy() {
+    return ecPolicy;
+  }
+}




[32/50] hadoop git commit: HDFS-11998. Enable DFSNetworkTopology as default. Contributed by Chen Liang.

Posted by st...@apache.org.
HDFS-11998. Enable DFSNetworkTopology as default. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ae9467f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ae9467f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ae9467f

Branch: refs/heads/HADOOP-13345
Commit: 9ae9467f920e95ca989d7d51775b39e1b9fee300
Parents: c22cf00
Author: Arpit Agarwal <ar...@apache.org>
Authored: Wed Jun 21 22:01:37 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Wed Jun 21 22:01:41 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  2 +-
 .../AvailableSpaceBlockPlacementPolicy.java     | 21 ++++++++++++++++++++
 .../src/main/resources/hdfs-default.xml         |  2 +-
 .../TestReplicationPolicyWithNodeGroup.java     |  4 ++++
 4 files changed, 27 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ae9467f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index e7aba4e..1f60f32 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1085,7 +1085,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
 
   public static final String DFS_USE_DFS_NETWORK_TOPOLOGY_KEY =
       "dfs.use.dfs.network.topology";
-  public static final boolean DFS_USE_DFS_NETWORK_TOPOLOGY_DEFAULT = false;
+  public static final boolean DFS_USE_DFS_NETWORK_TOPOLOGY_DEFAULT = true;
 
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry 
   @Deprecated

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ae9467f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java
index 706768c..8435b46 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java
@@ -24,10 +24,13 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AVAILABLE_SPACE_
 import java.util.Collection;
 import java.util.Random;
 
+import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.net.DFSNetworkTopology;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 
@@ -71,11 +74,29 @@ public class AvailableSpaceBlockPlacementPolicy extends
 
   @Override
   protected DatanodeDescriptor chooseDataNode(final String scope,
+      final Collection<Node> excludedNode, StorageType type) {
+    // only the code that uses DFSNetworkTopology should trigger this code path.
+    Preconditions.checkArgument(clusterMap instanceof DFSNetworkTopology);
+    DFSNetworkTopology dfsClusterMap = (DFSNetworkTopology)clusterMap;
+    DatanodeDescriptor a = (DatanodeDescriptor) dfsClusterMap
+        .chooseRandomWithStorageType(scope, excludedNode, type);
+    DatanodeDescriptor b = (DatanodeDescriptor) dfsClusterMap
+        .chooseRandomWithStorageType(scope, excludedNode, type);
+    return select(a, b);
+  }
+
+  @Override
+  protected DatanodeDescriptor chooseDataNode(final String scope,
       final Collection<Node> excludedNode) {
     DatanodeDescriptor a =
         (DatanodeDescriptor) clusterMap.chooseRandom(scope, excludedNode);
     DatanodeDescriptor b =
         (DatanodeDescriptor) clusterMap.chooseRandom(scope, excludedNode);
+    return select(a, b);
+  }
+
+  private DatanodeDescriptor select(
+      DatanodeDescriptor a, DatanodeDescriptor b) {
     if (a != null && b != null){
       int ret = compareDataNode(a, b);
       if (ret == 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ae9467f/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 0f72041..be345af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4534,7 +4534,7 @@
 
   <property>
     <name>dfs.use.dfs.network.topology</name>
-    <value>false</value>
+    <value>true</value>
     <description>
       Enables DFSNetworkTopology to choose nodes for placing replicas.
     </description>
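
Because the default flips to true here, deployments and tests that depend on the plain NetworkTopology (for example when net.topology.impl is set to NetworkTopologyWithNodeGroup) now have to opt out explicitly. A minimal sketch of doing so in code, mirroring the TestReplicationPolicyWithNodeGroup change below; the same effect can be had by setting dfs.use.dfs.network.topology to false in hdfs-site.xml:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class DisableDfsTopologySketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Fall back to the pre-HDFS-11998 behaviour: do not use DFSNetworkTopology
        // when choosing replica locations.
        conf.setBoolean(DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY, false);
        System.out.println("dfs.use.dfs.network.topology = "
            + conf.getBoolean(DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY, true));
      }
    }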

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ae9467f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
index ebd4b81..6572f78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -51,6 +52,9 @@ public class TestReplicationPolicyWithNodeGroup extends BaseReplicationPolicyTes
 
   @Override
   DatanodeDescriptor[] getDatanodeDescriptors(Configuration conf) {
+    // The default is true, which would run this test against DFSNetworkTopology,
+    // but it must run on NetworkTopologyWithNodeGroup, so set it to false.
+    conf.setBoolean(DFS_USE_DFS_NETWORK_TOPOLOGY_KEY, false);
     conf.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
             NetworkTopologyWithNodeGroup.class.getName());
     final String[] racks = {




[22/50] hadoop git commit: HDFS-11960. Successfully closed files can stay under-replicated. Contributed by Kihwal Lee.

Posted by st...@apache.org.
HDFS-11960. Successfully closed files can stay under-replicated. Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c0769de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c0769de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c0769de

Branch: refs/heads/HADOOP-13345
Commit: 8c0769dee4b455f4de08ccce36334f0be9e79e2c
Parents: 2e9daa2
Author: Kihwal Lee <ki...@apache.org>
Authored: Tue Jun 20 09:11:23 2017 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Tue Jun 20 09:11:23 2017 -0500

----------------------------------------------------------------------
 .../server/blockmanagement/BlockManager.java    |  3 +-
 .../TestPendingReconstruction.java              | 44 ++++++++++++++++++++
 2 files changed, 46 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c0769de/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 2ef80a9..acd4c47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3749,7 +3749,8 @@ public class BlockManager implements BlockStatsMXBean {
     // Modify the blocks->datanode map and node's map.
     //
     BlockInfo storedBlock = getStoredBlock(block);
-    if (storedBlock != null) {
+    if (storedBlock != null &&
+        block.getGenerationStamp() == storedBlock.getGenerationStamp()) {
       pendingReconstruction.decrement(storedBlock, node);
     }
     processAndHandleReportedBlock(storageInfo, block, ReplicaState.FINALIZED,
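
The added guard is the whole fix: an incremental block report for a stale replica (older generation stamp) must not clear the pending-reconstruction entry for the current block, otherwise a just-closed file can silently stay under-replicated. A minimal sketch of the idea, using hypothetical stand-ins for the BlockManager structures:

    import java.util.HashMap;
    import java.util.Map;

    class GenStampGuardSketch {
      // Hypothetical stand-in for BlockInfo: identity plus generation stamp.
      static class Block {
        final long id;
        final long genStamp;
        Block(long id, long genStamp) { this.id = id; this.genStamp = genStamp; }
      }

      // blockId -> pending reconstruction count, a stand-in for
      // PendingReconstructionBlocks.
      private final Map<Long, Integer> pendingByBlockId = new HashMap<>();

      void increment(Block b) {
        pendingByBlockId.merge(b.id, 1, Integer::sum);
      }

      // Only a report whose generation stamp matches the stored block may
      // decrement the pending count; stale replicas are ignored here.
      void onReplicaReceived(Block stored, Block reported) {
        if (stored != null && reported.genStamp == stored.genStamp) {
          pendingByBlockId.merge(stored.id, -1, Integer::sum);
        }
      }
    }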

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c0769de/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
index aeaaf4e..7679f9d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
@@ -209,6 +209,8 @@ public class TestPendingReconstruction {
       // Place into blocksmap with GenerationStamp = 1
       blockInfo.setGenerationStamp(1);
       blocksMap.addBlockCollection(blockInfo, bc);
+      // Save it for later.
+      BlockInfo storedBlock = blockInfo;
 
       assertEquals("Size of pendingReconstructions ", 1,
           pendingReconstruction.size());
@@ -255,6 +257,48 @@ public class TestPendingReconstruction {
       // Verify size of neededReconstruction is exactly 1.
       assertEquals("size of neededReconstruction is 1 ", 1,
           neededReconstruction.size());
+
+      // Verify HDFS-11960
+      // Stop the replication/redundancy monitor
+      BlockManagerTestUtil.stopRedundancyThread(blkManager);
+      pendingReconstruction.clear();
+      // Pick a real node
+      DatanodeDescriptor desc[] = { blkManager.getDatanodeManager().
+          getDatanodes().iterator().next() };
+
+      // Add a stored block to the pendingReconstruction.
+      pendingReconstruction.increment(storedBlock, desc);
+      assertEquals("Size of pendingReconstructions ", 1,
+          pendingReconstruction.size());
+
+      // A received IBR processing calls addBlock(). If the gen stamp in the
+      // report is not the same, it should stay in pending.
+      fsn.writeLock();
+      try {
+        // Use a wrong gen stamp.
+        blkManager.addBlock(desc[0].getStorageInfos()[0],
+            new Block(1, 1, 0), null);
+      } finally {
+        fsn.writeUnlock();
+      }
+
+      // The block should still be pending
+      assertEquals("Size of pendingReconstructions ", 1,
+          pendingReconstruction.size());
+
+      // A block report with the correct gen stamp should remove the record
+      // from the pending queue.
+      fsn.writeLock();
+      try {
+        blkManager.addBlock(desc[0].getStorageInfos()[0],
+            new Block(1, 1, 1), null);
+      } finally {
+        fsn.writeUnlock();
+      }
+
+      // The pending queue should be empty.
+      assertEquals("Size of pendingReconstructions ", 0,
+          pendingReconstruction.size());
     } finally {
       if (cluster != null) {
         cluster.shutdown();




[18/50] hadoop git commit: HADOOP-14515. Specifically configure zookeeper-related log levels in KMS log4j.

Posted by st...@apache.org.
HADOOP-14515. Specifically configure zookeeper-related log levels in KMS log4j.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/15964ef1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/15964ef1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/15964ef1

Branch: refs/heads/HADOOP-13345
Commit: 15964ef1473726ef0f29949eda00932d19c64635
Parents: 3369540
Author: Xiao Chen <xi...@apache.org>
Authored: Mon Jun 19 23:10:51 2017 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Mon Jun 19 23:12:02 2017 -0700

----------------------------------------------------------------------
 .../hadoop-kms/src/test/resources/log4j.properties              | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/15964ef1/hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties b/hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties
index b347d27..e319af6 100644
--- a/hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties
+++ b/hadoop-common-project/hadoop-kms/src/test/resources/log4j.properties
@@ -28,4 +28,7 @@ log4j.logger.org.apache.hadoop.crytpo.key.kms.server=ALL
 log4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF
 log4j.logger.org.apache.hadoop.security=OFF
 log4j.logger.org.apache.directory.server.core=OFF
-log4j.logger.org.apache.hadoop.util.NativeCodeLoader=OFF
\ No newline at end of file
+log4j.logger.org.apache.hadoop.util.NativeCodeLoader=OFF
+# Make the ZooKeeper log level an explicit config so it does not change with the rootLogger.
+log4j.logger.org.apache.zookeeper=INFO
+log4j.logger.org.apache.curator=INFO
\ No newline at end of file
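
If a test ever needs the same pinning at runtime rather than through the properties file, the log4j 1.x API can do it directly; a small sketch using the same logger names as above:

    import org.apache.log4j.Level;
    import org.apache.log4j.Logger;

    public class PinZkLogLevels {
      public static void main(String[] args) {
        // Keep ZooKeeper and Curator at INFO regardless of the root logger level.
        Logger.getLogger("org.apache.zookeeper").setLevel(Level.INFO);
        Logger.getLogger("org.apache.curator").setLevel(Level.INFO);
      }
    }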




[46/50] hadoop git commit: HADOOP-14543. ZKFC should use getAversion() while setting the zkacl. Contributed by Brahma Reddy Battula.

Posted by st...@apache.org.
HADOOP-14543. ZKFC should use getAversion() while setting the zkacl. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0111711d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0111711d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0111711d

Branch: refs/heads/HADOOP-13345
Commit: 0111711d8b2355a12a3e5f4da7f5a91e89355c1b
Parents: c6bd73c
Author: Arpit Agarwal <ar...@apache.org>
Authored: Fri Jun 23 16:50:47 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Fri Jun 23 16:50:47 2017 -0700

----------------------------------------------------------------------
 .../apache/hadoop/ha/ActiveStandbyElector.java  |  2 +-
 .../ha/TestActiveStandbyElectorRealZK.java      | 28 ++++++++++++++++++++
 2 files changed, 29 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0111711d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
index dec401e..cf95a49 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
@@ -1080,7 +1080,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
         List<ACL> acl = zkClient.getACL(path, stat);
         if (acl == null || !acl.containsAll(zkAcl) ||
             !zkAcl.containsAll(acl)) {
-          zkClient.setACL(path, zkAcl, stat.getVersion());
+          zkClient.setACL(path, zkAcl, stat.getAversion());
         }
         return null;
       }
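
This small change matters because ZooKeeper keeps separate version counters per znode: setData() bumps the data version returned by Stat.getVersion(), while setACL() is checked against the ACL version from Stat.getAversion(). Once the data has been updated (as the new test below simulates), passing the data version to setACL() fails with a BadVersion error. A minimal standalone sketch of the corrected pattern, with a placeholder connection string and path:

    import java.util.List;
    import org.apache.zookeeper.ZooDefs.Ids;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.ACL;
    import org.apache.zookeeper.data.Stat;

    public class SetAclWithAversion {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("localhost:2181", 5000, null);
        String path = "/hadoop-ha/mycluster";   // placeholder parent znode
        Stat stat = new Stat();
        List<ACL> current = zk.getACL(path, stat);
        if (!current.containsAll(Ids.READ_ACL_UNSAFE)
            || !Ids.READ_ACL_UNSAFE.containsAll(current)) {
          // Compare-and-set against the ACL version, not the data version.
          zk.setACL(path, Ids.READ_ACL_UNSAFE, stat.getAversion());
        }
        zk.close();
      }
    }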

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0111711d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java
index fb8c236..59c0886 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java
@@ -30,7 +30,9 @@ import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback;
 import org.apache.hadoop.ha.ActiveStandbyElector.State;
 import org.apache.hadoop.util.ZKUtil.ZKAuthInfo;
 import org.apache.log4j.Level;
+import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.ZooDefs.Ids;
+import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.server.ZooKeeperServer;
 import org.junit.Test;
 import org.mockito.AdditionalMatchers;
@@ -256,4 +258,30 @@ public class TestActiveStandbyElectorRealZK extends ClientBaseWithFixes {
     Mockito.verify(cbs[1], Mockito.never()).becomeActive();
     checkFatalsAndReset();
   }
+
+  /**
+   * Test to verify that proper ZooKeeper ACLs can be updated on
+   * ActiveStandbyElector's parent znode.
+   */
+  @Test(timeout = 15000)
+  public void testSetZooKeeperACLsOnParentZnodeName()
+      throws Exception {
+    ActiveStandbyElectorCallback cb =
+        Mockito.mock(ActiveStandbyElectorCallback.class);
+    ActiveStandbyElector elector =
+        new ActiveStandbyElector(hostPort, 5000, PARENT_DIR,
+            Ids.READ_ACL_UNSAFE, Collections.<ZKAuthInfo>emptyList(), cb,
+            CommonConfigurationKeys.HA_FC_ELECTOR_ZK_OP_RETRIES_DEFAULT);
+
+    // Simulate the case by pre-creating the parent znode, then updating its
+    // data so that the data version increases to 1 while the znode's
+    // aversion stays at 0.
+    ZooKeeper otherClient = createClient();
+    otherClient.create(PARENT_DIR, "sample1".getBytes(), Ids.OPEN_ACL_UNSAFE,
+        CreateMode.PERSISTENT);
+    otherClient.setData(PARENT_DIR, "sample2".getBytes(), -1);
+    otherClient.close();
+
+    elector.ensureParentZNode();
+  }
 }




[30/50] hadoop git commit: HADOOP-14146. KerberosAuthenticationHandler should authenticate with SPN in AP-REQ. Contributed by Daryn Sharp

Posted by st...@apache.org.
HADOOP-14146. KerberosAuthenticationHandler should authenticate with SPN in AP-REQ. Contributed by Daryn Sharp


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e806c6e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e806c6e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e806c6e0

Branch: refs/heads/HADOOP-13345
Commit: e806c6e0ce6026d53227b51d58ec6d5458164571
Parents: 5db3f98
Author: Daryn Sharp <da...@yahoo-inc.com>
Authored: Wed Jun 21 11:03:41 2017 -0500
Committer: Daryn Sharp <da...@yahoo-inc.com>
Committed: Wed Jun 21 11:03:41 2017 -0500

----------------------------------------------------------------------
 .../client/KerberosAuthenticator.java           |   4 +-
 .../server/KerberosAuthenticationHandler.java   | 204 +++----------------
 .../authentication/util/KerberosUtil.java       | 198 +++++++++++++++++-
 .../TestMultiSchemeAuthenticationHandler.java   |   2 +-
 .../authentication/util/TestKerberosUtil.java   |  73 +++++++
 5 files changed, 301 insertions(+), 180 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e806c6e0/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
index ceec927..9bcebc3 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
@@ -293,10 +293,10 @@ public class KerberosAuthenticator implements Authenticator {
             GSSManager gssManager = GSSManager.getInstance();
             String servicePrincipal = KerberosUtil.getServicePrincipal("HTTP",
                 KerberosAuthenticator.this.url.getHost());
-            Oid oid = KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL");
+            Oid oid = KerberosUtil.NT_GSS_KRB5_PRINCIPAL_OID;
             GSSName serviceName = gssManager.createName(servicePrincipal,
                                                         oid);
-            oid = KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID");
+            oid = KerberosUtil.GSS_KRB5_MECH_OID;
             gssContext = gssManager.createContext(serviceName, oid, null,
                                                   GSSContext.DEFAULT_LIFETIME);
             gssContext.requestCredDeleg(true);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e806c6e0/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
index e0ee227..887548b 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
@@ -28,31 +28,20 @@ import org.slf4j.LoggerFactory;
 
 import javax.security.auth.Subject;
 import javax.security.auth.kerberos.KerberosPrincipal;
-import javax.security.auth.login.AppConfigurationEntry;
-import javax.security.auth.login.Configuration;
-import javax.security.auth.login.LoginContext;
-import javax.security.auth.login.LoginException;
+import javax.security.auth.kerberos.KeyTab;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
 import java.io.File;
 import java.io.IOException;
-import java.net.InetAddress;
+import java.security.Principal;
 import java.security.PrivilegedActionException;
 import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
 import java.util.Properties;
 import java.util.Set;
 import java.util.regex.Pattern;
 
-import com.google.common.collect.HashMultimap;
-
-import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
-
 /**
  * The {@link KerberosAuthenticationHandler} implements the Kerberos SPNEGO
  * authentication mechanism for HTTP.
@@ -77,60 +66,6 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
       KerberosAuthenticationHandler.class);
 
   /**
-   * Kerberos context configuration for the JDK GSS library.
-   */
-  private static class KerberosConfiguration extends Configuration {
-    private String keytab;
-    private String principal;
-
-    public KerberosConfiguration(String keytab, String principal) {
-      this.keytab = keytab;
-      this.principal = principal;
-    }
-
-    @Override
-    public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
-      Map<String, String> options = new HashMap<String, String>();
-      if (IBM_JAVA) {
-        options.put("useKeytab",
-            keytab.startsWith("file://") ? keytab : "file://" + keytab);
-        options.put("principal", principal);
-        options.put("credsType", "acceptor");
-      } else {
-        options.put("keyTab", keytab);
-        options.put("principal", principal);
-        options.put("useKeyTab", "true");
-        options.put("storeKey", "true");
-        options.put("doNotPrompt", "true");
-        options.put("useTicketCache", "true");
-        options.put("renewTGT", "true");
-        options.put("isInitiator", "false");
-      }
-      options.put("refreshKrb5Config", "true");
-      String ticketCache = System.getenv("KRB5CCNAME");
-      if (ticketCache != null) {
-        if (IBM_JAVA) {
-          options.put("useDefaultCcache", "true");
-          // The first value searched when "useDefaultCcache" is used.
-          System.setProperty("KRB5CCNAME", ticketCache);
-          options.put("renewTGT", "true");
-          options.put("credsType", "both");
-        } else {
-          options.put("ticketCache", ticketCache);
-        }
-      }
-      if (LOG.isDebugEnabled()) {
-        options.put("debug", "true");
-      }
-
-      return new AppConfigurationEntry[]{
-          new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
-              AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
-              options), };
-    }
-  }
-
-  /**
    * Constant that identifies the authentication mechanism.
    */
   public static final String TYPE = "kerberos";
@@ -157,43 +92,6 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
   private String keytab;
   private GSSManager gssManager;
   private Subject serverSubject = new Subject();
-  private List<LoginContext> loginContexts = new ArrayList<LoginContext>();
-  /**
-   * HADOOP-10158 added support of running HTTP with multiple SPNs
-   * but implicit requirements is that they must come from the SAME local realm.
-   *
-   * This is a regression for use cases where HTTP service needs to run with
-   * with SPN from foreign realm, which is not supported after HADOOP-10158.
-   *
-   * HADOOP-13565 brings back support of SPNs from foreign realms
-   * without dependency on specific Kerberos domain_realm mapping mechanism.
-   *
-   * There are several reasons for not using native Kerberos domain_realm
-   * mapping:
-   * 1. As commented in KerberosUtil#getDomainRealm(), JDK's
-   * domain_realm mapping routines are private to the security.krb5
-   * package. As a result, KerberosUtil#getDomainRealm() always return local
-   * realm.
-   *
-   * 2. Server krb5.conf is not the only place that contains the domain_realm
-   * mapping in real deployment. Based on MIT KDC document here:
-   * https://web.mit.edu/kerberos/krb5-1.13/doc/admin/realm_config.html, the
-   * Kerberos domain_realm mapping can be implemented in one of the three
-   * mechanisms:
-   * 1) Server host-based krb5.conf on HTTP server
-   * 2) KDC-based krb5.conf on KDC server
-   * 3) DNS-based with TXT record with _kerberos prefix to the hostname.
-   *
-   * We choose to maintain domain_realm mapping based on HTTP principals
-   * from keytab. The mapping is built at login time with HTTP principals
-   * key-ed by server name and is used later to
-   * looked up SPNs based on server name from request for authentication.
-   * The multi-map implementation allows SPNs of same server from
-   * different realms.
-   *
-   */
-  private HashMultimap<String, String> serverPrincipalMap =
-      HashMultimap.create();
 
   /**
    * Creates a Kerberos SPNEGO authentication handler with the default
@@ -236,7 +134,8 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
       if (keytab == null || keytab.trim().length() == 0) {
         throw new ServletException("Keytab not defined in configuration");
       }
-      if (!new File(keytab).exists()) {
+      File keytabFile = new File(keytab);
+      if (!keytabFile.exists()) {
         throw new ServletException("Keytab does not exist: " + keytab);
       }
       
@@ -252,39 +151,19 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
       } else {
         spnegoPrincipals = new String[]{principal};
       }
-
+      KeyTab keytabInstance = KeyTab.getInstance(keytabFile);
+      serverSubject.getPrivateCredentials().add(keytabInstance);
+      for (String spnegoPrincipal : spnegoPrincipals) {
+        Principal krbPrincipal = new KerberosPrincipal(spnegoPrincipal);
+        LOG.info("Using keytab {}, for principal {}",
+            keytab, krbPrincipal);
+        serverSubject.getPrincipals().add(krbPrincipal);
+      }
       String nameRules = config.getProperty(NAME_RULES, null);
       if (nameRules != null) {
         KerberosName.setRules(nameRules);
       }
-      
-      for (String spnegoPrincipal : spnegoPrincipals) {
-        LOG.info("Login using keytab {}, for principal {}",
-            keytab, spnegoPrincipal);
-        final KerberosConfiguration kerberosConfiguration =
-            new KerberosConfiguration(keytab, spnegoPrincipal);
-        final LoginContext loginContext =
-            new LoginContext("", serverSubject, null, kerberosConfiguration);
-        try {
-          loginContext.login();
-        } catch (LoginException le) {
-          LOG.warn("Failed to login as [{}]", spnegoPrincipal, le);
-          throw new AuthenticationException(le);          
-        }
-        loginContexts.add(loginContext);
-        KerberosName kerbName = new KerberosName(spnegoPrincipal);
-        if (kerbName.getHostName() != null
-            && kerbName.getServiceName() != null
-            && kerbName.getServiceName().equals("HTTP")) {
-          boolean added = serverPrincipalMap.put(kerbName.getHostName(),
-              spnegoPrincipal);
-          LOG.info("Map server: {} to principal: [{}], added = {}",
-              kerbName.getHostName(), spnegoPrincipal, added);
-        } else {
-          LOG.warn("HTTP principal: [{}] is invalid for SPNEGO!",
-              spnegoPrincipal);
-        }
-      }
+
       try {
         gssManager = Subject.doAs(serverSubject,
             new PrivilegedExceptionAction<GSSManager>() {
@@ -310,14 +189,6 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
   public void destroy() {
     keytab = null;
     serverSubject = null;
-    for (LoginContext loginContext : loginContexts) {
-      try {
-        loginContext.logout();
-      } catch (LoginException ex) {
-        LOG.warn(ex.getMessage(), ex);
-      }
-    }
-    loginContexts.clear();
   }
 
   /**
@@ -409,40 +280,20 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
           KerberosAuthenticator.NEGOTIATE.length()).trim();
       final Base64 base64 = new Base64(0);
       final byte[] clientToken = base64.decode(authorization);
-      final String serverName = InetAddress.getByName(request.getServerName())
-                                           .getCanonicalHostName();
       try {
+        final String serverPrincipal =
+            KerberosUtil.getTokenServerName(clientToken);
+        if (!serverPrincipal.startsWith("HTTP/")) {
+          throw new IllegalArgumentException(
+              "Invalid server principal " + serverPrincipal +
+              "decoded from client request");
+        }
         token = Subject.doAs(serverSubject,
             new PrivilegedExceptionAction<AuthenticationToken>() {
-              private Set<String> serverPrincipals =
-                  serverPrincipalMap.get(serverName);
               @Override
               public AuthenticationToken run() throws Exception {
-                if (LOG.isTraceEnabled()) {
-                  LOG.trace("SPNEGO with server principals: {} for {}",
-                      serverPrincipals.toString(), serverName);
-                }
-                AuthenticationToken token = null;
-                Exception lastException = null;
-                for (String serverPrincipal : serverPrincipals) {
-                  try {
-                    token = runWithPrincipal(serverPrincipal, clientToken,
-                        base64, response);
-                  } catch (Exception ex) {
-                    lastException = ex;
-                    LOG.trace("Auth {} failed with {}", serverPrincipal, ex);
-                  } finally {
-                      if (token != null) {
-                        LOG.trace("Auth {} successfully", serverPrincipal);
-                        break;
-                    }
-                  }
-                }
-                if (token != null) {
-                  return token;
-                } else {
-                  throw new AuthenticationException(lastException);
-                }
+                return runWithPrincipal(serverPrincipal, clientToken,
+                      base64, response);
               }
             });
       } catch (PrivilegedActionException ex) {
@@ -451,6 +302,8 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
         } else {
           throw new AuthenticationException(ex.getException());
         }
+      } catch (Exception ex) {
+        throw new AuthenticationException(ex);
       }
     }
     return token;
@@ -458,8 +311,7 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
 
   private AuthenticationToken runWithPrincipal(String serverPrincipal,
       byte[] clientToken, Base64 base64, HttpServletResponse response) throws
-      IOException, AuthenticationException, ClassNotFoundException,
-      GSSException, IllegalAccessException, NoSuchFieldException {
+      IOException, GSSException {
     GSSContext gssContext = null;
     GSSCredential gssCreds = null;
     AuthenticationToken token = null;
@@ -467,11 +319,11 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
       LOG.trace("SPNEGO initiated with server principal [{}]", serverPrincipal);
       gssCreds = this.gssManager.createCredential(
           this.gssManager.createName(serverPrincipal,
-              KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL")),
+              KerberosUtil.NT_GSS_KRB5_PRINCIPAL_OID),
           GSSCredential.INDEFINITE_LIFETIME,
           new Oid[]{
-              KerberosUtil.getOidInstance("GSS_SPNEGO_MECH_OID"),
-              KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID")},
+              KerberosUtil.GSS_SPNEGO_MECH_OID,
+              KerberosUtil.GSS_KRB5_MECH_OID },
           GSSCredential.ACCEPT_ONLY);
       gssContext = this.gssManager.createContext(gssCreds);
       byte[] serverToken = gssContext.acceptSecContext(clientToken, 0,
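
The net effect of the rewrite above is that the handler no longer logs in once per configured principal; it builds a single Subject carrying the keytab and all HTTP principals, and GSS picks the acceptor key matching whatever SPN the client's AP-REQ names. A minimal sketch of constructing such a Subject; the keytab path and principal below are placeholders:

    import java.io.File;
    import java.security.Principal;
    import javax.security.auth.Subject;
    import javax.security.auth.kerberos.KerberosPrincipal;
    import javax.security.auth.kerberos.KeyTab;

    public class KeytabSubjectSketch {
      public static Subject build(String keytabPath, String... spnegoPrincipals) {
        Subject subject = new Subject();
        // Unbound keytab credential: holds keys for every principal in the file.
        KeyTab keytab = KeyTab.getInstance(new File(keytabPath));
        subject.getPrivateCredentials().add(keytab);
        for (String name : spnegoPrincipals) {
          Principal p = new KerberosPrincipal(name);
          subject.getPrincipals().add(p);
        }
        return subject;
      }

      public static void main(String[] args) {
        Subject s = build("/etc/security/keytabs/http.keytab",  // placeholder
            "HTTP/localhost@EXAMPLE.COM");
        System.out.println(s.getPrincipals());
      }
    }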

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e806c6e0/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
index 6d33c2d..c011045 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
@@ -21,15 +21,20 @@ import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
 
 import java.io.File;
 import java.io.IOException;
+import java.io.UnsupportedEncodingException;
 import java.lang.reflect.Field;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.net.InetAddress;
 import java.net.UnknownHostException;
+import java.nio.ByteBuffer;
+import java.nio.charset.IllegalCharsetNameException;
 import java.util.ArrayList;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Locale;
+import java.util.NoSuchElementException;
 import java.util.Set;
 import java.util.regex.Pattern;
 
@@ -50,7 +55,24 @@ public class KerberosUtil {
       ? "com.ibm.security.auth.module.Krb5LoginModule"
       : "com.sun.security.auth.module.Krb5LoginModule";
   }
-  
+
+  public static final Oid GSS_SPNEGO_MECH_OID =
+      getNumericOidInstance("1.3.6.1.5.5.2");
+  public static final Oid GSS_KRB5_MECH_OID =
+      getNumericOidInstance("1.2.840.113554.1.2.2");
+  public static final Oid NT_GSS_KRB5_PRINCIPAL_OID =
+      getNumericOidInstance("1.2.840.113554.1.2.2.1");
+
+  // Numeric OIDs will never generate a GSSException for a malformed OID,
+  // so they are safe to use for initializing these statics.
+  private static Oid getNumericOidInstance(String oidName) {
+    try {
+      return new Oid(oidName);
+    } catch (GSSException ex) {
+      throw new IllegalArgumentException(ex);
+    }
+  }
+
   public static Oid getOidInstance(String oidName) 
       throws ClassNotFoundException, GSSException, NoSuchFieldException,
       IllegalAccessException {
@@ -255,4 +277,178 @@ public class KerberosUtil {
   public static boolean hasKerberosTicket(Subject subject) {
     return !subject.getPrivateCredentials(KerberosTicket.class).isEmpty();
   }
+
+  /**
+   * Extract the TGS server principal from the given gssapi kerberos or spnego
+   * wrapped token.
+   * @param rawToken bytes of the gss token
+   * @return String of server principal
+   * @throws IllegalArgumentException if token is undecodable
+   */
+  public static String getTokenServerName(byte[] rawToken) {
+    // subsequent comments include only relevant portions of the kerberos
+    // DER encoding that will be extracted.
+    DER token = new DER(rawToken);
+    // InitialContextToken ::= [APPLICATION 0] IMPLICIT SEQUENCE {
+    //     mech   OID
+    //     mech-token  (NegotiationToken or InnerContextToken)
+    // }
+    DER oid = token.next();
+    if (oid.equals(DER.SPNEGO_MECH_OID)) {
+      // NegotiationToken ::= CHOICE {
+      //     neg-token-init[0] NegTokenInit
+      // }
+      // NegTokenInit ::= SEQUENCE {
+      //     mech-token[2]     InitialContextToken
+      // }
+      token = token.next().get(0xa0, 0x30, 0xa2, 0x04).next();
+      oid = token.next();
+    }
+    if (!oid.equals(DER.KRB5_MECH_OID)) {
+      throw new IllegalArgumentException("Malformed gss token");
+    }
+    // InnerContextToken ::= {
+    //     token-id[1]
+    //     AP-REQ
+    // }
+    if (token.next().getTag() != 1) {
+      throw new IllegalArgumentException("Not an AP-REQ token");
+    }
+    // AP-REQ ::= [APPLICATION 14] SEQUENCE {
+    //     ticket[3]      Ticket
+    // }
+    DER ticket = token.next().get(0x6e, 0x30, 0xa3, 0x61, 0x30);
+    // Ticket ::= [APPLICATION 1] SEQUENCE {
+    //     realm[1]       String
+    //     sname[2]       PrincipalName
+    // }
+    // PrincipalName ::= SEQUENCE {
+    //     name-string[1] SEQUENCE OF String
+    // }
+    String realm = ticket.get(0xa1, 0x1b).getAsString();
+    DER names = ticket.get(0xa2, 0x30, 0xa1, 0x30);
+    StringBuilder sb = new StringBuilder();
+    while (names.hasNext()) {
+      if (sb.length() > 0) {
+        sb.append('/');
+      }
+      sb.append(names.next().getAsString());
+    }
+    return sb.append('@').append(realm).toString();
+  }
+
+  // basic ASN.1 DER decoder to traverse encoded byte arrays.
+  private static class DER implements Iterator<DER> {
+    static final DER SPNEGO_MECH_OID = getDER(GSS_SPNEGO_MECH_OID);
+    static final DER KRB5_MECH_OID = getDER(GSS_KRB5_MECH_OID);
+
+    private static DER getDER(Oid oid) {
+      try {
+        return new DER(oid.getDER());
+      } catch (GSSException ex) {
+        // won't happen; a proper OID is encodable.
+        throw new IllegalArgumentException(ex);
+      }
+    }
+
+    private final int tag;
+    private final ByteBuffer bb;
+
+    DER(byte[] buf) {
+      this(ByteBuffer.wrap(buf));
+    }
+
+    DER(ByteBuffer srcbb) {
+      tag = srcbb.get() & 0xff;
+      int length = readLength(srcbb);
+      bb = srcbb.slice();
+      bb.limit(length);
+      srcbb.position(srcbb.position() + length);
+    }
+
+    int getTag() {
+      return tag;
+    }
+
+    // standard ASN.1 encoding.
+    private static int readLength(ByteBuffer bb) {
+      int length = bb.get();
+      if ((length & (byte)0x80) != 0) {
+        int varlength = length & 0x7f;
+        length = 0;
+        for (int i=0; i < varlength; i++) {
+          length = (length << 8) | (bb.get() & 0xff);
+        }
+      }
+      return length;
+    }
+
+    DER choose(int subtag) {
+      while (hasNext()) {
+        DER der = next();
+        if (der.getTag() == subtag) {
+          return der;
+        }
+      }
+      return null;
+    }
+
+    DER get(int... tags) {
+      DER der = this;
+      for (int i=0; i < tags.length; i++) {
+        int expectedTag = tags[i];
+        // look for an exact match, else scan the children if this is a sequence.
+        if (der.getTag() != expectedTag) {
+          der = der.hasNext() ? der.choose(expectedTag) : null;
+        }
+        if (der == null) {
+          StringBuilder sb = new StringBuilder("Tag not found:");
+          for (int ii=0; ii <= i; ii++) {
+            sb.append(" 0x").append(Integer.toHexString(tags[ii]));
+          }
+          throw new IllegalStateException(sb.toString());
+        }
+      }
+      return der;
+    }
+
+    String getAsString() {
+      try {
+        return new String(bb.array(), bb.arrayOffset() + bb.position(),
+            bb.remaining(), "UTF-8");
+      } catch (UnsupportedEncodingException e) {
+        throw new IllegalCharsetNameException("UTF-8"); // won't happen.
+      }
+    }
+
+    @Override
+    public int hashCode() {
+      return 31 * tag + bb.hashCode();
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      return (o instanceof DER) &&
+          tag == ((DER)o).tag && bb.equals(((DER)o).bb);
+    }
+
+    @Override
+    public boolean hasNext() {
+      // it's a sequence or an embedded octet.
+      return ((tag & 0x30) != 0 || tag == 0x04) && bb.hasRemaining();
+    }
+
+    @Override
+    public DER next() {
+      if (!hasNext()) {
+        throw new NoSuchElementException();
+      }
+      return new DER(bb);
+    }
+
+    @Override
+    public String toString() {
+      return "[tag=0x"+Integer.toHexString(tag)+" bb="+bb+"]";
+    }
+  }
 }
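
A quick way to exercise the new decoder outside the handler is to feed it a base64-decoded token and print the SPN it names; the token here is assumed to come from an HTTP "Authorization: Negotiate" header:

    import java.util.Base64;
    import org.apache.hadoop.security.authentication.util.KerberosUtil;

    public class PrintTokenServerName {
      public static void main(String[] args) {
        // args[0] is assumed to carry the base64 token from a Negotiate header.
        byte[] rawToken = Base64.getDecoder().decode(args[0]);
        String spn = KerberosUtil.getTokenServerName(rawToken);
        // For an HTTP endpoint this should look like HTTP/host@REALM.
        System.out.println("Server principal in AP-REQ: " + spn);
      }
    }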

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e806c6e0/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestMultiSchemeAuthenticationHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestMultiSchemeAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestMultiSchemeAuthenticationHandler.java
index 8f9668a..735cb43 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestMultiSchemeAuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestMultiSchemeAuthenticationHandler.java
@@ -182,7 +182,7 @@ public class TestMultiSchemeAuthenticationHandler
     } catch (AuthenticationException ex) {
       // Expected
     } catch (Exception ex) {
-      Assert.fail();
+      Assert.fail("Wrong exception :"+ex);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e806c6e0/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosUtil.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosUtil.java
index 63df9ea..9999eb2 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosUtil.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosUtil.java
@@ -16,11 +16,14 @@
  */
 package org.apache.hadoop.security.authentication.util;
 
+import static org.junit.Assert.assertEquals;
+
 import java.io.File;
 import java.io.IOException;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Base64;
 import java.util.List;
 import java.util.Locale;
 import java.util.regex.Pattern;
@@ -178,4 +181,74 @@ public class TestKerberosUtil {
     keytab.addKeytabEntries(lstEntries);
     keytab.store(new File(testKeytab));
   }
+
+  @Test
+  public void testServicePrincipalDecode() throws Exception {
+    // test decoding standard krb5 tokens and spnego wrapped tokens
+    // for principals with the default realm, and a non-default realm.
+    String krb5Default =
+        "YIIB2AYJKoZIhvcSAQICAQBuggHHMIIBw6ADAgEFoQMCAQ6iBwMFACAAAACj" +
+        "gethgegwgeWgAwIBBaENGwtFWEFNUExFLkNPTaIcMBqgAwIBAKETMBEbBEhU" +
+        "VFAbCWxvY2FsaG9zdKOBsDCBraADAgERoQMCAQGigaAEgZ23QsT1+16T23ni" +
+        "JI1uFRU0FN13hhPSLAl4+oAqpV5s1Z6E+G2VKGx2+rUF21utOdlwUK/J5CKF" +
+        "HxM4zfNsmzRFhdk5moJW6AWHuRqGJ9hrZgTxA2vOBIn/tju+n/vJVEcUvW0f" +
+        "DiPfjPIPFOlc7V9GlWvZFyr5NMJSFwspKJXYh/FSNpSVTecfGskjded9TZzR" +
+        "2tOVzgpjFvAu/DETpIG/MIG8oAMCARGigbQEgbGWnbKlV1oo7/gzT4hi/Q41" +
+        "ff2luDnSxADEmo6M8LC42scsYMLNgU4iLJhuf4YLb7ueh790HrbB6Kdes71/" +
+        "gSBiLI2/mn3BqNE43gt94dQ8VFBix4nJCsYnuORYxLJjRSJE+3ImJNsSjqaf" +
+        "GRI0sp9w3hc4IVm8afb3Ggm6PgRIyyGNdTzK/p03v+zA01MJh3htuOgLKUOV" +
+        "z002pHnGzu/purZ5mOyaQT12vHxJ2T+Cwi8=";
+
+    String krb5Other =
+        "YIIB2AYJKoZIhvcSAQICAQBuggHHMIIBw6ADAgEFoQMCAQ6iBwMFACAAAACj" +
+        "gethgegwgeWgAwIBBaENGwtBQkNERUZHLk9SR6IcMBqgAwIBAKETMBEbBEhU" +
+        "VFAbCW90aGVyaG9zdKOBsDCBraADAgERoQMCAQGigaAEgZ23QsT1+16T23ni" +
+        "JI1uFRU0FN13hhPSLAl4+oAqpV5s1Z6E+G2VKGx2+rUF21utOdlwUK/J5CKF" +
+        "HxM4zfNsmzRFhdk5moJW6AWHuRqGJ9hrZgTxA2vOBIn/tju+n/vJVEcUvW0f" +
+        "DiPfjPIPFOlc7V9GlWvZFyr5NMJSFwspKJXYh/FSNpSVTecfGskjded9TZzR" +
+        "2tOVzgpjFvAu/DETpIG/MIG8oAMCARGigbQEgbGWnbKlV1oo7/gzT4hi/Q41" +
+        "ff2luDnSxADEmo6M8LC42scsYMLNgU4iLJhuf4YLb7ueh790HrbB6Kdes71/" +
+        "gSBiLI2/mn3BqNE43gt94dQ8VFBix4nJCsYnuORYxLJjRSJE+3ImJNsSjqaf" +
+        "GRI0sp9w3hc4IVm8afb3Ggm6PgRIyyGNdTzK/p03v+zA01MJh3htuOgLKUOV" +
+        "z002pHnGzu/purZ5mOyaQT12vHxJ2T+Cwi8K";
+
+    String spnegoDefault =
+        "YIICCQYGKwYBBQUCoIIB/TCCAfmgDTALBgkqhkiG9xIBAgKhBAMCAXaiggHg" +
+        "BIIB3GCCAdgGCSqGSIb3EgECAgEAboIBxzCCAcOgAwIBBaEDAgEOogcDBQAg" +
+        "AAAAo4HrYYHoMIHloAMCAQWhDRsLRVhBTVBMRS5DT02iHDAaoAMCAQChEzAR" +
+        "GwRIVFRQGwlsb2NhbGhvc3SjgbAwga2gAwIBEaEDAgEBooGgBIGdBWbzvV1R" +
+        "Iqb7WuPIW3RTkFtwjU9P/oFAbujGPd8h/qkCszroNdvHhUkPntuOqhFBntMo" +
+        "bilgTqNEdDUGvBbfkJaRklNGqT/IAOUV6tlGpBUCXquR5UdPzPpUvGZiVRUu" +
+        "FGH5DGGHvYF1CwXPp2l1Jq373vSLQ1kBl6TXl+aKLsZYhVUjKvE7Auippclb" +
+        "hv/GGGex/TcjNH48k47OQaSBvzCBvKADAgERooG0BIGxeChp3TMVtWbCdFGo" +
+        "YL+35r2762j+OEwZRfcj4xCK7j0mUTcxLtyVGxyY9Ax+ljl5gTwzRhXcJq0T" +
+        "TjiQwKJckeZ837mXQAURbfJpFc3VLAXGfNkMFCR7ZkWpGA1Vzc3PeUNczn2D" +
+        "Lpu8sme55HFFQDi/0akW6Lwv/iCrpwIkZPyZPjaEmwLVALu4E8m0Ka3fJkPV" +
+        "GAhamg9OQpuREIK0pCk3ZSHhJz8qMwduzRZHc4vN";
+
+    String spnegoOther =
+        "YIICCQYGKwYBBQUCoIIB/TCCAfmgDTALBgkqhkiG9xIBAgKhBAMCAXaiggHg" +
+        "BIIB3GCCAdgGCSqGSIb3EgECAgEAboIBxzCCAcOgAwIBBaEDAgEOogcDBQAg" +
+        "AAAAo4HrYYHoMIHloAMCAQWhDRsLQUJDREVGRy5PUkeiHDAaoAMCAQChEzAR" +
+        "GwRIVFRQGwlvdGhlcmhvc3SjgbAwga2gAwIBEaEDAgEBooGgBIGdBWbzvV1R" +
+        "Iqb7WuPIW3RTkFtwjU9P/oFAbujGPd8h/qkCszroNdvHhUkPntuOqhFBntMo" +
+        "bilgTqNEdDUGvBbfkJaRklNGqT/IAOUV6tlGpBUCXquR5UdPzPpUvGZiVRUu" +
+        "FGH5DGGHvYF1CwXPp2l1Jq373vSLQ1kBl6TXl+aKLsZYhVUjKvE7Auippclb" +
+        "hv/GGGex/TcjNH48k47OQaSBvzCBvKADAgERooG0BIGxeChp3TMVtWbCdFGo" +
+        "YL+35r2762j+OEwZRfcj4xCK7j0mUTcxLtyVGxyY9Ax+ljl5gTwzRhXcJq0T" +
+        "TjiQwKJckeZ837mXQAURbfJpFc3VLAXGfNkMFCR7ZkWpGA1Vzc3PeUNczn2D" +
+        "Lpu8sme55HFFQDi/0akW6Lwv/iCrpwIkZPyZPjaEmwLVALu4E8m0Ka3fJkPV" +
+        "GAhamg9OQpuREIK0pCk3ZSHhJz8qMwduzRZHc4vNCg==";
+
+
+    assertEquals("HTTP/localhost@EXAMPLE.COM", getPrincipal(krb5Default));
+    assertEquals("HTTP/otherhost@ABCDEFG.ORG", getPrincipal(krb5Other));
+    assertEquals("HTTP/localhost@EXAMPLE.COM", getPrincipal(spnegoDefault));
+    assertEquals("HTTP/otherhost@ABCDEFG.ORG", getPrincipal(spnegoOther));
+  }
+
+  private static String getPrincipal(String token) {
+    return KerberosUtil.getTokenServerName(
+        Base64.getDecoder().decode(token));
+  }
 }
\ No newline at end of file




[36/50] hadoop git commit: HDFS-11789. Maintain Short-Circuit Read Statistics. Contributed by Hanisha Koneru.

Posted by st...@apache.org.
HDFS-11789. Maintain Short-Circuit Read Statistics. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d116ffa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d116ffa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d116ffa

Branch: refs/heads/HADOOP-13345
Commit: 6d116ffad23b470f8e9ca131d8e89cbbbb4378d7
Parents: 49aa60e
Author: Arpit Agarwal <ar...@apache.org>
Authored: Thu Jun 22 13:35:56 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Thu Jun 22 13:35:56 2017 -0700

----------------------------------------------------------------------
 .../hdfs/client/HdfsClientConfigKeys.java       |   4 +
 .../hdfs/client/impl/BlockReaderLocal.java      |  52 ++++-
 .../hadoop/hdfs/client/impl/DfsClientConf.java  |  26 +++
 .../impl/metrics/BlockReaderIoProvider.java     |  89 ++++++++
 .../impl/metrics/BlockReaderLocalMetrics.java   |  78 +++++++
 .../hdfs/client/impl/metrics/package-info.java  |  27 +++
 .../client/impl/TestBlockReaderIoProvider.java  |  75 ++++++
 .../impl/TestBlockReaderLocalMetrics.java       | 227 +++++++++++++++++++
 8 files changed, 566 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d116ffa/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index fbc8d89..5667989 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -343,6 +343,10 @@ public interface HdfsClientConfigKeys {
       int     STREAMS_CACHE_SIZE_DEFAULT = 256;
       String  STREAMS_CACHE_EXPIRY_MS_KEY = PREFIX + "streams.cache.expiry.ms";
       long    STREAMS_CACHE_EXPIRY_MS_DEFAULT = 5*MINUTE;
+
+      String  METRICS_SAMPLING_PERCENTAGE_KEY =
+          PREFIX + "metrics.sampling.percentage";
+      int     METRICS_SAMPLING_PERCENTAGE_DEFAULT = 0;
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d116ffa/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocal.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocal.java
index 1b38996..df0f65f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocal.java
@@ -17,17 +17,16 @@
  */
 package org.apache.hadoop.hdfs.client.impl;
 
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
-import java.util.EnumSet;
-
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ReadOption;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf;
+import org.apache.hadoop.hdfs.client.impl.metrics.BlockReaderIoProvider;
+import org.apache.hadoop.hdfs.client.impl.metrics.BlockReaderLocalMetrics;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
@@ -35,15 +34,19 @@ import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DirectBufferPool;
+import org.apache.hadoop.util.Timer;
 import org.apache.htrace.core.TraceScope;
 import org.apache.htrace.core.Tracer;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.util.EnumSet;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
 /**
  * BlockReaderLocal enables local short circuited reads. If the DFS client is on
  * the same machine as the datanode, then the client can read files directly
@@ -66,6 +69,11 @@ class BlockReaderLocal implements BlockReader {
 
   private static final DirectBufferPool bufferPool = new DirectBufferPool();
 
+  private static BlockReaderLocalMetrics metrics;
+  private static Lock metricsInitializationLock = new ReentrantLock();
+  private final BlockReaderIoProvider blockReaderIoProvider;
+  private static final Timer TIMER = new Timer();
+
   public static class Builder {
     private final int bufferSize;
     private boolean verifyChecksum;
@@ -76,8 +84,10 @@ class BlockReaderLocal implements BlockReader {
     private ExtendedBlock block;
     private StorageType storageType;
     private Tracer tracer;
+    private ShortCircuitConf shortCircuitConf;
 
     public Builder(ShortCircuitConf conf) {
+      this.shortCircuitConf = conf;
       this.maxReadahead = Integer.MAX_VALUE;
       this.verifyChecksum = !conf.isSkipShortCircuitChecksums();
       this.bufferSize = conf.getShortCircuitBufferSize();
@@ -269,6 +279,20 @@ class BlockReaderLocal implements BlockReader {
     this.maxReadaheadLength = maxReadaheadChunks * bytesPerChecksum;
     this.storageType = builder.storageType;
     this.tracer = builder.tracer;
+
+    if (builder.shortCircuitConf.isScrMetricsEnabled()) {
+      metricsInitializationLock.lock();
+      try {
+        if (metrics == null) {
+          metrics = BlockReaderLocalMetrics.create();
+        }
+      } finally {
+        metricsInitializationLock.unlock();
+      }
+    }
+
+    this.blockReaderIoProvider = new BlockReaderIoProvider(
+        builder.shortCircuitConf, metrics, TIMER);
   }
 
   private synchronized void createDataBufIfNeeded() {
@@ -342,7 +366,7 @@ class BlockReaderLocal implements BlockReader {
       long startDataPos = dataPos;
       int startBufPos = buf.position();
       while (buf.hasRemaining()) {
-        int nRead = dataIn.read(buf, dataPos);
+        int nRead = blockReaderIoProvider.read(dataIn, buf, dataPos);
         if (nRead < 0) {
           break;
         }
@@ -435,7 +459,7 @@ class BlockReaderLocal implements BlockReader {
     freeChecksumBufIfExists();
     int total = 0;
     while (buf.hasRemaining()) {
-      int nRead = dataIn.read(buf, dataPos);
+      int nRead = blockReaderIoProvider.read(dataIn, buf, dataPos);
       if (nRead <= 0) break;
       dataPos += nRead;
       total += nRead;
@@ -574,7 +598,8 @@ class BlockReaderLocal implements BlockReader {
         int len) throws IOException {
     freeDataBufIfExists();
     freeChecksumBufIfExists();
-    int nRead = dataIn.read(ByteBuffer.wrap(arr, off, len), dataPos);
+    int nRead = blockReaderIoProvider.read(
+        dataIn, ByteBuffer.wrap(arr, off, len), dataPos);
     if (nRead > 0) {
       dataPos += nRead;
     } else if ((nRead == 0) && (dataPos == dataIn.size())) {
@@ -627,6 +652,9 @@ class BlockReaderLocal implements BlockReader {
     replica.unref();
     freeDataBufIfExists();
     freeChecksumBufIfExists();
+    if (metrics != null) {
+      metrics.collectThreadLocalStates();
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d116ffa/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
index b2fd487..332abb5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
@@ -593,6 +593,10 @@ public class DfsClientConf {
     private final long shortCircuitStreamsCacheExpiryMs;
     private final int shortCircuitSharedMemoryWatcherInterruptCheckMs;
 
+    // Short Circuit Read Metrics
+    private final boolean scrMetricsEnabled;
+    private final int scrMetricsSamplingPercentage;
+
     private final boolean shortCircuitMmapEnabled;
     private final int shortCircuitMmapCacheSize;
     private final long shortCircuitMmapCacheExpiryMs;
@@ -615,6 +619,20 @@ public class DfsClientConf {
       shortCircuitLocalReads = conf.getBoolean(
           Read.ShortCircuit.KEY,
           Read.ShortCircuit.DEFAULT);
+      int scrSamplingPercentage = conf.getInt(
+          Read.ShortCircuit.METRICS_SAMPLING_PERCENTAGE_KEY,
+          Read.ShortCircuit.METRICS_SAMPLING_PERCENTAGE_DEFAULT);
+      if (scrSamplingPercentage <= 0) {
+        scrMetricsSamplingPercentage = 0;
+        scrMetricsEnabled = false;
+      } else if (scrSamplingPercentage > 100) {
+        scrMetricsSamplingPercentage = 100;
+        scrMetricsEnabled = true;
+      } else {
+        scrMetricsSamplingPercentage = scrSamplingPercentage;
+        scrMetricsEnabled = true;
+      }
+
       domainSocketDataTraffic = conf.getBoolean(
           DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
           DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT);
@@ -693,6 +711,14 @@ public class DfsClientConf {
       return shortCircuitLocalReads;
     }
 
+    public boolean isScrMetricsEnabled() {
+      return scrMetricsEnabled;
+    }
+
+    public int getScrMetricsSamplingPercentage() {
+      return scrMetricsSamplingPercentage;
+    }
+
     public boolean isDomainSocketDataTraffic() {
       return domainSocketDataTraffic;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d116ffa/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/metrics/BlockReaderIoProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/metrics/BlockReaderIoProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/metrics/BlockReaderIoProvider.java
new file mode 100644
index 0000000..0792db8
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/metrics/BlockReaderIoProvider.java
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client.impl.metrics;
+
+import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf;
+import org.apache.hadoop.util.Timer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nullable;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.util.concurrent.ThreadLocalRandom;
+
+/**
+ * Profiles {@link org.apache.hadoop.hdfs.client.impl.BlockReaderLocal} short
+ * circuit read latencies when short circuit read metrics are enabled through
+ * {@link ShortCircuitConf#scrMetricsEnabled}.
+ */
+public class BlockReaderIoProvider {
+  public static final Logger LOG = LoggerFactory.getLogger(
+      BlockReaderIoProvider.class);
+
+  private final BlockReaderLocalMetrics metrics;
+  private final boolean isEnabled;
+  private final int sampleRangeMax;
+  private final Timer timer;
+
+  // Threshold in milliseconds above which a warning should be flagged.
+  private static final long SLOW_READ_WARNING_THRESHOLD_MS = 1000;
+  private boolean isWarningLogged = false;
+
+  public BlockReaderIoProvider(@Nullable ShortCircuitConf conf,
+      BlockReaderLocalMetrics metrics, Timer timer) {
+    if (conf != null) {
+      isEnabled = conf.isScrMetricsEnabled();
+      sampleRangeMax = (Integer.MAX_VALUE / 100) *
+          conf.getScrMetricsSamplingPercentage();
+      this.metrics = metrics;
+      this.timer = timer;
+    } else {
+      this.isEnabled = false;
+      this.sampleRangeMax = 0;
+      this.metrics = null;
+      this.timer = null;
+    }
+  }
+
+  public int read(FileChannel dataIn, ByteBuffer dst, long position)
+      throws IOException {
+    final int nRead;
+    if (isEnabled && (ThreadLocalRandom.current().nextInt() < sampleRangeMax)) {
+      long begin = timer.monotonicNow();
+      nRead = dataIn.read(dst, position);
+      long latency = timer.monotonicNow() - begin;
+      addLatency(latency);
+    } else {
+      nRead = dataIn.read(dst, position);
+    }
+    return nRead;
+  }
+
+  private void addLatency(long latency) {
+    metrics.addShortCircuitReadLatency(latency);
+    if (latency > SLOW_READ_WARNING_THRESHOLD_MS && !isWarningLogged) {
+      LOG.warn(String.format("The Short Circuit Local Read latency, %d ms, " +
+          "is higher than the threshold (%d ms). Suppressing further warnings" +
+          " for this BlockReaderLocal.",
+          latency, SLOW_READ_WARNING_THRESHOLD_MS));
+      isWarningLogged = true;
+    }
+  }
+}
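
To make the intent of the sampling gate above concrete: callers hand every positional read to the provider, which times only a sampled subset of them and records the latencies. A hypothetical caller-side sketch follows; the real integration point is BlockReaderLocal, as the class javadoc says, and the wrapper class, field, and per-reader wiring here are illustrative only.

    import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf;
    import org.apache.hadoop.hdfs.client.impl.metrics.BlockReaderIoProvider;
    import org.apache.hadoop.hdfs.client.impl.metrics.BlockReaderLocalMetrics;
    import org.apache.hadoop.util.Timer;

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;

    class ScrReadSketch {
      private final BlockReaderIoProvider ioProvider;

      ScrReadSketch(ShortCircuitConf scrConf) {
        // One metrics sink and one provider per reader; the provider decides
        // per call whether a given read falls into the sampled subset.
        this.ioProvider = new BlockReaderIoProvider(
            scrConf, BlockReaderLocalMetrics.create(), new Timer());
      }

      int read(FileChannel dataIn, ByteBuffer dst, long position)
          throws IOException {
        return ioProvider.read(dataIn, dst, position);
      }
    }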

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d116ffa/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/metrics/BlockReaderLocalMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/metrics/BlockReaderLocalMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/metrics/BlockReaderLocalMetrics.java
new file mode 100644
index 0000000..61b497e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/metrics/BlockReaderLocalMetrics.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client.impl.metrics;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MutableRollingAverages;
+
+/**
+ * This class maintains a metric of rolling average latency for short circuit
+ * reads.
+ */
+@InterfaceAudience.Private
+@Metrics(name="HdfsShortCircuitReads",
+         about="Block Reader Local's Short Circuit Read latency",
+         context="dfs")
+public class BlockReaderLocalMetrics {
+
+  @Metric(value = "short circuit read operation rate", valueName = "LatencyMs")
+  private MutableRollingAverages shortCircuitReadRollingAverages;
+
+  private static final String SHORT_CIRCUIT_READ_METRIC_REGISTERED_NAME =
+      "HdfsShortCircuitReads";
+  private static final String SHORT_CIRCUIT_LOCAL_READS_METRIC_VALUE_NAME =
+      "ShortCircuitLocalReads";
+
+  public static BlockReaderLocalMetrics create() {
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    BlockReaderLocalMetrics metrics = new BlockReaderLocalMetrics();
+
+    ms.register(
+        SHORT_CIRCUIT_READ_METRIC_REGISTERED_NAME, null, metrics);
+    return metrics;
+  }
+
+  /**
+   * Adds short circuit read elapsed time.
+   */
+  public void addShortCircuitReadLatency(final long latency) {
+    shortCircuitReadRollingAverages.add(
+        SHORT_CIRCUIT_LOCAL_READS_METRIC_VALUE_NAME, latency);
+  }
+
+  /**
+   * Collects states maintained in {@link ThreadLocal}, if any.
+   */
+  public void collectThreadLocalStates() {
+    shortCircuitReadRollingAverages.collectThreadLocalStates();
+  }
+
+  /**
+   * Get the MutableRollingAverages metric, exposed for testing only.
+   * @return the rolling average of short circuit read latencies.
+   */
+  @VisibleForTesting
+  public MutableRollingAverages getShortCircuitReadRollingAverages() {
+    return shortCircuitReadRollingAverages;
+  }
+}
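
A compact sketch of the record-then-publish cycle this class expects, mirroring the tests added later in this commit; the gauge name mentioned in the comment is the one those tests assert on rather than anything defined in this file, and the wrapper class and sample value are illustrative.

    import org.apache.hadoop.hdfs.client.impl.metrics.BlockReaderLocalMetrics;

    public class ScrMetricsLifecycleSketch {
      public static void main(String[] args) {
        // Registers the "HdfsShortCircuitReads" record with the default
        // metrics system and returns the instance to record into.
        BlockReaderLocalMetrics metrics = BlockReaderLocalMetrics.create();

        // Record a sampled read latency, in milliseconds.
        metrics.addShortCircuitReadLatency(42);

        // Fold any thread-local sample state into the rolling-average
        // snapshot (per the javadoc above); the tests call this before
        // reading the gauge "[ShortCircuitLocalReads]RollingAvgLatencyMs".
        metrics.collectThreadLocalStates();
      }
    }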

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d116ffa/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/metrics/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/metrics/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/metrics/package-info.java
new file mode 100644
index 0000000..a97ed43
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/metrics/package-info.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Support for tracking Block Reader Local's latencies.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+package org.apache.hadoop.hdfs.client.impl.metrics;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d116ffa/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderIoProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderIoProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderIoProvider.java
new file mode 100644
index 0000000..3eae516
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderIoProvider.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client.impl;
+
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.client.impl.metrics.BlockReaderIoProvider;
+import org.apache.hadoop.hdfs.client.impl.metrics.BlockReaderLocalMetrics;
+import org.apache.hadoop.util.FakeTimer;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Mockito.times;
+
+/**
+ * Tests {@link BlockReaderIoProvider}'s profiling of short circuit read
+ * latencies.
+ */
+public class TestBlockReaderIoProvider {
+
+  private static final long SLOW_READ_THRESHOLD = 5000;
+
+  private static final FakeTimer TIMER = new FakeTimer();
+
+  @Test(timeout = 300_000)
+  public void testSlowShortCircuitReadsIsRecorded() throws IOException {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.setInt(HdfsClientConfigKeys.Read.ShortCircuit
+        .METRICS_SAMPLING_PERCENTAGE_KEY, 100);
+    DfsClientConf clientConf = new DfsClientConf(conf);
+
+    BlockReaderLocalMetrics metrics = Mockito.mock(
+        BlockReaderLocalMetrics.class);
+
+    FileChannel dataIn = Mockito.mock(FileChannel.class);
+    Mockito.when(dataIn.read(any(ByteBuffer.class), anyLong())).thenAnswer(
+        new Answer<Object>() {
+          @Override
+          public Object answer(InvocationOnMock invocation) throws Throwable {
+            TIMER.advance(SLOW_READ_THRESHOLD);
+            return 0;
+          }
+        });
+
+    BlockReaderIoProvider blockReaderIoProvider = new BlockReaderIoProvider(
+        clientConf.getShortCircuitConf(), metrics, TIMER);
+
+    blockReaderIoProvider.read(dataIn, any(ByteBuffer.class), anyLong());
+
+    Mockito.verify(metrics, times(1)).addShortCircuitReadLatency(anyLong());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d116ffa/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalMetrics.java
new file mode 100644
index 0000000..b461f2e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalMetrics.java
@@ -0,0 +1,227 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client.impl;
+
+import com.google.common.base.Supplier;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.client.impl.metrics.BlockReaderIoProvider;
+import org.apache.hadoop.hdfs.client.impl.metrics.BlockReaderLocalMetrics;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.lib.MetricsTestHelper;
+import org.apache.hadoop.metrics2.lib.MutableRollingAverages;
+import org.apache.hadoop.test.GenericTestUtils;
+import static org.apache.hadoop.test.MetricsAsserts.getDoubleGauge;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import org.apache.hadoop.util.FakeTimer;
+import static org.junit.Assert.assertTrue;
+import org.junit.Test;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyLong;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Tests {@link BlockReaderLocalMetrics}'s statistics.
+ */
+public class TestBlockReaderLocalMetrics {
+  private static final long ROLLING_AVERAGES_WINDOW_LENGTH_MS = 1000;
+  private static final int ROLLING_AVERAGE_NUM_WINDOWS = 5;
+  private static final long SLOW_READ_DELAY = 2000;
+  private static final String SHORT_CIRCUIT_READ_METRIC_REGISTERED_NAME =
+      "HdfsShortCircuitReads";
+  private static final String SHORT_CIRCUIT_LOCAL_READS_METRIC_VALUE_FULL_NAME =
+      "[ShortCircuitLocalReads]RollingAvgLatencyMs";
+
+  private static final FakeTimer TIMER = new FakeTimer();
+
+  private static HdfsConfiguration conf = new HdfsConfiguration();
+  private static DfsClientConf clientConf;
+
+  static {
+    conf = new HdfsConfiguration();
+    conf.setInt(HdfsClientConfigKeys.Read.ShortCircuit
+        .METRICS_SAMPLING_PERCENTAGE_KEY, 100);
+    clientConf = new DfsClientConf(conf);
+  }
+
+  @Test(timeout = 300_000)
+  public void testSlowShortCircuitReadsStatsRecorded() throws IOException,
+      InterruptedException, TimeoutException {
+
+    BlockReaderLocalMetrics metrics = BlockReaderLocalMetrics.create();
+    MutableRollingAverages shortCircuitReadRollingAverages = metrics
+        .getShortCircuitReadRollingAverages();
+    MetricsTestHelper.replaceRollingAveragesScheduler(
+        shortCircuitReadRollingAverages,
+        ROLLING_AVERAGE_NUM_WINDOWS, ROLLING_AVERAGES_WINDOW_LENGTH_MS,
+        TimeUnit.MILLISECONDS);
+
+    FileChannel dataIn = Mockito.mock(FileChannel.class);
+    Mockito.when(dataIn.read(any(ByteBuffer.class), anyLong())).thenAnswer(
+        new Answer<Object>() {
+          @Override
+          public Object answer(InvocationOnMock invocation) throws Throwable {
+            TIMER.advance(SLOW_READ_DELAY);
+            return 0;
+          }
+        });
+
+    BlockReaderIoProvider blockReaderIoProvider = new BlockReaderIoProvider(
+        clientConf.getShortCircuitConf(), metrics, TIMER);
+
+    blockReaderIoProvider.read(dataIn, any(ByteBuffer.class), anyLong());
+    blockReaderIoProvider.read(dataIn, any(ByteBuffer.class), anyLong());
+
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        metrics.collectThreadLocalStates();
+        return shortCircuitReadRollingAverages.getStats(0).size() > 0;
+      }
+    }, 500, 10000);
+
+    MetricsRecordBuilder rb = getMetrics(
+        SHORT_CIRCUIT_READ_METRIC_REGISTERED_NAME);
+    double averageLatency = getDoubleGauge(
+        SHORT_CIRCUIT_LOCAL_READS_METRIC_VALUE_FULL_NAME, rb);
+    assertTrue("Average Latency of Short Circuit Reads lower than expected",
+        averageLatency >= SLOW_READ_DELAY);
+  }
+
+  @Test(timeout = 300_000)
+  public void testMultipleBlockReaderIoProviderStats() throws IOException,
+      InterruptedException, TimeoutException {
+
+    BlockReaderLocalMetrics metrics = BlockReaderLocalMetrics.create();
+    MutableRollingAverages shortCircuitReadRollingAverages = metrics
+        .getShortCircuitReadRollingAverages();
+    MetricsTestHelper.replaceRollingAveragesScheduler(
+        shortCircuitReadRollingAverages,
+        ROLLING_AVERAGE_NUM_WINDOWS, ROLLING_AVERAGES_WINDOW_LENGTH_MS,
+        TimeUnit.MILLISECONDS);
+
+    FileChannel dataIn1 = Mockito.mock(FileChannel.class);
+    FileChannel dataIn2 = Mockito.mock(FileChannel.class);
+
+    Mockito.when(dataIn1.read(any(ByteBuffer.class), anyLong())).thenAnswer(
+        new Answer<Object>() {
+          @Override
+          public Object answer(InvocationOnMock invocation) throws Throwable {
+            TIMER.advance(SLOW_READ_DELAY);
+            return 0;
+          }
+        });
+
+    Mockito.when(dataIn2.read(any(ByteBuffer.class), anyLong())).thenAnswer(
+        new Answer<Object>() {
+          @Override
+          public Object answer(InvocationOnMock invocation) throws Throwable {
+            TIMER.advance(SLOW_READ_DELAY*3);
+            return 0;
+          }
+        });
+
+    BlockReaderIoProvider blockReaderIoProvider1 = new BlockReaderIoProvider(
+        clientConf.getShortCircuitConf(), metrics, TIMER);
+    BlockReaderIoProvider blockReaderIoProvider2 = new BlockReaderIoProvider(
+        clientConf.getShortCircuitConf(), metrics, TIMER);
+
+    blockReaderIoProvider1.read(dataIn1, any(ByteBuffer.class), anyLong());
+    blockReaderIoProvider2.read(dataIn2, any(ByteBuffer.class), anyLong());
+
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        metrics.collectThreadLocalStates();
+        return shortCircuitReadRollingAverages.getStats(0).size() > 0;
+      }
+    }, 500, 10000);
+
+    MetricsRecordBuilder rb = getMetrics(
+        SHORT_CIRCUIT_READ_METRIC_REGISTERED_NAME);
+    double averageLatency = getDoubleGauge(
+        SHORT_CIRCUIT_LOCAL_READS_METRIC_VALUE_FULL_NAME, rb);
+
+    assertTrue("Average Latency of Short Circuit Reads lower than expected",
+        averageLatency >= SLOW_READ_DELAY*2);
+  }
+
+  @Test(timeout = 300_000)
+  public void testSlowShortCircuitReadsAverageLatencyValue() throws IOException,
+      InterruptedException, TimeoutException {
+
+    BlockReaderLocalMetrics metrics = BlockReaderLocalMetrics.create();
+    final MutableRollingAverages shortCircuitReadRollingAverages = metrics
+        .getShortCircuitReadRollingAverages();
+    MetricsTestHelper.replaceRollingAveragesScheduler(
+        shortCircuitReadRollingAverages,
+        ROLLING_AVERAGE_NUM_WINDOWS, ROLLING_AVERAGES_WINDOW_LENGTH_MS,
+        TimeUnit.MILLISECONDS);
+
+    Random random = new Random();
+    FileChannel[] dataIns = new FileChannel[5];
+    long totalDelay = 0;
+
+    for (int i = 0; i < 5; i++) {
+      dataIns[i] = Mockito.mock(FileChannel.class);
+      long delay = SLOW_READ_DELAY * random.nextInt(5);
+      Mockito.when(dataIns[i].read(any(ByteBuffer.class), anyLong()))
+          .thenAnswer(new Answer<Object>() {
+            @Override
+            public Object answer(InvocationOnMock invocation) throws Throwable {
+              TIMER.advance(delay);
+              return 0;
+            }
+          });
+      totalDelay += delay;
+    }
+    long expectedAvgLatency = totalDelay / 5;
+
+    BlockReaderIoProvider blockReaderIoProvider = new BlockReaderIoProvider(
+        clientConf.getShortCircuitConf(), metrics, TIMER);
+
+    for (int i = 0; i < 5; i++) {
+      blockReaderIoProvider.read(dataIns[i], any(ByteBuffer.class), anyLong());
+    }
+
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        metrics.collectThreadLocalStates();
+        return shortCircuitReadRollingAverages.getStats(0).size() > 0;
+      }
+    }, 500, 10000);
+
+    MetricsRecordBuilder rb = getMetrics(
+        SHORT_CIRCUIT_READ_METRIC_REGISTERED_NAME);
+    double averageLatency = getDoubleGauge(
+        SHORT_CIRCUIT_LOCAL_READS_METRIC_VALUE_FULL_NAME, rb);
+
+    assertTrue("Average Latency of Short Circuit Reads lower than expected",
+        averageLatency >= expectedAvgLatency);
+  }
+}




[05/50] hadoop git commit: Revert "MAPREDUCE-6898. TestKill.testKillTask is flaky. Contributed by Peter Bacsko"

Posted by st...@apache.org.
Revert "MAPREDUCE-6898. TestKill.testKillTask is flaky. Contributed by Peter Bacsko"

This reverts commit e78542f4bea90e8545e9e06af9c07e89d2be43d9.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ef2743f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ef2743f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ef2743f

Branch: refs/heads/HADOOP-13345
Commit: 9ef2743fa18e350bf32ba1f811b2a5ffb20f3110
Parents: e78542f
Author: Jason Lowe <jl...@yahoo-inc.com>
Authored: Fri Jun 16 14:48:51 2017 -0500
Committer: Jason Lowe <jl...@yahoo-inc.com>
Committed: Fri Jun 16 14:48:51 2017 -0500

----------------------------------------------------------------------
 .../hadoop/mapreduce/v2/app/TestKill.java       | 36 +++-----------------
 1 file changed, 5 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ef2743f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
index 176ac01..f681cf8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
@@ -21,15 +21,11 @@ package org.apache.hadoop.mapreduce.v2.app;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.service.Service;
 import org.junit.Assert;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.jobhistory.EventType;
-import org.apache.hadoop.mapreduce.jobhistory.TaskStartedEvent;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
@@ -104,34 +100,12 @@ public class TestKill {
   @Test
   public void testKillTask() throws Exception {
     final CountDownLatch latch = new CountDownLatch(1);
-    final CountDownLatch jobRunning = new CountDownLatch(1);
-
-    final Dispatcher dispatcher = new AsyncDispatcher() {
-      protected void dispatch(Event event) {
-        // We have to wait until the internal state is RUNNING and not SETUP
-        // because it can cause a transition failure. If the dispatchable
-        // event is TASK_STARTED, we can continue because the job must be in
-        // RUNNING at that point.
-        if (event.getType() == EventType.TASK_STARTED) {
-          jobRunning.countDown();
-        }
-        super.dispatch(event);
-      }
-    };
-
-    MRApp app = new BlockingMRApp(2, 0, latch) {
-      @Override
-      public Dispatcher createDispatcher() {
-        return dispatcher;
-      }
-    };
-
+    MRApp app = new BlockingMRApp(2, 0, latch);
     //this will start the job but job won't complete as Task is blocked
     Job job = app.submit(new Configuration());
-
-    // wait until the job transitions to JobInternalState.RUNNING
-    jobRunning.await(10, TimeUnit.SECONDS);
-
+    
+    //wait and vailidate for Job to become RUNNING
+    app.waitForInternalState((JobImpl) job, JobStateInternal.RUNNING);
     Map<TaskId,Task> tasks = job.getTasks();
     Assert.assertEquals("No of tasks is not correct", 2, 
         tasks.size());




[50/50] hadoop git commit: Merge branch 'trunk' into HADOOP-13345

Posted by st...@apache.org.
Merge branch 'trunk' into HADOOP-13345


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b3c4b84
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b3c4b84
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b3c4b84

Branch: refs/heads/HADOOP-13345
Commit: 2b3c4b840cf52e15d05b23bf41c9064fc3337995
Parents: be922ba 48f4a22
Author: Steve Loughran <st...@apache.org>
Authored: Mon Jun 26 13:48:28 2017 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Mon Jun 26 13:48:28 2017 +0100

----------------------------------------------------------------------
 .../apache/hadoop/example/ITUseMiniCluster.java |   8 +-
 hadoop-common-project/hadoop-auth/pom.xml       |   5 +
 .../authentication/client/AuthenticatedURL.java |   8 +
 .../client/KerberosAuthenticator.java           |   4 +-
 .../server/AuthenticationFilter.java            |  12 +-
 .../server/KerberosAuthenticationHandler.java   | 204 +---
 .../MultiSchemeAuthenticationHandler.java       |  11 +-
 .../authentication/util/KerberosUtil.java       | 198 +++-
 .../util/RolloverSignerSecretProvider.java      |   2 +
 .../server/TestAuthenticationFilter.java        |   1 +
 .../TestMultiSchemeAuthenticationHandler.java   |   2 +-
 .../authentication/util/TestKerberosUtil.java   |  73 ++
 .../dev-support/findbugsExcludeFile.xml         |  26 +
 hadoop-common-project/hadoop-common/pom.xml     |   4 +-
 .../org/apache/hadoop/conf/Configuration.java   |   6 +-
 .../apache/hadoop/crypto/AesCtrCryptoCodec.java |   6 +
 .../org/apache/hadoop/crypto/CryptoCodec.java   |   3 +-
 .../apache/hadoop/crypto/CryptoInputStream.java |   1 +
 .../hadoop/crypto/CryptoOutputStream.java       |   1 +
 .../hadoop/crypto/OpensslAesCtrCryptoCodec.java |  22 +-
 .../crypto/key/KeyProviderCryptoExtension.java  |  27 +-
 .../crypto/key/kms/KMSClientProvider.java       |  10 +
 .../hadoop/crypto/random/OsSecureRandom.java    |   5 +
 .../org/apache/hadoop/fs/ContentSummary.java    |  16 +-
 .../hadoop/fs/FSDataOutputStreamBuilder.java    | 171 +++-
 .../java/org/apache/hadoop/fs/FileSystem.java   |  41 +-
 .../java/org/apache/hadoop/fs/FileUtil.java     |   6 +-
 .../org/apache/hadoop/fs/FilterFileSystem.java  |   9 +-
 .../org/apache/hadoop/fs/HarFileSystem.java     |   9 +-
 .../apache/hadoop/fs/RawLocalFileSystem.java    |  17 +-
 .../org/apache/hadoop/fs/ftp/FTPFileSystem.java |   9 +-
 .../apache/hadoop/fs/sftp/SFTPFileSystem.java   |   2 +-
 .../hadoop/fs/shell/CommandWithDestination.java |   2 +-
 .../java/org/apache/hadoop/fs/shell/Count.java  |  41 +-
 .../apache/hadoop/ha/ActiveStandbyElector.java  |   2 +-
 .../org/apache/hadoop/http/HttpRequestLog.java  |  23 +-
 .../org/apache/hadoop/io/DoubleWritable.java    |   4 +-
 .../org/apache/hadoop/io/FloatWritable.java     |   6 +-
 .../main/java/org/apache/hadoop/io/IOUtils.java |  32 +-
 .../apache/hadoop/io/erasurecode/ECSchema.java  |   4 +-
 .../rawcoder/AbstractNativeRawDecoder.java      |   8 +-
 .../rawcoder/AbstractNativeRawEncoder.java      |   8 +-
 .../org/apache/hadoop/io/file/tfile/Utils.java  |   2 +-
 .../org/apache/hadoop/ipc/CallQueueManager.java | 127 ++-
 .../main/java/org/apache/hadoop/ipc/Client.java |   3 +-
 .../org/apache/hadoop/ipc/FairCallQueue.java    |  90 +-
 .../main/java/org/apache/hadoop/ipc/Server.java |  22 +-
 .../hadoop/metrics2/lib/MetricsRegistry.java    |   9 +
 .../metrics2/lib/MutableMetricsFactory.java     |   4 +
 .../metrics2/lib/MutableRollingAverages.java    | 280 ++++++
 .../hadoop/metrics2/lib/RollingAverages.java    | 286 ------
 .../hadoop/metrics2/source/JvmMetrics.java      |  10 +
 .../hadoop/security/LdapGroupsMapping.java      |  41 +-
 .../AbstractDelegationTokenSecretManager.java   |   1 +
 .../ZKDelegationTokenSecretManager.java         |   8 +-
 .../web/DelegationTokenAuthenticatedURL.java    |  11 +
 .../DelegationTokenAuthenticationHandler.java   |   8 +
 .../web/DelegationTokenAuthenticator.java       |  11 +
 .../org/apache/hadoop/tracing/TraceAdmin.java   |   2 +-
 .../org/apache/hadoop/util/HostsFileReader.java |   4 +-
 .../apache/hadoop/util/InstrumentedLock.java    |  12 +-
 .../hadoop/util/InstrumentedReadLock.java       |   6 +-
 .../hadoop/util/InstrumentedReadWriteLock.java  |   4 +-
 .../hadoop/util/InstrumentedWriteLock.java      |   6 +-
 .../org/apache/hadoop/util/SysInfoWindows.java  |   6 +-
 .../src/main/resources/core-default.xml         |  20 +
 .../src/site/markdown/FileSystemShell.md        |   9 +-
 .../src/site/markdown/GroupsMapping.md          |   1 +
 .../src/site/markdown/filesystem/filesystem.md  |   4 +
 .../java/org/apache/hadoop/cli/TestCLI.java     |   2 +-
 .../apache/hadoop/conf/TestConfiguration.java   |  82 ++
 ...yptoStreamsWithOpensslAesCtrCryptoCodec.java |  32 +-
 .../apache/hadoop/fs/TestLocalFileSystem.java   |  10 +-
 .../fs/contract/AbstractContractCreateTest.java |  18 +
 .../apache/hadoop/fs/ftp/TestFTPFileSystem.java |  55 ++
 .../hadoop/fs/sftp/TestSFTPFileSystem.java      |  10 +
 .../hadoop/fs/shell/TestCopyPreserveFlag.java   |   4 +-
 .../org/apache/hadoop/fs/shell/TestCount.java   |   5 +-
 .../ha/TestActiveStandbyElectorRealZK.java      |  28 +
 .../apache/hadoop/ipc/TestCallQueueManager.java |  78 +-
 .../apache/hadoop/ipc/TestFairCallQueue.java    | 156 +++
 .../java/org/apache/hadoop/ipc/TestIPC.java     |   2 +-
 .../java/org/apache/hadoop/ipc/TestRPC.java     |   4 +-
 .../hadoop/metrics2/lib/MetricsTestHelper.java  |  49 +
 .../lib/TestMutableRollingAverages.java         | 192 ++++
 .../metrics2/lib/TestRollingAverages.java       | 124 ---
 .../hadoop/security/TestLdapGroupsMapping.java  |  64 +-
 .../apache/hadoop/test/GenericTestUtils.java    |  24 +-
 .../hadoop/test/TestGenericTestUtils.java       |  32 +
 .../hadoop/util/Crc32PerformanceTest.java       | 253 +++--
 .../hadoop/util/TestInstrumentedLock.java       |   6 +-
 .../util/TestInstrumentedReadWriteLock.java     |   7 +-
 .../src/test/resources/testConf.xml             |   2 +-
 .../hadoop/crypto/key/kms/server/KMS.java       |   2 +-
 .../key/kms/server/KMSExceptionsProvider.java   |   4 +
 .../src/test/resources/log4j.properties         |   5 +-
 .../java/org/apache/hadoop/minikdc/MiniKdc.java |   7 +-
 .../main/java/org/apache/hadoop/fs/Hdfs.java    |   2 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  25 +-
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  38 +-
 .../hadoop/hdfs/DFSStripedInputStream.java      |   2 +-
 .../hadoop/hdfs/DistributedFileSystem.java      | 228 ++++-
 .../apache/hadoop/hdfs/client/HdfsAdmin.java    |  44 +-
 .../hdfs/client/HdfsClientConfigKeys.java       |   4 +
 .../hdfs/client/impl/BlockReaderLocal.java      |  52 +-
 .../hadoop/hdfs/client/impl/DfsClientConf.java  |  26 +
 .../impl/metrics/BlockReaderIoProvider.java     |  89 ++
 .../impl/metrics/BlockReaderLocalMetrics.java   |  78 ++
 .../hdfs/client/impl/metrics/package-info.java  |  27 +
 .../hadoop/hdfs/protocol/BlocksStats.java       |  90 ++
 .../hadoop/hdfs/protocol/ClientProtocol.java    |  57 +-
 .../hdfs/protocol/ECBlockGroupsStats.java       |  83 ++
 .../hdfs/protocol/ErasureCodingPolicy.java      |   2 +
 .../hadoop/hdfs/protocol/OpenFileEntry.java     |  58 ++
 .../hadoop/hdfs/protocol/OpenFilesIterator.java |  59 ++
 .../datatransfer/sasl/DataTransferSaslUtil.java |   1 +
 .../ClientNamenodeProtocolTranslatorPB.java     |  70 ++
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  78 +-
 .../src/main/proto/ClientNamenodeProtocol.proto |  47 +
 .../src/main/proto/erasurecoding.proto          |   7 +
 .../src/main/proto/hdfs.proto                   |   1 +
 .../hdfs/protocol/TestErasureCodingPolicy.java  |  22 +
 .../libhdfs-tests/test_libhdfs_threaded.c       |  26 +
 .../src/main/native/libhdfs/os/posix/mutexes.c  |   9 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  22 +-
 ...tNamenodeProtocolServerSideTranslatorPB.java |  60 ++
 .../hdfs/server/balancer/NameNodeConnector.java |   8 +-
 .../AvailableSpaceBlockPlacementPolicy.java     |  21 +
 .../server/blockmanagement/BlockManager.java    |  62 +-
 .../blockmanagement/BlockManagerSafeMode.java   |  29 +-
 .../blockmanagement/CorruptReplicasMap.java     | 103 +-
 .../blockmanagement/InvalidateBlocks.java       | 198 +++-
 .../blockmanagement/LowRedundancyBlocks.java    | 125 ++-
 .../apache/hadoop/hdfs/server/common/Util.java  |   6 +-
 .../hdfs/server/datanode/BPOfferService.java    |  12 +-
 .../hdfs/server/datanode/BPServiceActor.java    |   6 +-
 .../server/datanode/BlockRecoveryWorker.java    |  14 +-
 .../hdfs/server/datanode/BlockSender.java       |  13 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |   2 +-
 .../server/datanode/DataNodeFaultInjector.java  |   4 +
 .../datanode/fsdataset/impl/BlockPoolSlice.java |   3 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  10 +-
 .../datanode/metrics/DataNodePeerMetrics.java   |  42 +-
 .../apache/hadoop/hdfs/server/mover/Mover.java  |   1 +
 .../ContentSummaryComputationContext.java       |  48 +
 .../namenode/ErasureCodingPolicyManager.java    |  25 +
 .../server/namenode/FSDirErasureCodingOp.java   |  13 +
 .../hdfs/server/namenode/FSNamesystem.java      | 270 +++++-
 .../hdfs/server/namenode/FSNamesystemLock.java  |   5 +-
 .../hadoop/hdfs/server/namenode/INode.java      |   1 +
 .../hdfs/server/namenode/LeaseManager.java      |  78 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   |  11 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  32 +-
 .../hdfs/server/namenode/TransferFsImage.java   |  23 +-
 .../metrics/ECBlockGroupsStatsMBean.java        |  59 ++
 .../namenode/metrics/FSNamesystemMBean.java     |  26 +-
 .../namenode/metrics/NameNodeMetrics.java       |  38 +-
 .../metrics/ReplicatedBlocksStatsMBean.java     |  63 ++
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |  67 +-
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   |  54 +-
 .../FileDistributionCalculator.java             |   4 +-
 .../offlineImageViewer/OfflineImageViewer.java  |   2 +-
 .../src/main/resources/hdfs-default.xml         |  22 +-
 .../src/site/markdown/HDFSCommands.md           |   9 +-
 .../src/site/markdown/HDFSErasureCoding.md      |   7 +-
 .../hadoop-hdfs/src/site/markdown/HdfsDesign.md |  33 +-
 .../src/site/markdown/HdfsImageViewer.md        |   2 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java     | 133 ++-
 .../hadoop/hdfs/TestDistributedFileSystem.java  | 150 ++-
 .../hadoop/hdfs/TestErasureCodingPolicies.java  |  85 +-
 ...ErasureCodingPoliciesWithRandomECPolicy.java |  48 +
 .../TestErasureCodingPolicyWithSnapshot.java    |  50 +-
 ...ingPolicyWithSnapshotWithRandomECPolicy.java |  49 +
 .../apache/hadoop/hdfs/TestFileCorruption.java  |   2 +-
 .../org/apache/hadoop/hdfs/TestFileStatus.java  |  27 +-
 .../org/apache/hadoop/hdfs/TestHdfsAdmin.java   |  59 ++
 .../apache/hadoop/hdfs/TestLeaseRecovery2.java  |  12 +-
 .../hadoop/hdfs/TestMaintenanceState.java       |   2 +-
 .../apache/hadoop/hdfs/TestMiniDFSCluster.java  |   2 +-
 .../hadoop/hdfs/TestMissingBlocksAlert.java     |   4 +-
 .../java/org/apache/hadoop/hdfs/TestPread.java  | 146 +++
 .../java/org/apache/hadoop/hdfs/TestQuota.java  |  13 +-
 .../org/apache/hadoop/hdfs/TestSafeMode.java    |   4 +-
 .../client/impl/TestBlockReaderIoProvider.java  |  75 ++
 .../impl/TestBlockReaderLocalMetrics.java       | 227 +++++
 .../hdfs/server/balancer/TestBalancer.java      |  46 +-
 .../TestComputeInvalidateWork.java              | 151 ++-
 .../blockmanagement/TestCorruptReplicaInfo.java | 199 ++--
 .../TestLowRedundancyBlockQueues.java           |  73 +-
 .../TestPendingReconstruction.java              |  44 +
 .../TestReplicationPolicyWithNodeGroup.java     |   4 +
 .../TestUnderReplicatedBlocks.java              |  42 +-
 .../server/datanode/TestBlockReplacement.java   |  40 +-
 .../datanode/TestDataNodeFaultInjector.java     |   2 +-
 .../server/datanode/TestDataNodeMetrics.java    |  62 ++
 .../datanode/TestDataNodePeerMetrics.java       |  10 +-
 .../datanode/TestReadOnlySharedStorage.java     |   4 +-
 .../fsdataset/impl/TestLazyPersistFiles.java    |   2 +-
 .../TestDataNodeOutlierDetectionViaMetrics.java |  18 +-
 .../server/namenode/TestAddStripedBlocks.java   |   2 +
 .../hdfs/server/namenode/TestCheckpoint.java    |   4 +-
 .../namenode/TestDecommissioningStatus.java     |   8 +-
 .../server/namenode/TestFSNamesystemLock.java   |   7 +-
 .../namenode/TestFavoredNodesEndToEnd.java      |   2 +-
 .../hdfs/server/namenode/TestFileTruncate.java  |   2 +-
 .../hdfs/server/namenode/TestLeaseManager.java  |  26 +-
 .../hdfs/server/namenode/TestListOpenFiles.java | 234 +++++
 .../server/namenode/TestNameNodeMXBean.java     |  52 +-
 .../namenode/TestReconstructStripedBlocks.java  |  83 ++
 .../namenode/ha/TestStandbyCheckpoints.java     |   8 +-
 .../namenode/metrics/TestNameNodeMetrics.java   | 366 +++++++-
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 108 +++
 .../TestOfflineImageViewer.java                 |   7 +-
 .../hadoop/tools/TestHdfsConfigFields.java      |   4 -
 .../org/apache/hadoop/tools/TestJMXGet.java     |   2 +-
 .../apache/hadoop/tracing/TestTraceAdmin.java   |   8 +
 .../test/resources/testErasureCodingConf.xml    |  41 +
 .../hadoop/mapreduce/v2/app/JobEndNotifier.java |   5 -
 .../hadoop/mapreduce/v2/app/MRAppMaster.java    |  51 +-
 .../mapreduce/v2/app/TestJobEndNotifier.java    |  25 +-
 .../apache/hadoop/mapreduce/CryptoUtils.java    |  14 +-
 .../java/org/apache/hadoop/hdfs/NNBench.java    |  10 +-
 .../apache/hadoop/test/MapredTestDriver.java    |   2 +-
 hadoop-project/pom.xml                          |  10 +-
 .../apache/hadoop/tools/TestHadoopArchives.java |  12 +-
 .../NativeS3FileSystemContractBaseTest.java     |   8 +-
 .../fs/azure/AzureNativeFileSystemStore.java    |  58 +-
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |  41 +-
 .../fs/azure/SecureStorageInterfaceImpl.java    |  30 +-
 .../hadoop/fs/azure/StorageInterfaceImpl.java   |  28 +-
 .../hadoop-azure/src/site/markdown/index.md     | 233 ++---
 .../fs/azure/AzureBlobStorageTestAccount.java   |  37 +-
 .../TestAzureFileSystemErrorConditions.java     |  13 +-
 ...estFileSystemOperationExceptionHandling.java |   1 +
 ...TestFileSystemOperationExceptionMessage.java |  13 +-
 ...perationsExceptionHandlingMultiThreaded.java |   1 +
 .../TestFileSystemOperationsWithThreads.java    |   8 +-
 .../TestNativeAzureFileSystemClientLogging.java |  12 +-
 .../TestReadAndSeekPageBlobAfterWrite.java      |  10 -
 .../org/apache/hadoop/tools/OptionsParser.java  |   3 +
 .../apache/hadoop/tools/TestOptionsParser.java  |   2 +-
 .../org/apache/hadoop/tools/TestDistCh.java     |  12 +-
 .../mapred/gridmix/TestGridmixSubmission.java   |  10 +-
 .../hadoop/mapred/gridmix/TestLoadJob.java      |  19 +-
 .../hadoop/mapred/gridmix/TestSleepJob.java     |  14 +-
 hadoop-tools/hadoop-sls/pom.xml                 |   2 +
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   |  56 +-
 .../hadoop/yarn/sls/appmaster/AMSimulator.java  |  16 +-
 .../yarn/sls/appmaster/MRAMSimulator.java       |  68 +-
 .../yarn/sls/nodemanager/NMSimulator.java       |  30 +-
 .../yarn/sls/scheduler/SchedulerMetrics.java    |   8 +-
 .../hadoop/yarn/sls/BaseSLSRunnerTest.java      |  41 +-
 .../sls/TestReservationSystemInvariants.java    |   4 +
 .../apache/hadoop/yarn/sls/TestSLSRunner.java   |  17 +-
 .../src/test/resources/exit-invariants.txt      |   8 +
 .../src/test/resources/log4j.properties         |  19 +
 .../src/test/resources/ongoing-invariants.txt   |  54 ++
 .../hadoop/yarn/api/records/Resource.java       |  53 +-
 .../hadoop/yarn/conf/YarnConfiguration.java     |   5 +
 .../yarn/client/api/impl/YarnClientImpl.java    |   2 +-
 .../yarn/client/api/impl/TestYarnClient.java    |   2 +-
 .../impl/pb/AllocateResponsePBImpl.java         |   2 +-
 .../pb/GetNewApplicationResponsePBImpl.java     |   9 +-
 ...RegisterApplicationMasterResponsePBImpl.java |   2 +-
 .../ApplicationResourceUsageReportPBImpl.java   |  14 +-
 .../pb/ApplicationSubmissionContextPBImpl.java  |   6 +-
 .../api/records/impl/pb/ContainerPBImpl.java    |   6 +-
 .../records/impl/pb/ContainerReportPBImpl.java  |   6 +-
 .../records/impl/pb/ContainerStatusPBImpl.java  |   2 +-
 .../api/records/impl/pb/NodeReportPBImpl.java   |  10 +-
 .../yarn/api/records/impl/pb/ProtoUtils.java    |   2 +-
 .../impl/pb/ReservationRequestPBImpl.java       |   2 +-
 .../pb/ResourceAllocationRequestPBImpl.java     |   2 +-
 .../records/impl/pb/ResourceOptionPBImpl.java   |   2 +-
 .../api/records/impl/pb/ResourcePBImpl.java     |  26 +-
 .../records/impl/pb/ResourceRequestPBImpl.java  |   2 +-
 .../logaggregation/AggregatedLogFormat.java     |  11 +-
 .../nodelabels/CommonNodeLabelsManager.java     |   8 +-
 .../yarn/security/ContainerTokenIdentifier.java |   2 +-
 .../pb/AddToClusterNodeLabelsRequestPBImpl.java |   7 +
 .../resource/DefaultResourceCalculator.java     |   6 +
 .../resource/DominantResourceCalculator.java    |   8 +
 .../yarn/util/resource/ResourceCalculator.java  |  23 +
 .../hadoop/yarn/util/resource/Resources.java    |  16 +-
 .../src/main/resources/yarn-default.xml         |   8 +
 .../client/api/impl/TestTimelineClient.java     |   8 +-
 .../impl/pb/ContainerStartDataPBImpl.java       |   7 +-
 .../impl/pb/NMContainerStatusPBImpl.java        |   6 +-
 .../impl/pb/NodeHeartbeatResponsePBImpl.java    |   2 +-
 .../pb/RegisterNodeManagerRequestPBImpl.java    |   3 +-
 .../pb/RegisterNodeManagerResponsePBImpl.java   |   3 +-
 .../protocolrecords/TestProtocolRecords.java    |  20 +
 .../AMRMProxyApplicationContextImpl.java        |  17 +-
 .../nodemanager/amrmproxy/AMRMProxyService.java | 179 +++-
 .../amrmproxy/AMRMProxyTokenSecretManager.java  |  93 +-
 .../amrmproxy/AbstractRequestInterceptor.java   |  37 +-
 .../amrmproxy/RequestInterceptor.java           |  20 +-
 .../containermanager/ContainerManagerImpl.java  |   4 +
 .../recovery/NMLeveldbStateStoreService.java    | 185 +++-
 .../recovery/NMNullStateStoreService.java       |  29 +
 .../recovery/NMStateStoreService.java           |  86 ++
 .../amrmproxy/BaseAMRMProxyTest.java            |  78 +-
 .../amrmproxy/TestAMRMProxyService.java         | 119 ++-
 .../TestAMRMProxyTokenSecretManager.java        |  62 +-
 .../recovery/NMMemoryStateStoreService.java     |  65 +-
 .../TestNMLeveldbStateStoreService.java         | 105 ++-
 .../resourcemanager/ResourceTrackerService.java |   6 +-
 .../nodelabels/RMNodeLabelsManager.java         |   8 +-
 .../resourcemanager/recovery/RMStateStore.java  |  13 +-
 .../recovery/StoreLimitException.java           |  33 +
 .../recovery/ZKRMStateStore.java                |  23 +-
 .../rmapp/RMAppCollectorUpdateEvent.java        |  40 -
 .../resourcemanager/rmapp/RMAppEvent.java       |  25 +
 .../server/resourcemanager/rmapp/RMAppImpl.java |  42 +-
 .../scheduler/AppSchedulingInfo.java            |  69 +-
 .../resourcemanager/scheduler/QueueMetrics.java | 194 ++--
 .../scheduler/ResourceUsage.java                |  14 +-
 .../scheduler/SchedulerApplicationAttempt.java  |  12 +-
 .../scheduler/capacity/AbstractCSQueue.java     |  23 +
 .../scheduler/capacity/CSQueue.java             |   7 +
 .../scheduler/capacity/CSQueueMetrics.java      |  56 +-
 .../scheduler/capacity/CSQueueUtils.java        |  16 +-
 .../CapacitySchedulerConfiguration.java         |  34 +
 .../scheduler/capacity/LeafQueue.java           |  45 +-
 .../scheduler/capacity/UserInfo.java            |  15 +-
 .../scheduler/capacity/UsersManager.java        | 172 +++-
 .../scheduler/common/AssignmentInformation.java |  60 +-
 .../scheduler/common/fica/FiCaSchedulerApp.java |  37 +-
 .../scheduler/fair/FSAppAttempt.java            |  28 +-
 .../scheduler/fair/FairSchedulerEventLog.java   |   4 +-
 .../webapp/CapacitySchedulerPage.java           |   9 +-
 .../resourcemanager/webapp/RMWSConsts.java      | 202 ++++
 .../webapp/RMWebServiceProtocol.java            | 635 +++++++++++++
 .../resourcemanager/webapp/RMWebServices.java   | 937 +++++++++----------
 .../yarn/server/resourcemanager/MockRM.java     |  27 +-
 .../resourcemanager/TestRMAdminService.java     |  59 ++
 .../invariants/TestMetricsInvariantChecker.java |   7 +-
 .../nodelabels/TestRMNodeLabelsManager.java     |  49 +
 .../recovery/TestZKRMStateStore.java            |  37 +
 .../scheduler/TestQueueMetrics.java             |  61 +-
 .../TestSchedulerApplicationAttempt.java        |   7 +-
 .../capacity/TestCapacityScheduler.java         |   6 +-
 .../scheduler/capacity/TestLeafQueue.java       | 123 +++
 .../TestNodeLabelContainerAllocation.java       |  10 +-
 .../resourcemanager/webapp/TestRMWebApp.java    |   8 +-
 .../server/ContainerTokenIdentifierForTest.java |   5 +-
 .../hadoop/yarn/server/MiniYARNCluster.java     |   6 +-
 .../storage/TimelineSchemaCreator.java          |  36 +-
 .../src/site/markdown/CapacityScheduler.md      |   1 +
 349 files changed, 11360 insertions(+), 3118 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b3c4b84/hadoop-common-project/hadoop-common/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b3c4b84/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b3c4b84/hadoop-project/pom.xml
----------------------------------------------------------------------

