Posted to common-commits@hadoop.apache.org by as...@apache.org on 2017/12/30 01:37:45 UTC

[01/49] hadoop git commit: HDFS-12347. TestBalancerRPCDelay#testBalancerRPCDelay fails very frequently. Contributed by Bharat Viswanadham [Forced Update!]

Repository: hadoop
Updated Branches:
  refs/heads/YARN-6592 2b81e801d -> 1c5fa65b9 (forced update)


HDFS-12347. TestBalancerRPCDelay#testBalancerRPCDelay fails very frequently.  Contributed by Bharat Viswanadham


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7499f2d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7499f2d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7499f2d

Branch: refs/heads/YARN-6592
Commit: c7499f2d242c64bee8f822a22161d956525f7153
Parents: c7a4dda
Author: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Authored: Tue Dec 19 10:02:30 2017 +0800
Committer: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Committed: Tue Dec 19 10:02:30 2017 +0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7499f2d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index a900ad1..9452b8b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -2084,7 +2084,7 @@ public class TestBalancer {
     initConf(conf);
     conf.setInt(DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_KEY, 30);
 
-    int numDNs = 40;
+    int numDNs = 20;
     long[] capacities = new long[numDNs];
     String[] racks = new String[numDNs];
     for(int i = 0; i < numDNs; i++) {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[29/49] hadoop git commit: HADOOP-15086. NativeAzureFileSystem file rename is not atomic. Contributed by Thomas Marquardt

Posted by as...@apache.org.
HADOOP-15086. NativeAzureFileSystem file rename is not atomic.
Contributed by Thomas Marquardt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/52babbb4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/52babbb4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/52babbb4

Branch: refs/heads/YARN-6592
Commit: 52babbb4a0e3c89f2025bf6e9a1b51a96e8f8fb0
Parents: 76e664e
Author: Steve Loughran <st...@apache.org>
Authored: Fri Dec 22 11:39:55 2017 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Fri Dec 22 11:39:55 2017 +0000

----------------------------------------------------------------------
 .../fs/azure/AzureNativeFileSystemStore.java    | 16 +++--
 .../hadoop/fs/azure/NativeAzureFileSystem.java  | 25 +++++--
 .../fs/azure/NativeAzureFileSystemHelper.java   | 18 +++++
 .../hadoop/fs/azure/NativeFileSystemStore.java  |  4 ++
 .../fs/azure/SecureStorageInterfaceImpl.java    |  8 ++-
 .../hadoop/fs/azure/StorageInterface.java       |  2 +-
 .../hadoop/fs/azure/StorageInterfaceImpl.java   |  8 ++-
 .../azure/ITestNativeAzureFileSystemLive.java   | 72 ++++++++++++++++++++
 .../hadoop/fs/azure/MockStorageInterface.java   |  9 ++-
 9 files changed, 145 insertions(+), 17 deletions(-)
----------------------------------------------------------------------
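
For context on the bug: rename was previously implemented as an unconditional
server-side copy followed by a delete of the source, so two clients racing to
rename different files to the same destination could both report success. A
minimal sketch of that race, assuming hypothetical helpers (exists, copyBlob,
deleteBlob) rather than the real store API:

    // Sketch of the pre-HADOOP-15086 behavior (helper names are hypothetical).
    boolean rename(String srcKey, String dstKey) throws IOException {
      // Threads A and B can both pass this client-side existence check...
      if (exists(dstKey)) {
        return false;
      }
      // ...then both issue an unconditional copy; the later copy silently
      // overwrites the earlier one, so both renames appear to succeed.
      copyBlob(srcKey, dstKey);
      deleteBlob(srcKey);
      return true;
    }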


http://git-wip-us.apache.org/repos/asf/hadoop/blob/52babbb4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index f1031b4..9396a51 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -2605,12 +2605,18 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
 
   @Override
   public void rename(String srcKey, String dstKey) throws IOException {
-    rename(srcKey, dstKey, false, null);
+    rename(srcKey, dstKey, false, null, true);
   }
 
   @Override
   public void rename(String srcKey, String dstKey, boolean acquireLease,
-      SelfRenewingLease existingLease) throws IOException {
+                     SelfRenewingLease existingLease) throws IOException {
+    rename(srcKey, dstKey, acquireLease, existingLease, true);
+  }
+
+  @Override
+  public void rename(String srcKey, String dstKey, boolean acquireLease,
+      SelfRenewingLease existingLease, boolean overwriteDestination) throws IOException {
 
     LOG.debug("Moving {} to {}", srcKey, dstKey);
 
@@ -2672,7 +2678,8 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
       // a more intensive exponential retry policy when the cluster is getting
       // throttled.
       try {
-        dstBlob.startCopyFromBlob(srcBlob, null, getInstrumentedContext());
+        dstBlob.startCopyFromBlob(srcBlob, null,
+            getInstrumentedContext(), overwriteDestination);
       } catch (StorageException se) {
         if (se.getHttpStatusCode() == HttpURLConnection.HTTP_UNAVAILABLE) {
           int copyBlobMinBackoff = sessionConfiguration.getInt(
@@ -2695,7 +2702,8 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
           options.setRetryPolicyFactory(new RetryExponentialRetry(
             copyBlobMinBackoff, copyBlobDeltaBackoff, copyBlobMaxBackoff,
             copyBlobMaxRetries));
-          dstBlob.startCopyFromBlob(srcBlob, options, getInstrumentedContext());
+          dstBlob.startCopyFromBlob(srcBlob, options,
+              getInstrumentedContext(), overwriteDestination);
         } else {
           throw se;
         }
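
Design note on the overloads above: both pre-existing rename signatures
delegate to the new five-argument overload with overwriteDestination set to
true, so every existing call site keeps the old overwrite-on-rename
semantics. Only the single file-rename call site in NativeAzureFileSystem
(next hunk) passes false to opt into the non-overwriting, conditional copy.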

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52babbb4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index 85a46ea..3d44b20 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -3269,16 +3269,27 @@ public class NativeAzureFileSystem extends FileSystem {
     } else if (!srcMetadata.isDir()) {
       LOG.debug("Source {} found as a file, renaming.", src);
       try {
-        store.rename(srcKey, dstKey);
+        // HADOOP-15086 - file rename must ensure that the destination does
+        // not exist.  The fix is targeted to this call only to avoid
+        // regressions.  Other call sites are attempting to rename temporary
+        // files, redo a failed rename operation, or rename a directory
+        // recursively; for these cases the destination may exist.
+        store.rename(srcKey, dstKey, false, null,
+            false);
       } catch(IOException ex) {
-
         Throwable innerException = NativeAzureFileSystemHelper.checkForAzureStorageException(ex);
 
-        if (innerException instanceof StorageException
-            && NativeAzureFileSystemHelper.isFileNotFoundException((StorageException) innerException)) {
-
-          LOG.debug("BlobNotFoundException encountered. Failing rename", src);
-          return false;
+        if (innerException instanceof StorageException) {
+          if (NativeAzureFileSystemHelper.isFileNotFoundException(
+              (StorageException) innerException)) {
+            LOG.debug("BlobNotFoundException encountered. Failing rename", src);
+            return false;
+          }
+          if (NativeAzureFileSystemHelper.isBlobAlreadyExistsConflict(
+              (StorageException) innerException)) {
+            LOG.debug("Destination BlobAlreadyExists. Failing rename", src);
+            return false;
+          }
         }
 
         throw ex;
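
The caller-visible contract after the fix: FileSystem#rename returns false
when the destination file already exists, instead of silently overwriting it.
A sketch of what client code observes, with illustrative paths (assumes a
wasb:// default filesystem in conf):

    FileSystem fs = FileSystem.get(conf);
    Path src1 = new Path("/tmp/a");
    Path src2 = new Path("/tmp/b");
    Path dst = new Path("/tmp/dst");
    // src1 and src2 exist; the first rename wins and dst is created
    fs.rename(src1, dst);
    // the second rename hits the server-side 409 BlobAlreadyExists
    // condition and returns false instead of overwriting dst
    boolean renamed = fs.rename(src2, dst);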

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52babbb4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
index 57af1f8..754f343 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemHelper.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.azure;
 
 import java.io.EOFException;
 import java.io.IOException;
+import java.net.HttpURLConnection;
 import java.util.Map;
 
 import com.google.common.base.Preconditions;
@@ -96,6 +97,23 @@ final class NativeAzureFileSystemHelper {
   }
 
   /*
+   * Determines if a conditional request failed because the blob already
+   * exists.
+   *
+   * @param e - the storage exception thrown by the failed operation.
+   *
+   * @return true if a conditional request failed because the blob already
+   * exists; otherwise, returns false.
+   */
+  static boolean isBlobAlreadyExistsConflict(StorageException e) {
+    if (e.getHttpStatusCode() == HttpURLConnection.HTTP_CONFLICT
+        && StorageErrorCodeStrings.BLOB_ALREADY_EXISTS.equals(e.getErrorCode())) {
+      return true;
+    }
+    return false;
+  }
+
+  /*
    * Helper method that logs stack traces from all live threads.
    */
   public static void logAllLiveStackTraces() {
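
A quick sketch of what the new helper matches, using a synthetic
StorageException constructed the same way the mock store below constructs
one; only an HTTP 409 whose error code is BlobAlreadyExists returns true:

    StorageException conflict = new StorageException("BlobAlreadyExists",
        "The blob already exists.", HttpURLConnection.HTTP_CONFLICT,
        null, null);
    // matches: status 409 and error code BlobAlreadyExists
    assert NativeAzureFileSystemHelper.isBlobAlreadyExistsConflict(conflict);

    StorageException notFound = new StorageException("BlobNotFound",
        "The specified blob does not exist.",
        HttpURLConnection.HTTP_NOT_FOUND, null, null);
    // does not match: wrong status code and error code
    assert !NativeAzureFileSystemHelper.isBlobAlreadyExistsConflict(notFound);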

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52babbb4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java
index 57a729d..b67ab1b 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeFileSystemStore.java
@@ -91,6 +91,10 @@ interface NativeFileSystemStore {
   void rename(String srcKey, String dstKey, boolean acquireLease, SelfRenewingLease existingLease)
       throws IOException;
 
+  void rename(String srcKey, String dstKey, boolean acquireLease,
+              SelfRenewingLease existingLease, boolean overwriteDestination)
+      throws IOException;
+
   /**
    * Delete all keys with the given prefix. Used for testing.
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52babbb4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureStorageInterfaceImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureStorageInterfaceImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureStorageInterfaceImpl.java
index 7c2722e..0f54249 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureStorageInterfaceImpl.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureStorageInterfaceImpl.java
@@ -503,10 +503,14 @@ public class SecureStorageInterfaceImpl extends StorageInterface {
 
     @Override
     public void startCopyFromBlob(CloudBlobWrapper sourceBlob, BlobRequestOptions options,
-        OperationContext opContext)
+        OperationContext opContext, boolean overwriteDestination)
             throws StorageException, URISyntaxException {
+      AccessCondition dstAccessCondition =
+          overwriteDestination
+              ? null
+              : AccessCondition.generateIfNotExistsCondition();
       getBlob().startCopy(sourceBlob.getBlob().getQualifiedUri(),
-          null, null, options, opContext);
+          null, dstAccessCondition, options, opContext);
     }
 
     @Override
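
This is the heart of the fix: AccessCondition.generateIfNotExistsCondition()
from the Azure Storage SDK attaches an If-None-Match: * precondition to the
copy request, so the destination-existence check is evaluated by the service
atomically with the copy itself. If the destination blob already exists, the
service rejects the request with 409 Conflict and the BlobAlreadyExists error
code that the helper above detects.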

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52babbb4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
index e03d731..dbb3849 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterface.java
@@ -406,7 +406,7 @@ abstract class StorageInterface {
      *
      */
     public abstract void startCopyFromBlob(CloudBlobWrapper sourceBlob,
-        BlobRequestOptions options, OperationContext opContext)
+        BlobRequestOptions options, OperationContext opContext, boolean overwriteDestination)
         throws StorageException, URISyntaxException;
     
     /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52babbb4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
index 41a4dbb..e600f9e 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/StorageInterfaceImpl.java
@@ -425,10 +425,14 @@ class StorageInterfaceImpl extends StorageInterface {
 
     @Override
     public void startCopyFromBlob(CloudBlobWrapper sourceBlob, BlobRequestOptions options,
-        OperationContext opContext)
+        OperationContext opContext, boolean overwriteDestination)
             throws StorageException, URISyntaxException {
+      AccessCondition dstAccessCondition =
+          overwriteDestination
+              ? null
+              : AccessCondition.generateIfNotExistsCondition();
       getBlob().startCopy(sourceBlob.getBlob().getQualifiedUri(),
-          null, null, options, opContext);
+          null, dstAccessCondition, options, opContext);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52babbb4/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java
index f969968..9033674 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java
@@ -18,8 +18,16 @@
 
 package org.apache.hadoop.fs.azure;
 
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
 
+import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
@@ -40,6 +48,70 @@ public class ITestNativeAzureFileSystemLive extends
     return AzureBlobStorageTestAccount.create();
   }
 
+  /**
+   * Tests the rename file operation to ensure that when there are multiple
+   * attempts to rename a file to the same destination, only one rename
+   * operation is successful (HADOOP-15086).
+   */
+  @Test
+  public void testMultipleRenameFileOperationsToSameDestination()
+      throws IOException, InterruptedException {
+    final CountDownLatch latch = new CountDownLatch(1);
+    final AtomicInteger successfulRenameCount = new AtomicInteger(0);
+    final AtomicReference<IOException> unexpectedError = new AtomicReference<IOException>();
+    final Path dest = path("dest");
+
+    // Run 10 threads to rename multiple files to the same target path
+    List<Thread> threads = new ArrayList<>();
+
+    for (int i = 0; i < 10; i++) {
+      final int threadNumber = i;
+      Path src = path("test" + threadNumber);
+      threads.add(new Thread(() -> {
+        try {
+          latch.await(Long.MAX_VALUE, TimeUnit.SECONDS);
+        } catch (InterruptedException e) {
+        }
+        try {
+          try (OutputStream output = fs.create(src)) {
+            output.write(("Source file number " + threadNumber).getBytes());
+          }
+
+          if (fs.rename(src, dest)) {
+            LOG.info("rename succeeded for thread " + threadNumber);
+            successfulRenameCount.incrementAndGet();
+          }
+        } catch (IOException e) {
+          unexpectedError.compareAndSet(null, e);
+          ContractTestUtils.fail("Exception unexpected", e);
+        }
+      }));
+    }
+
+    // Start each thread
+    threads.forEach(t -> t.start());
+
+    // Wait for threads to start and wait on latch
+    Thread.sleep(2000);
+
+    // Now start to rename
+    latch.countDown();
+
+    // Wait for all threads to complete
+    threads.forEach(t -> {
+      try {
+        t.join();
+      } catch (InterruptedException e) {
+      }
+    });
+
+    if (unexpectedError.get() != null) {
+      throw unexpectedError.get();
+    }
+    assertEquals(1, successfulRenameCount.get());
+    LOG.info("Success, only one rename operation succeeded!");
+  }
+
   @Test
   public void testLazyRenamePendingCanOverwriteExistingFile()
     throws Exception {
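
A note on the test's design: the CountDownLatch holds all ten threads at a
common starting line and releases them together to maximize contention on the
single destination path. With the conditional copy in place, exactly one
rename can win the race, which is what assertEquals(1,
successfulRenameCount.get()) verifies.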

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52babbb4/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
index e0ae7b4..d5f6437 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
@@ -425,7 +425,14 @@ public class MockStorageInterface extends StorageInterface {
 
     @Override
     public void startCopyFromBlob(CloudBlobWrapper sourceBlob, BlobRequestOptions options,
-        OperationContext opContext) throws StorageException, URISyntaxException {
+        OperationContext opContext, boolean overwriteDestination) throws StorageException, URISyntaxException {
+      if (!overwriteDestination && backingStore.exists(convertUriToDecodedString(uri))) {
+        throw new StorageException("BlobAlreadyExists",
+            "The blob already exists.",
+            HttpURLConnection.HTTP_CONFLICT,
+            null,
+            null);
+      }
       backingStore.copy(convertUriToDecodedString(sourceBlob.getUri()), convertUriToDecodedString(uri));
       //TODO: set the backingStore.properties.CopyState and
       //      update azureNativeFileSystemStore.waitForCopyToComplete
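
The mock mirrors the live service so the conflict path is testable without an
Azure account: when overwriteDestination is false and the destination key
already exists in the backing store, it throws the same 409/BlobAlreadyExists
StorageException the real service would return for a failed If-None-Match
precondition.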




[06/49] hadoop git commit: Add 2.8.3 release jdiff files.

Posted by as...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f8caf5/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_2.8.3.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_2.8.3.xml b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_2.8.3.xml
new file mode 100644
index 0000000..a63b3ac
--- /dev/null
+++ b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_2.8.3.xml
@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Tue Dec 05 05:57:09 UTC 2017 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop MapReduce JobClient 2.8.3"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/hadoop-annotations.jar:/build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/jdiff.jar -verbose -classpath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/classes:/build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/hadoop-mapreduce-client-common-2.8.3.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-2.8.3.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/hadoop-yarn-client-2.8.3.jar:/build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/hadoop-mapreduce-client-core-2.8.3.jar:/build/source/h
 adoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/target/hadoop-mapreduce-client-shuffle-2.8.3.jar:/maven/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.8.3.jar:/maven/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/maven/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/maven/javax/activation/activation/1.1/activation-1.1.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar:/maven/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/maven/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/maven/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/maven/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/
 org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/com/google/inject/guice/3.0/guice-3.0.jar:/maven/javax/inject/javax.inject/1/javax.inject-1.jar:/maven/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/maven/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/maven/asm/asm/3.2/asm-3.2.jar:/maven/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/maven/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/maven/jline/jline/0.9.94/jline-0.9.94.jar:/maven/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/maven/xmlenc/xmlenc/0.52/xmlenc
 -0.52.jar:/maven/org/apache/htrace/htrace-core4/4.0.1-incubating/htrace-core4-4.0.1-incubating.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-2.8.3.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/commons-net/commons-net/3.1/commons-net-3.1.jar:/maven/org/mortbay/jetty/jetty-sslengine/6.1.26/jetty-sslengine-6.1.26.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/maven/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/maven/commons-configuration/commons-confi
 guration/1.6/commons-configuration-1.6.jar:/maven/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/maven/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/maven/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.8.3.jar:/maven/com/nimbusds/nimbus-jose-jwt/3.9/nimbus-jose-jwt-3.9.jar:/maven/net/jcip/jcip-annotations/1.0/jcip-annotations-1.0.jar:/maven/net/minidev/json-smart/1.1.1/json-smart-1.1.1.jar:/maven/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/maven/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/maven/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/maven/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/maven/org/apache/curator/curator-framework/2.7.1/curato
 r-framework-2.7.1.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/maven/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.8.3.jar:/usr/lib/jvm/java-7-openjdk-amd64/lib/tools.jar:/maven/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/maven/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar -sourcepath /build/sou
 rce/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java -apidir /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/site/jdiff/xml -apiname Apache Hadoop MapReduce JobClient 2.8.3 -->
+<package name="org.apache.hadoop.mapred">
+</package>
+
+</api>
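
Context for these files: the JDiff XML captures the public API surface of the
2.8.3 release. Hadoop's build compares later branches against these baselines
so that incompatible API changes show up as jdiff report differences.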




[05/49] hadoop git commit: Add 2.8.3 release jdiff files.

Posted by as...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f8caf5/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.8.3.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.8.3.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.8.3.xml
new file mode 100644
index 0000000..3f6c5eb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.8.3.xml
@@ -0,0 +1,2316 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Tue Dec 05 05:39:30 UTC 2017 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop YARN Client 2.8.3"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/hadoop-annotations.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/jdiff.jar -verbose -classpath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/classes:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-2.8.3.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/commons-net/commons-net/3.1/commons-net-3.1.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/java
 x/servlet/servlet-api/2.5/servlet-api-2.5.jar:/maven/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/maven/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/maven/org/mortbay/jetty/jetty-sslengine/6.1.26/jetty-sslengine-6.1.26.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/maven/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/maven/asm/asm/3.2/asm-3.2.jar:/maven/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/maven/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/maven/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/maven/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/maven/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/maven/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanuti
 ls-core-1.8.0.jar:/maven/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/maven/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.8.3.jar:/maven/com/nimbusds/nimbus-jose-jwt/3.9/nimbus-jose-jwt-3.9.jar:/maven/net/jcip/jcip-annotations/1.0/jcip-annotations-1.0.jar:/maven/net/minidev/json-smart/1.1.1/json-smart-1.1.1.jar:/maven/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/maven/org/apache/dir
 ectory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/maven/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/maven/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/maven/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/maven/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/htrace/htrace-core4/4.0.1-incubating/htrace-core4-4.0.1-incubating.jar:/maven/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.
 6.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.8.3.jar:/usr/lib/jvm/java-7-openjdk-amd64/lib/tools.jar:/maven/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.8.3.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-2.8.3.jar:/maven/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/maven/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/maven/javax/activation/activation/1.1/activation-1.1.jar:/maven/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/maven/com/google/inject/guice/3.0/guice-3.0.jar:/
 maven/javax/inject/javax.inject/1/javax.inject-1.jar:/maven/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/maven/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar -sourcepath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java -apidir /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/site/jdiff/xml -apiname Apache Hadoop YARN Client 2.8.3 -->
+<package name="org.apache.hadoop.yarn.client">
+</package>
+<package name="org.apache.hadoop.yarn.client.api">
+  <!-- start class org.apache.hadoop.yarn.client.api.AHSClient -->
+  <class name="AHSClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AHSClient" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createAHSClient" return="org.apache.hadoop.yarn.client.api.AHSClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new instance of AHSClient.]]>
+      </doc>
+    </method>
+    <method name="getApplicationReport" return="org.apache.hadoop.yarn.api.records.ApplicationReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get a report of the given Application.
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ <p>
+ If the user does not have <code>VIEW_APP</code> access then the following
+ fields in the report will be set to stubbed values:
+ <ul>
+   <li>host - set to "N/A"</li>
+   <li>RPC port - set to -1</li>
+   <li>client token - set to "N/A"</li>
+   <li>diagnostics - set to "N/A"</li>
+   <li>tracking URL - set to "N/A"</li>
+   <li>original tracking URL - set to "N/A"</li>
+   <li>resource usage report - all values are -1</li>
+ </ul>
+ 
+ @param appId
+          {@link ApplicationId} of the application that needs a report
+ @return application report
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of all Applications in the cluster.
+ </p>
+ 
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+ 
+ @return a list of reports for all applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttemptReport" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given ApplicationAttempt.
+ </p>
+ 
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+ 
+ @param applicationAttemptId
+          {@link ApplicationAttemptId} of the application attempt that needs
+          a report
+ @return application attempt report
+ @throws YarnException
+ @throws ApplicationAttemptNotFoundException if application attempt
+         not found
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttempts" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of all ApplicationAttempts of an Application in the cluster.
+ </p>
+ 
+ @param applicationId
+ @return a list of reports for all application attempts for specified
+         application
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainerReport" return="org.apache.hadoop.yarn.api.records.ContainerReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given Container.
+ </p>
+ 
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+ 
+ @param containerId
+          {@link ContainerId} of the container that needs a report
+ @return container report
+ @throws YarnException
+ @throws ContainerNotFoundException if container not found
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainers" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of all Containers of an ApplicationAttempt in the cluster.
+ </p>
+ 
+ @param applicationAttemptId
+ @return a list of reports of all containers for specified application
+         attempt
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.AHSClient -->
+  <!-- start class org.apache.hadoop.yarn.client.api.AMRMClient -->
+  <class name="AMRMClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AMRMClient" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createAMRMClient" return="org.apache.hadoop.yarn.client.api.AMRMClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new instance of AMRMClient.
+ For usage:
+ <pre>
+ {@code
+ AMRMClient.<T>createAMRMClientContainerRequest()
+ }</pre>
+ @return the newly created AMRMClient instance.]]>
+      </doc>
+    </method>
+    <method name="registerApplicationMaster" return="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appHostName" type="java.lang.String"/>
+      <param name="appHostPort" type="int"/>
+      <param name="appTrackingUrl" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Register the application master. This must be called before any 
+ other interaction
+ @param appHostName Name of the host on which master is running
+ @param appHostPort Port master is listening on
+ @param appTrackingUrl URL at which the master info can be seen
+ @return <code>RegisterApplicationMasterResponse</code>
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="allocate" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="progressIndicator" type="float"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Request additional containers and receive new container allocations.
+ Requests made via <code>addContainerRequest</code> are sent to the
+ <code>ResourceManager</code>. New containers assigned to the master are
+ retrieved. Status of completed containers and node health updates are also
+ retrieved. This also doubles up as a heartbeat to the ResourceManager and
+ must be made periodically. The call may not always return any new
+ allocations of containers. App should not make concurrent allocate
+ requests. May cause request loss.
+ 
+ <p>
+ Note: If the user has not removed container requests that have already
+ been satisfied, then the re-register may end up sending all of the
+ container requests to the RM (including matched requests), which could
+ mean the RM ends up giving it a lot of newly allocated containers.
+ </p>
+ 
+ @param progressIndicator Indicates progress made by the master
+ @return the response of the allocate request
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="unregisterApplicationMaster"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appStatus" type="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"/>
+      <param name="appMessage" type="java.lang.String"/>
+      <param name="appTrackingUrl" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Unregister the application master. This must be called in the end.
+ @param appStatus Success/Failure status of the master
+ @param appMessage Diagnostics message on failure
+ @param appTrackingUrl New URL to get master info
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="addContainerRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="T"/>
+      <doc>
+      <![CDATA[Request containers for resources before calling <code>allocate</code>
+ @param req Resource request]]>
+      </doc>
+    </method>
+    <method name="removeContainerRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="T"/>
+      <doc>
+      <![CDATA[Remove a previous container request. The previous container request
+ may have already been sent to the ResourceManager, so even after the
+ remove request the app must be prepared to receive an allocation for
+ the previous request.
+ @param req Resource request]]>
+      </doc>
+    </method>
+    <method name="requestContainerResourceChange"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="container" type="org.apache.hadoop.yarn.api.records.Container"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <doc>
+      <![CDATA[Request container resource change before calling <code>allocate</code>.
+ Any previous pending resource change request of the same container will be
+ removed.
+
+ Application that calls this method is expected to maintain the
+ <code>Container</code>s that are returned from previous successful
+ allocations or resource changes. By passing in the existing container and a
+ target resource capability to this method, the application requests the
+ ResourceManager to change the existing resource allocation to the target
+ resource allocation.
+
+ @param container The container returned from the last successful resource
+                  allocation or resource change
+ @param capability  The target resource capability of the container]]>
+      </doc>
+    </method>
+    <method name="releaseAssignedContainer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <doc>
+      <![CDATA[Release containers assigned by the Resource Manager. If the app cannot use
+ the container or wants to give up the container then it can release them.
+ The app needs to make new requests for the released resource capability if
+ it still needs it, e.g. if it released non-local resources.
+ @param containerId]]>
+      </doc>
+    </method>
+    <method name="getAvailableResources" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the currently available resources in the cluster.
+ A valid value is available after a call to allocate has been made
+ @return Currently available resources]]>
+      </doc>
+    </method>
+    <method name="getClusterNodeCount" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the current number of nodes in the cluster.
+ A valid value is available after a call to allocate has been made
+ @return Current number of nodes in the cluster]]>
+      </doc>
+    </method>
+    <method name="getMatchingRequests" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="resourceName" type="java.lang.String"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <doc>
+      <![CDATA[Get outstanding <code>ContainerRequest</code>s matching the given 
+ parameters. These ContainerRequests should have been added via
+ <code>addContainerRequest</code> earlier in the lifecycle. For performance,
+ the AMRMClient may return its internal collection directly without creating 
+ a copy. Users should not perform mutable operations on the return value.
+ Each collection in the list contains requests with identical 
+ <code>Resource</code> size that fit in the given capability. In a 
+ collection, requests will be returned in the same order as they were added.
+ @return Collection of requests matching the parameters]]>
+      </doc>
+    </method>
+    <method name="updateBlacklist"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="blacklistAdditions" type="java.util.List"/>
+      <param name="blacklistRemovals" type="java.util.List"/>
+      <doc>
+      <![CDATA[Update application's blacklist with addition or removal resources.
+ 
+ @param blacklistAdditions list of resources which should be added to the 
+        application blacklist
+ @param blacklistRemovals list of resources which should be removed from the 
+        application blacklist]]>
+      </doc>
+    </method>
+    <method name="setNMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nmTokenCache" type="org.apache.hadoop.yarn.client.api.NMTokenCache"/>
+      <doc>
+      <![CDATA[Set the NM token cache for the <code>AMRMClient</code>. This cache must
+ be shared with the {@link NMClient} used to manage containers for the
+ <code>AMRMClient</code>
+ <p>
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @param nmTokenCache the NM token cache to use.]]>
+      </doc>
+    </method>
+    <method name="getNMTokenCache" return="org.apache.hadoop.yarn.client.api.NMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the NM token cache of the <code>AMRMClient</code>. This cache must be
+ shared with the {@link NMClient} used to manage containers for the
+ <code>AMRMClient</code>.
+ <p>
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @return the NM token cache.]]>
+      </doc>
+    </method>
+    <method name="waitFor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="check" type="com.google.common.base.Supplier"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Wait for <code>check</code> to return true, checking every 1000 ms.
+ See also {@link #waitFor(com.google.common.base.Supplier, int)}
+ and {@link #waitFor(com.google.common.base.Supplier, int, int)}
+ @param check]]>
+      </doc>
+    </method>
+    <method name="waitFor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="check" type="com.google.common.base.Supplier"/>
+      <param name="checkEveryMillis" type="int"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Wait for <code>check</code> to return true, checking every
+ <code>checkEveryMillis</code> ms.
+ See also {@link #waitFor(com.google.common.base.Supplier, int, int)}
+ @param check user defined checker
+ @param checkEveryMillis interval to call <code>check</code>]]>
+      </doc>
+    </method>
+    <method name="waitFor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="check" type="com.google.common.base.Supplier"/>
+      <param name="checkEveryMillis" type="int"/>
+      <param name="logInterval" type="int"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Wait for <code>check</code> to return true, checking every
+ <code>checkEveryMillis</code> ms. In the main loop, this method logs
+ the message "waiting in main loop" every <code>logInterval</code>
+ iterations to confirm the thread is alive.
+ @param check user defined checker
+ @param checkEveryMillis interval to call <code>check</code>
+ @param logInterval interval to log for each]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.AMRMClient -->
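+  <!-- A hedged usage sketch of the AMRMClient lifecycle documented above
+       (register, heartbeat via allocate, unregister); host, port, progress,
+       and tracking URL below are illustrative placeholders:
+
+         AMRMClient<ContainerRequest> rmClient = AMRMClient.createAMRMClient();
+         rmClient.init(conf);
+         rmClient.start();
+         rmClient.registerApplicationMaster("appHost", 0, "");
+         while (!done) {
+           AllocateResponse response = rmClient.allocate(progress);
+           // consume allocated containers and completed container statuses
+         }
+         rmClient.unregisterApplicationMaster(
+             FinalApplicationStatus.SUCCEEDED, "", "");
+         rmClient.stop();
+  -->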
+  <!-- start class org.apache.hadoop.yarn.client.api.NMClient -->
+  <class name="NMClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NMClient" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createNMClient" return="org.apache.hadoop.yarn.client.api.NMClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new instance of NMClient.]]>
+      </doc>
+    </method>
+    <method name="createNMClient" return="org.apache.hadoop.yarn.client.api.NMClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Create a new instance of NMClient.]]>
+      </doc>
+    </method>
+    <method name="startContainer" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="container" type="org.apache.hadoop.yarn.api.records.Container"/>
+      <param name="containerLaunchContext" type="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>Start an allocated container.</p>
+
+ <p>The <code>ApplicationMaster</code> or other applications that use the
+ client must provide the details of the allocated container, including the
+ Id, the assigned node's Id and the token via {@link Container}. In
+ addition, the AM needs to provide the {@link ContainerLaunchContext} as
+ well.</p>
+
+ @param container the allocated container
+ @param containerLaunchContext the context information needed by the
+                               <code>NodeManager</code> to launch the
+                               container
+ @return a map between the auxiliary service names and their outputs
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="increaseContainerResource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="container" type="org.apache.hadoop.yarn.api.records.Container"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>Increase the resource of a container.</p>
+
+ <p>The <code>ApplicationMaster</code> or other applications that use the
+ client must provide the details of the container, including the Id and
+ the target resource encapsulated in the updated container token via
+ {@link Container}.
+ </p>
+
+ @param container the container with updated token
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="stopContainer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>Stop a started container.</p>
+
+ @param containerId the Id of the started container
+ @param nodeId the Id of the <code>NodeManager</code>
+ 
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainerStatus" return="org.apache.hadoop.yarn.api.records.ContainerStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>Query the status of a container.</p>
+
+ @param containerId the Id of the started container
+ @param nodeId the Id of the <code>NodeManager</code>
+ 
+ @return the status of a container
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="cleanupRunningContainersOnStop"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="enabled" type="boolean"/>
+      <doc>
+      <![CDATA[<p>Set whether the containers that are started by this client, and are
+ still running, should be stopped when the client stops. By default, the
+ feature is enabled.</p> Note that containers are stopped only when the
+ service is stopped, i.e. after {@link NMClient#stop()}.
+
+ @param enabled whether the feature is enabled or not]]>
+      </doc>
+    </method>
+    <method name="setNMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nmTokenCache" type="org.apache.hadoop.yarn.client.api.NMTokenCache"/>
+      <doc>
+      <![CDATA[Set the NM Token cache of the <code>NMClient</code>. This cache must be
+ shared with the {@link AMRMClient} that requested the containers managed
+ by this <code>NMClient</code>
+ <p>
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @param nmTokenCache the NM token cache to use.]]>
+      </doc>
+    </method>
+    <method name="getNMTokenCache" return="org.apache.hadoop.yarn.client.api.NMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the NM token cache of the <code>NMClient</code>. This cache must be
+ shared with the {@link AMRMClient} that requested the containers managed
+ by this <code>NMClient</code>
+ <p>
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @return the NM token cache]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.NMClient -->
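A minimal sketch of the NMClient lifecycle documented above: start a container, query its status, and stop it. The container variable is assumed to be a Container previously allocated through AMRMClient, and the sleep command is purely illustrative:

  import java.util.Collections;
  import org.apache.hadoop.yarn.api.records.Container;
  import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
  import org.apache.hadoop.yarn.api.records.ContainerStatus;
  import org.apache.hadoop.yarn.client.api.NMClient;
  import org.apache.hadoop.yarn.conf.YarnConfiguration;
  import org.apache.hadoop.yarn.util.Records;

  NMClient nmClient = NMClient.createNMClient();
  nmClient.init(new YarnConfiguration());
  nmClient.start();

  // 'container' is assumed to have been allocated via AMRMClient#allocate;
  // it carries the node id and container token that NMClient needs.
  ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
  ctx.setCommands(Collections.singletonList("sleep 30"));
  nmClient.startContainer(container, ctx);

  // Poll the container's status from its NodeManager.
  ContainerStatus status =
      nmClient.getContainerStatus(container.getId(), container.getNodeId());

  // Stop the container explicitly; otherwise still-running containers are
  // stopped when the client service stops, unless disabled via
  // cleanupRunningContainersOnStop(false).
  nmClient.stopContainer(container.getId(), container.getNodeId());
  nmClient.stop();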
+  <!-- start class org.apache.hadoop.yarn.client.api.NMTokenCache -->
+  <class name="NMTokenCache" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NMTokenCache"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Creates a NM token cache instance.]]>
+      </doc>
+    </constructor>
+    <method name="getSingleton" return="org.apache.hadoop.yarn.client.api.NMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the singleton NM token cache.
+
+ @return the singleton NM token cache.]]>
+      </doc>
+    </method>
+    <method name="getNMToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeAddr" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns NMToken, null if absent. Only the singleton obtained from
+ {@link #getSingleton()} is looked at for the tokens. If you are using your
+ own NMTokenCache that is different from the singleton, use
+ {@link #getToken(String) }
+ 
+ @param nodeAddr node address (host:port)
+ @return {@link Token} NMToken required for communicating with node manager]]>
+      </doc>
+    </method>
+    <method name="setNMToken"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeAddr" type="java.lang.String"/>
+      <param name="token" type="org.apache.hadoop.yarn.api.records.Token"/>
+      <doc>
+      <![CDATA[Sets the NMToken for node address only in the singleton obtained from
+ {@link #getSingleton()}. If you are using your own NMTokenCache that is
+ different from the singleton, use {@link #setToken(String, Token) }
+ 
+ @param nodeAddr
+          node address (host:port)
+ @param token
+          NMToken]]>
+      </doc>
+    </method>
+    <method name="getToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeAddr" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns NMToken, null if absent.
+ @param nodeAddr node address (host:port)
+ @return {@link Token} NMToken required for communicating with node
+         manager]]>
+      </doc>
+    </method>
+    <method name="setToken"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeAddr" type="java.lang.String"/>
+      <param name="token" type="org.apache.hadoop.yarn.api.records.Token"/>
+      <doc>
+      <![CDATA[Sets the NMToken for node address
+ @param nodeAddr node address (host:port)
+ @param token NMToken]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[NMTokenCache manages NMTokens required for an Application Master
+ communicating with individual NodeManagers.
+ <p>
+ By default Yarn client libraries {@link AMRMClient} and {@link NMClient} use
+ {@link #getSingleton()} instance of the cache.
+ <ul>
+   <li>
+     Using the singleton instance of the cache is appropriate when running a
+     single ApplicationMaster in the same JVM.
+   </li>
+   <li>
+     When using the singleton, users don't need to do anything special,
+     {@link AMRMClient} and {@link NMClient} are already set up to use the
+     default singleton {@link NMTokenCache}
+     </li>
+ </ul>
+ If running multiple Application Masters in the same JVM, a different cache
+ instance should be used for each Application Master.
+ <ul>
+   <li>
+     If using the {@link AMRMClient} and the {@link NMClient}, setting up
+     and using an instance cache is as follows:
+ <pre>
+   NMTokenCache nmTokenCache = new NMTokenCache();
+   AMRMClient rmClient = AMRMClient.createAMRMClient();
+   NMClient nmClient = NMClient.createNMClient();
+   nmClient.setNMTokenCache(nmTokenCache);
+   ...
+ </pre>
+   </li>
+   <li>
+     If using the {@link AMRMClientAsync} and the {@link NMClientAsync},
+     setting up and using an instance cache is as follows:
+ <pre>
+   NMTokenCache nmTokenCache = new NMTokenCache();
+   AMRMClient rmClient = AMRMClient.createAMRMClient();
+   NMClient nmClient = NMClient.createNMClient();
+   nmClient.setNMTokenCache(nmTokenCache);
+   AMRMClientAsync rmClientAsync = new AMRMClientAsync(rmClient, 1000, [AMRM_CALLBACK]);
+   NMClientAsync nmClientAsync = new NMClientAsync("nmClient", nmClient, [NM_CALLBACK]);
+   ...
+ </pre>
+   </li>
+   <li>
+     If using {@link ApplicationMasterProtocol} and
+     {@link ContainerManagementProtocol} directly, setting up and using an
+     instance cache is as follows:
+ <pre>
+   NMTokenCache nmTokenCache = new NMTokenCache();
+   ...
+   ApplicationMasterProtocol amPro = ClientRMProxy.createRMProxy(conf, ApplicationMasterProtocol.class);
+   ...
+   AllocateRequest allocateRequest = ...
+   ...
+   AllocateResponse allocateResponse = rmClient.allocate(allocateRequest);
+   for (NMToken token : allocateResponse.getNMTokens()) {
+     nmTokenCache.setToken(token.getNodeId().toString(), token.getToken());
+   }
+   ...
+   ContainerManagementProtocolProxy nmPro = ContainerManagementProtocolProxy(conf, nmTokenCache);
+   ...
+   nmPro.startContainer(container, containerContext);
+   ...
+ </pre>
+   </li>
+ </ul>
+ It is also possible to mix the usage of a client ({@code AMRMClient} or
+ {@code NMClient}, or the async versions of them) with a protocol proxy
+ ({@code ContainerManagementProtocolProxy} or
+ {@code ApplicationMasterProtocol}).]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.NMTokenCache -->
+  <!-- start class org.apache.hadoop.yarn.client.api.SharedCacheClient -->
+  <class name="SharedCacheClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SharedCacheClient" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createSharedCacheClient" return="org.apache.hadoop.yarn.client.api.SharedCacheClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="use" return="org.apache.hadoop.fs.Path"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="resourceKey" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ The method to claim a resource with the <code>SharedCacheManager</code>.
+ The client uses a checksum to identify the resource and an
+ {@link ApplicationId} to identify which application will be using the
+ resource.
+ </p>
+ 
+ <p>
+ The <code>SharedCacheManager</code> responds with whether or not the
+ resource exists in the cache. If the resource exists, a <code>Path</code>
+ to the resource in the shared cache is returned. If the resource does not
+ exist, null is returned instead.
+ </p>
+ 
+ @param applicationId ApplicationId of the application using the resource
+ @param resourceKey the key (i.e. checksum) that identifies the resource
+ @return Path to the resource, or null if it does not exist]]>
+      </doc>
+    </method>
+    <method name="release"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="resourceKey" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ The method to release a resource with the <code>SharedCacheManager</code>.
+ This method is called once an application is no longer using a claimed
+ resource in the shared cache. The client uses a checksum to identify the
+ resource and an {@link ApplicationId} to identify which application is
+ releasing the resource.
+ </p>
+ 
+ <p>
+ Note: This method is an optimization and the client is not required to call
+ it for correctness.
+ </p>
+ 
+ @param applicationId ApplicationId of the application releasing the
+          resource
+ @param resourceKey the key (i.e. checksum) that identifies the resource]]>
+      </doc>
+    </method>
+    <method name="getFileChecksum" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="sourceFile" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[A convenience method to calculate the checksum of a specified file.
+ 
+ @param sourceFile A path to the input file
+ @return A hex string containing the checksum digest
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This is the client for YARN's shared cache.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.SharedCacheClient -->
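A minimal sketch of the claim/release cycle documented above; appId is assumed to be the ApplicationId of the consuming application, and the local jar path is illustrative:

  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.yarn.client.api.SharedCacheClient;
  import org.apache.hadoop.yarn.conf.YarnConfiguration;

  SharedCacheClient scClient = SharedCacheClient.createSharedCacheClient();
  scClient.init(new YarnConfiguration());
  scClient.start();

  // The checksum is the key that identifies the resource in the shared cache.
  Path localJar = new Path("/tmp/job.jar");
  String checksum = scClient.getFileChecksum(localJar);

  // Claim the resource; null means it is not cached and must be uploaded
  // through the normal LocalResource mechanism instead.
  Path cached = scClient.use(appId, checksum);
  if (cached == null) {
    // fall back to a regular upload of localJar
  }

  // Optional optimization: release the claim once the app is done with it.
  scClient.release(appId, checksum);
  scClient.stop();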
+  <!-- start class org.apache.hadoop.yarn.client.api.YarnClient -->
+  <class name="YarnClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="YarnClient" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createYarnClient" return="org.apache.hadoop.yarn.client.api.YarnClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new instance of YarnClient.]]>
+      </doc>
+    </method>
+    <method name="createApplication" return="org.apache.hadoop.yarn.client.api.YarnClientApplication"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Obtain a {@link YarnClientApplication} for a new application,
+ which in turn contains the {@link ApplicationSubmissionContext} and
+ {@link org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse}
+ objects.
+ </p>
+
+ @return {@link YarnClientApplication} built for a new application
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="submitApplication" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appContext" type="org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Submit a new application to <code>YARN</code>. It is a blocking call - it
+ will not return the {@link ApplicationId} until the application has been
+ submitted successfully and accepted by the ResourceManager.
+ </p>
+ 
+ <p>
+ Users should provide an {@link ApplicationId} as part of the parameter
+ {@link ApplicationSubmissionContext} when submitting a new application,
+ otherwise it will throw the {@link ApplicationIdNotProvidedException}.
+ </p>
+
+ <p>This internally calls {@link ApplicationClientProtocol#submitApplication
+ (SubmitApplicationRequest)}, and after that, it internally invokes
+ {@link ApplicationClientProtocol#getApplicationReport
+ (GetApplicationReportRequest)} and waits till it can make sure that the
+ application gets properly submitted. If RM fails over or RM restart
+ happens before ResourceManager saves the application's state,
+ {@link ApplicationClientProtocol
+ #getApplicationReport(GetApplicationReportRequest)} will throw
+ the {@link ApplicationNotFoundException}. This API automatically resubmits
+ the application with the same {@link ApplicationSubmissionContext} when it
+ catches the {@link ApplicationNotFoundException}</p>
+
+ @param appContext
+          {@link ApplicationSubmissionContext} containing all the details
+          needed to submit a new application
+ @return {@link ApplicationId} of the accepted application
+ @throws YarnException
+ @throws IOException
+ @see #createApplication()]]>
+      </doc>
+    </method>
+    <method name="failApplicationAttempt"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Fail an application attempt identified by the given ID.
+ </p>
+
+ @param applicationAttemptId
+          {@link ApplicationAttemptId} of the attempt to fail.
+ @throws YarnException
+           in case of errors or if YARN rejects the request due to
+           access-control restrictions.
+ @throws IOException
+ @see #getQueueAclsInfo()]]>
+      </doc>
+    </method>
+    <method name="killApplication"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Kill an application identified by the given ID.
+ </p>
+ 
+ @param applicationId
+          {@link ApplicationId} of the application that needs to be killed
+ @throws YarnException
+           in case of errors or if YARN rejects the request due to
+           access-control restrictions.
+ @throws IOException
+ @see #getQueueAclsInfo()]]>
+      </doc>
+    </method>
+    <method name="killApplication"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="diagnostics" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Kill an application identified by the given ID.
+ </p>
+ @param applicationId {@link ApplicationId} of the application that needs to
+          be killed
+ @param diagnostics for killing an application.
+ @throws YarnException in case of errors or if YARN rejects the request due
+           to access-control restrictions.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationReport" return="org.apache.hadoop.yarn.api.records.ApplicationReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given Application.
+ </p>
+ 
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+ 
+ <p>
+ If the user does not have <code>VIEW_APP</code> access then the following
+ fields in the report will be set to stubbed values:
+ <ul>
+ <li>host - set to "N/A"</li>
+ <li>RPC port - set to -1</li>
+ <li>client token - set to "N/A"</li>
+ <li>diagnostics - set to "N/A"</li>
+ <li>tracking URL - set to "N/A"</li>
+ <li>original tracking URL - set to "N/A"</li>
+ <li>resource usage report - all values are -1</li>
+ </ul>
+ 
+ @param appId
+          {@link ApplicationId} of the application that needs a report
+ @return application report
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getAMRMToken" return="org.apache.hadoop.security.token.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the AMRM token of the application.
+ <p>
+ The AMRM token is required for AM-to-RM scheduling operations. For
+ managed Application Masters, YARN takes care of injecting it. For unmanaged
+ Application Masters, the token must be obtained via this method and set
+ in the {@link org.apache.hadoop.security.UserGroupInformation} of the
+ current user.
+ <p>
+ The AMRM token will be returned only if all the following conditions are
+ met:
+ <ul>
+   <li>the requester is the owner of the ApplicationMaster</li>
+   <li>the application master is an unmanaged ApplicationMaster</li>
+   <li>the application master is in ACCEPTED state</li>
+ </ul>
+ Otherwise, this method returns null.
+
+ @param appId {@link ApplicationId} of the application to get the AMRM token
+ @return the AMRM token if available
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
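For the unmanaged-AM case described above, a sketch of obtaining the token and attaching it to the current user's credentials; yarnClient and appId are assumed to exist already, and the call returns null unless the conditions listed in the Javadoc are met:

  import org.apache.hadoop.security.UserGroupInformation;
  import org.apache.hadoop.security.token.Token;
  import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;

  Token<AMRMTokenIdentifier> amrmToken = yarnClient.getAMRMToken(appId);
  if (amrmToken != null) {
    // Make the token visible to the RPC layer for AM-RM scheduling calls.
    UserGroupInformation.getCurrentUser().addToken(amrmToken);
  }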
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of all Applications in the cluster.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @return a list of reports of all running applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationTypes" type="java.util.Set"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of Applications
+ matching the given application types in the cluster.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @param applicationTypes set of application types you are interested in
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationStates" type="java.util.EnumSet"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of Applications matching the given
+ application states in the cluster.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @param applicationStates set of application states you are interested in
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationTypes" type="java.util.Set"/>
+      <param name="applicationStates" type="java.util.EnumSet"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of Applications matching the given
+ application types and application states in the cluster.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @param applicationTypes set of application types you are interested in
+ @param applicationStates set of application states you are interested in
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queues" type="java.util.Set"/>
+      <param name="users" type="java.util.Set"/>
+      <param name="applicationTypes" type="java.util.Set"/>
+      <param name="applicationStates" type="java.util.EnumSet"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of Applications matching the given users,
+ queues, application types and application states in the cluster. If any of
+ the params is set to null, it is not used when filtering.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @param queues set of queues you are interested in
+ @param users set of users you are interested in
+ @param applicationTypes set of application types you are interested in
+ @param applicationStates set of application states you are interested in
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getYarnClusterMetrics" return="org.apache.hadoop.yarn.api.records.YarnClusterMetrics"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get metrics ({@link YarnClusterMetrics}) about the cluster.
+ </p>
+ 
+ @return cluster metrics
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getNodeReports" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="states" type="org.apache.hadoop.yarn.api.records.NodeState[]"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of nodes ({@link NodeReport}) in the cluster.
+ </p>
+ 
+ @param states The {@link NodeState}s to filter on. If no filter states are
+          given, nodes in all states will be returned.
+ @return A list of node reports
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getRMDelegationToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="renewer" type="org.apache.hadoop.io.Text"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a delegation token so as to be able to talk to YARN using that token.
+ 
+ @param renewer
+          Address of the renewer who can renew these tokens when needed by
+          securely talking to YARN.
+ @return a delegation token ({@link Token}) that can be used to
+         talk to YARN
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getQueueInfo" return="org.apache.hadoop.yarn.api.records.QueueInfo"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queueName" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information ({@link QueueInfo}) about a given <em>queue</em>.
+ </p>
+ 
+ @param queueName
+          Name of the queue whose information is needed
+ @return queue information
+ @throws YarnException
+           in case of errors or if YARN rejects the request due to
+           access-control restrictions.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getAllQueues" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information ({@link QueueInfo}) about all queues, recursively if there
+ is a hierarchy.
+ </p>
+ 
+ @return a list of queue-information for all queues
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getRootQueueInfos" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information ({@link QueueInfo}) about top level queues.
+ </p>
+ 
+ @return a list of queue-information for all the top-level queues
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getChildQueueInfos" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="parent" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information ({@link QueueInfo}) about all the immediate child queues
+ of the given queue.
+ </p>
+ 
+ @param parent
+          Name of the queue whose child-queues' information is needed
+ @return a list of queue-information for all queues who are direct children
+         of the given parent queue.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getQueueAclsInfo" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information about <em>acls</em> for <em>current user</em> on all the
+ existing queues.
+ </p>
+ 
+ @return a list of queue acls ({@link QueueUserACLInfo}) for
+         <em>current user</em>
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttemptReport" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given ApplicationAttempt.
+ </p>
+ 
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+ 
+ @param applicationAttemptId
+          {@link ApplicationAttemptId} of the application attempt that needs
+          a report
+ @return application attempt report
+ @throws YarnException
+ @throws ApplicationAttemptNotFoundException if application attempt
+         not found
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttempts" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of all ApplicationAttempts of an Application in the cluster.
+ </p>
+ 
+ @param applicationId application id of the app
+ @return a list of reports for all application attempts for specified
+         application.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainerReport" return="org.apache.hadoop.yarn.api.records.ContainerReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given Container.
+ </p>
+ 
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+ 
+ @param containerId
+          {@link ContainerId} of the container that needs a report
+ @return container report
+ @throws YarnException
+ @throws ContainerNotFoundException if container not found.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainers" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of all Containers of an ApplicationAttempt in the cluster.
+ </p>
+ 
+ @param applicationAttemptId application attempt id
+ @return a list of reports of all containers for specified application
+         attempts
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="moveApplicationAcrossQueues"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="queue" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Attempts to move the given application to the given queue.
+ </p>
+ 
+ @param appId
+    Application to move.
+ @param queue
+    Queue to place it in.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="createReservation" return="org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Obtain a {@link GetNewReservationResponse} for a new reservation,
+ which contains the {@link ReservationId} object.
+ </p>
+
+ @return The {@link GetNewReservationResponse} containing a new
+         {@link ReservationId} object.
+ @throws YarnException if reservation cannot be created.
+ @throws IOException if reservation cannot be created.]]>
+      </doc>
+    </method>
+    <method name="submitReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to submit a new reservation to the
+ {@code ResourceManager}.
+ </p>
+ 
+ <p>
+ The client packages all details of its request in a
+ {@link ReservationSubmissionRequest} object. This contains information
+ about the amount of capacity, temporal constraints, and gang needs.
+ Furthermore, the reservation might be composed of multiple stages, with
+ ordering dependencies among them.
+ </p>
+ 
+ <p>
+ In order to respond, a new admission control component in the
+ {@code ResourceManager} performs an analysis of the resources that have
+ been committed over the period of time the user is requesting, verifies
+ that the user's request can be fulfilled, and that it respects a sharing
+ policy (e.g., {@code CapacityOverTimePolicy}). Once it has positively
+ determined that the ReservationRequest is satisfiable, the
+ {@code ResourceManager} answers with a {@link ReservationSubmissionResponse}
+ that includes a {@link ReservationId}. Upon failure to find a valid
+ allocation, the response is an exception detailing the reason for failure.
+ </p>
+ 
+ <p>
+ The semantics guarantee that the {@link ReservationId} returned
+ corresponds to a valid reservation existing in the time-range requested by
+ the user. The amount of capacity dedicated to such a reservation can vary
+ over time, depending on the allocation that has been determined, but it is
+ guaranteed to satisfy all the constraints expressed by the user in the
+ {@link ReservationDefinition}.
+ </p>
+ 
+ @param request request to submit a new Reservation
+ @return response contains the {@link ReservationId} on accepting the
+         submission
+ @throws YarnException if the reservation cannot be created successfully
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="updateReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to update an existing Reservation. This is
+ referred to as a re-negotiation process, in which a user updates a
+ previously submitted Reservation.
+ </p>
+ 
+ <p>
+ The allocation is attempted by virtually substituting all previous
+ allocations related to this Reservation with new ones that satisfy the new
+ {@link ReservationDefinition}. Upon success, the previous allocation is
+ atomically substituted by the new one, and on failure (i.e., if the system
+ cannot find a valid allocation for the updated request), the previous
+ allocation remains valid.
+ </p>
+ 
+ @param request to update an existing Reservation (the
+          {@link ReservationUpdateRequest} should refer to an existing valid
+          {@link ReservationId})
+ @return response empty on successfully updating the existing reservation
+ @throws YarnException if the request is invalid or reservation cannot be
+           updated successfully
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="deleteReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to remove an existing Reservation.
+ </p>
+ 
+ @param request to remove an existing Reservation (the
+          {@link ReservationDeleteRequest} should refer to an existing valid
+          {@link ReservationId})
+ @return response empty on successfully deleting the existing reservation
+ @throws YarnException if the request is invalid or reservation cannot be
+           deleted successfully
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="listReservations" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationListResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get the list of reservations in a plan.
+ The reservationId will be used to search for reservations to list if it is
+ provided. Otherwise, it will select active reservations within the
+ startTime and endTime (inclusive).
+ </p>
+
+ @param request to list reservations in a plan. Contains fields to select
+                String queue, ReservationId reservationId, long startTime,
+                long endTime, and a bool includeReservationAllocations.
+
+                queue: Required. Cannot be null or empty. Refers to the
+                reservable queue in the scheduler that was selected when
+                creating a reservation submission
+                {@link ReservationSubmissionRequest}.
+
+                reservationId: Optional. If provided, other fields will
+                be ignored.
+
+                startTime: Optional. If provided, only reservations that
+                end after the startTime will be selected. This defaults
+                to 0 if an invalid number is used.
+
+                endTime: Optional. If provided, only reservations that
+                start on or before endTime will be selected. This defaults
+                to Long.MAX_VALUE if an invalid number is used.
+
+                includeReservationAllocations: Optional. Flag that
+                determines whether the entire reservation allocations are
+                to be returned. Reservation allocations are subject to
+                change in the event of re-planning as described by
+                {@link ReservationDefinition}.
+
+ @return response that contains information about reservations that are
+                being searched for.
+ @throws YarnException if the request is invalid
+ @throws IOException if the request failed otherwise]]>
+      </doc>
+    </method>
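A sketch tying the reservation calls above together: create an id, submit, list, and delete. The definition variable (a ReservationDefinition describing the capacity and time window) and the queue name "reservable" are assumptions, and the newInstance factory overloads shown are believed current but should be checked against the release in use:

  import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse;
  import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest;
  import org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest;
  import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest;
  import org.apache.hadoop.yarn.api.records.ReservationId;

  GetNewReservationResponse newReservation = yarnClient.createReservation();
  ReservationId reservationId = newReservation.getReservationId();

  // Submit the reservation against a reservable queue.
  yarnClient.submitReservation(ReservationSubmissionRequest.newInstance(
      definition, "reservable", reservationId));

  // List it back; other filter fields are ignored when an id is provided.
  yarnClient.listReservations(ReservationListRequest.newInstance(
      "reservable", reservationId.toString()));

  // Remove the reservation when it is no longer needed.
  yarnClient.deleteReservation(
      ReservationDeleteRequest.newInstance(reservationId));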
+    <method name="getNodeToLabels" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get node-to-labels mappings in an existing cluster.
+ </p>
+ 
+ @return node to labels mappings
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getLabelsToNodes" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get the labels-to-nodes mapping
+ in an existing cluster.
+ </p>
+
+ @return labels to nodes mappings
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getLabelsToNodes" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="labels" type="java.util.Set"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get the labels-to-nodes mapping
+ for the specified labels in an existing cluster.
+ </p>
+
+ @param labels labels for which labels to nodes mapping has to be retrieved
+ @return labels to nodes mappings for specific labels
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getClusterNodeLabels" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get node labels in the cluster.
+ </p>
+
+ @return cluster node labels collection
+ @throws YarnException when there is a failure in
+           {@link ApplicationClientProtocol}
+ @throws IOException when there is a failure in
+           {@link ApplicationClientProtocol}]]>
+      </doc>
+    </method>
+    <method name="updateApplicationPriority" return="org.apache.hadoop.yarn.api.records.Priority"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to set the priority of an application.
+ </p>
+ @param applicationId {@link ApplicationId} of the application
+ @param priority updated {@link Priority} of the application
+ @return updated priority of an application.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="signalToContainer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="command" type="org.apache.hadoop.yarn.api.records.SignalContainerCommand"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Signal a container identified by the given ID.
+ </p>
+
+ @param containerId
+          {@link ContainerId} of the container that needs to be signaled
+ @param command the signal container command
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.YarnClient -->
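A minimal end-to-end sketch of the YarnClient lifecycle documented above; the application name is illustrative, and the AM launch details (ContainerLaunchContext, resource ask, queue) are elided:

  import org.apache.hadoop.yarn.api.records.ApplicationId;
  import org.apache.hadoop.yarn.api.records.ApplicationReport;
  import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
  import org.apache.hadoop.yarn.client.api.YarnClient;
  import org.apache.hadoop.yarn.client.api.YarnClientApplication;
  import org.apache.hadoop.yarn.conf.YarnConfiguration;

  YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(new YarnConfiguration());
  yarnClient.start();

  // Ask the RM for a new application; the submission context already
  // carries the new ApplicationId.
  YarnClientApplication app = yarnClient.createApplication();
  ApplicationSubmissionContext appContext =
      app.getApplicationSubmissionContext();
  appContext.setApplicationName("example-app");
  // ... set the AM ContainerLaunchContext, resource ask, queue, etc. ...

  // Blocking call: returns once the RM has accepted the submission.
  ApplicationId appId = yarnClient.submitApplication(appContext);

  // Fields the caller lacks VIEW_APP access to come back stubbed.
  ApplicationReport report = yarnClient.getApplicationReport(appId);
  System.out.println(report.getYarnApplicationState());

  yarnClient.stop();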
+  <!-- start class org.apache.hadoop.yarn.client.api.YarnClientApplication -->
+  <class name="YarnClientApplication" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="YarnClientApplication" type="org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse, org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getNewApplicationResponse" return="org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getApplicationSubmissionContext" return="org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.YarnClientApplication -->
+</package>
+<package name="org.apache.hadoop.yarn.client.api.async">
+  <!-- start class org.apache.hadoop.yarn.client.api.async.AMRMClientAsync -->
+  <class name="AMRMClientAsync" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AMRMClientAsync" type="int, org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.AbstractCallbackHandler"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AMRMClientAsync" type="org.apache.hadoop.yarn.client.api.AMRMClient, int, org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.AbstractCallbackHandler"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AMRMClientAsync" type="int, org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AMRMClientAsync" type="org.apache.hadoop.yarn.client.api.AMRMClient, int, org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createAMRMClientAsync" return="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="intervalMs" type="int"/>
+      <param name="callbackHandler" type="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.AbstractCallbackHandler"/>
+      <doc>
+      <![CDATA[<p>Create a new instance of AMRMClientAsync.</p>
+
+ @param intervalMs heartbeat interval in milliseconds between AM and RM
+ @param callbackHandler callback handler that processes responses from
+                        the <code>ResourceManager</code>]]>
+      </doc>
+    </method>
+    <method name="createAMRMClientAsync" return="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="client" type="org.apache.hadoop.yarn.client.api.AMRMClient"/>
+      <param name="intervalMs" type="int"/>
+      <param name="callbackHandler" type="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.AbstractCallbackHandler"/>
+      <doc>
+      <![CDATA[<p>Create a new instance of AMRMClientAsync.</p>
+
+ @param client the AMRMClient instance
+ @param intervalMs heartbeat interval in milliseconds between AM and RM
+ @param callbackHandler callback handler that processes responses from
+                        the <code>ResourceManager</code>]]>
+      </doc>
+    </method>
+    <method name="createAMRMClientAsync" return="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use {@link #createAMRMClientAsync(int,
+             AMRMClientAsync.AbstractCallbackHandler)} instead.">
+      <param name="intervalMs" type="int"/>
+      <param name="callbackHandler" type="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler"/>
+      <doc>
+      <![CDATA[@deprecated Use {@link #createAMRMClientAsync(int,
+             AMRMClientAsync.AbstractCallbackHandler)} instead.]]>
+      </doc>
+    </method>
+    <method name="createAMRMClientAsync" return="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use {@link #createAMRMClientAsync(AMRMClient,
+             int, AMRMClientAsync.AbstractCallbackHandler)} instead.">
+      <param name="client" type="org.apache.hadoop.yarn.client.api.AMRMClient"/>
+      <param name="intervalMs" type="int"/>
+      <param name="callbackHandler" type="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler"/>
+      <doc>
+      <![CDATA[@deprecated Use {@link #createAMRMClientAsync(AMRMClient,
+             int, AMRMClientAsync.AbstractCallbackHandler)} instead.]]>
+      </doc>
+    </method>
+    <method name="setHeartbeatInterval"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="interval" type="int"/>
+    </method>
+    <method name="getMatchingRequests" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="resourceName" type="java.lang.String"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+    </method>
+    <method name="registerApplicationMaster" return="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appHostName" type="java.lang.String"/>
+      <param name="appHostPort" type="int"/>
+      <param name="appTrackingUrl" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Registers this application master with the resource manager. On successful
+ registration, starts the heartbeating thread.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="unregisterApplicationMaster"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appStatus" type="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"/>
+      <param name="appMessage" type="java.lang.String"/>
+      <param name="appTrackingUrl" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Unregister the application master. This must be called in the end.
+ @param appStatus Success/Failure status of the master
+ @param appMessage Diagnostics message on failure
+ @param appTrackingUrl New URL to get master info
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="addContainerRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="T"/>
+      <doc>

<TRUNCATED>
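
The AMRMClientAsync records above (createAMRMClientAsync, registerApplicationMaster, unregisterApplicationMaster, addContainerRequest) describe the asynchronous AM-to-RM client lifecycle. The following minimal sketch shows how an application master typically drives that lifecycle; it is not part of any patch in this digest, the class and method names and the 1000 ms interval are illustrative, and the callback handler is assumed to be supplied by the application.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;

public class AmLifecycleSketch {
  /**
   * Minimal AM driver; 'handler' is an application-specific
   * AbstractCallbackHandler that receives allocations asynchronously.
   */
  public static void runApplicationMaster(Configuration conf,
      AMRMClientAsync.AbstractCallbackHandler handler) throws Exception {
    // Heartbeat to the ResourceManager every 1000 ms; responses are
    // dispatched to the handler on a separate thread.
    AMRMClientAsync<ContainerRequest> client =
        AMRMClientAsync.createAMRMClientAsync(1000, handler);
    client.init(conf);
    client.start();
    // Successful registration starts the heartbeating thread
    // (see registerApplicationMaster above).
    client.registerApplicationMaster("am-host.example.com", 0, "");
    try {
      // ... ask for containers via client.addContainerRequest(...) and
      // run the application's own logic here ...
    } finally {
      // "This must be called in the end" (see unregisterApplicationMaster).
      client.unregisterApplicationMaster(
          FinalApplicationStatus.SUCCEEDED, "done", null);
      client.stop();
    }
  }
}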


[08/49] hadoop git commit: Add 2.8.3 release jdiff files.

Posted by as...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f8caf5/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.8.3.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.8.3.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.8.3.xml
new file mode 100644
index 0000000..331dd1e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.8.3.xml
@@ -0,0 +1,312 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Tue Dec 05 05:19:32 UTC 2017 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop HDFS 2.8.3"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -verbose -classpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/classes:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.8.3.jar:/usr/lib/jvm/java-7-openjdk-amd64/lib/tools.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.8.3.jar:/maven/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/com/nimbusds/nimbus-jose-jwt/3.9/nimbus-jose-jwt-3.9.jar:/maven/net/jcip/jcip-annotations/1.0/jcip-annotations-1.0.jar:/maven/net/minidev/json-smart/1.1.1/json-smart-1.1.1.jar:/maven/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/maven/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/maven/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/maven/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/maven/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/maven/jline/jline/0.9.94/jline-0.9.94.jar:/maven/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-2.8.3.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/commons-net/commons-net/3.1/commons-net-3.1.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/org/mortbay/jetty/jetty-sslengine/6.1.26/jetty-sslengine-6.1.26.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/maven/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/maven/javax/activation/activation/1.1/activation-1.1.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/maven/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/maven/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/maven/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/maven/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/maven/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/maven/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/maven/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-2.8.3.jar:/maven/com/squareup/okhttp/okhttp/2.4.0/okhttp-2.4.0.jar:/maven/com/squareup/okio/okio/1.4.0/okio-1.4.0.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/maven/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/maven/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/maven/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/maven/asm/asm/3.2/asm-3.2.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/commons-daemon/commons-daemon/1.0.13/commons-daemon-1.0.13.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/maven/org/apache/directory/api/api-i18n/1.0.0-M20/api-i18n-1.0.0-M20.jar:/maven/org/apache/directory/api/api-ldap-model/1.0.0-M20/api-ldap-model-1.0.0-M20.jar:/maven/org/apache/mina/mina-core/2.0.0-M5/mina-core-2.0.0-M5.jar:/maven/net/sf/ehcache/ehcache-core/2.4.4/ehcache-core-2.4.4.jar:/maven/antlr/antlr/2.7.7/antlr-2.7.7.jar:/maven/org/apache/directory/api/api-asn1-ber/1.0.0-M20/api-asn1-ber-1.0.0-M20.jar:/maven/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/maven/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/maven/io/netty/netty-all/4.0.23.Final/netty-all-4.0.23.Final.jar:/maven/xerces/xercesImpl/2.9.1/xercesImpl-2.9.1.jar:/maven/xml-apis/xml-apis/1.3.04/xml-apis-1.3.04.jar:/maven/org/apache/htrace/htrace-core4/4.0.1-incubating/htrace-core4-4.0.1-incubating.jar:/maven/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar -sourcepath /build/source/hadoop-hdfs-project/hadoop-hdfs/src/main/java -apidir /build/source/hadoop-hdfs-project/hadoop-hdfs/target/site/jdiff/xml -apiname Apache Hadoop HDFS 2.8.3 -->
+<package name="org.apache.hadoop.hdfs">
+  <doc>
+  <![CDATA[<p>A distributed implementation of {@link
+org.apache.hadoop.fs.FileSystem}.  This is loosely modelled after
+Google's <a href="http://research.google.com/archive/gfs.html">GFS</a>.</p>
+
+<p>The most important difference is that unlike GFS, Hadoop DFS files 
+have strictly one writer at any one time.  Bytes are always appended 
+to the end of the writer's stream.  There is no notion of "record appends"
+or "mutations" that are then checked or reordered.  Writers simply emit 
+a byte stream.  That byte stream is guaranteed to be stored in the 
+order written.</p>]]>
+  </doc>
+</package>
+<package name="org.apache.hadoop.hdfs.net">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol.datatransfer">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol.datatransfer.sasl">
+</package>
+<package name="org.apache.hadoop.hdfs.protocolPB">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.client">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.protocolPB">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.server">
+  <!-- start interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
+  <interface name="JournalNodeMXBean"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getJournalsStatus" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get status information (e.g., whether formatted) of JournalNode's journals.
+ 
+ @return A string presenting status for each journal]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This is the JMX management interface for JournalNode information]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.block">
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.delegation">
+</package>
+<package name="org.apache.hadoop.hdfs.server.balancer">
+</package>
+<package name="org.apache.hadoop.hdfs.server.blockmanagement">
+</package>
+<package name="org.apache.hadoop.hdfs.server.common">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset.impl">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.web">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.web.webhdfs">
+</package>
+<package name="org.apache.hadoop.hdfs.server.mover">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode">
+  <!-- start interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
+  <interface name="AuditLogger"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="initialize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Called during initialization of the logger.
+
+ @param conf The configuration object.]]>
+      </doc>
+    </method>
+    <method name="logAuditEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <doc>
+      <![CDATA[Called to log an audit event.
+ <p>
+ This method must return as quickly as possible, since it's called
+ in a critical section of the NameNode's operation.
+
+ @param succeeded Whether authorization succeeded.
+ @param userName Name of the user executing the request.
+ @param addr Remote address of the request.
+ @param cmd The requested command.
+ @param src Path of affected source file.
+ @param dst Path of affected destination file (if any).
+ @param stat File information for operations that change the file's
+             metadata (permissions, owner, times, etc).]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Interface defining an audit logger.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
+  <!-- start class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
+  <class name="HdfsAuditLogger" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.hdfs.server.namenode.AuditLogger"/>
+    <constructor name="HdfsAuditLogger"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="logAuditEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="status" type="org.apache.hadoop.fs.FileStatus"/>
+    </method>
+    <method name="logAuditEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="callerContext" type="org.apache.hadoop.ipc.CallerContext"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
+      <doc>
+      <![CDATA[Same as
+ {@link #logAuditEvent(boolean, String, InetAddress, String, String, String,
+ FileStatus)} with additional parameters related to logging delegation token
+ tracking IDs.
+ 
+ @param succeeded Whether authorization succeeded.
+ @param userName Name of the user executing the request.
+ @param addr Remote address of the request.
+ @param cmd The requested command.
+ @param src Path of affected source file.
+ @param dst Path of affected destination file (if any).
+ @param stat File information for operations that change the file's metadata
+          (permissions, owner, times, etc).
+ @param callerContext Context information of the caller
+ @param ugi UserGroupInformation of the current user, or null if not logging
+          token tracking information
+ @param dtSecretManager The token secret manager, or null if not logging
+          token tracking information]]>
+      </doc>
+    </method>
+    <method name="logAuditEvent"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
+      <doc>
+      <![CDATA[Same as
+ {@link #logAuditEvent(boolean, String, InetAddress, String, String,
+ String, FileStatus, CallerContext, UserGroupInformation,
+ DelegationTokenSecretManager)} without {@link CallerContext} information.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Extension of {@link AuditLogger}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
+  <!-- start class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
+  <class name="INodeAttributeProvider" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="INodeAttributeProvider"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="start"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Initialize the provider. This method is called at NameNode startup
+ time.]]>
+      </doc>
+    </method>
+    <method name="stop"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Shutdown the provider. This method is called at NameNode shutdown time.]]>
+      </doc>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fullPath" type="java.lang.String"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pathElements" type="java.lang.String[]"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="components" type="byte[][]"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getExternalAccessControlEnforcer" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="defaultEnforcer" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"/>
+      <doc>
+      <![CDATA[Can be over-ridden by implementations to provide a custom Access Control
+ Enforcer that can provide an alternate implementation of the
+ default permission checking logic.
+ @param defaultEnforcer The Default AccessControlEnforcer
+ @return The AccessControlEnforcer to use]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.ha">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.snapshot">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top.window">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.web.resources">
+</package>
+<package name="org.apache.hadoop.hdfs.server.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.tools">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.offlineEditsViewer">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.offlineImageViewer">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.snapshot">
+</package>
+<package name="org.apache.hadoop.hdfs.util">
+</package>
+<package name="org.apache.hadoop.hdfs.web">
+</package>
+<package name="org.apache.hadoop.hdfs.web.resources">
+</package>
+
+</api>
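
A note on the AuditLogger/HdfsAuditLogger records captured above: these are the NameNode's pluggable audit hooks. The sketch below is a minimal illustrative implementation only; the class name and output format are invented, and such loggers are typically wired in through the dfs.namenode.audit.loggers configuration key.

import java.net.InetAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hdfs.server.namenode.AuditLogger;

/** Illustrative audit logger. Per the interface doc above, logAuditEvent
 *  runs in a critical section of the NameNode, so it must stay cheap. */
public class StdoutAuditLogger implements AuditLogger {
  @Override
  public void initialize(Configuration conf) {
    // Pick up any custom configuration keys here.
  }

  @Override
  public void logAuditEvent(boolean succeeded, String userName,
      InetAddress addr, String cmd, String src, String dst, FileStatus stat) {
    System.out.println((succeeded ? "allowed" : "denied") + " user=" + userName
        + " addr=" + addr + " cmd=" + cmd + " src=" + src + " dst=" + dst);
  }
}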

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f8caf5/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_2.8.3.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_2.8.3.xml b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_2.8.3.xml
new file mode 100644
index 0000000..b3d52bf
--- /dev/null
+++ b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_2.8.3.xml
@@ -0,0 +1,113 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Tue Dec 05 05:51:53 UTC 2017 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop MapReduce Common 2.8.3"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/hadoop-annotations.jar:/build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/jdiff.jar -verbose -classpath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/classes:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-2.8.3.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.8.3.jar:/maven/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/maven/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/maven/javax/activation/activation/1.1/activation-1.1.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar:/maven/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/maven/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/maven/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/maven/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/com/google/inject/guice/3.0/guice-3.0.jar:/maven/javax/inject/javax.inject/1/javax.inject-1.jar:/maven/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/maven/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/maven/asm/asm/3.2/asm-3.2.jar:/maven/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/hadoop-yarn-client-2.8.3.jar:/build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/hadoop-mapreduce-client-core-2.8.3.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/hadoop-yarn-server-common-2.8.3.jar:/maven/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/maven/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-2.8.3.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/commons-net/commons-net/3.1/commons-net-3.1.jar:/maven/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/maven/org/mortbay/jetty/jetty-sslengine/6.1.26/jetty-sslengine-6.1.26.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/maven/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/maven/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/maven/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/maven/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/maven/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.8.3.jar:/maven/com/nimbusds/nimbus-jose-jwt/3.9/nimbus-jose-jwt-3.9.jar:/maven/net/jcip/jcip-annotations/1.0/jcip-annotations-1.0.jar:/maven/net/minidev/json-smart/1.1.1/json-smart-1.1.1.jar:/maven/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/maven/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/maven/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/maven/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/maven/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/maven/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/htrace/htrace-core4/4.0.1-incubating/htrace-core4-4.0.1-incubating.jar:/maven/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/maven/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.8.3.jar:/usr/lib/jvm/java-7-openjdk-amd64/lib/tools.jar:/maven/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/maven/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar -sourcepath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java -apidir /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/site/jdiff/xml -apiname Apache Hadoop MapReduce Common 2.8.3 -->
+<package name="org.apache.hadoop.mapred">
+</package>
+<package name="org.apache.hadoop.mapreduce">
+</package>
+<package name="org.apache.hadoop.mapreduce.v2.api.protocolrecords">
+  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenRequest -->
+  <interface name="CancelDelegationTokenRequest"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getDelegationToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setDelegationToken"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dToken" type="org.apache.hadoop.yarn.api.records.Token"/>
+    </method>
+    <doc>
+    <![CDATA[The request issued by the client to the {@code ResourceManager} to cancel a
+ delegation token.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenRequest -->
+  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenResponse -->
+  <interface name="CancelDelegationTokenResponse"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <doc>
+    <![CDATA[The response from the {@code ResourceManager} to a cancelDelegationToken
+ request.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenResponse -->
+  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest -->
+  <interface name="GetDelegationTokenRequest"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getRenewer" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setRenewer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="renewer" type="java.lang.String"/>
+    </method>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest -->
+  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenRequest -->
+  <interface name="RenewDelegationTokenRequest"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getDelegationToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setDelegationToken"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dToken" type="org.apache.hadoop.yarn.api.records.Token"/>
+    </method>
+    <doc>
+    <![CDATA[The request issued by the client to renew a delegation token from
+ the {@code ResourceManager}.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenRequest -->
+  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenResponse -->
+  <interface name="RenewDelegationTokenResponse"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getNextExpirationTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setNextExpirationTime"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="expTime" type="long"/>
+    </method>
+    <doc>
+    <![CDATA[The response to a renewDelegationToken call to the {@code ResourceManager}.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenResponse -->
+</package>
+<package name="org.apache.hadoop.mapreduce.v2.security">
+</package>
+<package name="org.apache.hadoop.yarn.proto">
+</package>
+
+</api>
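
The delegation-token records above are plain request/response beans; concrete instances are normally created through the YARN records factory. A small hedged sketch follows: the helper class is invented, and the MRClientProtocol proxy argument is an assumption rather than something defined in this file.

import java.io.IOException;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenRequest;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.util.Records;

final class TokenRenewHelper {
  private TokenRenewHelper() { }

  /** Renew a previously issued delegation token, returning the next expiry. */
  static long renew(MRClientProtocol proxy, Token token) throws IOException {
    RenewDelegationTokenRequest req =
        Records.newRecord(RenewDelegationTokenRequest.class);
    req.setDelegationToken(token);
    return proxy.renewDelegationToken(req).getNextExpirationTime();
  }
}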



[41/49] hadoop git commit: YARN-7522. Introduce AllocationTagsManager to associate allocation tags to nodes. (Wangda Tan via asuresh)

Posted by as...@apache.org.
YARN-7522. Introduce AllocationTagsManager to associate allocation tags to nodes. (Wangda Tan via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ff35b7a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ff35b7a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ff35b7a

Branch: refs/heads/YARN-6592
Commit: 1ff35b7ad616cf6e0c4645e020bd4a85022dc16b
Parents: 1866f28
Author: Arun Suresh <as...@apache.org>
Authored: Fri Dec 8 00:24:00 2017 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Fri Dec 29 17:29:15 2017 -0800

----------------------------------------------------------------------
 .../resourcemanager/RMActiveServiceContext.java |  15 +
 .../yarn/server/resourcemanager/RMContext.java  |   5 +
 .../server/resourcemanager/RMContextImpl.java   |  12 +
 .../server/resourcemanager/ResourceManager.java |   9 +
 .../constraint/AllocationTagsManager.java       | 431 +++++++++++++++++++
 .../constraint/AllocationTagsNamespaces.java    |  31 ++
 .../InvalidAllocationTagsQueryException.java    |  35 ++
 .../rmcontainer/RMContainer.java                |   8 +
 .../rmcontainer/RMContainerImpl.java            |  21 +
 .../constraint/TestAllocationTagsManager.java   | 328 ++++++++++++++
 .../rmcontainer/TestRMContainerImpl.java        | 124 ++++++
 .../scheduler/capacity/TestUtils.java           |   9 +
 .../scheduler/fifo/TestFifoScheduler.java       |   5 +
 13 files changed, 1033 insertions(+)
----------------------------------------------------------------------
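
Before the diffs, the new manager is easiest to read from the caller's side. Below is a minimal sketch (not part of the patch) that uses only the public methods added here; the node, application, and container ids are made-up values.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
import org.apache.hadoop.yarn.server.resourcemanager.constraint.InvalidAllocationTagsQueryException;

public class AllocationTagsSketch {
  public static void main(String[] args)
      throws InvalidAllocationTagsQueryException {
    AllocationTagsManager atm = new AllocationTagsManager();

    ApplicationId app = ApplicationId.newInstance(1234L, 1);
    ContainerId container = ContainerId.newContainerId(
        ApplicationAttemptId.newInstance(app, 1), 1L);
    NodeId node = NodeId.newInstance("host1.example.com", 8041);
    Set<String> tags = new HashSet<>(Arrays.asList("hbase", "master"));

    // A container tagged {hbase, master} lands on the node; the manager
    // also records an implicit application-id tag for it.
    atm.addContainer(node, app, container, tags);

    // Per-application cardinality of "hbase" on this node -> 1.
    System.out.println(atm.getNodeCardinality(node, app, "hbase"));

    // A null ApplicationId queries the global, cross-application mapping.
    System.out.println(atm.getNodeCardinality(node, null, "hbase"));

    // Cleanup when the container finishes.
    atm.removeContainer(node, app, container, tags);
  }
}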


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ff35b7a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
index 9dc5945..6ee3a4c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMDelegatedNodeLabelsUpdater;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager;
+import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.NullRMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem;
@@ -107,6 +108,7 @@ public class RMActiveServiceContext {
 
   private RMAppLifetimeMonitor rmAppLifetimeMonitor;
   private QueueLimitCalculator queueLimitCalculator;
+  private AllocationTagsManager allocationTagsManager;
 
   public RMActiveServiceContext() {
     queuePlacementManager = new PlacementManager();
@@ -398,6 +400,19 @@ public class RMActiveServiceContext {
 
   @Private
   @Unstable
+  public AllocationTagsManager getAllocationTagsManager() {
+    return allocationTagsManager;
+  }
+
+  @Private
+  @Unstable
+  public void setAllocationTagsManager(
+      AllocationTagsManager allocationTagsManager) {
+    this.allocationTagsManager = allocationTagsManager;
+  }
+
+  @Private
+  @Unstable
   public RMDelegatedNodeLabelsUpdater getRMDelegatedNodeLabelsUpdater() {
     return rmDelegatedNodeLabelsUpdater;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ff35b7a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
index ec94030..62899d9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWri
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMDelegatedNodeLabelsUpdater;
+import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem;
@@ -166,4 +167,8 @@ public interface RMContext extends ApplicationMasterServiceContext {
   void setResourceProfilesManager(ResourceProfilesManager mgr);
 
   String getAppProxyUrl(Configuration conf, ApplicationId applicationId);
+
+  AllocationTagsManager getAllocationTagsManager();
+
+  void setAllocationTagsManager(AllocationTagsManager allocationTagsManager);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ff35b7a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
index 80a9109..315fdc1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWri
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMDelegatedNodeLabelsUpdater;
+import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem;
@@ -504,6 +505,17 @@ public class RMContextImpl implements RMContext {
   }
 
   @Override
+  public AllocationTagsManager getAllocationTagsManager() {
+    return activeServiceContext.getAllocationTagsManager();
+  }
+
+  @Override
+  public void setAllocationTagsManager(
+      AllocationTagsManager allocationTagsManager) {
+    activeServiceContext.setAllocationTagsManager(allocationTagsManager);
+  }
+
+  @Override
   public RMDelegatedNodeLabelsUpdater getRMDelegatedNodeLabelsUpdater() {
     return activeServiceContext.getRMDelegatedNodeLabelsUpdater();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ff35b7a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index a0317f6..8d1000e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -70,6 +70,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.metrics.TimelineServiceV1Pu
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.TimelineServiceV2Publisher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMDelegatedNodeLabelsUpdater;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.NullRMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState;
@@ -490,6 +491,10 @@ public class ResourceManager extends CompositeService implements Recoverable {
       throws InstantiationException, IllegalAccessException {
     return new RMNodeLabelsManager();
   }
+
+  protected AllocationTagsManager createAllocationTagsManager() {
+    return new AllocationTagsManager();
+  }
   
   protected DelegationTokenRenewer createDelegationTokenRenewer() {
     return new DelegationTokenRenewer();
@@ -609,6 +614,10 @@ public class ResourceManager extends CompositeService implements Recoverable {
       addService(nlm);
       rmContext.setNodeLabelManager(nlm);
 
+      AllocationTagsManager allocationTagsManager =
+          createAllocationTagsManager();
+      rmContext.setAllocationTagsManager(allocationTagsManager);
+
       RMDelegatedNodeLabelsUpdater delegatedNodeLabelsUpdater =
           createRMDelegatedNodeLabelsUpdater();
       if (delegatedNodeLabelsUpdater != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ff35b7a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsManager.java
new file mode 100644
index 0000000..b67fab9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsManager.java
@@ -0,0 +1,431 @@
+/*
+ * *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ * /
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.constraint;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.log4j.Logger;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.function.LongBinaryOperator;
+
+/**
+ * Support storing maps between container-tags/applications and
+ * nodes. This will be required by affinity/anti-affinity implementation and
+ * cardinality.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class AllocationTagsManager {
+
+  private static final Logger LOG = Logger.getLogger(
+      AllocationTagsManager.class);
+
+  private ReentrantReadWriteLock.ReadLock readLock;
+  private ReentrantReadWriteLock.WriteLock writeLock;
+
+  // Application's tags to node
+  private Map<ApplicationId, NodeToCountedTags> perAppMappings =
+      new HashMap<>();
+
+  // Global tags to node mapping (used to fast return aggregated tags
+  // cardinality across apps)
+  private NodeToCountedTags globalMapping = new NodeToCountedTags();
+
+  /**
+   * Store node to counted tags.
+   */
+  @VisibleForTesting
+  static class NodeToCountedTags {
+    // Map<NodeId, Map<Tag, Count>>
+    private Map<NodeId, Map<String, Long>> nodeToTagsWithCount =
+        new HashMap<>();
+
+    // protected by external locks
+    private void addTagsToNode(NodeId nodeId, Set<String> tags) {
+      Map<String, Long> innerMap = nodeToTagsWithCount.computeIfAbsent(nodeId,
+          k -> new HashMap<>());
+
+      for (String tag : tags) {
+        Long count = innerMap.get(tag);
+        if (count == null) {
+          innerMap.put(tag, 1L);
+        } else{
+          innerMap.put(tag, count + 1);
+        }
+      }
+    }
+
+    // protected by external locks
+    private void addTagToNode(NodeId nodeId, String tag) {
+      Map<String, Long> innerMap = nodeToTagsWithCount.computeIfAbsent(nodeId,
+          k -> new HashMap<>());
+
+      Long count = innerMap.get(tag);
+      if (count == null) {
+        innerMap.put(tag, 1L);
+      } else{
+        innerMap.put(tag, count + 1);
+      }
+    }
+
+    private void removeTagFromInnerMap(Map<String, Long> innerMap, String tag) {
+      Long count = innerMap.get(tag);
+      if (count > 1) {
+        innerMap.put(tag, count - 1);
+      } else {
+        if (count <= 0) {
+          LOG.warn(
+              "Trying to remove tags from node, however the count already"
+                  + " becomes 0 or less, it could be a potential bug.");
+        }
+        innerMap.remove(tag);
+      }
+    }
+
+    private void removeTagsFromNode(NodeId nodeId, Set<String> tags) {
+      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
+      if (innerMap == null) {
+        LOG.warn("Failed to find node=" + nodeId
+            + " while trying to remove tags, please double check.");
+        return;
+      }
+
+      for (String tag : tags) {
+        removeTagFromInnerMap(innerMap, tag);
+      }
+
+      if (innerMap.isEmpty()) {
+        nodeToTagsWithCount.remove(nodeId);
+      }
+    }
+
+    private void removeTagFromNode(NodeId nodeId, String tag) {
+      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
+      if (innerMap == null) {
+        LOG.warn("Failed to find node=" + nodeId
+            + " while trying to remove tags, please double check.");
+        return;
+      }
+
+      removeTagFromInnerMap(innerMap, tag);
+
+      if (innerMap.isEmpty()) {
+        nodeToTagsWithCount.remove(nodeId);
+      }
+    }
+
+    private long getCardinality(NodeId nodeId, String tag) {
+      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
+      if (innerMap == null) {
+        return 0;
+      }
+      Long value = innerMap.get(tag);
+      return value == null ? 0 : value;
+    }
+
+    private long getCardinality(NodeId nodeId, Set<String> tags,
+        LongBinaryOperator op) {
+      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
+      if (innerMap == null) {
+        return 0;
+      }
+
+      long returnValue = 0;
+      boolean firstTag = true;
+
+      if (tags != null && !tags.isEmpty()) {
+        for (String tag : tags) {
+          Long value = innerMap.get(tag);
+          if (value == null) {
+            value = 0L;
+          }
+
+          if (firstTag) {
+            returnValue = value;
+            firstTag = false;
+            continue;
+          }
+
+          returnValue = op.applyAsLong(returnValue, value);
+        }
+      } else {
+        // Similar to above if, but only iterate values for better performance
+        for (long value : innerMap.values()) {
+          // For the first value, we will not apply op
+          if (firstTag) {
+            returnValue = value;
+            firstTag = false;
+            continue;
+          }
+          returnValue = op.applyAsLong(returnValue, value);
+        }
+      }
+      return returnValue;
+    }
+
+    private boolean isEmpty() {
+      return nodeToTagsWithCount.isEmpty();
+    }
+
+    @VisibleForTesting
+    public Map<NodeId, Map<String, Long>> getNodeToTagsWithCount() {
+      return nodeToTagsWithCount;
+    }
+  }
+
+  @VisibleForTesting
+  Map<ApplicationId, NodeToCountedTags> getPerAppMappings() {
+    return perAppMappings;
+  }
+
+  @VisibleForTesting
+  NodeToCountedTags getGlobalMapping() {
+    return globalMapping;
+  }
+
+  public AllocationTagsManager() {
+    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+    readLock = lock.readLock();
+    writeLock = lock.writeLock();
+  }
+
+  /**
+   * Notify container allocated on a node.
+   *
+   * @param nodeId         allocated node.
+   * @param applicationId  applicationId
+   * @param containerId    container id.
+   * @param allocationTags allocation tags, see
+   *                       {@link SchedulingRequest#getAllocationTags()}
+   *                       application_id will be added to allocationTags.
+   */
+  public void addContainer(NodeId nodeId, ApplicationId applicationId,
+      ContainerId containerId, Set<String> allocationTags) {
+    String applicationIdTag =
+        AllocationTagsNamespaces.APP_ID + applicationId.toString();
+
+    boolean useSet = false;
+    if (allocationTags != null && !allocationTags.isEmpty()) {
+      // Copy before edit it.
+      allocationTags = new HashSet<>(allocationTags);
+      allocationTags.add(applicationIdTag);
+      useSet = true;
+    }
+
+    writeLock.lock();
+    try {
+      NodeToCountedTags perAppTagsMapping = perAppMappings.computeIfAbsent(
+          applicationId, k -> new NodeToCountedTags());
+
+      if (useSet) {
+        perAppTagsMapping.addTagsToNode(nodeId, allocationTags);
+        globalMapping.addTagsToNode(nodeId, allocationTags);
+      } else {
+        perAppTagsMapping.addTagToNode(nodeId, applicationIdTag);
+        globalMapping.addTagToNode(nodeId, applicationIdTag);
+      }
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(
+            "Added container=" + containerId + " with tags=[" + StringUtils
+                .join(allocationTags, ",") + "]");
+      }
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  /**
+   * Notify that a container was removed.
+   *
+   * @param nodeId         nodeId
+   * @param applicationId  applicationId
+   * @param containerId    containerId.
+   * @param allocationTags allocation tags for the given container
+   */
+  public void removeContainer(NodeId nodeId, ApplicationId applicationId,
+      ContainerId containerId, Set<String> allocationTags) {
+    String applicationIdTag =
+        AllocationTagsNamespaces.APP_ID + applicationId.toString();
+    boolean useSet = false;
+
+    if (allocationTags != null && !allocationTags.isEmpty()) {
+      // Copy before editing it.
+      allocationTags = new HashSet<>(allocationTags);
+      allocationTags.add(applicationIdTag);
+      useSet = true;
+    }
+
+    writeLock.lock();
+    try {
+      NodeToCountedTags perAppTagsMapping = perAppMappings.get(applicationId);
+      if (perAppTagsMapping == null) {
+        return;
+      }
+
+      if (useSet) {
+        perAppTagsMapping.removeTagsFromNode(nodeId, allocationTags);
+        globalMapping.removeTagsFromNode(nodeId, allocationTags);
+      } else {
+        perAppTagsMapping.removeTagFromNode(nodeId, applicationIdTag);
+        globalMapping.removeTagFromNode(nodeId, applicationIdTag);
+      }
+
+      if (perAppTagsMapping.isEmpty()) {
+        perAppMappings.remove(applicationId);
+      }
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(
+            "Removed container=" + containerId + " with tags=[" + StringUtils
+                .join(allocationTags, ",") + "]");
+      }
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  /**
+   * Get the cardinality of a single tag on a node.
+   *
+   * @param nodeId        nodeId, required.
+   * @param applicationId applicationId. When null is specified, the
+   *                      cardinality is aggregated across all applications
+   *                      (the global mapping).
+   * @param tag           allocation tag, see
+   *                      {@link SchedulingRequest#getAllocationTags()}.
+   *                      If the tag doesn't exist on the node, its
+   *                      cardinality is 0.
+   * @return cardinality of the specified tag on the node.
+   * @throws InvalidAllocationTagsQueryException when an illegal query
+   *                                             parameter is specified
+   */
+  public long getNodeCardinality(NodeId nodeId, ApplicationId applicationId,
+      String tag) throws InvalidAllocationTagsQueryException {
+    readLock.lock();
+
+    try {
+      if (nodeId == null) {
+        throw new InvalidAllocationTagsQueryException(
+            "Must specify nodeId to query cardinality");
+      }
+
+      NodeToCountedTags mapping;
+      if (applicationId != null) {
+        mapping = perAppMappings.get(applicationId);
+      } else {
+        mapping = globalMapping;
+      }
+
+      if (mapping == null) {
+        return 0;
+      }
+
+      return mapping.getCardinality(nodeId, tag);
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  /**
+   * Check whether the given tag exists on a node.
+   *
+   * @param nodeId        nodeId, required.
+   * @param applicationId applicationId. When null is specified, the tag is
+   *                      looked up in the global mapping across all
+   *                      applications.
+   * @param tag           allocation tag, see
+   *                      {@link SchedulingRequest#getAllocationTags()}.
+   * @return true if the tag has non-zero cardinality on the node.
+   * @throws InvalidAllocationTagsQueryException when an illegal query
+   *                                             parameter is specified
+   */
+  public boolean allocationTagExistsOnNode(NodeId nodeId,
+      ApplicationId applicationId, String tag)
+      throws InvalidAllocationTagsQueryException {
+    return getNodeCardinality(nodeId, applicationId, tag) > 0;
+  }
+
+  /**
+   * Get cardinality for the given conditions. Callers can pass in a binary op
+   * to implement customized aggregation logic.
+   *
+   * @param nodeId        nodeId, required.
+   * @param applicationId applicationId. When null is specified, the
+   *                      cardinality is aggregated across all applications
+   *                      (the global mapping).
+   * @param tags          allocation tags, see
+   *                      {@link SchedulingRequest#getAllocationTags()}.
+   *                      When multiple tags are specified, the returned
+   *                      cardinality depends on op. If a specified tag
+   *                      doesn't exist, its cardinality is 0. When
+   *                      null/empty tags are specified, all tags (of the
+   *                      node/app) are considered.
+   * @param op            operator, such as Long::max or Long::sum. Required.
+   *                      This parameter only takes effect when #values >= 2.
+   * @return cardinality of the specified query on the node.
+   * @throws InvalidAllocationTagsQueryException when an illegal query
+   *                                             parameter is specified
+   */
+  public long getNodeCardinalityByOp(NodeId nodeId, ApplicationId applicationId,
+      Set<String> tags, LongBinaryOperator op)
+      throws InvalidAllocationTagsQueryException {
+    readLock.lock();
+
+    try {
+      if (nodeId == null || op == null) {
+        throw new InvalidAllocationTagsQueryException(
+            "Must specify nodeId and op to query cardinality");
+      }
+
+      NodeToCountedTags mapping;
+      if (applicationId != null) {
+        mapping = perAppMappings.get(applicationId);
+      } else {
+        mapping = globalMapping;
+      }
+
+      if (mapping == null) {
+        return 0;
+      }
+
+      return mapping.getCardinality(nodeId, tags, op);
+    } finally {
+      readLock.unlock();
+    }
+  }
+}

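A minimal usage sketch of the new AllocationTagsManager API, for readers skimming the diff (a sketch only: node/application ids are illustrative and not part of this commit):

    import com.google.common.collect.ImmutableSet;
    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.api.records.NodeId;
    import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;

    public class AllocationTagsSketch {
      public static void main(String[] args) throws Exception {
        AllocationTagsManager atm = new AllocationTagsManager();
        NodeId node = NodeId.fromString("node1:1234");
        ApplicationId app = ApplicationId.newInstance(1L, 1);
        ContainerId c1 = ContainerId.newContainerId(
            ApplicationAttemptId.newInstance(app, 1), 1L);

        // The scheduler records each allocation with its tags; the implicit
        // app-id tag (yarn_app_id/<appId>) is added automatically.
        atm.addContainer(node, app, c1, ImmutableSet.of("mapper", "reducer"));

        // Per-app cardinality of one tag on the node.
        System.out.println(atm.getNodeCardinality(node, app, "mapper"));  // 1

        // Aggregate over several tags with a caller-supplied operator.
        System.out.println(atm.getNodeCardinalityByOp(
            node, app, ImmutableSet.of("mapper", "reducer"), Long::sum)); // 2

        // Completion releases the counts and frees per-app state.
        atm.removeContainer(node, app, c1, ImmutableSet.of("mapper", "reducer"));
      }
    }
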
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ff35b7a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsNamespaces.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsNamespaces.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsNamespaces.java
new file mode 100644
index 0000000..893ff1c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsNamespaces.java
@@ -0,0 +1,31 @@
+/*
+ *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.constraint;
+
+/**
+ * Predefined namespaces for allocation tags.
+ *
+ * As with resource-type namespaces, placement-tag namespaces start with a
+ * letter and end with "/".
+ */
+public class AllocationTagsNamespaces {
+  public static final String APP_ID = "yarn_app_id/";
+}

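Concretely, the APP_ID namespace yields the implicit per-application tag that AllocationTagsManager#addContainer attaches to every container, e.g. (application id illustrative):

    String appIdTag = AllocationTagsNamespaces.APP_ID + applicationId.toString();
    // -> "yarn_app_id/application_1514476419000_0001"
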
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ff35b7a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/InvalidAllocationTagsQueryException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/InvalidAllocationTagsQueryException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/InvalidAllocationTagsQueryException.java
new file mode 100644
index 0000000..5519e39
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/InvalidAllocationTagsQueryException.java
@@ -0,0 +1,35 @@
+/*
+ *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.constraint;
+
+import org.apache.hadoop.yarn.exceptions.YarnException;
+
+/**
+ * Exception thrown when an invalid parameter is specified for
+ * placement-tag-related queries.
+ */
+public class InvalidAllocationTagsQueryException extends YarnException {
+  private static final long serialVersionUID = 12312831974894L;
+
+  public InvalidAllocationTagsQueryException(String msg) {
+    super(msg);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ff35b7a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
index f3cbf63..8f751b0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.server.resourcemanager.rmcontainer;
 
 import java.util.List;
+import java.util.Set;
 
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Container;
@@ -30,6 +31,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ContainerRequest;
 import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
@@ -115,4 +117,10 @@ public interface RMContainer extends EventHandler<RMContainerEvent>,
   boolean completed();
 
   NodeId getNodeId();
+
+  /**
+   * Return {@link SchedulingRequest#getAllocationTags()} specified by the AM.
+   * @return allocation tags, may be null or empty
+   */
+  Set<String> getAllocationTags();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ff35b7a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index e26689e..184cdfc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.rmcontainer;
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
@@ -189,6 +190,9 @@ public class RMContainerImpl implements RMContainer {
   private boolean isExternallyAllocated;
   private SchedulerRequestKey allocatedSchedulerKey;
 
+  // TODO: set this when the container is allocated by the scheduler (from SchedulingRequest)
+  private Set<String> allocationTags = null;
+
   public RMContainerImpl(Container container, SchedulerRequestKey schedulerKey,
       ApplicationAttemptId appAttemptId, NodeId nodeId, String user,
       RMContext rmContext) {
@@ -501,6 +505,11 @@ public class RMContainerImpl implements RMContainer {
     return nodeId;
   }
 
+  @Override
+  public Set<String> getAllocationTags() {
+    return allocationTags;
+  }
+
   private static class BaseTransition implements
       SingleArcTransition<RMContainerImpl, RMContainerEvent> {
 
@@ -565,6 +574,12 @@ public class RMContainerImpl implements RMContainer {
 
     @Override
     public void transition(RMContainerImpl container, RMContainerEvent event) {
+      // Notify placementManager
+      container.rmContext.getAllocationTagsManager().addContainer(
+          container.getNodeId(),
+          container.getApplicationAttemptId().getApplicationId(),
+          container.getContainerId(), container.getAllocationTags());
+
       container.eventHandler.handle(new RMAppAttemptEvent(
           container.appAttemptId, RMAppAttemptEventType.CONTAINER_ALLOCATED));
     }
@@ -676,6 +691,12 @@ public class RMContainerImpl implements RMContainer {
 
     @Override
     public void transition(RMContainerImpl container, RMContainerEvent event) {
+      // Notify placementManager
+      container.rmContext.getAllocationTagsManager().removeContainer(
+          container.getNodeId(),
+          container.getApplicationAttemptId().getApplicationId(),
+          container.getContainerId(), container.getAllocationTags());
+
       RMContainerFinishedEvent finishedEvent = (RMContainerFinishedEvent) event;
 
       container.finishTime = System.currentTimeMillis();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ff35b7a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/TestAllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/TestAllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/TestAllocationTagsManager.java
new file mode 100644
index 0000000..0358792
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/TestAllocationTagsManager.java
@@ -0,0 +1,328 @@
+/*
+ *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.constraint;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test functionality of AllocationTagsManager.
+ */
+public class TestAllocationTagsManager {
+  @Test
+  public void testAllocationTagsManagerSimpleCases()
+      throws InvalidAllocationTagsQueryException {
+    AllocationTagsManager atm = new AllocationTagsManager();
+
+    /**
+     * Construct test case:
+     * Node1:
+     *    container_1_1 (mapper/reducer/app_1)
+     *    container_1_3 (service/app_1)
+     *
+     * Node2:
+     *    container_1_2 (mapper/reducer/app_1)
+     *    container_1_4 (reducer/app_1)
+     *    container_2_1 (service/app_2)
+     */
+
+    // 3 Containers from app1
+    atm.addContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
+        ImmutableSet.of("service"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
+        ImmutableSet.of("reducer"));
+
+    // 1 Container from app2
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
+        ImmutableSet.of("service"));
+
+    // Get Cardinality of app1 on node1, with tag "mapper"
+    Assert.assertEquals(1,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node1:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
+            Long::max));
+
+    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=min
+    Assert.assertEquals(1,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("mapper", "reducer"), Long::min));
+
+    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=max
+    Assert.assertEquals(2,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("mapper", "reducer"), Long::max));
+
+    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=sum
+    Assert.assertEquals(3,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("mapper", "reducer"), Long::sum));
+
+    // Get Cardinality by passing single tag.
+    Assert.assertEquals(1,
+        atm.getNodeCardinality(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), "mapper"));
+
+    Assert.assertEquals(2,
+        atm.getNodeCardinality(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), "reducer"));
+
+    // Get Cardinality of app1 on node2, with tag "no_existed/reducer", op=min
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("no_existed", "reducer"), Long::min));
+
+    // Get Cardinality of app1 on node2, with tag "<applicationId>", op=max
+    // (Expect this returns #containers from app1 on node2)
+    Assert.assertEquals(2,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet
+                .of(AllocationTagsNamespaces.APP_ID + TestUtils
+                    .getMockApplicationId(1).toString()), Long::max));
+
+    // Get Cardinality of app1 on node2, with empty tag set, op=max
+    Assert.assertEquals(2,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::max));
+
+    // Get Cardinality of all apps on node2, with empty tag set, op=sum
+    Assert.assertEquals(7,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"), null,
+            ImmutableSet.of(), Long::sum));
+
+    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
+    Assert.assertEquals(5,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::sum));
+
+    // Get Cardinality of app_2 on node2, with empty tag set, op=sum
+    Assert.assertEquals(2,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(2), ImmutableSet.of(), Long::sum));
+
+    // Finish all containers:
+    atm.removeContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.removeContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.removeContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
+        ImmutableSet.of("service"));
+
+    atm.removeContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
+        ImmutableSet.of("reducer"));
+
+    atm.removeContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
+        ImmutableSet.of("service"));
+
+    // Expect all cardinality to be 0
+    // Get Cardinality of app1 on node1, with tag "mapper"
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node1:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
+            Long::max));
+
+    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=min
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("mapper", "reducer"), Long::min));
+
+    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=max
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("mapper", "reducer"), Long::max));
+
+    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=sum
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("mapper", "reducer"), Long::sum));
+
+    // Get Cardinality of app1 on node2, with tag "<applicationId>", op=max
+    // (Expect 0 now that all containers have been removed)
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of(TestUtils.getMockApplicationId(1).toString()),
+            Long::max));
+
+    Assert.assertEquals(0,
+        atm.getNodeCardinality(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            TestUtils.getMockApplicationId(1).toString()));
+
+    // Get Cardinality of app1 on node2, with empty tag set, op=max
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::max));
+
+    // Get Cardinality of all apps on node2, with empty tag set, op=sum
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"), null,
+            ImmutableSet.of(), Long::sum));
+
+    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::sum));
+
+    // Get Cardinality of app_2 on node2, with empty tag set, op=sum
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(2), ImmutableSet.of(), Long::sum));
+  }
+
+  @Test
+  public void testAllocationTagsManagerMemoryAfterCleanup()
+      throws InvalidAllocationTagsQueryException {
+    /**
+     * Make sure YARN cleans up all memory once container/app finishes.
+     */
+
+    AllocationTagsManager atm = new AllocationTagsManager();
+
+    // Add a bunch of containers
+    atm.addContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
+        ImmutableSet.of("service"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
+        ImmutableSet.of("reducer"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
+        ImmutableSet.of("service"));
+
+    // Remove all these containers
+    atm.removeContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.removeContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.removeContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
+        ImmutableSet.of("service"));
+
+    atm.removeContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
+        ImmutableSet.of("reducer"));
+
+    atm.removeContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
+        ImmutableSet.of("service"));
+
+    // Check internal data structure
+    Assert.assertEquals(0,
+        atm.getGlobalMapping().getNodeToTagsWithCount().size());
+    Assert.assertEquals(0, atm.getPerAppMappings().size());
+  }
+
+  @Test
+  public void testQueryCardinalityWithIllegalParameters()
+      throws InvalidAllocationTagsQueryException {
+    /**
+     * Make sure queries with illegal parameters throw
+     * InvalidAllocationTagsQueryException.
+     */
+
+    AllocationTagsManager atm = new AllocationTagsManager();
+
+    // Add a bunch of containers
+    atm.addContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
+        ImmutableSet.of("service"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
+        ImmutableSet.of("reducer"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
+        ImmutableSet.of("service"));
+
+    // No node-id
+    boolean caughtException = false;
+    try {
+      atm.getNodeCardinalityByOp(null, TestUtils.getMockApplicationId(2),
+          ImmutableSet.of("mapper"), Long::min);
+    } catch (InvalidAllocationTagsQueryException e) {
+      caughtException = true;
+    }
+    Assert.assertTrue("should fail because no nodeId is specified",
+        caughtException);
+
+    // No op
+    caughtException = false;
+    try {
+      atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+          TestUtils.getMockApplicationId(2), ImmutableSet.of("mapper"), null);
+    } catch (InvalidAllocationTagsQueryException e) {
+      caughtException = true;
+    }
+    Assert.assertTrue("should fail because no op is specified",
+        caughtException);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ff35b7a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
index 6c189b3..27ff311 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
+import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
@@ -109,6 +110,8 @@ public class TestRMContainerImpl {
     when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
     when(rmContext.getRMApps()).thenReturn(rmApps);
     when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher);
+    AllocationTagsManager ptm = mock(AllocationTagsManager.class);
+    when(rmContext.getAllocationTagsManager()).thenReturn(ptm);
     YarnConfiguration conf = new YarnConfiguration();
     conf.setBoolean(
         YarnConfiguration.APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO,
@@ -209,6 +212,8 @@ public class TestRMContainerImpl {
     when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
     when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
     when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher);
+    AllocationTagsManager ptm = mock(AllocationTagsManager.class);
+    when(rmContext.getAllocationTagsManager()).thenReturn(ptm);
 
     YarnConfiguration conf = new YarnConfiguration();
     conf.setBoolean(
@@ -367,4 +372,123 @@ public class TestRMContainerImpl {
     verify(publisher, times(1)).containerCreated(any(RMContainer.class), anyLong());
     verify(publisher, times(1)).containerFinished(any(RMContainer.class), anyLong());
   }
+
+  @Test
+  public void testContainerTransitionNotifyPlacementTagsManager()
+      throws Exception {
+    DrainDispatcher drainDispatcher = new DrainDispatcher();
+    EventHandler<RMAppAttemptEvent> appAttemptEventHandler = mock(
+        EventHandler.class);
+    EventHandler generic = mock(EventHandler.class);
+    drainDispatcher.register(RMAppAttemptEventType.class,
+        appAttemptEventHandler);
+    drainDispatcher.register(RMNodeEventType.class, generic);
+    drainDispatcher.init(new YarnConfiguration());
+    drainDispatcher.start();
+    NodeId nodeId = BuilderUtils.newNodeId("host", 3425);
+    ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
+    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
+        appId, 1);
+    ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
+    ContainerAllocationExpirer expirer = mock(ContainerAllocationExpirer.class);
+
+    Resource resource = BuilderUtils.newResource(512, 1);
+    Priority priority = BuilderUtils.newPriority(5);
+
+    Container container = BuilderUtils.newContainer(containerId, nodeId,
+        "host:3465", resource, priority, null);
+    ConcurrentMap<ApplicationId, RMApp> rmApps =
+        spy(new ConcurrentHashMap<ApplicationId, RMApp>());
+    RMApp rmApp = mock(RMApp.class);
+    when(rmApp.getRMAppAttempt(Matchers.any())).thenReturn(null);
+    Mockito.doReturn(rmApp).when(rmApps).get(Matchers.any());
+
+    RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
+    SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class);
+    AllocationTagsManager tagsManager = new AllocationTagsManager();
+    RMContext rmContext = mock(RMContext.class);
+    when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
+    when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
+    when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
+    when(rmContext.getRMApps()).thenReturn(rmApps);
+    when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher);
+    when(rmContext.getAllocationTagsManager()).thenReturn(tagsManager);
+    YarnConfiguration conf = new YarnConfiguration();
+    conf.setBoolean(
+        YarnConfiguration.APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO,
+        true);
+    when(rmContext.getYarnConfiguration()).thenReturn(conf);
+
+    /* First container: ALLOCATED -> KILLED */
+    RMContainer rmContainer = new RMContainerImpl(container,
+        SchedulerRequestKey.extractFrom(container), appAttemptId,
+        nodeId, "user", rmContext);
+
+    Assert.assertEquals(0,
+        tagsManager.getNodeCardinalityByOp(nodeId, appId, null, Long::max));
+
+    rmContainer.handle(new RMContainerEvent(containerId,
+        RMContainerEventType.START));
+
+    Assert.assertEquals(1,
+        tagsManager.getNodeCardinalityByOp(nodeId, appId, null, Long::max));
+
+    rmContainer.handle(new RMContainerFinishedEvent(containerId, ContainerStatus
+        .newInstance(containerId, ContainerState.COMPLETE, "", 0),
+        RMContainerEventType.KILL));
+
+    Assert.assertEquals(0,
+        tagsManager.getNodeCardinalityByOp(nodeId, appId, null, Long::max));
+
+    /* Second container: ACQUIRED -> FINISHED */
+    rmContainer = new RMContainerImpl(container,
+        SchedulerRequestKey.extractFrom(container), appAttemptId,
+        nodeId, "user", rmContext);
+
+    Assert.assertEquals(0,
+        tagsManager.getNodeCardinalityByOp(nodeId, appId, null, Long::max));
+
+    rmContainer.handle(new RMContainerEvent(containerId,
+        RMContainerEventType.START));
+
+    Assert.assertEquals(1,
+        tagsManager.getNodeCardinalityByOp(nodeId, appId, null, Long::max));
+
+    rmContainer.handle(
+        new RMContainerEvent(containerId, RMContainerEventType.ACQUIRED));
+
+    rmContainer.handle(new RMContainerFinishedEvent(containerId, ContainerStatus
+        .newInstance(containerId, ContainerState.COMPLETE, "", 0),
+        RMContainerEventType.FINISHED));
+
+    Assert.assertEquals(0,
+        tagsManager.getNodeCardinalityByOp(nodeId, appId, null, Long::max));
+
+    /* Third container: RUNNING -> FINISHED */
+    rmContainer = new RMContainerImpl(container,
+        SchedulerRequestKey.extractFrom(container), appAttemptId,
+        nodeId, "user", rmContext);
+
+    Assert.assertEquals(0,
+        tagsManager.getNodeCardinalityByOp(nodeId, appId, null, Long::max));
+
+    rmContainer.handle(new RMContainerEvent(containerId,
+        RMContainerEventType.START));
+
+    Assert.assertEquals(1,
+        tagsManager.getNodeCardinalityByOp(nodeId, appId, null, Long::max));
+
+    rmContainer.handle(
+        new RMContainerEvent(containerId, RMContainerEventType.ACQUIRED));
+
+    rmContainer.handle(
+        new RMContainerEvent(containerId, RMContainerEventType.LAUNCHED));
+
+    rmContainer.handle(new RMContainerFinishedEvent(containerId, ContainerStatus
+        .newInstance(containerId, ContainerState.COMPLETE, "", 0),
+        RMContainerEventType.FINISHED));
+
+    Assert.assertEquals(0,
+        tagsManager.getNodeCardinalityByOp(nodeId, appId, null, Long::max));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ff35b7a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
index e3326c7..61a5555 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
@@ -135,6 +136,9 @@ public class TestUtils {
         new DefaultResourceCalculator());
     rmContext.setScheduler(mockScheduler);
 
+    AllocationTagsManager ptm = mock(AllocationTagsManager.class);
+    rmContext.setAllocationTagsManager(ptm);
+
     return rmContext;
   }
   
@@ -234,6 +238,11 @@ public class TestUtils {
     doReturn(id).when(containerId).getContainerId();
     return containerId;
   }
+
+  public static ContainerId getMockContainerId(int appId, int containerId) {
+    ApplicationAttemptId attemptId = getMockApplicationAttemptId(appId, 1);
+    return ContainerId.newContainerId(attemptId, containerId);
+  }
   
   public static Container getMockContainer(
       ContainerId containerId, NodeId nodeId, 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ff35b7a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
index 3f97b59..4b902a7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
@@ -74,6 +74,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWri
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
@@ -234,6 +235,8 @@ public class TestFifoScheduler {
     FifoScheduler scheduler = new FifoScheduler();
     RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null,
         null, containerTokenSecretManager, nmTokenSecretManager, null, scheduler);
+    AllocationTagsManager ptm = mock(AllocationTagsManager.class);
+    rmContext.setAllocationTagsManager(ptm);
     rmContext.setSystemMetricsPublisher(mock(SystemMetricsPublisher.class));
     rmContext.setRMApplicationHistoryWriter(
         mock(RMApplicationHistoryWriter.class));
@@ -312,12 +315,14 @@ public class TestFifoScheduler {
     FifoScheduler scheduler = new FifoScheduler();
     RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null,
         null, containerTokenSecretManager, nmTokenSecretManager, null, scheduler);
+    AllocationTagsManager ptm = mock(AllocationTagsManager.class);
     rmContext.setSystemMetricsPublisher(mock(SystemMetricsPublisher.class));
     rmContext.setRMApplicationHistoryWriter(mock(RMApplicationHistoryWriter.class));
     ((RMContextImpl) rmContext).setYarnConfiguration(new YarnConfiguration());
     NullRMNodeLabelsManager nlm = new NullRMNodeLabelsManager();
     nlm.init(new Configuration());
     rmContext.setNodeLabelManager(nlm);
+    rmContext.setAllocationTagsManager(ptm);
 
     scheduler.setRMContext(rmContext);
     ((RMContextImpl) rmContext).setScheduler(scheduler);




[36/49] hadoop git commit: YARN-7555. Support multiple resource types in YARN native services. (wangda)

Posted by as...@apache.org.
YARN-7555. Support multiple resource types in YARN native services. (wangda)

Change-Id: I330e6ee17a73962dcaadd766a3e72d2888681731


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7467e8fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7467e8fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7467e8fe

Branch: refs/heads/YARN-6592
Commit: 7467e8fe5a95230986fed9d748769304af3f2b61
Parents: 8112761
Author: Wangda Tan <wa...@apache.org>
Authored: Fri Dec 29 11:46:30 2017 -0800
Committer: Wangda Tan <wa...@apache.org>
Committed: Fri Dec 29 15:34:08 2017 -0800

----------------------------------------------------------------------
 .../dev-support/findbugs-exclude.xml            |   1 -
 ...RN-Simplified-V1-API-Layer-For-Services.yaml |  18 ++-
 .../dev-support/findbugs-exclude.xml            |   8 ++
 .../hadoop/yarn/service/ServiceScheduler.java   |   7 ++
 .../yarn/service/api/records/Resource.java      |  48 ++++++--
 .../api/records/ResourceInformation.java        | 119 +++++++++++++++++++
 .../yarn/service/component/Component.java       |  35 +++++-
 .../hadoop/yarn/service/MockServiceAM.java      |  31 ++++-
 .../hadoop/yarn/service/TestServiceAM.java      |  63 +++++++++-
 .../yarn/service/conf/TestAppJsonResolve.java   |  17 +++
 .../hadoop/yarn/service/conf/examples/app.json  |  11 +-
 .../markdown/yarn-service/YarnServiceAPI.md     |  15 ++-
 12 files changed, 343 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7467e8fe/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index de4b0e6..6a10312 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -650,5 +650,4 @@
     <Method name="equals" />
     <Bug pattern="EQ_OVERRIDING_EQUALS_NOT_SYMMETRIC" />
   </Match>
-
 </FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7467e8fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
index 979883c..f142f66 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
@@ -247,7 +247,18 @@ definitions:
       kerberos_principal:
         description: The Kerberos Principal of the service
         $ref: '#/definitions/KerberosPrincipal'
-
+  ResourceInformation:
+    description:
+      ResourceInformation determines the unit/value of resource types in addition to memory and vcores. It will be part of the Resource object
+    properties:
+      value:
+        type: integer
+        format: int64
+        description: Integer value of the resource.
+      unit:
+        type: string
+        description:
+          Unit of the resource; acceptable values are p/n/u/m/k/M/G/T/P/Ki/Mi/Gi/Ti/Pi. By default it is empty, which means no unit
   Resource:
     description:
      Resource determines the amount of resources (vcores, memory, network, etc.) usable by a container. This field determines the resource to be applied for all the containers of a component or service. The resource specified at the service (or global) level can be overridden at the component level. Only one of profile OR cpu & memory is expected. It raises a validation exception otherwise.
@@ -262,6 +273,11 @@ definitions:
       memory:
         type: string
         description: Amount of memory allocated to each container (optional but overrides memory in profile if specified). Currently accepts only an integer value and default unit is in MB.
+      additional:
+        type: object
+        additionalProperties:
+          $ref: '#/definitions/ResourceInformation'
+        description: Map of resource name to ResourceInformation
   PlacementPolicy:
     description: Placement policy of an instance of a service. This feature is in the works in YARN-6592.
     properties:

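A sketch of the payload this extended Resource schema admits in a service spec (the resource name, values, and unit are illustrative, not part of this commit):

    "resource": {
      "cpus": 2,
      "memory": "2048",
      "additional": {
        "resource-1": { "value": 1, "unit": "Gi" }
      }
    }
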
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7467e8fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/dev-support/findbugs-exclude.xml
index 80c04c8..15ce952 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/dev-support/findbugs-exclude.xml
@@ -48,4 +48,12 @@
         <Class name="org.apache.hadoop.yarn.service.ClientAMPolicyProvider"/>
         <Bug pattern="EI_EXPOSE_REP"/>
     </Match>
+    <!-- SE_BAD_FIELD -->
+    <Match>
+      <Class name="org.apache.hadoop.yarn.service.api.records.Resource" />
+      <Or>
+        <Field name="additional"/>
+      </Or>
+      <Bug pattern="SE_BAD_FIELD" />
+  </Match>
 </FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7467e8fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
index 45cdd28..0a4ea07 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
@@ -76,6 +76,7 @@ import org.apache.hadoop.yarn.service.timelineservice.ServiceTimelinePublisher;
 import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
 import org.apache.hadoop.yarn.service.utils.ServiceRegistryUtils;
 import org.apache.hadoop.yarn.util.BoundedAppender;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -278,6 +279,12 @@ public class ServiceScheduler extends CompositeService {
     RegisterApplicationMasterResponse response = amRMClient
         .registerApplicationMaster(bindAddress.getHostName(),
             bindAddress.getPort(), "N/A");
+
+    // Update internal resource types according to response.
+    if (response.getResourceTypes() != null) {
+      ResourceUtils.reinitializeResources(response.getResourceTypes());
+    }
+
     if (response.getClientToAMTokenMasterKey() != null
         && response.getClientToAMTokenMasterKey().remaining() != 0) {
       context.secretManager

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7467e8fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Resource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Resource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Resource.java
index 8f682b2..c417ec0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Resource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Resource.java
@@ -17,16 +17,17 @@
 
 package org.apache.hadoop.yarn.service.api.records;
 
-import io.swagger.annotations.ApiModel;
-import io.swagger.annotations.ApiModelProperty;
-
-import java.util.Objects;
-
 import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 import com.fasterxml.jackson.annotation.JsonProperty;
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
+import javax.xml.bind.annotation.XmlElement;
+import java.util.Map;
+import java.util.Objects;
+
 /**
  * Resource determines the amount of resources (vcores, memory, network, etc.)
  * usable by a container. This field determines the resource to be applied for
@@ -46,6 +47,10 @@ public class Resource extends BaseResource implements Cloneable {
   private Integer cpus = 1;
   private String memory = null;
 
+  @JsonProperty("additional")
+  @XmlElement(name = "additional")
+  private Map<String, ResourceInformation> additional = null;
+
   /**
    * Each resource profile has a unique id which is associated with a
    * cluster-level predefined memory, cpus, etc.
@@ -112,6 +117,28 @@ public class Resource extends BaseResource implements Cloneable {
     return Long.parseLong(memory);
   }
 
+  public Resource setResourceInformations(
+      Map<String, ResourceInformation> resourceInformations) {
+    this.additional = resourceInformations;
+    return this;
+  }
+
+  public Resource resourceInformations(
+      Map<String, ResourceInformation> resourceInformations) {
+    this.additional = resourceInformations;
+    return this;
+  }
+
+  /**
+   * Map of resource name to ResourceInformation
+   * @return additional
+   **/
+  @ApiModelProperty(value = "Map of resource name to ResourceInformation")
+  @JsonProperty("additional")
+  public Map<String, ResourceInformation> getAdditional() {
+    return additional;
+  }
+
   @Override
   public boolean equals(java.lang.Object o) {
     if (this == o) {
@@ -121,14 +148,15 @@ public class Resource extends BaseResource implements Cloneable {
       return false;
     }
     Resource resource = (Resource) o;
-    return Objects.equals(this.profile, resource.profile)
-        && Objects.equals(this.cpus, resource.cpus)
-        && Objects.equals(this.memory, resource.memory);
+    return Objects.equals(this.profile, resource.profile) && Objects.equals(
+        this.cpus, resource.cpus) && Objects.equals(this.memory,
+        resource.memory) && Objects.equals(this.additional,
+        resource.additional);
   }
 
   @Override
   public int hashCode() {
-    return Objects.hash(profile, cpus, memory);
+    return Objects.hash(profile, cpus, memory, additional);
   }
 
   @Override
@@ -139,6 +167,8 @@ public class Resource extends BaseResource implements Cloneable {
     sb.append("    profile: ").append(toIndentedString(profile)).append("\n");
     sb.append("    cpus: ").append(toIndentedString(cpus)).append("\n");
     sb.append("    memory: ").append(toIndentedString(memory)).append("\n");
+    sb.append("    additional: ").append(
+        toIndentedString(additional)).append("\n");
     sb.append("}");
     return sb.toString();
   }

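As a usage illustration (assumed, not taken from this patch), the new
"additional" map can be populated fluently on a component's resource spec;
memory and vcores stay in their dedicated fields, and everything else goes
into the map. "resource-1" below is a purely illustrative type name, and the
usual swagger-style setters (setCpus/setMemory) are assumed:

    import com.google.common.collect.ImmutableMap;
    import org.apache.hadoop.yarn.service.api.records.Resource;
    import org.apache.hadoop.yarn.service.api.records.ResourceInformation;

    Resource spec = new Resource();
    spec.setCpus(1);
    spec.setMemory("1024");
    // Unit defaults to "" (a plain count); "Gi" etc. follow the usual
    // YARN unit convention.
    spec.setResourceInformations(ImmutableMap.of(
        "resource-1", new ResourceInformation().value(3333L).unit("Gi")));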
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7467e8fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ResourceInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ResourceInformation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ResourceInformation.java
new file mode 100644
index 0000000..f39b11a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ResourceInformation.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.api.records;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.google.gson.annotations.SerializedName;
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+import java.util.Objects;
+
+/**
+ * ResourceInformation determines the unit/name/value of resource types in addition to memory and vcores. It is part of the Resource object.
+ */
+@ApiModel(description = "ResourceInformation determines unit/value of resource types in addition to memory and vcores. It will be part of Resource object")
+@javax.annotation.Generated(value = "io.swagger.codegen.languages.JavaClientCodegen",
+                            date = "2017-11-22T15:15:49.495-08:00")
+public class ResourceInformation {
+  @SerializedName("value")
+  private Long value = null;
+
+  @SerializedName("unit")
+  private String unit = null;
+
+  public ResourceInformation value(Long value) {
+    this.value = value;
+    return this;
+  }
+
+  /**
+   * Integer value of the resource.
+   *
+   * @return value
+   **/
+  @ApiModelProperty(value = "Integer value of the resource.")
+  @JsonProperty("value")
+  public Long getValue() {
+    return value;
+  }
+
+  public void setValue(Long value) {
+    this.value = value;
+  }
+
+  public ResourceInformation unit(String unit) {
+    this.unit = unit;
+    return this;
+  }
+
+  /**
+   * @return unit
+   **/
+  @ApiModelProperty(value = "")
+  @JsonProperty("unit")
+  public String getUnit() {
+    return unit == null ? "" : unit;
+  }
+
+  public void setUnit(String unit) {
+    this.unit = unit;
+  }
+
+  @Override
+  public boolean equals(java.lang.Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    ResourceInformation resourceInformation = (ResourceInformation) o;
+    return Objects
+        .equals(this.value, resourceInformation.value) && Objects.equals(
+        this.unit, resourceInformation.unit);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(value, unit);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("class ResourceInformation {\n");
+    sb.append("    value: ").append(toIndentedString(value)).append("\n");
+    sb.append("    unit: ").append(toIndentedString(unit)).append("\n");
+    sb.append("}");
+    return sb.toString();
+  }
+
+  /**
+   * Convert the given object to string with each line indented by 4 spaces
+   * (except the first line).
+   */
+  private String toIndentedString(java.lang.Object o) {
+    if (o == null) {
+      return "null";
+    }
+    return o.toString().replace("\n", "\n    ");
+  }
+
+}
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7467e8fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
index a84c1b1..3090692 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
 import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.service.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceId;
 import org.apache.hadoop.yarn.service.ContainerFailureTracker;
@@ -392,9 +393,37 @@ public class Component implements EventHandler<ComponentEvent> {
 
   @SuppressWarnings({ "unchecked" })
   public void requestContainers(long count) {
-    Resource resource = Resource
-        .newInstance(componentSpec.getResource().calcMemoryMB(),
-            componentSpec.getResource().getCpus());
+    org.apache.hadoop.yarn.service.api.records.Resource componentResource =
+        componentSpec.getResource();
+
+    Resource resource = Resource.newInstance(componentResource.calcMemoryMB(),
+        componentResource.getCpus());
+
+    if (componentResource.getAdditional() != null) {
+      for (Map.Entry<String, ResourceInformation> entry : componentResource
+          .getAdditional().entrySet()) {
+
+        String resourceName = entry.getKey();
+
+        // Avoid setting memory/cpu under "additional"
+        if (resourceName.equals(
+            org.apache.hadoop.yarn.api.records.ResourceInformation.MEMORY_URI)
+            || resourceName.equals(
+            org.apache.hadoop.yarn.api.records.ResourceInformation.VCORES_URI)) {
+          LOG.warn("Please set memory/vcore in the main section of resource, "
+              + "ignoring this entry=" + resourceName);
+          continue;
+        }
+
+        ResourceInformation specInfo = entry.getValue();
+        org.apache.hadoop.yarn.api.records.ResourceInformation ri =
+            org.apache.hadoop.yarn.api.records.ResourceInformation.newInstance(
+                entry.getKey(),
+                specInfo.getUnit(),
+                specInfo.getValue());
+        resource.setResourceInformation(resourceName, ri);
+      }
+    }
 
     for (int i = 0; i < count; i++) {
       //TODO Once YARN-5468 is done, use that for anti-affinity

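A note on the fully qualified names in this hunk (commentary, not from the
patch): two distinct ResourceInformation classes are in play, the service
REST record (value and unit, with the resource name as the map key) and the
YARN API record (which carries the name itself). Sketching the conversion
for one entry, with "yarn.io/gpu" as an illustrative name:

    // Service-spec record: the name lives outside, as the "additional" key.
    org.apache.hadoop.yarn.service.api.records.ResourceInformation spec =
        new org.apache.hadoop.yarn.service.api.records.ResourceInformation()
            .value(4L).unit("");
    // YARN API record: the same data plus the resource name itself.
    org.apache.hadoop.yarn.api.records.ResourceInformation ri =
        org.apache.hadoop.yarn.api.records.ResourceInformation
            .newInstance("yarn.io/gpu", spec.getUnit(), spec.getValue());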
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7467e8fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
index 37b18fa..3e1582d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
@@ -29,8 +29,16 @@ import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
-
-import org.apache.hadoop.yarn.api.records.*;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.hadoop.yarn.client.api.NMClient;
 import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
@@ -47,11 +55,16 @@ import org.apache.hadoop.yarn.service.exceptions.BadClusterStateException;
 import org.apache.hadoop.yarn.service.registry.YarnRegistryViewForProviders;
 import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
 import org.apache.hadoop.yarn.util.Records;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.util.*;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeoutException;
 
@@ -88,7 +101,6 @@ public class MockServiceAM extends ServiceMaster {
     this.service = service;
   }
 
-
   @Override
   protected ContainerId getAMContainerId()
       throws BadClusterStateException {
@@ -185,7 +197,11 @@ public class MockServiceAM extends ServiceMaster {
           @Override
           public RegisterApplicationMasterResponse registerApplicationMaster(
               String appHostName, int appHostPort, String appTrackingUrl) {
-            return mock(RegisterApplicationMasterResponse.class);
+            RegisterApplicationMasterResponse response = mock(
+                RegisterApplicationMasterResponse.class);
+            when(response.getResourceTypes()).thenReturn(
+                ResourceUtils.getResourcesTypeInfo());
+            return response;
           }
 
           @Override public void unregisterApplicationMaster(
@@ -195,8 +211,11 @@ public class MockServiceAM extends ServiceMaster {
           }
         };
 
-        return AMRMClientAsync.createAMRMClientAsync(client1, 1000,
+        AMRMClientAsync<AMRMClient.ContainerRequest> amrmClientAsync =
+            AMRMClientAsync.createAMRMClientAsync(client1, 1000,
                 this.new AMRMClientCallback());
+
+        return amrmClientAsync;
       }
 
       @SuppressWarnings("SuspiciousMethodCalls")

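Why the mock in this file now stubs getResourceTypes() (commentary): the
scheduler feeds whatever registerApplicationMaster() returns into
ResourceUtils.reinitializeResources(), so returning the current registry
from the mock keeps that code path exercised without disturbing any custom
types a test has registered. A minimal sketch of the same Mockito pattern:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    RegisterApplicationMasterResponse response =
        mock(RegisterApplicationMasterResponse.class);
    // Echo the currently registered resource types back to the caller.
    when(response.getResourceTypes())
        .thenReturn(ResourceUtils.getResourcesTypeInfo());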
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7467e8fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java
index 2a3303e..4dc1ebd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java
@@ -18,17 +18,25 @@
 
 package org.apache.hadoop.yarn.service;
 
+import com.google.common.collect.ImmutableMap;
 import org.apache.commons.io.FileUtils;
 import org.apache.curator.test.TestingCluster;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ResourceTypeInfo;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.service.api.records.Component;
+import org.apache.hadoop.yarn.service.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.service.api.records.Service;
 import org.apache.hadoop.yarn.service.component.ComponentState;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceState;
 import org.apache.hadoop.yarn.service.conf.YarnServiceConf;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -38,10 +46,12 @@ import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
 import java.util.concurrent.TimeoutException;
 
-import static org.apache.hadoop.registry.client.api.RegistryConstants
-    .KEY_REGISTRY_ZK_QUORUM;
+import static org.apache.hadoop.registry.client.api.RegistryConstants.KEY_REGISTRY_ZK_QUORUM;
 
 public class TestServiceAM extends ServiceTestUtils{
 
@@ -183,12 +193,12 @@ public class TestServiceAM extends ServiceTestUtils{
     am.init(conf);
     am.start();
     Thread.sleep(100);
-    GenericTestUtils.waitFor(() -> am.getComponent(comp1Name).getState().equals(
-        ComponentState.FLEXING), 100, 2000);
+    GenericTestUtils.waitFor(() -> am.getComponent(comp1Name).getState()
+        .equals(ComponentState.FLEXING), 100, 2000);
 
     // 1 pending instance
-    Assert.assertEquals(1,
-        am.getComponent(comp1Name).getPendingInstances().size());
+    Assert.assertEquals(1, am.getComponent(comp1Name).getPendingInstances()
+        .size());
 
     am.feedContainerToComp(exampleApp, 2, comp1Name);
 
@@ -198,6 +208,47 @@ public class TestServiceAM extends ServiceTestUtils{
         org.apache.hadoop.yarn.api.records.ContainerState.RUNNING,
         am.getCompInstance(comp1Name, comp1InstName).getContainerStatus()
             .getState());
+  }
+
+  @Test
+  public void testScheduleWithMultipleResourceTypes()
+      throws TimeoutException, InterruptedException, IOException {
+    ApplicationId applicationId = ApplicationId.newInstance(123456, 1);
+    Service exampleApp = new Service();
+    exampleApp.setId(applicationId.toString());
+    exampleApp.setName("testScheduleWithMultipleResourceTypes");
+
+    List<ResourceTypeInfo> resourceTypeInfos = new ArrayList<>(
+        ResourceUtils.getResourcesTypeInfo());
+    // Add 3rd resource type.
+    resourceTypeInfos.add(ResourceTypeInfo
+        .newInstance("resource-1", "", ResourceTypes.COUNTABLE));
+    // Reinitialize resource types
+    ResourceUtils.reinitializeResources(resourceTypeInfos);
+
+    Component serviceComponent = createComponent("compa", 1, "pwd");
+    serviceComponent.getResource().setResourceInformations(ImmutableMap
+        .of("resource-1", new ResourceInformation().value(3333L).unit("Gi")));
+    exampleApp.addComponent(serviceComponent);
+
+    MockServiceAM am = new MockServiceAM(exampleApp);
+    am.init(conf);
+    am.start();
+
+    ServiceScheduler serviceScheduler = am.context.scheduler;
+    AMRMClientAsync<AMRMClient.ContainerRequest> amrmClientAsync =
+        serviceScheduler.getAmRMClient();
+
+    Collection<AMRMClient.ContainerRequest> rr =
+        amrmClientAsync.getMatchingRequests(0);
+    Assert.assertEquals(1, rr.size());
+
+    org.apache.hadoop.yarn.api.records.Resource capability =
+        rr.iterator().next().getCapability();
+    Assert.assertEquals(3333L, capability.getResourceValue("resource-1"));
+    Assert.assertEquals("Gi",
+        capability.getResourceInformation("resource-1").getUnits());
+
     am.stop();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7467e8fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestAppJsonResolve.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestAppJsonResolve.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestAppJsonResolve.java
index 18318aa..73f7fa1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestAppJsonResolve.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestAppJsonResolve.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.service.conf;
 
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.service.ServiceTestUtils;
+import org.apache.hadoop.yarn.service.api.records.Resource;
 import org.apache.hadoop.yarn.service.api.records.Service;
 import org.apache.hadoop.yarn.service.api.records.ConfigFile;
 import org.apache.hadoop.yarn.service.api.records.Configuration;
@@ -194,6 +195,22 @@ public class TestAppJsonResolve extends Assert {
     assertEquals("60",
         worker.getProperty("yarn.service.failure-count-reset.window"));
 
+    // Validate worker's resources
+    Resource workerResource = orig.getComponent("worker").getResource();
+    Assert.assertEquals(1, workerResource.getCpus().intValue());
+    Assert.assertEquals(1024, workerResource.calcMemoryMB());
+    Assert.assertNotNull(workerResource.getAdditional());
+    Assert.assertEquals(2, workerResource.getAdditional().size());
+    Assert.assertEquals(3333, workerResource.getAdditional().get(
+        "resource-1").getValue().longValue());
+    Assert.assertEquals("Gi", workerResource.getAdditional().get(
+        "resource-1").getUnit());
+
+    Assert.assertEquals(5, workerResource.getAdditional().get(
+        "yarn.io/gpu").getValue().longValue());
+    Assert.assertEquals("", workerResource.getAdditional().get(
+        "yarn.io/gpu").getUnit());
+
     other = orig.getComponent("other").getConfiguration();
     assertEquals(0, other.getProperties().size());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7467e8fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/app.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/app.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/app.json
index 2eb477f..7e56f1e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/app.json
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/app.json
@@ -37,7 +37,16 @@
       "launch_command": "sleep 3600",
       "resource": {
         "cpus": 1,
-        "memory": "1024"
+        "memory": "1024",
+        "additional": {
+          "resource-1": {
+            "value": 3333,
+            "unit": "Gi"
+          },
+          "yarn.io/gpu": {
+            "value": 5
+          }
+        }
       },
       "configuration": {
         "properties": {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7467e8fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
index e224e5d..65d463d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md
@@ -1,4 +1,4 @@
-<!---
+# <!---
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
@@ -330,6 +330,7 @@ Resource determines the amount of resources (vcores, memory, network, etc.) usab
 |profile|Each resource profile has a unique id which is associated with a cluster-level predefined memory, cpus, etc.|false|string||
 |cpus|Amount of vcores allocated to each container (optional but overrides cpus in profile if specified).|false|integer (int32)||
 |memory|Amount of memory allocated to each container (optional but overrides memory in profile if specified). Currently accepts only an integer value and default unit is in MB.|false|string||
+|additional|A map of resource type name to resource type information, including value (integer) and unit (string). This is used to specify resources other than cpu and memory. Please refer to the example below.|false|object||
 
 
 ### Service
@@ -395,8 +396,14 @@ POST URL - http://localhost:8088/ws/v1/services
         "launch_command": "./start_nginx.sh",
         "resource": {
           "cpus": 1,
-          "memory": "256"
-       }
+          "memory": "256",
+          "additional" : {
+            "yarn.io/gpu" : {
+              "value" : 4,
+              "unit" : ""
+            }
+          }     
+        }
       }
     ]
 }
@@ -605,3 +612,5 @@ POST URL - http://localhost:8088:/ws/v1/services/hbase-app-1
   }
 }
 ```
+
+


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[23/49] hadoop git commit: HADOOP-15133. [JDK9] Ignore com.sun.javadoc.* and com.sun.tools.* in animal-sniffer-maven-plugin to compile with Java 9.

Posted by as...@apache.org.
HADOOP-15133. [JDK9] Ignore com.sun.javadoc.* and com.sun.tools.* in animal-sniffer-maven-plugin to compile with Java 9.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2d8f4ae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2d8f4ae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2d8f4ae

Branch: refs/heads/YARN-6592
Commit: d2d8f4aeb3e214d1a96eeaf96bbe1e9301824ccd
Parents: 5ab632b
Author: Akira Ajisaka <aa...@apache.org>
Authored: Thu Dec 21 11:58:34 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Thu Dec 21 11:58:34 2017 +0900

----------------------------------------------------------------------
 hadoop-project/pom.xml | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2d8f4ae/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index efc8c2d..3c49182 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1569,6 +1569,8 @@
             <ignore>sun.misc.*</ignore>
             <ignore>sun.net.*</ignore>
             <ignore>sun.nio.ch.*</ignore>
+            <ignore>com.sun.javadoc.*</ignore>
+            <ignore>com.sun.tools.*</ignore>
           </ignores>
         </configuration>
       </plugin>

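For orientation (the values below are illustrative, not copied from the
Hadoop pom): animal-sniffer checks compiled classes against a JDK API
signature, and the <ignores> list exempts internal packages, such as the two
added above, from that check. A typical plugin block looks roughly like:

    <plugin>
      <groupId>org.codehaus.mojo</groupId>
      <artifactId>animal-sniffer-maven-plugin</artifactId>
      <configuration>
        <signature>
          <groupId>org.codehaus.mojo.signature</groupId>
          <artifactId>java18</artifactId>
          <version>1.0</version>
        </signature>
        <ignores>
          <ignore>sun.misc.*</ignore>
          <ignore>com.sun.javadoc.*</ignore>
          <ignore>com.sun.tools.*</ignore>
        </ignores>
      </configuration>
    </plugin>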

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[07/49] hadoop git commit: Add 2.8.3 release jdiff files.

Posted by as...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f8caf5/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_2.8.3.xml
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_2.8.3.xml b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_2.8.3.xml
new file mode 100644
index 0000000..e96d018
--- /dev/null
+++ b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_2.8.3.xml
@@ -0,0 +1,27495 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Tue Dec 05 05:47:49 UTC 2017 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop MapReduce Core 2.8.3"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/hadoop-annotations.jar:/build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/jdiff.jar -verbose -classpath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/classes:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-2.8.3.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.8.3.jar:/maven/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/maven/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/maven/javax/activation/activation/1.1/activation-1.1.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar:/maven/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/maven/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/maven/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/maven/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/com/google/inject/guice/3.0/guice-3.0.jar:/maven/javax/inject/javax.inject/1/javax.inject-1.jar:/maven/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/maven/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/maven/asm/asm/3.2/asm-3.2.jar:/maven/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/maven/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/maven/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/maven/org/apache/htrace/htrace-core4/4.0.1-incubating/htrace-core4-4.0.1-incubating.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-2.8.3.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/commons-net/commons-net/3.1/commons-net-3.1.jar:/maven/org/mortbay/jetty/jetty-sslengine/6.1.26/jetty-sslengine-6.1.26.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/maven/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/maven/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/maven/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/maven/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/maven/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.8.3.jar:/maven/com/nimbusds/nimbus-jose-jwt/3.9/nimbus-jose-jwt-3.9.jar:/maven/net/jcip/jcip-annotations/1.0/jcip-annotations-1.0.jar:/maven/net/minidev/json-smart/1.1.1/json-smart-1.1.1.jar:/maven/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/maven/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/maven/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/maven/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/maven/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/maven/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/maven/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/maven/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.8.3.jar:/usr/lib/jvm/java-7-openjdk-amd64/lib/tools.jar:/maven/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/maven/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/maven/commons-logging/commons-logging/1.1.3.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar -sourcepath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java -apidir /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/site/jdiff/xml -apiname Apache Hadoop MapReduce Core 2.8.3 -->
+<package name="org.apache.hadoop.filecache">
+  <!-- start class org.apache.hadoop.filecache.DistributedCache -->
+  <class name="DistributedCache" extends="org.apache.hadoop.mapreduce.filecache.DistributedCache"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="DistributedCache"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="addLocalArchives"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="str" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add an archive that has been localized to the conf.  Used
+ by internal DistributedCache code.
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local archives]]>
+      </doc>
+    </method>
+    <method name="addLocalFiles"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="str" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add a file that has been localized to the conf.  Used
+ by internal DistributedCache code.
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local files]]>
+      </doc>
+    </method>
+    <method name="createAllSymlink"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Internal to MapReduce framework.  Use DistributedCacheManager
+ instead.">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="jobCacheDir" type="java.io.File"/>
+      <param name="workDir" type="java.io.File"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This method creates symlinks for all files in a given dir in another
+ directory. Currently symlinks cannot be disabled. This is a NO-OP.
+
+ @param conf the configuration
+ @param jobCacheDir the target directory for creating symlinks
+ @param workDir the directory in which the symlinks are created
+ @throws IOException
+ @deprecated Internal to MapReduce framework.  Use DistributedCacheManager
+ instead.]]>
+      </doc>
+    </method>
+    <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="cache" type="java.net.URI"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns {@link FileStatus} of a given cache file on hdfs. Internal to
+ MapReduce.
+ @param conf configuration
+ @param cache cache file
+ @return <code>FileStatus</code> of a given cache file on hdfs
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getTimestamp" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="cache" type="java.net.URI"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns mtime of a given cache file on hdfs. Internal to MapReduce.
+ @param conf configuration
+ @param cache cache file
+ @return mtime of a given cache file on hdfs
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="setArchiveTimestamps"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="timestamps" type="java.lang.String"/>
+      <doc>
+      <![CDATA[This is to check the timestamp of the archives to be localized.
+ Used by internal MapReduce code.
+ @param conf Configuration which stores the timestamps
+ @param timestamps comma separated list of timestamps of archives.
+ The order should be the same as the order in which the archives are added.]]>
+      </doc>
+    </method>
+    <method name="setFileTimestamps"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="timestamps" type="java.lang.String"/>
+      <doc>
+      <![CDATA[This is to check the timestamp of the files to be localized.
+ Used by internal MapReduce code.
+ @param conf Configuration which stores the timestamps
+ @param timestamps comma separated list of timestamps of files.
+ The order should be the same as the order in which the files are added.]]>
+      </doc>
+    </method>
+    <method name="setLocalArchives"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="str" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the conf to contain the location for localized archives.  Used
+ by internal DistributedCache code.
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local archives]]>
+      </doc>
+    </method>
+    <method name="setLocalFiles"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="str" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the conf to contain the location for localized files.  Used
+ by internal DistributedCache code.
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local files]]>
+      </doc>
+    </method>
+    <field name="CACHE_FILES_SIZES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_FILES_SIZES} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_FILES_SIZES}]]>
+      </doc>
+    </field>
+    <field name="CACHE_ARCHIVES_SIZES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_ARCHIVES_SIZES} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_ARCHIVES_SIZES}]]>
+      </doc>
+    </field>
+    <field name="CACHE_ARCHIVES_TIMESTAMPS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_ARCHIVES_TIMESTAMPS} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_ARCHIVES_TIMESTAMPS}]]>
+      </doc>
+    </field>
+    <field name="CACHE_FILES_TIMESTAMPS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_FILES_TIMESTAMPS} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_FILE_TIMESTAMPS}]]>
+      </doc>
+    </field>
+    <field name="CACHE_ARCHIVES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_ARCHIVES} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_ARCHIVES}]]>
+      </doc>
+    </field>
+    <field name="CACHE_FILES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_FILES} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_FILES}]]>
+      </doc>
+    </field>
+    <field name="CACHE_LOCALARCHIVES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_LOCALARCHIVES} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_LOCALARCHIVES}]]>
+      </doc>
+    </field>
+    <field name="CACHE_LOCALFILES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_LOCALFILES} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_LOCALFILES}]]>
+      </doc>
+    </field>
+    <field name="CACHE_SYMLINK" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_SYMLINK} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_SYMLINK}]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[Distribute application-specific large, read-only files efficiently.
+ 
+ <p><code>DistributedCache</code> is a facility provided by the Map-Reduce
+ framework to cache files (text, archives, jars etc.) needed by applications.
+ </p>
+ 
+ <p>Applications specify the files to be cached via urls (hdfs:// or http://)
+ through the {@link org.apache.hadoop.mapred.JobConf}. The
+ <code>DistributedCache</code> assumes that the files specified via urls are
+ already present on the {@link FileSystem} at the path specified by the url
+ and are accessible by every machine in the cluster.</p>
+ 
+ <p>The framework will copy the necessary files on to the slave node before 
+ any tasks for the job are executed on that node. Its efficiency stems from 
+ the fact that the files are only copied once per job and the ability to 
+ cache archives which are un-archived on the slaves.</p> 
+
+ <p><code>DistributedCache</code> can be used to distribute simple, read-only
+ data/text files and/or more complex types such as archives, jars etc. 
+ Archives (zip, tar and tgz/tar.gz files) are un-archived at the slave nodes. 
+ Jars may be optionally added to the classpath of the tasks, a rudimentary 
+ software distribution mechanism.  Files have execution permissions.
+ In older version of Hadoop Map/Reduce users could optionally ask for symlinks
+ to be created in the working directory of the child task.  In the current 
+ version symlinks are always created.  If the URL does not have a fragment 
+ the name of the file or directory will be used. If multiple files or 
+ directories map to the same link name, the last one added, will be used.  All
+ others will not even be downloaded.</p>
+ 
+ <p><code>DistributedCache</code> tracks modification timestamps of the cache 
+ files. Clearly the cache files should not be modified by the application 
+ or externally while the job is executing.</p>
+ 
+ <p>Here is an illustrative example on how to use the 
+ <code>DistributedCache</code>:</p>
+ <p><blockquote><pre>
+     // Setting up the cache for the application
+     
+     1. Copy the requisite files to the <code>FileSystem</code>:
+     
+     $ bin/hadoop fs -copyFromLocal lookup.dat /myapp/lookup.dat  
+     $ bin/hadoop fs -copyFromLocal map.zip /myapp/map.zip  
+     $ bin/hadoop fs -copyFromLocal mylib.jar /myapp/mylib.jar
+     $ bin/hadoop fs -copyFromLocal mytar.tar /myapp/mytar.tar
+     $ bin/hadoop fs -copyFromLocal mytgz.tgz /myapp/mytgz.tgz
+     $ bin/hadoop fs -copyFromLocal mytargz.tar.gz /myapp/mytargz.tar.gz
+     
+     2. Setup the application's <code>JobConf</code>:
+     
+     JobConf job = new JobConf();
+     DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"), 
+                                   job);
+     DistributedCache.addCacheArchive(new URI("/myapp/map.zip", job);
+     DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), job);
+     DistributedCache.addCacheArchive(new URI("/myapp/mytar.tar", job);
+     DistributedCache.addCacheArchive(new URI("/myapp/mytgz.tgz", job);
+     DistributedCache.addCacheArchive(new URI("/myapp/mytargz.tar.gz", job);
+     
+     3. Use the cached files in the {@link org.apache.hadoop.mapred.Mapper}
+     or {@link org.apache.hadoop.mapred.Reducer}:
+     
+     public static class MapClass extends MapReduceBase  
+     implements Mapper&lt;K, V, K, V&gt; {
+     
+       private Path[] localArchives;
+       private Path[] localFiles;
+       
+       public void configure(JobConf job) {
+         // Get the cached archives/files
+         File f = new File("./map.zip/some/file/in/zip.txt");
+       }
+       
+       public void map(K key, V value, 
+                       OutputCollector&lt;K, V&gt; output, Reporter reporter) 
+       throws IOException {
+         // Use data from the cached archives/files here
+         // ...
+         // ...
+         output.collect(k, v);
+       }
+     }
+     
+ </pre></blockquote>
+
+ It is also very common to use the DistributedCache by using
+ {@link org.apache.hadoop.util.GenericOptionsParser}.
+
+ This class includes methods that should be used by users
+ (specifically those mentioned in the example above, as well
+ as {@link DistributedCache#addArchiveToClassPath(Path, Configuration)}),
+ as well as methods intended for use by the MapReduce framework
+ (e.g., {@link org.apache.hadoop.mapred.JobClient}).
+
+ @see org.apache.hadoop.mapred.JobConf
+ @see org.apache.hadoop.mapred.JobClient
+ @see org.apache.hadoop.mapreduce.Job]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.filecache.DistributedCache -->
+</package>
+<package name="org.apache.hadoop.mapred">
+  <!-- start class org.apache.hadoop.mapred.ClusterStatus -->
+  <class name="ClusterStatus" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <method name="getTaskTrackers" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of task trackers in the cluster.
+ 
+ @return the number of task trackers in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getActiveTrackerNames" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the names of active task trackers in the cluster.
+ 
+ @return the active task trackers in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getBlacklistedTrackerNames" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the names of blacklisted task trackers in the cluster.
+ 
+ @return the blacklisted task trackers in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getGraylistedTrackerNames" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the names of graylisted task trackers in the cluster.
+
+ The gray list of trackers is no longer available on M/R 2.x. The function
+ is kept to be compatible with M/R 1.x applications.
+
+ @return an empty collection of graylisted task trackers in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getGraylistedTrackers" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of graylisted task trackers in the cluster.
+
+ The gray list of trackers is no longer available on M/R 2.x. The function
+ is kept to be compatible with M/R 1.x applications.
+
+ @return 0, as there are no graylisted task trackers in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getBlacklistedTrackers" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of blacklisted task trackers in the cluster.
+ 
+ @return the number of blacklisted task trackers in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getNumExcludedNodes" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of excluded hosts in the cluster.
+ @return the number of excluded hosts in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getTTExpiryInterval" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the tasktracker expiry interval for the cluster
+ @return the expiry interval in msec]]>
+      </doc>
+    </method>
+    <method name="getMapTasks" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of currently running map tasks in the cluster.
+ 
+ @return the number of currently running map tasks in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getReduceTasks" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of currently running reduce tasks in the cluster.
+ 
+ @return the number of currently running reduce tasks in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getMaxMapTasks" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the maximum capacity for running map tasks in the cluster.
+ 
+ @return the maximum capacity for running map tasks in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getMaxReduceTasks" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the maximum capacity for running reduce tasks in the cluster.
+ 
+ @return the maximum capacity for running reduce tasks in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getJobTrackerStatus" return="org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the JobTracker's status.
+ 
+ @return {@link JobTrackerStatus} of the JobTracker]]>
+      </doc>
+    </method>
+    <method name="getMaxMemory" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns UNINITIALIZED_MEMORY_VALUE (-1)]]>
+      </doc>
+    </method>
+    <method name="getUsedMemory" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns UNINITIALIZED_MEMORY_VALUE (-1)]]>
+      </doc>
+    </method>
+    <method name="getBlackListedTrackersInfo" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Gets the list of blacklisted trackers along with reasons for blacklisting.
+ 
+ @return the collection of {@link BlackListInfo} objects.]]>
+      </doc>
+    </method>
+    <method name="getJobTrackerState" return="org.apache.hadoop.mapred.JobTracker.State"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the current state of the <code>JobTracker</code>,
+ as {@link JobTracker.State}
+
+ {@link JobTracker.State} should no longer be used on M/R 2.x. The function
+ is kept to be compatible with M/R 1.x applications.
+
+ @return the invalid state of the <code>JobTracker</code>.]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="UNINITIALIZED_MEMORY_VALUE" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Status information on the current state of the Map-Reduce cluster.
+ 
+ <p><code>ClusterStatus</code> provides clients with information such as:
+ <ol>
+   <li>
+   Size of the cluster. 
+   </li>
+   <li>
+   Name of the trackers. 
+   </li>
+   <li>
+   Task capacity of the cluster. 
+   </li>
+   <li>
+   The number of currently running map and reduce tasks.
+   </li>
+   <li>
+   State of the <code>JobTracker</code>.
+   </li>
+   <li>
+   Details regarding black listed trackers.
+   </li>
+ </ol>
+ 
+ <p>Clients can query for the latest <code>ClusterStatus</code>, via 
+ {@link JobClient#getClusterStatus()}.</p>
+ 
+ @see JobClient]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.ClusterStatus -->
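
A minimal sketch, not part of this patch, of how a client might exercise the
ClusterStatus accessors listed above, assuming a JobConf that resolves to a
running cluster:

    import org.apache.hadoop.mapred.ClusterStatus;
    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;

    public class ClusterStatusProbe {
      public static void main(String[] args) throws Exception {
        // Connect using whatever cluster the default configuration points at.
        JobClient client = new JobClient(new JobConf());
        ClusterStatus status = client.getClusterStatus();
        System.out.println("map capacity:    " + status.getMaxMapTasks());
        System.out.println("reduce capacity: " + status.getMaxReduceTasks());
        System.out.println("tracker status:  " + status.getJobTrackerStatus());
      }
    }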
+  <!-- start class org.apache.hadoop.mapred.Counters -->
+  <class name="Counters" extends="org.apache.hadoop.mapreduce.counters.AbstractCounters"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="Counters"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="Counters" type="org.apache.hadoop.mapreduce.Counters"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getGroup" return="org.apache.hadoop.mapred.Counters.Group"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="groupName" type="java.lang.String"/>
+    </method>
+    <method name="getGroupNames" return="java.util.Collection"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="makeCompactString" return="java.lang.String"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="group" type="java.lang.String"/>
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="use {@link #findCounter(String, String)} instead">
+      <param name="group" type="java.lang.String"/>
+      <param name="id" type="int"/>
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Find a counter by using strings
+ @param group the name of the group
+ @param id the id of the counter within the group (0 to N-1)
+ @param name the internal name of the counter
+ @return the counter for that name
+ @deprecated use {@link #findCounter(String, String)} instead]]>
+      </doc>
+    </method>
+    <method name="incrCounter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.Enum"/>
+      <param name="amount" type="long"/>
+      <doc>
+      <![CDATA[Increments the specified counter by the specified amount, creating it if
+ it didn't already exist.
+ @param key identifies a counter
+ @param amount amount by which counter is to be incremented]]>
+      </doc>
+    </method>
+    <method name="incrCounter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="group" type="java.lang.String"/>
+      <param name="counter" type="java.lang.String"/>
+      <param name="amount" type="long"/>
+      <doc>
+      <![CDATA[Increments the specified counter by the specified amount, creating it if
+ it didn't already exist.
+ @param group the name of the group
+ @param counter the internal name of the counter
+ @param amount amount by which counter is to be incremented]]>
+      </doc>
+    </method>
+    <method name="getCounter" return="long"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.Enum"/>
+      <doc>
+      <![CDATA[Returns current value of the specified counter, or 0 if the counter
+ does not exist.
+ @param key the counter enum to lookup
+ @return the counter value or 0 if counter not found]]>
+      </doc>
+    </method>
+    <method name="incrAllCounters"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.mapred.Counters"/>
+      <doc>
+      <![CDATA[Increments multiple counters by their amounts in another Counters
+ instance.
+ @param other the other Counters instance]]>
+      </doc>
+    </method>
+    <method name="size" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="use {@link #countCounters()} instead">
+      <doc>
+      <![CDATA[@return the total number of counters
+ @deprecated use {@link #countCounters()} instead]]>
+      </doc>
+    </method>
+    <method name="sum" return="org.apache.hadoop.mapred.Counters"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="a" type="org.apache.hadoop.mapred.Counters"/>
+      <param name="b" type="org.apache.hadoop.mapred.Counters"/>
+      <doc>
+      <![CDATA[Convenience method for computing the sum of two sets of counters.
+ @param a the first counters
+ @param b the second counters
+ @return a new summed counters object]]>
+      </doc>
+    </method>
+    <method name="log"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="log" type="org.apache.commons.logging.Log"/>
+      <doc>
+      <![CDATA[Logs the current counter values.
+ @param log The log to use.]]>
+      </doc>
+    </method>
+    <method name="makeEscapedCompactString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Represent the counter in a textual format that can be converted back to
+ its object form
+ @return the string in the following format
+ {(groupName)(group-displayName)[(counterName)(displayName)(value)][]*}*]]>
+      </doc>
+    </method>
+    <method name="fromEscapedCompactString" return="org.apache.hadoop.mapred.Counters"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="compactString" type="java.lang.String"/>
+      <exception name="ParseException" type="java.text.ParseException"/>
+      <doc>
+      <![CDATA[Convert a stringified (by {@link #makeEscapedCompactString()} counter
+ representation into a counter object.
+ @param compactString to parse
+ @return a new counters object
+ @throws ParseException]]>
+      </doc>
+    </method>
+    <field name="MAX_COUNTER_LIMIT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="MAX_GROUP_LIMIT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A set of named counters.
+
+ <p><code>Counters</code> represent global counters, defined either by the
+ Map-Reduce framework or applications. Each <code>Counter</code> can be of
+ any {@link Enum} type.</p>
+
+ <p><code>Counters</code> are bunched into {@link Group}s, each comprising
+ counters from a particular <code>Enum</code> class.</p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.Counters -->
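
A hedged sketch of the counter operations documented above, runnable outside
a job; the enum and group names are illustrative only:

    import org.apache.hadoop.mapred.Counters;

    public class CountersDemo {
      enum MyCounter { RECORDS_SEEN }   // hypothetical counter key

      public static void main(String[] args) {
        Counters counters = new Counters();
        counters.incrCounter(MyCounter.RECORDS_SEEN, 10);    // enum-keyed
        counters.incrCounter("demo", "records.skipped", 2);  // group/name-keyed
        // getCounter returns 0 for keys that were never incremented.
        System.out.println(counters.getCounter(MyCounter.RECORDS_SEEN)); // 10
        System.out.println(counters.makeCompactString());
      }
    }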
+  <!-- start class org.apache.hadoop.mapred.Counters.Counter -->
+  <class name="Counters.Counter" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.Counter"/>
+    <constructor name="Counters.Counter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setDisplayName"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="displayName" type="java.lang.String"/>
+    </method>
+    <method name="getName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDisplayName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getValue" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setValue"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="long"/>
+    </method>
+    <method name="increment"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="incr" type="long"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="makeEscapedCompactString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the compact stringified version of the counter in the format
+ [(actual-name)(display-name)(value)]
+ @return the stringified result]]>
+      </doc>
+    </method>
+    <method name="contentEquals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="deprecated, no comment">
+      <param name="counter" type="org.apache.hadoop.mapred.Counters.Counter"/>
+      <doc>
+      <![CDATA[Checks for (content) equality of two (basic) counters
+ @param counter to compare
+ @return true if content equals
+ @deprecated]]>
+      </doc>
+    </method>
+    <method name="getCounter" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the value of the counter]]>
+      </doc>
+    </method>
+    <method name="getUnderlyingCounter" return="org.apache.hadoop.mapreduce.Counter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="genericRight" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[A counter record, comprising its name and value.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.Counters.Counter -->
+  <!-- start class org.apache.hadoop.mapred.Counters.Group -->
+  <class name="Counters.Group" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.counters.CounterGroupBase"/>
+    <constructor name="Counters.Group"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getCounter" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="counterName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[@param counterName the name of the counter
+ @return the value of the specified counter, or 0 if the counter does
+ not exist.]]>
+      </doc>
+    </method>
+    <method name="makeEscapedCompactString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the compact stringified version of the group in the format
+ {(actual-name)(display-name)(value)[][][]} where [] are compact strings
+ for the counters within.]]>
+      </doc>
+    </method>
+    <method name="getCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="use {@link #findCounter(String)} instead">
+      <param name="id" type="int"/>
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the counter for the given id and create it if it doesn't exist.
+ @param id the numeric id of the counter within the group
+ @param name the internal counter name
+ @return the counter
+ @deprecated use {@link #findCounter(String)} instead]]>
+      </doc>
+    </method>
+    <method name="getCounterForName" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the counter for the given name and create it if it doesn't exist.
+ @param name the internal counter name
+ @return the counter]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="iterator" return="java.util.Iterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDisplayName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setDisplayName"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="displayName" type="java.lang.String"/>
+    </method>
+    <method name="addCounter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="counter" type="org.apache.hadoop.mapred.Counters.Counter"/>
+    </method>
+    <method name="addCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="displayName" type="java.lang.String"/>
+      <param name="value" type="long"/>
+    </method>
+    <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="counterName" type="java.lang.String"/>
+      <param name="displayName" type="java.lang.String"/>
+    </method>
+    <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="counterName" type="java.lang.String"/>
+      <param name="create" type="boolean"/>
+    </method>
+    <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="counterName" type="java.lang.String"/>
+    </method>
+    <method name="size" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="incrAllCounters"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="rightGroup" type="org.apache.hadoop.mapreduce.counters.CounterGroupBase"/>
+    </method>
+    <method name="getUnderlyingGroup" return="org.apache.hadoop.mapreduce.counters.CounterGroupBase"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="genericRight" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[<code>Group</code> of counters, comprising counters from a particular
+  counter {@link Enum} class.
+
+  <p><code>Group</code> handles localization of the class name and the
+  counter names.</p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.Counters.Group -->
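
A small sketch of group-level access using only the methods listed above;
the "demo" group and counter names are illustrative:

    import org.apache.hadoop.mapred.Counters;

    public class GroupDemo {
      public static void main(String[] args) {
        Counters counters = new Counters();
        counters.incrCounter("demo", "records.skipped", 2);
        Counters.Group group = counters.getGroup("demo");
        // Long-valued lookup by counter name, 0 if absent.
        System.out.println(group.getCounter("records.skipped"));
        // Object lookup, creating the counter if it doesn't exist.
        Counters.Counter c = group.getCounterForName("records.skipped");
        System.out.println(c.getName() + " = " + c.getValue());
      }
    }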
+  <!-- start class org.apache.hadoop.mapred.FileAlreadyExistsException -->
+  <class name="FileAlreadyExistsException" extends="java.io.IOException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FileAlreadyExistsException"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FileAlreadyExistsException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[Used when the target file already exists for any operation and
+ is not configured to be overwritten.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.FileAlreadyExistsException -->
+  <!-- start class org.apache.hadoop.mapred.FileInputFormat -->
+  <class name="FileInputFormat" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.InputFormat"/>
+    <constructor name="FileInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setMinSplitSize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="minSplitSize" type="long"/>
+    </method>
+    <method name="isSplitable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="filename" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Is the given filename splittable? Usually true, but if the file is
+ stream compressed, it will not be.
+
+ The default implementation in <code>FileInputFormat</code> always returns
+ true. Implementations that may deal with non-splittable files <i>must</i>
+ override this method.
+
+ <code>FileInputFormat</code> implementations can override this and return
+ <code>false</code> to ensure that individual input files are never split-up
+ so that {@link Mapper}s process entire files.
+ 
+ @param fs the file system that the file is on
+ @param filename the file name to check
+ @return is this file splittable?]]>
+      </doc>
+    </method>
+    <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setInputPathFilter"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="filter" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set a PathFilter to be applied to the input paths for the map-reduce job.
+
+ @param filter the PathFilter class use for filtering the input paths.]]>
+      </doc>
+    </method>
+    <method name="getInputPathFilter" return="org.apache.hadoop.fs.PathFilter"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Get a PathFilter instance of the filter set for the input paths.
+
+ @return the PathFilter instance set for the job, NULL if none has been set.]]>
+      </doc>
+    </method>
+    <method name="addInputPathRecursively"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="result" type="java.util.List"/>
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="inputFilter" type="org.apache.hadoop.fs.PathFilter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Add files in the input path recursively into the results.
+ @param result
+          The List to store all files.
+ @param fs
+          The FileSystem.
+ @param path
+          The input path.
+ @param inputFilter
+          The input filter that can be used to filter files/dirs. 
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[List input directories.
+ Subclasses may override to, e.g., select only files matching a regular
+ expression. 
+ 
+ @param job the job to list input paths for
+ @return array of FileStatus objects
+ @throws IOException if zero input items are found.]]>
+      </doc>
+    </method>
+    <method name="makeSplit" return="org.apache.hadoop.mapred.FileSplit"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <param name="start" type="long"/>
+      <param name="length" type="long"/>
+      <param name="hosts" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[A factory that makes the split for this class. It can be overridden
+ by sub-classes to make sub-types.]]>
+      </doc>
+    </method>
+    <method name="makeSplit" return="org.apache.hadoop.mapred.FileSplit"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <param name="start" type="long"/>
+      <param name="length" type="long"/>
+      <param name="hosts" type="java.lang.String[]"/>
+      <param name="inMemoryHosts" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[A factory that makes the split for this class. It can be overridden
+ by sub-classes to make sub-types.]]>
+      </doc>
+    </method>
+    <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="numSplits" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Splits files returned by {@link #listStatus(JobConf)} when
+ they're too big.]]>
+      </doc>
+    </method>
+    <method name="computeSplitSize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="goalSize" type="long"/>
+      <param name="minSize" type="long"/>
+      <param name="blockSize" type="long"/>
+    </method>
+    <method name="getBlockIndex" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="blkLocations" type="org.apache.hadoop.fs.BlockLocation[]"/>
+      <param name="offset" type="long"/>
+    </method>
+    <method name="setInputPaths"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="commaSeparatedPaths" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Sets the given comma separated paths as the list of inputs 
+ for the map-reduce job.
+ 
+ @param conf Configuration of the job
+ @param commaSeparatedPaths Comma separated paths to be set as 
+        the list of inputs for the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="addInputPaths"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="commaSeparatedPaths" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add the given comma separated paths to the list of inputs for
+  the map-reduce job.
+ 
+ @param conf The configuration of the job 
+ @param commaSeparatedPaths Comma separated paths to be added to
+        the list of inputs for the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="setInputPaths"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="inputPaths" type="org.apache.hadoop.fs.Path[]"/>
+      <doc>
+      <![CDATA[Set the array of {@link Path}s as the list of inputs
+ for the map-reduce job.
+ 
+ @param conf Configuration of the job. 
+ @param inputPaths the {@link Path}s of the input directories/files 
+ for the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="addInputPath"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Add a {@link Path} to the list of inputs for the map-reduce job.
+ 
+ @param conf The configuration of the job 
+ @param path {@link Path} to be added to the list of inputs for 
+            the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="getInputPaths" return="org.apache.hadoop.fs.Path[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Get the list of input {@link Path}s for the map-reduce job.
+ 
+ @param conf The configuration of the job 
+ @return the list of input {@link Path}s for the map-reduce job.]]>
+      </doc>
+    </method>
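
A hedged sketch of the static input-path helpers documented above; the paths
are placeholders:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.FileInputFormat;
    import org.apache.hadoop.mapred.JobConf;

    public class InputPathSetup {
      public static void main(String[] args) {
        JobConf conf = new JobConf();
        FileInputFormat.setInputPaths(conf, new Path("/data/in1")); // replaces
        FileInputFormat.addInputPath(conf, new Path("/data/in2"));  // appends
        for (Path p : FileInputFormat.getInputPaths(conf)) {
          System.out.println(p);
        }
      }
    }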
+    <method name="getSplitHosts" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="blkLocations" type="org.apache.hadoop.fs.BlockLocation[]"/>
+      <param name="offset" type="long"/>
+      <param name="splitSize" type="long"/>
+      <param name="clusterMap" type="org.apache.hadoop.net.NetworkTopology"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This function identifies and returns the hosts that contribute
+ most to a given split. For calculating the contribution, rack
+ locality is treated on par with host locality, so hosts from racks
+ that contribute the most are preferred over hosts on racks that
+ contribute less.
+ @param blkLocations The list of block locations
+ @param offset 
+ @param splitSize 
+ @return an array of hosts that contribute most to this split
+ @throws IOException]]>
+      </doc>
+    </method>
+    <field name="LOG" type="org.apache.commons.logging.Log"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NUM_INPUT_FILES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="INPUT_DIR_RECURSIVE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A base class for file-based {@link InputFormat}.
+ 
+ <p><code>FileInputFormat</code> is the base class for all file-based 
+ <code>InputFormat</code>s. This provides a generic implementation of
+ {@link #getSplits(JobConf, int)}.
+
+ Implementations of <code>FileInputFormat</code> can also override the
+ {@link #isSplitable(FileSystem, Path)} method to prevent input files
+ from being split-up in certain situations. Implementations that may
+ deal with non-splittable files <i>must</i> override this method, since
+ the default implementation assumes splitting is always possible.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.FileInputFormat -->
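
As the class doc notes, implementations handling non-splittable files must
override isSplitable. A hedged sketch of such a subclass (the class name is
illustrative; it reuses the stock LineRecordReader):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.*;

    public class WholeFileTextInputFormat
        extends FileInputFormat<LongWritable, Text> {
      @Override
      protected boolean isSplitable(FileSystem fs, Path filename) {
        return false;  // one map task per file, never split
      }

      @Override
      public RecordReader<LongWritable, Text> getRecordReader(
          InputSplit split, JobConf job, Reporter reporter) throws IOException {
        return new LineRecordReader(job, (FileSplit) split);
      }
    }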
+  <!-- start class org.apache.hadoop.mapred.FileOutputCommitter -->
+  <class name="FileOutputCommitter" extends="org.apache.hadoop.mapred.OutputCommitter"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FileOutputCommitter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getWorkPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+      <param name="outputPath" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setupJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="commitJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="cleanupJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="abortJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.JobContext"/>
+      <param name="runState" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setupTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="commitTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="abortTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="needsTaskCommit" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="isRecoverySupported" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isCommitJobRepeatable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="isRecoverySupported" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="recoverTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="LOG" type="org.apache.commons.logging.Log"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TEMP_DIR_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Temporary directory name]]>
+      </doc>
+    </field>
+    <field name="SUCCEEDED_FILE_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[An {@link OutputCommitter} that commits files specified
+ in the job output directory, i.e. ${mapreduce.output.fileoutputformat.outputdir}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.FileOutputCommitter -->
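
A brief sketch making the committer's role concrete, assuming the old-API
JobConf wiring:

    import org.apache.hadoop.mapred.FileOutputCommitter;
    import org.apache.hadoop.mapred.JobConf;

    public class CommitterSetup {
      public static void main(String[] args) {
        JobConf conf = new JobConf();
        // FileOutputCommitter is the default; setting it is shown explicitly.
        conf.setOutputCommitter(FileOutputCommitter.class);
        System.out.println(FileOutputCommitter.TEMP_DIR_NAME); // "_temporary"
      }
    }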
+  <!-- start class org.apache.hadoop.mapred.FileOutputFormat -->
+  <class name="FileOutputFormat" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.OutputFormat"/>
+    <constructor name="FileOutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setCompressOutput"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="compress" type="boolean"/>
+      <doc>
+      <![CDATA[Set whether the output of the job is compressed.
+ @param conf the {@link JobConf} to modify
+ @param compress should the output of the job be compressed?]]>
+      </doc>
+    </method>
+    <method name="getCompressOutput" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Is the job output compressed?
+ @param conf the {@link JobConf} to look in
+ @return <code>true</code> if the job output should be compressed,
+         <code>false</code> otherwise]]>
+      </doc>
+    </method>
+    <method name="setOutputCompressorClass"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="codecClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the {@link CompressionCodec} to be used to compress job outputs.
+ @param conf the {@link JobConf} to modify
+ @param codecClass the {@link CompressionCodec} to be used to
+                   compress the job outputs]]>
+      </doc>
+    </method>
+    <method name="getOutputCompressorClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="defaultValue" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Get the {@link CompressionCodec} for compressing the job outputs.
+ @param conf the {@link JobConf} to look in
+ @param defaultValue the {@link CompressionCodec} to return if not set
+ @return the {@link CompressionCodec} to be used to compress the 
+         job outputs
+ @throws IllegalArgumentException if the class was specified, but not found]]>
+      </doc>
+    </method>
+    <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="checkOutputSpecs"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <exception name="FileAlreadyExistsException" type="org.apache.hadoop.mapred.FileAlreadyExistsException"/>
+      <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setOutputPath"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="outputDir" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Set the {@link Path} of the output directory for the map-reduce job.
+
+ @param conf The configuration of the job.
+ @param outputDir the {@link Path} of the output directory for 
+ the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="getOutputPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Get the {@link Path} to the output directory for the map-reduce job.
+ 
+ @return the {@link Path} to the output directory for the map-reduce job.
+ @see FileOutputFormat#getWorkOutputPath(JobConf)]]>
+      </doc>
+    </method>
+    <method name="getWorkOutputPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Get the {@link Path} to the task's temporary output directory 
+  for the map-reduce job
+  
+ <b id="SideEffectFiles">Tasks' Side-Effect Files</b>
+ 
+ <p><i>Note:</i> The following is valid only if the {@link OutputCommitter}
+  is {@link FileOutputCommitter}. If <code>OutputCommitter</code> is not 
+  a <code>FileOutputCommitter</code>, the task's temporary output
+  directory is same as {@link #getOutputPath(JobConf)} i.e.
+  <tt>${mapreduce.output.fileoutputformat.outputdir}</tt></p>
+  
+ <p>Some applications need to create/write-to side-files, which differ from
+ the actual job-outputs.
+ 
+ <p>In such cases there could be issues with 2 instances of the same TIP 
+ (running simultaneously e.g. speculative tasks) trying to open/write-to the
+ same file (path) on HDFS. Hence the application-writer will have to pick 
+ unique names per task-attempt (e.g. using the attemptid, say 
+ <tt>attempt_200709221812_0001_m_000000_0</tt>), not just per TIP.</p> 
+ 
+ <p>To get around this the Map-Reduce framework helps the application-writer 
+ out by maintaining a special 
+ <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}</tt> 
+ sub-directory for each task-attempt on HDFS where the output of the 
+ task-attempt goes. On successful completion of the task-attempt the files 
+ in the <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}</tt> (only) 
+ are <i>promoted</i> to <tt>${mapreduce.output.fileoutputformat.outputdir}</tt>. Of course, the 
+ framework discards the sub-directory of unsuccessful task-attempts. This 
+ is completely transparent to the application.</p>
+ 
+ <p>The application-writer can take advantage of this by creating any
+ side-files required in <tt>${mapreduce.task.output.dir}</tt> during execution
+ of a reduce-task, i.e. via {@link #getWorkOutputPath(JobConf)}, and the
+ framework will move them out similarly, so the writer doesn't have to pick
+ unique paths per task-attempt.</p>
+ 
+ <p><i>Note</i>: the value of <tt>${mapreduce.task.output.dir}</tt> during 
+ execution of a particular task-attempt is actually 
+ <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}</tt>, and this value is
+ set by the map-reduce framework. So, just create any side-files in the 
+ path  returned by {@link #getWorkOutputPath(JobConf)} from map/reduce 
+ task to take advantage of this feature.</p>
+ 
+ <p>The entire discussion holds true for maps of jobs with 
+ reducer=NONE (i.e. 0 reduces) since output of the map, in that case, 
+ goes directly to HDFS.</p> 
+ 
+ @return the {@link Path} to the task's temporary output directory 
+ for the map-reduce job.]]>
+      </doc>
+    </method>
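
A hedged sketch of the side-effect-file pattern described above: write a
per-task file under the work output path and let the committer promote it.
The file name and payload are placeholders:

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.FileOutputFormat;
    import org.apache.hadoop.mapred.JobConf;

    public class SideFileWriter {
      public static void writeSideFile(JobConf job) throws IOException {
        Path workDir = FileOutputFormat.getWorkOutputPath(job);
        Path side = new Path(workDir, "stats.side");  // illustrative name
        FileSystem fs = side.getFileSystem(job);
        try (FSDataOutputStream out = fs.create(side)) {
          out.writeUTF("records=42");                 // placeholder payload
        }
      }
    }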
+    <method name="getTaskOutputPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Helper function to create the task's temporary output directory and 
+ return the path to the task's output file.
+ 
+ @param conf job-configuration
+ @param name temporary task-output filename
+ @return path to the task's temporary output file
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getUniqueName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Helper function to generate a name that is unique for the task.
+
+ <p>The generated name can be used to create custom files from within the
+ different tasks for the job, the names for different tasks will not collide
+ with each other.</p>
+
+ <p>The given name is postfixed with the task type, 'm' for maps, 'r' for
+ reduces, and the task partition number. For example, given the name 'test'
+ running on the first map of the job, the generated name will be
+ 'test-m-00000'.</p>
+
+ @param conf the configuration for the job.
+ @param name the name to make unique.
+ @return a unique name across all tasks of the job.]]>
+      </doc>
+    </method>
+    <method name="getPathForCustomFile" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Helper function to generate a {@link Path} for a file that is unique for
+ the task within the job output directory.
+
+ <p>The path can be used to create custom files from within the map and
+ reduce tasks. The path name will be unique for each task. The path parent
+ will be the job output directory.</p>
+
+ <p>This method uses the {@link #getUniqueName} method to make the file name
+ unique for the task.</p>
+
+ @param conf the configuration for the job.
+ @param name the name for the file.
+ @return a unique path across all tasks of the job.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A base class for {@link OutputFormat}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.FileOutputFormat -->
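
A hedged sketch of the static output helpers documented above, applied to a
JobConf; the output path is a placeholder:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.compress.GzipCodec;
    import org.apache.hadoop.mapred.FileOutputFormat;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.TextOutputFormat;

    public class OutputSetup {
      public static void main(String[] args) {
        JobConf conf = new JobConf();
        conf.setOutputFormat(TextOutputFormat.class);
        FileOutputFormat.setOutputPath(conf, new Path("/tmp/demo-out"));
        FileOutputFormat.setCompressOutput(conf, true);
        FileOutputFormat.setOutputCompressorClass(conf, GzipCodec.class);
      }
    }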
+  <!-- start class org.apache.hadoop.mapred.FileSplit -->
+  <class name="FileSplit" extends="org.apache.hadoop.mapreduce.InputSplit"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.InputSplitWithLocationInfo"/>
+    <constructor name="FileSplit"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, org.apache.hadoop.mapred.JobConf"
+      static="false" final="false" visibility="public"
+      deprecated="deprecated, no comment">
+      <doc>
+      <![CDATA[Constructs a split.
+ @deprecated
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process]]>
+      </doc>
+    </constructor>
+    <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, java.lang.String[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs a split with host information
+
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process
+ @param hosts the list of hosts containing the block, possibly null]]>
+      </doc>
+    </constructor>
+    <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, java.lang.String[], java.lang.String[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs a split with host information
+
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process
+ @param hosts the list of hosts containing the block, possibly null
+ @param inMemoryHosts the list of hosts containing the block in memory]]>
+      </doc>
+    </constructor>
+    <constructor name="FileSplit" type="org.apache.hadoop.mapreduce.lib.input.FileSplit"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The file containing this split's data.]]>
+      </doc>
+    </method>
+    <method name="getStart" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The position of the first byte in the file to process.]]>
+      </doc>
+    </method>
+    <method name="getLength" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The number of bytes in the file to process.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getLocations" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getLocationInfo" return="org.apache.hadoop.mapred.SplitLocationInfo[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[A section of an input file.  Returned by {@link
+ InputFormat#getSplits(JobConf, int)} and passed to
+ {@link InputFormat#getRecordReader(InputSplit,JobConf,Reporter)}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.FileSplit -->
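
A small sketch constructing a FileSplit with host hints, per the
constructors listed above; the path, offsets, and host names are
illustrative:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.FileSplit;

    public class SplitDemo {
      public static void main(String[] args) throws Exception {
        FileSplit split = new FileSplit(
            new Path("/data/input.txt"),       // file
            0L,                                // start offset
            64L * 1024 * 1024,                 // length: 64 MB
            new String[] {"host1", "host2"});  // hosts holding the block
        System.out.println(split.getPath() + " @" + split.getStart()
            + " len=" + split.getLength());
      }
    }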
+  <!-- start class org.apache.hadoop.mapred.FixedLengthInputFormat -->
+  <class name="FixedLengthInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+    <constructor name="FixedLengthInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setRecordLength"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="recordLength" type="int"/>
+      <doc>
+      <![CDATA[Set the length of each record
+ @param conf configuration
+ @param recordLength the length of a record]]>
+      </doc>
+    </method>
+    <method name="getRecordLength" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get record length value
+ @param conf configuration
+ @return the record length, zero means none was set]]>
+      </doc>
+    </method>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+    </method>
+    <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="isSplitable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <field name="FIXED_RECORD_LENGTH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[FixedLengthInputFormat is an input format used to read input files
+ which contain fixed length records.  The content of a record need not be
+ text.  It can be arbitrary binary data.  Users must configure the record
+ length property by calling:
+ FixedLengthInputFormat.setRecordLength(conf, recordLength);<br><br> or
+ conf.setInt(FixedLengthInputFormat.FIXED_RECORD_LENGTH, recordLength);
+ <br><br>
+ @see FixedLengthRecordReader]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.FixedLengthInputFormat -->
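
A minimal sketch of the configuration step the class doc above describes; the FixedLengthJobSetup wrapper is an illustrative name:

    import org.apache.hadoop.mapred.FixedLengthInputFormat;
    import org.apache.hadoop.mapred.JobConf;

    public class FixedLengthJobSetup {
      public static JobConf configure(int recordLength) {
        JobConf conf = new JobConf();
        // Equivalent to conf.setInt(FixedLengthInputFormat.FIXED_RECORD_LENGTH,
        // recordLength); records are raw bytes, so no text decoding is implied.
        FixedLengthInputFormat.setRecordLength(conf, recordLength);
        conf.setInputFormat(FixedLengthInputFormat.class);
        return conf;
      }
    }
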
+  <!-- start class org.apache.hadoop.mapred.ID -->
+  <class name="ID" extends="org.apache.hadoop.mapreduce.ID"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ID" type="int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[constructs an ID object from the given int]]>
+      </doc>
+    </constructor>
+    <constructor name="ID"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </const

<TRUNCATED>


[22/49] hadoop git commit: HDFS-12949. Fix findbugs warning in ImageWriter.java.

Posted by as...@apache.org.
HDFS-12949. Fix findbugs warning in ImageWriter.java.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ab632ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ab632ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ab632ba

Branch: refs/heads/YARN-6592
Commit: 5ab632baf52f0ecc737845051b382f68bf1385bb
Parents: 382215c
Author: Akira Ajisaka <aa...@apache.org>
Authored: Thu Dec 21 10:04:34 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Thu Dec 21 10:04:34 2017 +0900

----------------------------------------------------------------------
 hadoop-tools/hadoop-fs2img/pom.xml | 10 ++++++++++
 1 file changed, 10 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ab632ba/hadoop-tools/hadoop-fs2img/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/pom.xml b/hadoop-tools/hadoop-fs2img/pom.xml
index 2e3e66a..1ae17dc 100644
--- a/hadoop-tools/hadoop-fs2img/pom.xml
+++ b/hadoop-tools/hadoop-fs2img/pom.xml
@@ -87,6 +87,16 @@
          </archive>
         </configuration>
        </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+        <configuration>
+          <findbugsXmlOutput>true</findbugsXmlOutput>
+          <xmlOutput>true</xmlOutput>
+          <excludeFilterFile>${basedir}/dev-support/findbugs-exclude.xml</excludeFilterFile>
+          <effort>Max</effort>
+        </configuration>
+      </plugin>
     </plugins>
   </build>
 


[44/49] hadoop git commit: YARN-7670. [Addendum patch] Including some unstaged changes.

Posted by as...@apache.org.
YARN-7670. [Addendum patch] Including some unstaged changes.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6181cd8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6181cd8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6181cd8

Branch: refs/heads/YARN-6592
Commit: b6181cd8d5d475502c0632386c9ad1fa9b637ad0
Parents: defb138
Author: Arun Suresh <as...@apache.org>
Authored: Wed Dec 20 15:57:10 2017 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Fri Dec 29 17:29:15 2017 -0800

----------------------------------------------------------------------
 .../scheduler/capacity/CapacityScheduler.java            |  2 +-
 .../scheduler/common/fica/FiCaSchedulerApp.java          | 11 +++++++----
 2 files changed, 8 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6181cd8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 84273de..d92ce58 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -2672,7 +2672,7 @@ public class CapacityScheduler extends
       // proposal might be outdated if AM failover just finished
      // and proposal queue was not consumed in time
       if (app != null && attemptId.equals(app.getApplicationAttemptId())) {
-        if (app.accept(cluster, request)) {
+        if (app.accept(cluster, request, updatePending)) {
           app.apply(cluster, request, updatePending);
           LOG.info("Allocation proposal accepted");
           isSuccess = true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6181cd8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index 9fda1f4..12567b4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -375,7 +375,8 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
   }
 
   public boolean accept(Resource cluster,
-      ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode> request) {
+      ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode> request,
+      boolean checkPending) {
     ContainerRequest containerRequest = null;
     boolean reReservation = false;
 
@@ -408,9 +409,11 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
               schedulerContainer.getRmContainer().getContainerRequest();
 
           // Check pending resource request
-          if (!appSchedulingInfo.checkAllocation(allocation.getAllocationLocalityType(),
-              schedulerContainer.getSchedulerNode(),
-              schedulerContainer.getSchedulerRequestKey())) {
+          if (checkPending &&
+              !appSchedulingInfo.checkAllocation(
+                  allocation.getAllocationLocalityType(),
+                  schedulerContainer.getSchedulerNode(),
+                  schedulerContainer.getSchedulerRequestKey())) {
             if (LOG.isDebugEnabled()) {
               LOG.debug("No pending resource for: nodeType=" + allocation
                   .getAllocationLocalityType() + ", node=" + schedulerContainer


[02/49] hadoop git commit: HDFS-12930. Remove the extra space in HdfsImageViewer.md. Contributed by Rahul Pathak.

Posted by as...@apache.org.
HDFS-12930. Remove the extra space in HdfsImageViewer.md. Contributed by Rahul Pathak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25a36b74
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25a36b74
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25a36b74

Branch: refs/heads/YARN-6592
Commit: 25a36b74528678f56c63be643c76d819d6f07840
Parents: c7499f2
Author: Yiqun Lin <yq...@apache.org>
Authored: Tue Dec 19 11:23:16 2017 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Tue Dec 19 11:23:16 2017 +0800

----------------------------------------------------------------------
 .../hadoop-hdfs/src/site/markdown/HdfsImageViewer.md               | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25a36b74/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
index 9baadc0..bd3a797 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
@@ -99,7 +99,7 @@ The Web processor now supports the following operations:
 * [GETACLSTATUS](./WebHDFS.html#Get_ACL_Status)
 * [GETXATTRS](./WebHDFS.html#Get_an_XAttr)
 * [LISTXATTRS](./WebHDFS.html#List_all_XAttrs)
-* [CONTENTSUMMARY] (./WebHDFS.html#Get_Content_Summary_of_a_Directory)
+* [CONTENTSUMMARY](./WebHDFS.html#Get_Content_Summary_of_a_Directory)
 
 ### XML Processor
 


[35/49] hadoop git commit: HADOOP-15149. CryptoOutputStream should implement StreamCapabilities.

Posted by as...@apache.org.
HADOOP-15149. CryptoOutputStream should implement StreamCapabilities.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/81127616
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/81127616
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/81127616

Branch: refs/heads/YARN-6592
Commit: 81127616c571b7cd25686e60a1105f96ca0626b7
Parents: b82049b
Author: Xiao Chen <xi...@apache.org>
Authored: Fri Dec 29 13:40:42 2017 -0800
Committer: Xiao Chen <xi...@apache.org>
Committed: Fri Dec 29 13:41:15 2017 -0800

----------------------------------------------------------------------
 .../hadoop/crypto/CryptoOutputStream.java       | 11 ++++-
 .../hadoop/crypto/CryptoStreamsTestBase.java    |  4 +-
 .../apache/hadoop/crypto/TestCryptoStreams.java | 47 +++++++++++++++++++-
 3 files changed, 57 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/81127616/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
index 9fb0ff6..2f347c5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
@@ -26,6 +26,7 @@ import java.security.GeneralSecurityException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CanSetDropBehind;
+import org.apache.hadoop.fs.StreamCapabilities;
 import org.apache.hadoop.fs.Syncable;
 
 import com.google.common.base.Preconditions;
@@ -47,7 +48,7 @@ import com.google.common.base.Preconditions;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class CryptoOutputStream extends FilterOutputStream implements 
-    Syncable, CanSetDropBehind {
+    Syncable, CanSetDropBehind, StreamCapabilities {
   private final byte[] oneByteBuf = new byte[1];
   private final CryptoCodec codec;
   private final Encryptor encryptor;
@@ -304,4 +305,12 @@ public class CryptoOutputStream extends FilterOutputStream implements
     CryptoStreamUtils.freeDB(inBuffer);
     CryptoStreamUtils.freeDB(outBuffer);
   }
+
+  @Override
+  public boolean hasCapability(String capability) {
+    if (out instanceof StreamCapabilities) {
+      return ((StreamCapabilities) out).hasCapability(capability);
+    }
+    return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81127616/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
index 259383d..a0eb105 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
@@ -50,9 +50,9 @@ public abstract class CryptoStreamsTestBase {
       CryptoStreamsTestBase.class);
 
   protected static CryptoCodec codec;
-  private static final byte[] key = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 
+  protected static final byte[] key = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
     0x07, 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16};
-  private static final byte[] iv = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 
+  protected static final byte[] iv = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
     0x07, 0x08, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08};
   
   protected static final int count = 10000;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81127616/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
index 027ac93..2172d8a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
@@ -42,6 +42,10 @@ import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 public class TestCryptoStreams extends CryptoStreamsTestBase {
   /**
@@ -91,7 +95,7 @@ public class TestCryptoStreams extends CryptoStreamsTestBase {
   }
   
   private class FakeOutputStream extends OutputStream 
-      implements Syncable, CanSetDropBehind{
+      implements Syncable, CanSetDropBehind, StreamCapabilities{
     private final byte[] oneByteBuf = new byte[1];
     private final DataOutputBuffer out;
     private boolean closed;
@@ -153,7 +157,19 @@ public class TestCryptoStreams extends CryptoStreamsTestBase {
       checkStream();
       flush();
     }
-    
+
+    @Override
+    public boolean hasCapability(String capability) {
+      switch (capability.toLowerCase()) {
+      case StreamCapabilities.HFLUSH:
+      case StreamCapabilities.HSYNC:
+      case StreamCapabilities.DROPBEHIND:
+        return true;
+      default:
+        return false;
+      }
+    }
+
     private void checkStream() throws IOException {
       if (closed) {
         throw new IOException("Stream is closed!");
@@ -393,4 +409,31 @@ public class TestCryptoStreams extends CryptoStreamsTestBase {
       return ( ret <= 0 ) ? -1 : (oneByteBuf[0] & 0xff);
     }
   }
+
+  /**
+   * This tests {@link StreamCapabilities#hasCapability(String)} for
+   * the underlying streams.
+   */
+  @Test(timeout = 120000)
+  public void testHasCapability() throws Exception {
+    // verify hasCapability returns what FakeOutputStream is set up for
+    CryptoOutputStream cos =
+        (CryptoOutputStream) getOutputStream(defaultBufferSize, key, iv);
+    assertTrue(cos instanceof StreamCapabilities);
+    assertTrue(cos.hasCapability(StreamCapabilities.HFLUSH));
+    assertTrue(cos.hasCapability(StreamCapabilities.HSYNC));
+    assertTrue(cos.hasCapability(StreamCapabilities.DROPBEHIND));
+    assertFalse(cos.hasCapability(StreamCapabilities.READAHEAD));
+    assertFalse(cos.hasCapability(StreamCapabilities.UNBUFFER));
+
+    // verify hasCapability for input stream
+    CryptoInputStream cis =
+        (CryptoInputStream) getInputStream(defaultBufferSize, key, iv);
+    assertTrue(cis instanceof StreamCapabilities);
+    assertTrue(cis.hasCapability(StreamCapabilities.DROPBEHIND));
+    assertTrue(cis.hasCapability(StreamCapabilities.READAHEAD));
+    assertTrue(cis.hasCapability(StreamCapabilities.UNBUFFER));
+    assertFalse(cis.hasCapability(StreamCapabilities.HFLUSH));
+    assertFalse(cis.hasCapability(StreamCapabilities.HSYNC));
+  }
 }


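With this change a CryptoOutputStream reports whatever capabilities its wrapped stream supports, so callers can probe before attempting a durable flush. A minimal sketch, assuming the crypto stream is ultimately wrapped in an FSDataOutputStream obtained from a FileSystem; SafeFlush is an illustrative name:

    import java.io.IOException;

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.StreamCapabilities;

    public class SafeFlush {
      // Durably flush when the stream supports it, else fall back to flush().
      public static void flushIfSupported(FSDataOutputStream out) throws IOException {
        if (out.hasCapability(StreamCapabilities.HFLUSH)) {
          out.hflush();
        } else {
          out.flush();
        }
      }
    }
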
[03/49] hadoop git commit: Add 2.8.3 release jdiff files.

Posted by as...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f8caf5/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.8.3.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.8.3.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.8.3.xml
new file mode 100644
index 0000000..f3191e4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.8.3.xml
@@ -0,0 +1,829 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Tue Dec 05 05:32:45 UTC 2017 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop YARN Server Common 2.8.3"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/hadoop-annotations.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/jdiff.jar -verbose -classpath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/classes:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-2.8.3.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/commons-n
 et/commons-net/3.1/commons-net-3.1.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/maven/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/maven/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/maven/org/mortbay/jetty/jetty-sslengine/6.1.26/jetty-sslengine-6.1.26.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/maven/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/maven/asm/asm/3.2/asm-3.2.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/maven/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/maven/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/c
 ommons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/maven/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/maven/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/maven/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/maven/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/maven/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.8.3.jar:/maven/com/nimbusds/nimbus-jose-jwt/3.9/nimbus-jose-jwt-3.9.jar:/maven/net/jcip/jcip-anno
 tations/1.0/jcip-annotations-1.0.jar:/maven/net/minidev/json-smart/1.1.1/json-smart-1.1.1.jar:/maven/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/maven/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/maven/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/maven/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/maven/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/maven/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/htrace/htrace-core4/4.0.1-incubating/htrace-core4-4.0.1-incubating.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar:/build/source/hadoo
 p-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.8.3.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-2.8.3.jar:/maven/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/maven/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/maven/javax/activation/activation/1.1/activation-1.1.jar:/maven/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/maven/com/google/inject/guice/3.0/guice-3.0.jar:/maven/javax/inject/javax.inject/1/javax.inject-1.jar:/maven/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/maven/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/build/source/ha
 doop-common-project/hadoop-annotations/target/hadoop-annotations-2.8.3.jar:/usr/lib/jvm/java-7-openjdk-amd64/lib/tools.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/maven/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/maven/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar -sourcepath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java -apidir /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/site/jdiff/xml -apiname Apache Hadoop YARN Server Common 2.8.3 -->
+<package name="org.apache.hadoop.yarn.server">
+</package>
+<package name="org.apache.hadoop.yarn.server.api">
+</package>
+<package name="org.apache.hadoop.yarn.server.api.impl.pb.client">
+</package>
+<package name="org.apache.hadoop.yarn.server.api.impl.pb.service">
+</package>
+<package name="org.apache.hadoop.yarn.server.api.records">
+  <!-- start class org.apache.hadoop.yarn.server.api.records.NodeHealthStatus -->
+  <class name="NodeHealthStatus" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NodeHealthStatus"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getIsNodeHealthy" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Is the node healthy?
+ @return <code>true</code> if the node is healthy, else <code>false</code>]]>
+      </doc>
+    </method>
+    <method name="getHealthReport" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>diagnostic health report</em> of the node.
+ @return <em>diagnostic health report</em> of the node]]>
+      </doc>
+    </method>
+    <method name="getLastHealthReportTime" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>last timestamp</em> at which the health report was received.
+ @return <em>last timestamp</em> at which the health report was received]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[{@code NodeHealthStatus} is a summary of the health status of the node.
+ <p>
+ It includes information such as:
+ <ul>
+   <li>
+     An indicator of whether the node is healthy, as determined by the
+     health-check script.
+   </li>
+   <li>The previous time at which the health status was reported.</li>
+   <li>A diagnostic report on the health status.</li>
+ </ul>
+ 
+ @see NodeReport
+ @see ApplicationClientProtocol#getClusterNodes(org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.records.NodeHealthStatus -->
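
A minimal sketch of consuming the three getters above. How the NodeHealthStatus instance is obtained (for example, from a node heartbeat) is assumed, and HealthSummary is an illustrative name:

    import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;

    public class HealthSummary {
      // Collapse a health record into a single log-friendly line.
      public static String summarize(NodeHealthStatus status) {
        String state = status.getIsNodeHealthy() ? "HEALTHY" : "UNHEALTHY";
        return state + " (reported at " + status.getLastHealthReportTime()
            + "): " + status.getHealthReport();
      }
    }
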
+</package>
+<package name="org.apache.hadoop.yarn.server.api.records.impl.pb">
+</package>
+<package name="org.apache.hadoop.yarn.server.metrics">
+</package>
+<package name="org.apache.hadoop.yarn.server.records">
+</package>
+<package name="org.apache.hadoop.yarn.server.records.impl.pb">
+</package>
+<package name="org.apache.hadoop.yarn.server.security.http">
+</package>
+<package name="org.apache.hadoop.yarn.server.sharedcache">
+</package>
+<package name="org.apache.hadoop.yarn.server.utils">
+  <!-- start class org.apache.hadoop.yarn.server.utils.LeveldbIterator -->
+  <class name="LeveldbIterator" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.util.Iterator"/>
+    <implements name="java.io.Closeable"/>
+    <constructor name="LeveldbIterator" type="org.iq80.leveldb.DB"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an iterator for the specified database]]>
+      </doc>
+    </constructor>
+    <constructor name="LeveldbIterator" type="org.iq80.leveldb.DB, org.iq80.leveldb.ReadOptions"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an iterator for the specified database]]>
+      </doc>
+    </constructor>
+    <constructor name="LeveldbIterator" type="org.iq80.leveldb.DBIterator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an iterator using the specified underlying DBIterator]]>
+      </doc>
+    </constructor>
+    <method name="seek"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="byte[]"/>
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Repositions the iterator so the key of the next BlockElement
+ returned is greater than or equal to the specified targetKey.]]>
+      </doc>
+    </method>
+    <method name="seekToFirst"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Repositions the iterator so it is at the beginning of the Database.]]>
+      </doc>
+    </method>
+    <method name="seekToLast"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Repositions the iterator so it is at the end of the Database.]]>
+      </doc>
+    </method>
+    <method name="hasNext" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Returns <tt>true</tt> if the iteration has more elements.]]>
+      </doc>
+    </method>
+    <method name="next" return="java.util.Map.Entry"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Returns the next element in the iteration.]]>
+      </doc>
+    </method>
+    <method name="peekNext" return="java.util.Map.Entry"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Returns the next element in the iteration, without advancing the
+ iteration.]]>
+      </doc>
+    </method>
+    <method name="hasPrev" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[@return true if there is a previous entry in the iteration.]]>
+      </doc>
+    </method>
+    <method name="prev" return="java.util.Map.Entry"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[@return the previous element in the iteration and rewinds the iteration.]]>
+      </doc>
+    </method>
+    <method name="peekPrev" return="java.util.Map.Entry"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[@return the previous element in the iteration, without rewinding the
+ iteration.]]>
+      </doc>
+    </method>
+    <method name="remove"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Removes from the database the last element returned by the iterator.]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Closes the iterator.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A wrapper for a DBIterator to translate the raw RuntimeExceptions that
+ can be thrown into DBExceptions.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.utils.LeveldbIterator -->
+</package>
+<package name="org.apache.hadoop.yarn.server.webapp">
+</package>
+<package name="org.apache.hadoop.yarn.server.webapp.dao">
+  <!-- start class org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo -->
+  <class name="AppAttemptInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AppAttemptInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AppAttemptInfo" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getAppAttemptId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getHost" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getRpcPort" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getTrackingUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getOriginalTrackingUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDiagnosticsInfo" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAppAttemptState" return="org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAmContainerId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getStartedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFinishedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="appAttemptId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="host" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="rpcPort" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="trackingUrl" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="originalTrackingUrl" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="diagnosticsInfo" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="appAttemptState" type="org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="amContainerId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="startedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="finishedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo -->
+  <class name="AppAttemptsInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AppAttemptsInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="add"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo"/>
+    </method>
+    <method name="getAttempts" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="attempt" type="java.util.ArrayList"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.dao.AppInfo -->
+  <class name="AppInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AppInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AppInfo" type="org.apache.hadoop.yarn.api.records.ApplicationReport"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getAppId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getCurrentAppAttemptId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getUser" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getQueue" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getType" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getHost" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getRpcPort" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAppState" return="org.apache.hadoop.yarn.api.records.YarnApplicationState"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getRunningContainers" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAllocatedCpuVcores" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAllocatedMemoryMB" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getProgress" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDiagnosticsInfo" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getOriginalTrackingUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getTrackingUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFinalAppStatus" return="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getSubmittedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getStartedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFinishedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getElapsedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getApplicationTags" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isUnmanagedApp" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getPriority" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAppNodeLabelExpression" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAmNodeLabelExpression" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="appId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="currentAppAttemptId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="user" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="name" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="queue" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="type" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="host" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="rpcPort" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="appState" type="org.apache.hadoop.yarn.api.records.YarnApplicationState"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="runningContainers" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="progress" type="float"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="diagnosticsInfo" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="originalTrackingUrl" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="trackingUrl" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="finalAppStatus" type="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="submittedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="startedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="finishedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="elapsedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="applicationTags" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="priority" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="unmanagedApplication" type="boolean"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.dao.AppInfo -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.dao.AppsInfo -->
+  <class name="AppsInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AppsInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="add"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appinfo" type="org.apache.hadoop.yarn.server.webapp.dao.AppInfo"/>
+    </method>
+    <method name="getApps" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="app" type="java.util.ArrayList"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.dao.AppsInfo -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo -->
+  <class name="ContainerInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerInfo" type="org.apache.hadoop.yarn.api.records.ContainerReport"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getContainerId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAllocatedMB" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAllocatedVCores" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAssignedNodeId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getPriority" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getStartedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFinishedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getElapsedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDiagnosticsInfo" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getLogUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getContainerExitStatus" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getContainerState" return="org.apache.hadoop.yarn.api.records.ContainerState"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getNodeHttpAddress" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="containerId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="allocatedMB" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="allocatedVCores" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="assignedNodeId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="priority" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="startedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="finishedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="elapsedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="diagnosticsInfo" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="logUrl" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="containerExitStatus" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="containerState" type="org.apache.hadoop.yarn.api.records.ContainerState"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="nodeHttpAddress" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo -->
+  <class name="ContainersInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainersInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="add"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerInfo" type="org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo"/>
+    </method>
+    <method name="getContainers" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="container" type="java.util.ArrayList"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo -->
+</package>
+
+</api>




[27/49] hadoop git commit: HDFS-12951. Incorrect javadoc in SaslDataTransferServer.java#receive. Contributed by Mukul Kumar Singh.

Posted by as...@apache.org.
HDFS-12951. Incorrect javadoc in SaslDataTransferServer.java#receive. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/826507c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/826507c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/826507c4

Branch: refs/heads/YARN-6592
Commit: 826507c41b7dd89ce5b53d2245d09c2443423670
Parents: b318bed
Author: Chen Liang <cl...@apache.org>
Authored: Thu Dec 21 11:20:30 2017 -0800
Committer: Chen Liang <cl...@apache.org>
Committed: Thu Dec 21 11:20:30 2017 -0800

----------------------------------------------------------------------
 .../hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/826507c4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java
index e67d873..e3a72d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java
@@ -97,7 +97,7 @@ public class SaslDataTransferServer {
    * @param peer connection peer
    * @param underlyingOut connection output stream
    * @param underlyingIn connection input stream
-   * @param int xferPort data transfer port of DataNode accepting connection
+   * @param xferPort data transfer port of DataNode accepting connection
    * @param datanodeId ID of DataNode accepting connection
    * @return new pair of streams, wrapped after SASL negotiation
    * @throws IOException for any error
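
The bug was a stray type token inside a javadoc tag: @param takes only the parameter's name, since javadoc resolves the type from the method signature. A minimal sketch of the corrected form (hypothetical class and method, not the Hadoop source):

    import java.io.IOException;

    public class JavadocExample {
      /**
       * Accepts a connection on the given port.
       *
       * @param xferPort data transfer port of the DataNode accepting the
       *                 connection (note there is no type before the name;
       *                 "@param int xferPort" fails to match any parameter
       *                 and produces a javadoc warning)
       * @throws IOException for any error
       */
      public void receive(int xferPort) throws IOException {
        // connection handling elided
      }
    }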




[18/49] hadoop git commit: HDFS-12932. Fix confusing LOG message for block replication. Contributed by Chao Sun.

Posted by as...@apache.org.
HDFS-12932. Fix confusing LOG message for block replication. Contributed by Chao Sun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a78db991
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a78db991
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a78db991

Branch: refs/heads/YARN-6592
Commit: a78db9919065d06ced8122229530f44cc7369857
Parents: d62932c
Author: Wei Yan <we...@apache.org>
Authored: Wed Dec 20 08:55:46 2017 -0800
Committer: Wei Yan <we...@apache.org>
Committed: Wed Dec 20 08:55:46 2017 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java     | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a78db991/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 0dfaa8e..201605f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -414,9 +414,12 @@ public class FSDirAttrOp {
       if (oldBR > targetReplication) {
         FSDirectory.LOG.info("Decreasing replication from {} to {} for {}",
                              oldBR, targetReplication, iip.getPath());
-      } else {
+      } else if (oldBR < targetReplication) {
         FSDirectory.LOG.info("Increasing replication from {} to {} for {}",
                              oldBR, targetReplication, iip.getPath());
+      } else {
+        FSDirectory.LOG.info("Replication remains unchanged at {} for {}",
+                             oldBR, iip.getPath());
       }
     }
     return file.getBlocks();
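
The fix boils down to a three-way comparison instead of a two-way one: before the patch, an unchanged replication factor fell into the "Increasing" branch and logged something like "Increasing replication from 3 to 3". A standalone sketch of the same pattern (SLF4J directly; class and method names are illustrative, not the Hadoop code):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class ReplicationLogger {
      private static final Logger LOG =
          LoggerFactory.getLogger(ReplicationLogger.class);

      // Log the direction of a replication change, with a distinct
      // message for the no-op case.
      static void logChange(short oldBR, short targetBR, String path) {
        if (oldBR > targetBR) {
          LOG.info("Decreasing replication from {} to {} for {}",
              oldBR, targetBR, path);
        } else if (oldBR < targetBR) {
          LOG.info("Increasing replication from {} to {} for {}",
              oldBR, targetBR, path);
        } else {
          LOG.info("Replication remains unchanged at {} for {}",
              oldBR, path);
        }
      }
    }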




[21/49] hadoop git commit: YARN-7577. Unit Fail: TestAMRestart#testPreemptedAMRestartOnRMRestart (miklos.szegedi@cloudera.com via rkanter)

Posted by as...@apache.org.
YARN-7577. Unit Fail: TestAMRestart#testPreemptedAMRestartOnRMRestart (miklos.szegedi@cloudera.com via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/382215c7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/382215c7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/382215c7

Branch: refs/heads/YARN-6592
Commit: 382215c72b93d6a97d813f407cf6496a7c3f2a4a
Parents: 1ba491ff
Author: Robert Kanter <rk...@apache.org>
Authored: Wed Dec 20 13:39:00 2017 -0800
Committer: Robert Kanter <rk...@apache.org>
Committed: Wed Dec 20 13:39:00 2017 -0800

----------------------------------------------------------------------
 .../applicationsmanager/TestAMRestart.java      | 131 +++++++++++--------
 1 file changed, 73 insertions(+), 58 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/382215c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
index 3d523aa..4add186 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -45,6 +46,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockMemoryRMStateStore;
+import org.apache.hadoop.yarn.server.resourcemanager.ParameterizedSchedulerTestBase;
 import org.apache.hadoop.yarn.server.resourcemanager.TestRMRestart;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData;
@@ -63,14 +65,20 @@ import org.apache.hadoop.yarn.util.Records;
 import org.junit.Assert;
 import org.junit.Test;
 
-public class TestAMRestart {
+/**
+ * Test AM restart functions.
+ */
+public class TestAMRestart extends ParameterizedSchedulerTestBase {
+
+  public TestAMRestart(SchedulerType type) throws IOException {
+    super(type);
+  }
 
   @Test(timeout = 30000)
   public void testAMRestartWithExistingContainers() throws Exception {
-    YarnConfiguration conf = new YarnConfiguration();
-    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
+    getConf().setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
 
-    MockRM rm1 = new MockRM(conf);
+    MockRM rm1 = new MockRM(getConf());
     rm1.start();
     RMApp app1 =
         rm1.submitApp(200, "name", "user",
@@ -266,15 +274,14 @@ public class TestAMRestart {
 
   @Test(timeout = 30000)
   public void testNMTokensRebindOnAMRestart() throws Exception {
-    YarnConfiguration conf = new YarnConfiguration();
-    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 3);
+    getConf().setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 3);
     // To prevent the test from blacklisting nm1 for the AM, we set the
     // threshold to half of the 2 nodes, which is 1
-    conf.setFloat(
+    getConf().setFloat(
         YarnConfiguration.AM_SCHEDULING_NODE_BLACKLISTING_DISABLE_THRESHOLD,
         0.5f);
 
-    MockRM rm1 = new MockRM(conf);
+    MockRM rm1 = new MockRM(getConf());
     rm1.start();
     RMApp app1 =
         rm1.submitApp(200, "myname", "myuser",
@@ -378,11 +385,11 @@ public class TestAMRestart {
   // should not be counted towards AM max retry count.
   @Test(timeout = 100000)
   public void testShouldNotCountFailureToMaxAttemptRetry() throws Exception {
-    YarnConfiguration conf = new YarnConfiguration();
-    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
-    conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
-    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
-    MockRM rm1 = new MockRM(conf);
+    getConf().setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
+    getConf().setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
+    getConf().set(
+        YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
+    MockRM rm1 = new MockRM(getConf());
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 8000, rm1.getResourceTrackerService());
@@ -503,11 +510,11 @@ public class TestAMRestart {
 
   @Test(timeout = 100000)
   public void testMaxAttemptOneMeansOne() throws Exception {
-    YarnConfiguration conf = new YarnConfiguration();
-    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
-    conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
-    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
-    MockRM rm1 = new MockRM(conf);
+    getConf().setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
+    getConf().setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
+    getConf().set(
+        YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
+    MockRM rm1 = new MockRM(getConf());
     rm1.start();
     MockNM nm1 =
         new MockNM("127.0.0.1:1234", 8000, rm1.getResourceTrackerService());
@@ -537,14 +544,15 @@ public class TestAMRestart {
   // re-launch the AM.
   @Test(timeout = 60000)
   public void testPreemptedAMRestartOnRMRestart() throws Exception {
-    YarnConfiguration conf = new YarnConfiguration();
-    conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
-    conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, false);
+    getConf().setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
+    getConf().setBoolean(
+        YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, false);
 
-    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
-    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
+    getConf().set(
+        YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
+    getConf().setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
 
-    MockRM rm1 = new MockRM(conf);
+    MockRM rm1 = new MockRM(getConf());
     MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     rm1.start();
     MockNM nm1 =
@@ -584,12 +592,19 @@ public class TestAMRestart {
     ApplicationStateData appState =
         memStore.getState().getApplicationState().get(app1.getApplicationId());
     Assert.assertEquals(2, appState.getAttemptCount());
-    // attempt stored has the preempted container exit status.
-    Assert.assertEquals(ContainerExitStatus.PREEMPTED,
-        appState.getAttempt(am2.getApplicationAttemptId())
-            .getAMContainerExitStatus());
+    if (getSchedulerType().equals(SchedulerType.FAIR)) {
+      // attempt stored has the killed-by-RM container exit status.
+      Assert.assertEquals(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER,
+          appState.getAttempt(am2.getApplicationAttemptId())
+              .getAMContainerExitStatus());
+    } else {
+      // attempt stored has the preempted container exit status.
+      Assert.assertEquals(ContainerExitStatus.PREEMPTED,
+          appState.getAttempt(am2.getApplicationAttemptId())
+              .getAMContainerExitStatus());
+    }
     // Restart rm.
-    MockRM rm2 = new MockRM(conf, memStore);
+    MockRM rm2 = new MockRM(getConf(), memStore);
     nm1.setResourceTrackerService(rm2.getResourceTrackerService());
     nm1.registerNode();
     rm2.start();
@@ -615,15 +630,16 @@ public class TestAMRestart {
   @Test(timeout = 50000)
   public void testRMRestartOrFailoverNotCountedForAMFailures()
       throws Exception {
-    YarnConfiguration conf = new YarnConfiguration();
-    conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
-    conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, false);
+    getConf().setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
+    getConf().setBoolean(
+        YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, false);
 
-    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
+    getConf().set(
+        YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
     // explicitly set max-am-retry count as 2.
-    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
+    getConf().setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
 
-    MockRM rm1 = new MockRM(conf);
+    MockRM rm1 = new MockRM(getConf());
     MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     rm1.start();
     AbstractYarnScheduler scheduler =
@@ -651,7 +667,7 @@ public class TestAMRestart {
     RMAppAttempt attempt2 = app1.getCurrentAppAttempt();
 
     // Restart rm.
-    MockRM rm2 = new MockRM(conf, memStore);
+    MockRM rm2 = new MockRM(getConf(), memStore);
     rm2.start();
     ApplicationStateData appState =
         memStore.getState().getApplicationState().get(app1.getApplicationId());
@@ -688,14 +704,15 @@ public class TestAMRestart {
 
   @Test (timeout = 120000)
   public void testRMAppAttemptFailuresValidityInterval() throws Exception {
-    YarnConfiguration conf = new YarnConfiguration();
-    conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
-    conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, false);
+    getConf().setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
+    getConf().setBoolean(
+        YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, false);
 
-    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
+    getConf().set(
+        YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
     // explicitly set max-am-retry count as 2.
-    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
-    MockRM rm1 = new MockRM(conf);
+    getConf().setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
+    MockRM rm1 = new MockRM(getConf());
     rm1.start();
 
     MockMemoryRMStateStore memStore =
@@ -765,7 +782,7 @@ public class TestAMRestart {
 
     // Restart rm.
     @SuppressWarnings("resource")
-    MockRM rm2 = new MockRM(conf, memStore);
+    MockRM rm2 = new MockRM(getConf(), memStore);
     rm2.start();
 
     MockMemoryRMStateStore memStore1 =
@@ -834,12 +851,11 @@ public class TestAMRestart {
     return false;
   }
 
-  @Test(timeout = 30000)
+  @Test(timeout = 40000)
   public void testAMRestartNotLostContainerCompleteMsg() throws Exception {
-    YarnConfiguration conf = new YarnConfiguration();
-    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
+    getConf().setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
 
-    MockRM rm1 = new MockRM(conf);
+    MockRM rm1 = new MockRM(getConf());
     rm1.start();
     RMApp app1 =
         rm1.submitApp(200, "name", "user",
@@ -934,11 +950,10 @@ public class TestAMRestart {
   @Test (timeout = 20000)
   public void testAMRestartNotLostContainerAfterAttemptFailuresValidityInterval()
       throws Exception {
-    YarnConfiguration conf = new YarnConfiguration();
     // explicitly set max-am-retry count as 2.
-    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
+    getConf().setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
 
-    MockRM rm1 = new MockRM(conf);
+    MockRM rm1 = new MockRM(getConf());
     rm1.start();
     MockNM nm1 =
             new MockNM("127.0.0.1:1234", 8000, rm1.getResourceTrackerService());
@@ -1019,16 +1034,16 @@ public class TestAMRestart {
   @Test(timeout = 200000)
   public void testContainersFromPreviousAttemptsWithRMRestart()
       throws Exception {
-    YarnConfiguration conf = new YarnConfiguration();
-    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
-    conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
-    conf.setBoolean(
+    getConf().setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
+    getConf().setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
+    getConf().setBoolean(
         YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);
-    conf.setLong(
+    getConf().setLong(
         YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS, 0);
-    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
+    getConf()
+        .set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
 
-    MockRM rm1 = new MockRM(conf);
+    MockRM rm1 = new MockRM(getConf());
     MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
     rm1.start();
     YarnScheduler scheduler = rm1.getResourceScheduler();
@@ -1071,7 +1086,7 @@ public class TestAMRestart {
         (AbstractYarnScheduler)scheduler, am1.getApplicationAttemptId());
 
     // restart rm
-    MockRM rm2 = new MockRM(conf, memStore);
+    MockRM rm2 = new MockRM(getConf(), memStore);
     rm2.start();
     nm1.setResourceTrackerService(rm2.getResourceTrackerService());
     NMContainerStatus container2Status =
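
The recurring edit above replaces per-test YarnConfiguration instances with the base class's getConf(), so every test runs once per scheduler implementation. A minimal sketch of that parameterized-base pattern (plain JUnit 4; class names here are illustrative, not the actual ParameterizedSchedulerTestBase):

    import java.util.Arrays;
    import java.util.Collection;
    import org.junit.Assert;
    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;

    @RunWith(Parameterized.class)
    public class SchedulerParamTest {
      public enum SchedulerType { CAPACITY, FAIR }

      // One run of every @Test per entry returned here.
      @Parameterized.Parameters(name = "{0}")
      public static Collection<Object[]> params() {
        return Arrays.asList(new Object[][] {
            {SchedulerType.CAPACITY}, {SchedulerType.FAIR}});
      }

      private final SchedulerType type;

      public SchedulerParamTest(SchedulerType type) {
        this.type = type;
      }

      @Test
      public void exitStatusDependsOnScheduler() {
        // Mirrors the branch added in the patch: the Fair scheduler kills
        // the AM container with KILLED_BY_RESOURCEMANAGER, while the
        // Capacity scheduler marks it PREEMPTED.
        String expected = (type == SchedulerType.FAIR)
            ? "KILLED_BY_RESOURCEMANAGER" : "PREEMPTED";
        Assert.assertFalse(expected.isEmpty());
      }
    }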




[12/49] hadoop git commit: YARN-7620. Allow node partition filters on Queues page of new YARN UI. Contributed by Vasudevan Skm.

Posted by as...@apache.org.
YARN-7620. Allow node partition filters on Queues page of new YARN UI. Contributed by Vasudevan Skm.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fe5b057c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fe5b057c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fe5b057c

Branch: refs/heads/YARN-6592
Commit: fe5b057c8144d01ef9fdfb2639a2cba97ead8144
Parents: e040c97
Author: Sunil G <su...@apache.org>
Authored: Tue Dec 19 20:27:25 2017 +0530
Committer: Sunil G <su...@apache.org>
Committed: Tue Dec 19 20:27:25 2017 +0530

----------------------------------------------------------------------
 .../webapp/app/components/queue-navigator.js    |  26 +-
 .../main/webapp/app/components/tree-selector.js | 301 ++++++++++++-------
 .../yarn-queue-partition-capacity-labels.js     |  48 +++
 .../src/main/webapp/app/constants.js            |   4 +-
 .../main/webapp/app/controllers/yarn-queues.js  |  36 ++-
 .../app/models/yarn-queue/capacity-queue.js     |  56 ++--
 .../serializers/yarn-queue/capacity-queue.js    |   6 +
 .../src/main/webapp/app/styles/app.scss         |   1 +
 .../src/main/webapp/app/styles/yarn-queues.scss |  29 ++
 .../templates/components/queue-navigator.hbs    |  20 +-
 .../yarn-queue-partition-capacity-labels.hbs    |  54 ++++
 .../components/yarn-queue/capacity-queue.hbs    |  34 +--
 .../main/webapp/app/templates/yarn-queues.hbs   |   3 +-
 ...yarn-queue-partition-capacity-labels-test.js |  43 +++
 14 files changed, 478 insertions(+), 183 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/queue-navigator.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/queue-navigator.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/queue-navigator.js
index 4b741b8..2cecefb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/queue-navigator.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/queue-navigator.js
@@ -16,7 +16,27 @@
  * limitations under the License.
  */
 
-import Ember from 'ember';
+import Ember from "ember";
 
-export default Ember.Component.extend({
-});
\ No newline at end of file
+export default Ember.Component.extend(Ember.TargetActionSupport,{
+  actions: {
+    filterQueuesByPartition(filter) {
+      this.set("filteredPartition", filter);
+      this.sendAction("setFilter", filter);
+    }
+  },
+  didInsertElement: function() {
+    $(".js-filter-queue-by-labels").select2({
+      width: "350px",
+      multiple: false
+    });
+
+    $(".js-filter-queue-by-labels").on("select2:select", e => {
+      this.triggerAction({
+        action: "filterQueuesByPartition",
+        target: this,
+        actionContext: e.params.data.text
+      });
+    });
+  }
+});
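
The component above bridges a jQuery select2 event back into Ember's action system: select2 replaces the native <select>, so its change events never pass through the {{action}} helper, and triggerAction re-enters the action pipeline with the chosen value. A stripped-down sketch of that bridge (assumes jQuery and select2 are globally available, as they are in this UI):

    import Ember from "ember";

    export default Ember.Component.extend(Ember.TargetActionSupport, {
      actions: {
        // Receives the chosen partition and forwards it up to the route
        // or controller that supplied the "setFilter" action.
        filterQueuesByPartition(filter) {
          this.sendAction("setFilter", filter);
        }
      },
      didInsertElement() {
        $(".js-filter").select2({ width: "350px", multiple: false });
        $(".js-filter").on("select2:select", e => {
          this.triggerAction({
            action: "filterQueuesByPartition",
            target: this,
            actionContext: e.params.data.text
          });
        });
      }
    });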

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
index 4645a48..5168c0e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js
@@ -16,19 +16,20 @@
  * limitations under the License.
  */
 
-import Ember from 'ember';
+import Ember from "ember";
+import {PARTITION_LABEL} from '../constants';
 
 const INBETWEEN_HEIGHT = 130;
 
 export default Ember.Component.extend({
   // Map: <queue-name, queue>
-  map : undefined,
+  map: undefined,
 
   // Normalized data for d3
   treeData: undefined,
 
   // folded queues, folded[<queue-name>] == true means <queue-name> is folded
-  foldedQueues: { },
+  foldedQueues: {},
 
   // maxDepth
   maxDepth: 0,
@@ -42,17 +43,23 @@ export default Ember.Component.extend({
   used: undefined,
   max: undefined,
 
+  didUpdateAttrs: function({ oldAttrs, newAttrs }) {
+    if (oldAttrs.filteredPartition.value !== newAttrs.filteredPartition.value) {
+      this.reDraw();
+    }
+  },
   // Init data
   initData: function() {
-    this.map = { };
-    this.treeData = { };
+    this.map = {};
+    this.treeData = {};
     this.maxDepth = 0;
     this.numOfLeafQueue = 0;
 
-    this.get("model")
-      .forEach(function(o) {
+    this.get("model").forEach(
+      function(o) {
         this.map[o.id] = o;
-      }.bind(this));
+      }.bind(this)
+    );
 
     // var selected = this.get("selected");
     this.used = this.get("used");
@@ -81,9 +88,9 @@ export default Ember.Component.extend({
 
   // Init queues
   initQueue: function(queueName, depth, node) {
-    if ((!queueName) || (!this.map[queueName])) {
+    if (!queueName || !this.map[queueName]) {
       // Queue is not existed
-      return;
+      return false;
     }
     if (depth > this.maxDepth) {
       this.maxDepth = this.maxDepth + 1;
@@ -91,6 +98,13 @@ export default Ember.Component.extend({
 
     var queue = this.map[queueName];
 
+    if (
+      this.filteredPartition &&
+      !queue.get("partitions").contains(this.filteredPartition)
+    ) {
+      return false;
+    }
+
     var names = this.getChildrenNamesArray(queue);
 
     node.name = queueName;
@@ -100,14 +114,21 @@ export default Ember.Component.extend({
     if (names.length > 0) {
       node.children = [];
 
-      names.forEach(function(name) {
-        var childQueueData = {};
-        node.children.push(childQueueData);
-        this.initQueue(name, depth + 1, childQueueData);
-      }.bind(this));
+      names.forEach(
+        function(name) {
+          var childQueueData = {};
+          node.children.push(childQueueData);
+          const status = this.initQueue(name, depth + 1, childQueueData);
+          if (!status) {
+            node.children.pop();
+          }
+        }.bind(this)
+      );
     } else {
       this.numOfLeafQueue = this.numOfLeafQueue + 1;
     }
+
+    return true;
   },
 
   update: function(source, root, tree, diagonal) {
@@ -119,141 +140,183 @@ export default Ember.Component.extend({
     var links = tree.links(nodes);
 
     // Normalize for fixed-depth.
-    nodes.forEach(function(d) { d.y = d.depth * 200; });
+    nodes.forEach(function(d) {
+      d.y = d.depth * 200;
+    });
 
     // Update the nodes…
-    var node = this.mainSvg.selectAll("g.node")
-      .data(nodes, function(d) { return d.id || (d.id = ++i); });
+    var node = this.mainSvg.selectAll("g.node").data(nodes, function(d) {
+      return d.id || (d.id = ++i);
+    });
 
     // Enter any new nodes at the parent's previous position.
-    var nodeEnter = node.enter().append("g")
+    var nodeEnter = node
+      .enter()
+      .append("g")
       .attr("class", "node")
-      .attr("transform", function() { return "translate(" + source.y0 + "," + source.x0 + ")"; })
-      .on("click", function(d){
-        if (d.queueData.get("name") !== this.get("selected")) {
-            document.location.href = "#/yarn-queues/" + d.queueData.get("name") + "!";
-        }
-
-        Ember.run.later(this, function () {
-          var treeWidth = this.maxDepth * 200;
-          var treeHeight = this.numOfLeafQueue * INBETWEEN_HEIGHT;
-          var tree = d3.layout.tree().size([treeHeight, treeWidth]);
-          var diagonal = d3.svg.diagonal()
-            .projection(function(d) { return [d.y, d.x]; });
-
-          this.update(this.treeData, this.treeData, tree, diagonal);
-        }, 100);
-
-      }.bind(this))
-    .on("dblclick", function (d) {
-      document.location.href = "#/yarn-queue/" + d.queueData.get("name") + "/apps";
-    });
+      .attr("transform", function() {
+        return `translate(${source.y0 + 50}, ${source.x0})`;
+      })
+      .on(
+        "click",
+        function(d) {
+          if (d.queueData.get("name") !== this.get("selected")) {
+            document.location.href =
+              "#/yarn-queues/" + d.queueData.get("name") + "!";
+          }
+
+          Ember.run.later(
+            this,
+            function() {
+              var treeWidth = this.maxDepth * 200;
+              var treeHeight = this.numOfLeafQueue * INBETWEEN_HEIGHT;
+              var tree = d3.layout.tree().size([treeHeight, treeWidth]);
+              var diagonal = d3.svg.diagonal().projection(function(d) {
+                return [d.y + 50, d.x];
+              });
+
+              this.update(this.treeData, this.treeData, tree, diagonal);
+            },
+            100
+          );
+        }.bind(this)
+      )
+      .on("dblclick", function(d) {
+        document.location.href =
+          "#/yarn-queue/" + d.queueData.get("name") + "/apps";
+      });
 
-    nodeEnter.append("circle")
+    nodeEnter
+      .append("circle")
       .attr("r", 1e-6)
-      .style("fill", function(d) {
-        var maxCap = d.queueData.get(this.max);
-        maxCap = maxCap === undefined ? 100 : maxCap;
-        var usedCap = d.queueData.get(this.used) / maxCap * 100.0;
-        if (usedCap <= 60.0) {
-          return "mediumaquamarine";
-        } else if (usedCap <= 100.0) {
-          return "coral";
-        } else {
-          return "salmon";
-        }
-      }.bind(this));
+      .style(
+        "fill",
+        function(d) {
+          const usedCapacity = getUsedCapacity(d.queueData.get("partitionMap"), this.filteredPartition);
+          if (usedCapacity <= 60.0) {
+            return "#60cea5";
+          } else if (usedCapacity <= 100.0) {
+            return "#ffbc0b";
+          } else {
+            return "#ef6162";
+          }
+        }.bind(this)
+      );
 
     // append percentage
-    nodeEnter.append("text")
-      .attr("x", function() { return 0; })
+    nodeEnter
+      .append("text")
+      .attr("x", function() {
+        return 0;
+      })
       .attr("dy", ".35em")
       .attr("fill", "white")
-      .attr("text-anchor", function() { return "middle"; })
-      .text(function(d) {
-        var maxCap = d.queueData.get(this.max);
-        maxCap = maxCap === undefined ? 100 : maxCap;
-        var usedCap = d.queueData.get(this.used) / maxCap * 100.0;
-        if (usedCap >= 100.0) {
-          return usedCap.toFixed(0) + "%";
-        } else {
-          return usedCap.toFixed(1) + "%";
-        }
-      }.bind(this))
+      .attr("text-anchor", function() {
+        return "middle";
+      })
+      .text(
+        function(d) {
+          const usedCapacity = getUsedCapacity(d.queueData.get("partitionMap"), this.filteredPartition);
+          if (usedCapacity >= 100.0) {
+            return usedCapacity.toFixed(0) + "%";
+          } else {
+            return usedCapacity.toFixed(1) + "%";
+          }
+        }.bind(this)
+      )
       .style("fill-opacity", 1e-6);
 
     // append queue name
-    nodeEnter.append("text")
+    nodeEnter
+      .append("text")
       .attr("x", "0px")
       .attr("dy", "45px")
       .attr("text-anchor", "middle")
-      .text(function(d) { return d.name; })
+      .text(function(d) {
+        return d.name;
+      })
       .style("fill-opacity", 1e-6);
 
     // Transition nodes to their new position.
-    var nodeUpdate = node.transition()
+    var nodeUpdate = node
+      .transition()
       .duration(duration)
-      .attr("transform", function(d) { return "translate(" + d.y + "," + d.x + ")"; });
+      .attr("transform", function(d) {
+        return `translate(${d.y + 50}, ${d.x})`;
+      });
 
-    nodeUpdate.select("circle")
+    nodeUpdate
+      .select("circle")
       .attr("r", 30)
-      .attr("href",
+      .attr("href", function(d) {
+        return "#/yarn-queues/" + d.queueData.get("name");
+      })
+      .style(
+        "stroke-width",
+        function(d) {
+          if (d.queueData.get("name") === this.get("selected")) {
+            return 7;
+          } else {
+            return 2;
+          }
+        }.bind(this)
+      )
+      .style(
+        "stroke",
         function(d) {
-          return "#/yarn-queues/" + d.queueData.get("name");
-        })
-      .style("stroke-width", function(d) {
-        if (d.queueData.get("name") === this.get("selected")) {
-          return 7;
-        } else {
-          return 2;
-        }
-      }.bind(this))
-      .style("stroke", function(d) {
-        if (d.queueData.get("name") === this.get("selected")) {
-          return "gray";
-        } else {
-          return "gray";
-        }
-      }.bind(this));
-
-    nodeUpdate.selectAll("text")
-      .style("fill-opacity", 1);
+          if (d.queueData.get("name") === this.get("selected")) {
+            return "gray";
+          } else {
+            return "gray";
+          }
+        }.bind(this)
+      );
+
+    nodeUpdate.selectAll("text").style("fill-opacity", 1);
 
     // Transition exiting nodes to the parent's new position.
-    var nodeExit = node.exit().transition()
+    var nodeExit = node
+      .exit()
+      .transition()
       .duration(duration)
-      .attr("transform", function() { return "translate(" + source.y + "," + source.x + ")"; })
+      .attr("transform", function() {
+        return `translate(${source.y}, ${source.x})`;
+      })
       .remove();
 
-    nodeExit.select("circle")
-      .attr("r", 1e-6);
+    nodeExit.select("circle").attr("r", 1e-6);
 
-    nodeExit.select("text")
-      .style("fill-opacity", 1e-6);
+    nodeExit.select("text").style("fill-opacity", 1e-6);
 
     // Update the links…
-    var link = this.mainSvg.selectAll("path.link")
-      .data(links, function(d) { return d.target.id; });
+    var link = this.mainSvg.selectAll("path.link").data(links, function(d) {
+      return d.target.id;
+    });
 
     // Enter any new links at the parent's previous position.
-    link.enter().insert("path", "g")
+    link
+      .enter()
+      .insert("path", "g")
       .attr("class", "link")
       .attr("d", function() {
-        var o = {x: source.x0, y: source.y0};
-        return diagonal({source: o, target: o});
+        var o = { x: source.x0, y: source.y0 + 50 };
+        return diagonal({ source: o, target: o });
       });
 
     // Transition links to their new position.
-    link.transition()
+    link
+      .transition()
       .duration(duration)
       .attr("d", diagonal);
 
     // Transition exiting nodes to the parent's new position.
-    link.exit().transition()
+    link
+      .exit()
+      .transition()
       .duration(duration)
       .attr("d", function() {
-        var o = {x: source.x, y: source.y};
-        return diagonal({source: o, target: o});
+        var o = { x: source.x, y: source.y };
+        return diagonal({ source: o, target: o });
       })
       .remove();
 
@@ -267,27 +330,32 @@ export default Ember.Component.extend({
   reDraw: function() {
     this.initData();
 
-    var margin = {top: 20, right: 120, bottom: 20, left: 120};
+    var margin = { top: 20, right: 120, bottom: 20, left: 120 };
     var treeWidth = this.maxDepth * 200;
     var treeHeight = this.numOfLeafQueue * INBETWEEN_HEIGHT;
     var width = treeWidth + margin.left + margin.right;
     var height = treeHeight + margin.top + margin.bottom;
 
     if (this.mainSvg) {
-      this.mainSvg.remove();
+      this.mainSvg.selectAll("*").remove();
+    } else {
+      this.mainSvg = d3
+        .select("#" + this.get("parentId"))
+        .append("svg")
+        .attr("width", width)
+        .attr("height", height)
+        .attr("class", "tree-selector");
     }
 
-    this.mainSvg = d3.select("#" + this.get("parentId")).append("svg")
-      .attr("width", width)
-      .attr("height", height)
-      .attr("class", "tree-selector")
+    this.mainSvg
       .append("g")
       .attr("transform", "translate(" + margin.left + "," + margin.top + ")");
 
     var tree = d3.layout.tree().size([treeHeight, treeWidth]);
 
-    var diagonal = d3.svg.diagonal()
-      .projection(function(d) { return [d.y, d.x]; });
+    var diagonal = d3.svg.diagonal().projection(function(d) {
+      return [d.y + 50, d.x];
+    });
 
     var root = this.treeData;
     root.x0 = height / 2;
@@ -299,6 +367,11 @@ export default Ember.Component.extend({
   },
 
   didInsertElement: function() {
-   this.reDraw();
+    this.reDraw();
   }
 });
+
+
+const getUsedCapacity = (partitionMap, filter=PARTITION_LABEL) => {
+  return partitionMap[filter].absoluteUsedCapacity;
+};
\ No newline at end of file
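
One thing worth noting about the helper added at the bottom of tree-selector.js: partitionMap[filter] would throw if a queue lacked the filtered partition, which is why initQueue() prunes such queues from the tree before anything is drawn. A defensive variant, purely illustrative and not part of the patch:

    const PARTITION_LABEL = 'Default partition';

    // Fall back to the default partition, and then to 0, so callers
    // never dereference undefined even for unpruned queues.
    const getUsedCapacity = (partitionMap, filter = PARTITION_LABEL) => {
      const cap = partitionMap[filter] || partitionMap[PARTITION_LABEL] || {};
      return cap.absoluteUsedCapacity || 0;
    };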

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/yarn-queue-partition-capacity-labels.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/yarn-queue-partition-capacity-labels.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/yarn-queue-partition-capacity-labels.js
new file mode 100644
index 0000000..e7f9c03
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/yarn-queue-partition-capacity-labels.js
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from "ember";
+import { PARTITION_LABEL } from "../constants";
+
+export default Ember.Component.extend({
+  didUpdateAttrs: function({ oldAttrs, newAttrs }) {
+    this._super(...arguments);
+    this.set("data", this.initData());
+  },
+
+  init() {
+    this._super(...arguments);
+    this.set("data", this.initData());
+  },
+
+  initData() {
+    const queue = this.get("queue");
+    const partitionMap = this.get("partitionMap");
+    const filteredParition = this.get("filteredPartition") || PARTITION_LABEL;
+    const userLimit = queue.get("userLimit");
+    const userLimitFactor = queue.get("userLimitFactor");
+    const isLeafQueue = queue.get("isLeafQueue");
+
+    return {
+      ...partitionMap[filteredParition],
+      userLimit,
+      userLimitFactor,
+      isLeafQueue
+    };
+  }
+});
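
initData() merges the selected partition's capacity record with queue-level settings via object spread. A sketch of the resulting shape (field names follow the serializer and template in this patch; the sample values are made up):

    // Hypothetical input, mirroring what the serializer produces.
    const partitionMap = {
      "Default partition": {
        absoluteUsedCapacity: 12.5,
        absoluteCapacity: 50,
        absoluteMaxCapacity: 100,
        capacity: 50,
        maxCapacity: 100
      }
    };

    const data = {
      ...partitionMap["Default partition"],
      userLimit: 100,      // queue.get("userLimit")
      userLimitFactor: 1,  // queue.get("userLimitFactor")
      isLeafQueue: true
    };
    // data now carries everything the capacity-labels template reads.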

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/constants.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/constants.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/constants.js
index 29ad4bc..6b37b7f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/constants.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/constants.js
@@ -34,4 +34,6 @@ export const Entities = {
   Memory:'memory',
   Resource: 'resource',
   Unit: 'unit'
-}
\ No newline at end of file
+}
+
+export const PARTITION_LABEL = 'Default partition';
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-queues.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-queues.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-queues.js
index 9658ded..6cc8767 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-queues.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-queues.js
@@ -17,19 +17,39 @@
  */
 
 import Ember from 'ember';
+import {PARTITION_LABEL} from '../constants';
 
 export default Ember.Controller.extend({
   needReload: true,
   selectedQueue: undefined,
   showLoading: true,
+  filteredPartition: PARTITION_LABEL,
 
-  breadcrumbs: [{
-    text: "Home",
-    routeName: 'application'
-  }, {
-    text: "Queues",
-    routeName: 'yarn-queues',
-    model: 'root'
-  }]
+  breadcrumbs: [
+    {
+      text: "Home",
+      routeName: "application"
+    },
+    {
+      text: "Queues",
+      routeName: "yarn-queues",
+      model: "root"
+    }
+  ],
 
+  actions: {
+    setFilter(partition) {
+      this.set("filteredPartition", partition);
+      const model = this.get('model');
+      const {selectedQueue} = model;
+      // If the selected queue does not have the filtered partition
+      // reset it to root
+      if (!selectedQueue.get('partitions').contains(partition)) {
+        const root = model.queues.get('firstObject');
+        document.location.href = "#/yarn-queues/" + root.get("id") + "!";
+        this.set("model.selectedQueue", root);
+        this.set("model.selected", root.get('id'));
+      }
+    }
+  }
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/capacity-queue.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/capacity-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/capacity-queue.js
index f892c2b..c123989 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/capacity-queue.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-queue/capacity-queue.js
@@ -20,24 +20,26 @@ import DS from 'ember-data';
 import Converter from 'yarn-ui/utils/converter';
 
 export default DS.Model.extend({
-  name: DS.attr('string'),
-  children: DS.attr('array'),
-  parent: DS.attr('string'),
-  capacity: DS.attr('number'),
-  maxCapacity: DS.attr('number'),
-  usedCapacity: DS.attr('number'),
-  absCapacity: DS.attr('number'),
-  absMaxCapacity: DS.attr('number'),
-  absUsedCapacity: DS.attr('number'),
-  state: DS.attr('string'),
-  userLimit: DS.attr('number'),
-  userLimitFactor: DS.attr('number'),
-  preemptionDisabled: DS.attr('number'),
-  numPendingApplications: DS.attr('number'),
-  numActiveApplications: DS.attr('number'),
-  users: DS.hasMany('YarnUser'),
-  type: DS.attr('string'),
-  resources: DS.attr('object'),
+  name: DS.attr("string"),
+  children: DS.attr("array"),
+  parent: DS.attr("string"),
+  capacity: DS.attr("number"),
+  partitions: DS.attr("array"),
+  partitionMap: DS.attr("object"),
+  maxCapacity: DS.attr("number"),
+  usedCapacity: DS.attr("number"),
+  absCapacity: DS.attr("number"),
+  absMaxCapacity: DS.attr("number"),
+  absUsedCapacity: DS.attr("number"),
+  state: DS.attr("string"),
+  userLimit: DS.attr("number"),
+  userLimitFactor: DS.attr("number"),
+  preemptionDisabled: DS.attr("number"),
+  numPendingApplications: DS.attr("number"),
+  numActiveApplications: DS.attr("number"),
+  users: DS.hasMany("YarnUser"),
+  type: DS.attr("string"),
+  resources: DS.attr("object"),
 
   isLeafQueue: function() {
     var len = this.get("children.length");
@@ -53,21 +55,29 @@ export default DS.Model.extend({
       {
         label: "Absolute Used",
         style: "primary",
-        value: this.get("name") === "root" ? floatToFixed(this.get("usedCapacity")) : floatToFixed(this.get("absUsedCapacity"))
+        value:
+          this.get("name") === "root"
+            ? floatToFixed(this.get("usedCapacity"))
+            : floatToFixed(this.get("absUsedCapacity"))
       },
       {
         label: "Absolute Capacity",
         style: "primary",
-        value: this.get("name") === "root" ? 100 : floatToFixed(this.get("absCapacity"))
+        value:
+          this.get("name") === "root"
+            ? 100
+            : floatToFixed(this.get("absCapacity"))
       },
       {
         label: "Absolute Max Capacity",
         style: "secondary",
-        value: this.get("name") === "root" ? 100 : floatToFixed(this.get("absMaxCapacity"))
+        value:
+          this.get("name") === "root"
+            ? 100
+            : floatToFixed(this.get("absMaxCapacity"))
       }
     ];
   }.property("absCapacity", "usedCapacity", "absMaxCapacity"),
-
   userUsagesDonutChartData: function() {
     var data = [];
     if (this.get("users")) {
@@ -97,5 +107,5 @@ export default DS.Model.extend({
         value: this.get("numActiveApplications") || 0
       }
     ];
-  }.property("numPendingApplications", "numActiveApplications"),
+  }.property("numPendingApplications", "numActiveApplications")
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js
index 7626598..b171c6e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js
@@ -17,6 +17,7 @@
  */
 
 import DS from 'ember-data';
+import {PARTITION_LABEL} from '../../constants';
 
 export default DS.JSONAPISerializer.extend({
 
@@ -73,6 +74,11 @@ export default DS.JSONAPISerializer.extend({
           numPendingApplications: payload.numPendingApplications,
           numActiveApplications: payload.numActiveApplications,
           resources: payload.resources,
+          partitions: payload.capacities.queueCapacitiesByPartition.map(cap => cap.partitionName || PARTITION_LABEL),
+          partitionMap: payload.capacities.queueCapacitiesByPartition.reduce((init, cap) => {
+            init[cap.partitionName || PARTITION_LABEL] = cap;
+            return init;
+          }, {}),
           type: "capacity",
         },
         // Relationships
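
The serializer derives two parallel views from the REST payload: a partitions array that feeds the filter dropdown, and a partitionMap keyed by partition name for per-partition lookups. A standalone sketch of that transform (payload shape inferred from the diff; an empty partitionName denotes the default partition):

    const PARTITION_LABEL = 'Default partition';

    // Example payload fragment as the RM scheduler REST API might
    // return it (shape assumed from the serializer above).
    const capacities = {
      queueCapacitiesByPartition: [
        { partitionName: "", absoluteUsedCapacity: 10 },
        { partitionName: "gpu", absoluteUsedCapacity: 40 }
      ]
    };

    const partitions = capacities.queueCapacitiesByPartition
      .map(cap => cap.partitionName || PARTITION_LABEL);
    // => ["Default partition", "gpu"]

    const partitionMap = capacities.queueCapacitiesByPartition
      .reduce((init, cap) => {
        init[cap.partitionName || PARTITION_LABEL] = cap;
        return init;
      }, {});
    // => { "Default partition": {...}, gpu: {...} }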

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.scss
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.scss b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.scss
index 3919ac3..5d99d8e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.scss
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.scss
@@ -3,6 +3,7 @@
 @import 'yarn-app.scss';
 @import './compose-box.scss';
 @import 'em-table.scss';
+@import './yarn-queues.scss';
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/yarn-queues.scss
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/yarn-queues.scss b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/yarn-queues.scss
new file mode 100644
index 0000000..8852270
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/yarn-queues.scss
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+.filter-partitions {
+  padding: 15px;
+  margin-left: auto;
+  label {
+    font-weight: 500;
+  }
+  .filter-queue-by-labels {
+    display: inline-block;
+    max-width: 350px;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-navigator.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-navigator.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-navigator.hbs
index e3b0a90..b063aae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-navigator.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/queue-navigator.hbs
@@ -21,9 +21,25 @@
   <div class="col-md-12 container-fluid">
     <div class="panel panel-default" id="tree-selector-container">
       <div class="panel-heading">
-        Scheduler: {{model.firstObject.type}}
+        {{#if filteredPartition}}
+           {{model.firstObject.type}} scheduler - Showing queues from partition {{lower filteredPartition}}
+        {{else}}
+          {{model.firstObject.type}} scheduler - Showing queues from all partitions
+        {{/if}}
       </div>
-     {{tree-selector model=model parentId="tree-selector-container" selected=selected used=used max=max}}
+       {{#if (eq model.firstObject.type "capacity")}}
+       <div class="flex">
+        <div class="filter-partitions flex-right">
+          <label><i class="glyphicon glyphicon-filter"/> Partitions: </label>
+            <select onchange={{action "filterQueuesByPartition" value="target.value"}} class="form-control js-filter-queue-by-labels">
+              {{#each model.firstObject.partitions as |part|}}
+                <option value={{part}}>{{part}}</option>
+              {{/each}}
+            </select>
+        </div>
+       </div>
+      {{/if}}
+     {{tree-selector model=model parentId="tree-selector-container" selected=selected used=used max=max filteredPartition=filteredPartition}}
     </div>
   </div>
 </div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue-partition-capacity-labels.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue-partition-capacity-labels.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue-partition-capacity-labels.hbs
new file mode 100644
index 0000000..fdecb2d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue-partition-capacity-labels.hbs
@@ -0,0 +1,54 @@
+{{!
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+}}
+
+<div class="top-1">
+  <span class="yarn-label primary">
+    <span class="label-key">absolute used</span>
+    <span class="label-value">{{data.absoluteUsedCapacity}}%</span>
+  </span>
+  <span class="yarn-label primary">
+    <span class="label-key">absolute capacity</span>
+    <span class="label-value">{{data.absoluteCapacity}}%</span>
+  </span>
+  <span class="yarn-label secondary">
+    <span class="label-key">absolute max capacity</span>
+    <span class="label-value">{{data.absoluteMaxCapacity}}%</span>
+  </span>
+</div>
+<div class="top-1">
+  <span class="yarn-label secondary">
+    <span class="label-key">configured capacity</span>
+    <span class="label-value">{{data.capacity}}%</span>
+  </span>
+  <span class="yarn-label secondary">
+    <span class="label-key">configured max capacity</span>
+    <span class="label-value">{{data.maxCapacity}}%</span>
+  </span>
+</div>
+{{#if data.isLeafQueue}}
+<div class="top-1">
+  <span class="yarn-label secondary">
+    <span class="label-key">user limit</span>
+    <span class="label-value">{{data.userLimit}}%</span>
+  </span>
+  <span class="yarn-label secondary">
+    <span class="label-key">user limit factor</span>
+    <span class="label-value">{{data.userLimitFactor}}</span>
+  </span>
+</div>
+{{/if}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs
index bb9a87e..9ad2a6f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/capacity-queue.hbs
@@ -17,7 +17,7 @@
 }}
 
 {{queue-navigator model=model.queues selected=model.selected
-  used="usedCapacity" max="absMaxCapacity"}}
+  used="usedCapacity" max="absMaxCapacity" setFilter=(action setFilter)}}
 
 <div class="yarn-compose-box yarn-queues-container">
   <div>
@@ -31,36 +31,8 @@
         {{em-table-simple-status-cell content=model.selectedQueue.state}}
       </div>
     {{/if}}
-    <div class="top-1">
-      {{#each model.selectedQueue.capacitiesBarChartData as |item|}}
-        <span class="yarn-label {{item.style}}">
-          <span class="label-key"> {{lower item.label}}</span>
-          <span class="label-value">{{item.value}}%</span>
-        </span>
-      {{/each}}
-    </div>
-    <div class="top-1">
-      <span class="yarn-label secondary">
-        <span class="label-key">configured capacity</span>
-        <span class="label-value">{{model.selectedQueue.capacity}}%</span>
-      </span>
-      <span class="yarn-label secondary">
-        <span class="label-key">configured max capacity</span>
-        <span class="label-value">{{model.selectedQueue.maxCapacity}}%</span>
-      </span>
-    </div>
-    {{#if model.selectedQueue.isLeafQueue}}
-      <div class="top-1">
-        <span class="yarn-label secondary">
-          <span class="label-key">user limit</span>
-          <span class="label-value">{{model.selectedQueue.userLimit}}%</span>
-        </span>
-        <span class="yarn-label secondary">
-          <span class="label-key">user limit factor</span>
-          <span class="label-value">{{model.selectedQueue.userLimitFactor}}</span>
-        </span>
-      </div>
-    {{/if}}
+
+    {{yarn-queue-partition-capacity-labels partitionMap=model.selectedQueue.partitionMap queue=model.selectedQueue filteredPartition=filteredPartition}}
   </div>
 
   <h5> Running Apps </h5>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs
index b3165d5..ede2994 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queues.hbs
@@ -18,9 +18,10 @@
 <div class="queue-page-breadcrumb">
   {{breadcrumb-bar breadcrumbs=breadcrumbs}}
 </div>
+
 <div class="container-fluid">
   {{#if (eq model.queues.firstObject.type "capacity")}}
-    {{yarn-queue.capacity-queue model=model}}
+    {{yarn-queue.capacity-queue model=model setFilter=(action "setFilter") filteredPartition=filteredPartition}}
   {{else if (eq model.queues.firstObject.type "fair")}}
     {{yarn-queue.fair-queue model=model}}
   {{else}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe5b057c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/yarn-queue-partition-capacity-labels-test.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/yarn-queue-partition-capacity-labels-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/yarn-queue-partition-capacity-labels-test.js
new file mode 100644
index 0000000..414e326
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/yarn-queue-partition-capacity-labels-test.js
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { moduleForComponent, test } from 'ember-qunit';
+import hbs from 'htmlbars-inline-precompile';
+
+moduleForComponent('yarn-queue-partition-capacity-labels', 'Integration | Component | yarn queue partition capacity labels', {
+  integration: true
+});
+
+test('it renders', function(assert) {
+
+  // Set any properties with this.set('myProperty', 'value');
+  // Handle any actions with this.on('myAction', function(val) { ... });
+
+  this.render(hbs`{{yarn-queue-partition-capacity-labels}}`);
+
+  assert.equal(this.$().text().trim(), '');
+
+  // Template block usage:
+  this.render(hbs`
+    {{#yarn-queue-partition-capacity-labels}}
+      template block text
+    {{/yarn-queue-partition-capacity-labels}}
+  `);
+
+  assert.equal(this.$().text().trim(), 'template block text');
+});




[48/49] hadoop git commit: YARN-6594. [API] Introduce SchedulingRequest object. (Konstantinos Karanasos via wangda)

Posted by as...@apache.org.
YARN-6594. [API] Introduce SchedulingRequest object. (Konstantinos Karanasos via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d41eec87
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d41eec87
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d41eec87

Branch: refs/heads/YARN-6592
Commit: d41eec87938cd3ceb04032721702dbb07e029d3d
Parents: 276a62d
Author: Wangda Tan <wa...@apache.org>
Authored: Mon Oct 30 16:54:02 2017 -0700
Committer: Arun Suresh <as...@apache.org>
Committed: Fri Dec 29 17:29:15 2017 -0800

----------------------------------------------------------------------
 .../hadoop/yarn/api/records/ResourceSizing.java |  64 +++++
 .../yarn/api/records/SchedulingRequest.java     | 205 ++++++++++++++
 .../src/main/proto/yarn_protos.proto            |  14 +
 .../records/impl/pb/ResourceSizingPBImpl.java   | 117 ++++++++
 .../impl/pb/SchedulingRequestPBImpl.java        | 266 +++++++++++++++++++
 5 files changed, 666 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d41eec87/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java
new file mode 100644
index 0000000..d82be11
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * {@code ResourceSizing} contains information for the size of a
+ * {@link SchedulingRequest}, such as the number of requested allocations and
+ * the resources for each allocation.
+ */
+@Public
+@Unstable
+public abstract class ResourceSizing {
+
+  @Public
+  @Unstable
+  public static ResourceSizing newInstance(Resource resources) {
+    return ResourceSizing.newInstance(1, resources);
+  }
+
+  @Public
+  @Unstable
+  public static ResourceSizing newInstance(int numAllocations, Resource resources) {
+    ResourceSizing resourceSizing = Records.newRecord(ResourceSizing.class);
+    resourceSizing.setNumAllocations(numAllocations);
+    resourceSizing.setResources(resources);
+    return resourceSizing;
+  }
+
+  @Public
+  @Unstable
+  public abstract int getNumAllocations();
+
+  @Public
+  @Unstable
+  public abstract void setNumAllocations(int numAllocations);
+
+  @Public
+  @Unstable
+  public abstract Resource getResources();
+
+  @Public
+  @Unstable
+  public abstract void setResources(Resource resources);
+}
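
A minimal usage sketch for the new record (not part of this patch): both factories above delegate to Records.newRecord, so the PB implementation from hadoop-yarn-common must be on the classpath at runtime; Resource.newInstance(memory, vcores) is the existing factory from the same records package.

    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.api.records.ResourceSizing;

    public class ResourceSizingSketch {
      public static void main(String[] args) {
        // Four identical allocations of 2048 MB and 2 vcores each.
        ResourceSizing sizing =
            ResourceSizing.newInstance(4, Resource.newInstance(2048, 2));

        // The single-argument factory defaults numAllocations to 1.
        ResourceSizing single =
            ResourceSizing.newInstance(Resource.newInstance(2048, 2));

        System.out.println(sizing.getNumAllocations());  // 4
        System.out.println(single.getNumAllocations());  // 1
      }
    }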

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d41eec87/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java
new file mode 100644
index 0000000..47a0697
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java
@@ -0,0 +1,205 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * {@code SchedulingRequest} represents a request made by an application to the
+ * {@code ResourceManager} to obtain an allocation. It is similar to the
+ * {@link ResourceRequest}. However, it is more complete than the latter, as it
+ * allows applications to specify allocation tags (e.g., to express that an
+ * allocation belongs to {@code Spark} or is an {@code HBase-master}), as well
+ * as involved {@link PlacementConstraint}s (e.g., anti-affinity between Spark
+ * and HBase allocations).
+ *
+ * The size specification of the allocation is in {@code ResourceSizing}.
+ */
+@Public
+@Unstable
+public abstract class SchedulingRequest {
+
+  @Public
+  @Unstable
+  public static SchedulingRequest newInstance(long allocationRequestId,
+      Priority priority, ExecutionTypeRequest executionType,
+      Set<String> allocationTags, ResourceSizing resourceSizing,
+      PlacementConstraint placementConstraintExpression) {
+    return SchedulingRequest.newBuilder()
+        .allocationRequestId(allocationRequestId).priority(priority)
+        .executionType(executionType).allocationTags(allocationTags)
+        .resourceSizing(resourceSizing).placementConstraintExpression(placementConstraintExpression).build();
+  }
+
+  @Public
+  @Unstable
+  public static SchedulingRequestBuilder newBuilder() {
+    return new SchedulingRequestBuilder();
+  }
+
+  /**
+   * Class to construct instances of {@link SchedulingRequest} with specific
+   * options.
+   */
+  @Public
+  @Unstable
+  public static final class SchedulingRequestBuilder {
+    private SchedulingRequest schedulingRequest =
+            Records.newRecord(SchedulingRequest.class);
+
+    private SchedulingRequestBuilder() {
+      schedulingRequest.setAllocationRequestId(0);
+      schedulingRequest.setPriority(Priority.newInstance(0));
+      schedulingRequest.setExecutionType(ExecutionTypeRequest.newInstance());
+    }
+
+    /**
+     * Set the <code>allocationRequestId</code> of the request.
+     * 
+     * @see SchedulingRequest#setAllocationRequestId(long)
+     * @param allocationRequestId <code>allocationRequestId</code> of the
+     *          request
+     * @return {@link SchedulingRequest.SchedulingRequestBuilder}
+     */
+    @Public
+    @Unstable
+    public SchedulingRequestBuilder allocationRequestId(
+            long allocationRequestId) {
+      schedulingRequest.setAllocationRequestId(allocationRequestId);
+      return this;
+    }
+
+    /**
+     * Set the <code>priority</code> of the request.
+     *
+     * @param priority <code>priority</code> of the request
+     * @return {@link SchedulingRequest.SchedulingRequestBuilder}
+     * @see SchedulingRequest#setPriority(Priority)
+     */
+    @Public
+    @Unstable
+    public SchedulingRequestBuilder priority(Priority priority) {
+      schedulingRequest.setPriority(priority);
+      return this;
+    }
+
+    /**
+     * Set the <code>executionType</code> of the request.
+     * 
+     * @see SchedulingRequest#setExecutionType(ExecutionTypeRequest)
+     * @param executionType <code>executionType</code> of the request
+     * @return {@link SchedulingRequest.SchedulingRequestBuilder}
+     */
+    @Public
+    @Unstable
+    public SchedulingRequestBuilder executionType(
+        ExecutionTypeRequest executionType) {
+      schedulingRequest.setExecutionType(executionType);
+      return this;
+    }
+    
+    /**
+     * Set the <code>allocationTags</code> of the request.
+     *
+     * @see SchedulingRequest#setAllocationTags(Set)
+     * @param allocationTags <code>allocationTags</code> of the request
+     * @return {@link SchedulingRequest.SchedulingRequestBuilder}
+     */
+    @Public
+    @Unstable
+    public SchedulingRequestBuilder allocationTags(Set<String> allocationTags) {
+      schedulingRequest.setAllocationTags(allocationTags);
+      return this;
+    }
+
+    /**
+     * Set the <code>resourceSizing</code> of the request.
+     *
+     * @see SchedulingRequest#setResourceSizing(ResourceSizing)
+     * @param resourceSizing <code>resourceSizing</code> of the request
+     * @return {@link SchedulingRequest.SchedulingRequestBuilder}
+     */
+    @Public
+    @Unstable
+    public SchedulingRequestBuilder resourceSizing(
+        ResourceSizing resourceSizing) {
+      schedulingRequest.setResourceSizing(resourceSizing);
+      return this;
+    }
+
+    /**
+     * Set the <code>placementConstraintExpression</code> of the request.
+     *
+     * @see SchedulingRequest#setPlacementConstraint(
+     *      PlacementConstraint)
+     * @param placementConstraintExpression <code>placementConstraints</code> of
+     *          the request
+     * @return {@link SchedulingRequest.SchedulingRequestBuilder}
+     */
+    @Public
+    @Unstable
+    public SchedulingRequestBuilder placementConstraintExpression(
+        PlacementConstraint placementConstraintExpression) {
+      schedulingRequest
+          .setPlacementConstraint(placementConstraintExpression);
+      return this;
+    }
+
+    /**
+     * Return generated {@link SchedulingRequest} object.
+     * 
+     * @return {@link SchedulingRequest}
+     */
+    @Public
+    @Unstable
+    public SchedulingRequest build() {
+      return schedulingRequest;
+    }
+  }
+
+  public abstract long getAllocationRequestId();
+
+  public abstract void setAllocationRequestId(long allocationRequestId);
+
+  public abstract Priority getPriority();
+
+  public abstract void setPriority(Priority priority);
+
+  public abstract ExecutionTypeRequest getExecutionType();
+
+  public abstract void setExecutionType(ExecutionTypeRequest executionType);
+
+  public abstract Set<String> getAllocationTags();
+
+  public abstract void setAllocationTags(Set<String> allocationTags);
+
+  public abstract ResourceSizing getResourceSizing();
+
+  public abstract void setResourceSizing(ResourceSizing resourceSizing);
+
+  public abstract PlacementConstraint getPlacementConstraint();
+
+  public abstract void setPlacementConstraint(
+      PlacementConstraint placementConstraint);
+}
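
As an illustration of the builder (the id and tag values below are placeholders, and the placement constraint is omitted for brevity): ExecutionTypeRequest.newInstance() and Priority.newInstance(int) are the existing record factories also used for the builder's own defaults.

    import java.util.Collections;

    import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.api.records.ResourceSizing;
    import org.apache.hadoop.yarn.api.records.SchedulingRequest;

    public class SchedulingRequestSketch {
      public static void main(String[] args) {
        SchedulingRequest request = SchedulingRequest.newBuilder()
            .allocationRequestId(42L)                          // placeholder
            .priority(Priority.newInstance(1))
            .executionType(ExecutionTypeRequest.newInstance())
            .allocationTags(Collections.singleton("hbase-master"))
            .resourceSizing(
                ResourceSizing.newInstance(1, Resource.newInstance(4096, 4)))
            .build();

        System.out.println(request.getAllocationTags());  // [hbase-master]
      }
    }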

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d41eec87/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 968b75e..2dbdefb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -405,6 +405,20 @@ message ExecutionTypeRequestProto {
   optional bool enforce_execution_type = 2 [default = false];
 }
 
+message SchedulingRequestProto {
+  optional int64 allocationRequestId = 1 [default = 0];
+  optional PriorityProto priority = 2;
+  optional ExecutionTypeRequestProto executionType = 3;
+  repeated string allocationTags = 4;
+  optional ResourceSizingProto resourceSizing = 5;
+  optional PlacementConstraintProto placementConstraint = 6;
+}
+
+message ResourceSizingProto {
+  optional int32 numAllocations = 1;
+  optional ResourceProto resources = 2;
+}
+
 enum AMCommandProto {
   AM_RESYNC = 1;
   AM_SHUTDOWN = 2;
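
On the wire these are plain protobuf messages, so (assuming the standard protoc-generated builders under org.apache.hadoop.yarn.proto.YarnProtos) a request can also be assembled directly; the field values here are placeholders.

    import org.apache.hadoop.yarn.proto.YarnProtos.ResourceSizingProto;
    import org.apache.hadoop.yarn.proto.YarnProtos.SchedulingRequestProto;

    public class SchedulingRequestProtoSketch {
      public static void main(String[] args) {
        SchedulingRequestProto proto = SchedulingRequestProto.newBuilder()
            .setAllocationRequestId(42L)             // field 1, defaults to 0
            .addAllocationTags("spark")              // repeated field 4
            .setResourceSizing(ResourceSizingProto.newBuilder()
                .setNumAllocations(2))               // nested message, field 5
            .build();

        System.out.println(proto.getAllocationTagsCount());  // 1
      }
    }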

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d41eec87/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
new file mode 100644
index 0000000..05bb3bd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
@@ -0,0 +1,117 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceSizingProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceSizingProtoOrBuilder;
+
+@Private
+@Unstable
+public class ResourceSizingPBImpl extends ResourceSizing {
+  ResourceSizingProto proto = ResourceSizingProto.getDefaultInstance();
+  ResourceSizingProto.Builder builder = null;
+  boolean viaProto = false;
+
+  private Resource resources = null;
+
+  public ResourceSizingPBImpl() {
+    builder = ResourceSizingProto.newBuilder();
+  }
+
+  public ResourceSizingPBImpl(ResourceSizingProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  public ResourceSizingProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.resources != null) {
+      builder.setResources(convertToProtoFormat(this.resources));
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = ResourceSizingProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  @Override
+  public int getNumAllocations() {
+    ResourceSizingProtoOrBuilder p = viaProto ? proto : builder;
+    return (p.getNumAllocations());
+  }
+
+  @Override
+  public void setNumAllocations(int numAllocations) {
+    maybeInitBuilder();
+    builder.setNumAllocations(numAllocations);
+  }
+
+  @Override
+  public Resource getResources() {
+    ResourceSizingProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.resources != null) {
+      return this.resources;
+    }
+    if (!p.hasResources()) {
+      return null;
+    }
+    this.resources = convertFromProtoFormat(p.getResources());
+    return this.resources;
+  }
+
+  @Override
+  public void setResources(Resource resources) {
+    maybeInitBuilder();
+    if (resources == null) {
+      builder.clearResources();
+    }
+    this.resources = resources;
+  }
+
+  private ResourcePBImpl convertFromProtoFormat(ResourceProto r) {
+    return new ResourcePBImpl(r);
+  }
+
+  private ResourceProto convertToProtoFormat(Resource r) {
+    return ((ResourcePBImpl) r).getProto();
+  }
+}
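
This follows the usual YARN PBImpl copy-on-write pattern: setters cache records locally after maybeInitBuilder(), and getProto() merges the cache back into the builder. A round-trip sketch; Records.newRecord is used so the Resource is PB-backed, since convertToProtoFormat above casts to ResourcePBImpl.

    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.api.records.ResourceSizing;
    import org.apache.hadoop.yarn.api.records.impl.pb.ResourceSizingPBImpl;
    import org.apache.hadoop.yarn.proto.YarnProtos.ResourceSizingProto;
    import org.apache.hadoop.yarn.util.Records;

    public class ResourceSizingPBImplSketch {
      public static void main(String[] args) {
        Resource res = Records.newRecord(Resource.class);  // PB-backed Resource
        res.setMemorySize(1024);
        res.setVirtualCores(1);

        ResourceSizingPBImpl sizing = new ResourceSizingPBImpl();
        sizing.setNumAllocations(3);
        sizing.setResources(res);

        // getProto() folds the cached Resource into the builder.
        ResourceSizingProto proto = sizing.getProto();

        // Rebuilding from the proto yields an equivalent record.
        ResourceSizing copy = new ResourceSizingPBImpl(proto);
        System.out.println(copy.getNumAllocations());  // 3
      }
    }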

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d41eec87/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
new file mode 100644
index 0000000..7826b36
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
@@ -0,0 +1,266 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.pb.PlacementConstraintFromProtoConverter;
+import org.apache.hadoop.yarn.api.pb.PlacementConstraintToProtoConverter;
+import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.proto.YarnProtos.ExecutionTypeRequestProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceSizingProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.SchedulingRequestProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.SchedulingRequestProtoOrBuilder;
+
+@Private
+@Unstable
+public class SchedulingRequestPBImpl extends SchedulingRequest {
+  SchedulingRequestProto proto = SchedulingRequestProto.getDefaultInstance();
+  SchedulingRequestProto.Builder builder = null;
+  boolean viaProto = false;
+
+  private Priority priority = null;
+  private ExecutionTypeRequest executionType = null;
+  private Set<String> allocationTags = null;
+  private ResourceSizing resourceSizing = null;
+  private PlacementConstraint placementConstraint = null;
+
+  public SchedulingRequestPBImpl() {
+    builder = SchedulingRequestProto.newBuilder();
+  }
+
+  public SchedulingRequestPBImpl(SchedulingRequestProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  public SchedulingRequestProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.priority != null) {
+      builder.setPriority(convertToProtoFormat(this.priority));
+    }
+    if (this.executionType != null) {
+      builder.setExecutionType(convertToProtoFormat(this.executionType));
+    }
+    if (this.allocationTags != null) {
+      builder.clearAllocationTags();
+      builder.addAllAllocationTags(this.allocationTags);
+    }
+    if (this.resourceSizing != null) {
+      builder.setResourceSizing(convertToProtoFormat(this.resourceSizing));
+    }
+    if (this.placementConstraint != null) {
+      builder.setPlacementConstraint(
+          convertToProtoFormat(this.placementConstraint));
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = SchedulingRequestProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  @Override
+  public long getAllocationRequestId() {
+    SchedulingRequestProtoOrBuilder p = viaProto ? proto : builder;
+    return (p.getAllocationRequestId());
+  }
+
+  @Override
+  public void setAllocationRequestId(long allocationRequestId) {
+    maybeInitBuilder();
+    builder.setAllocationRequestId(allocationRequestId);
+  }
+
+  @Override
+  public Priority getPriority() {
+    SchedulingRequestProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.priority != null) {
+      return this.priority;
+    }
+    if (!p.hasPriority()) {
+      return null;
+    }
+    this.priority = convertFromProtoFormat(p.getPriority());
+    return this.priority;
+  }
+
+  @Override
+  public void setPriority(Priority priority) {
+    maybeInitBuilder();
+    if (priority == null) {
+      builder.clearPriority();
+    }
+    this.priority = priority;
+  }
+
+  @Override
+  public ExecutionTypeRequest getExecutionType() {
+    SchedulingRequestProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.executionType != null) {
+      return this.executionType;
+    }
+    if (!p.hasExecutionType()) {
+      return null;
+    }
+    this.executionType = convertFromProtoFormat(p.getExecutionType());
+    return this.executionType;
+  }
+
+  @Override
+  public void setExecutionType(ExecutionTypeRequest executionType) {
+    maybeInitBuilder();
+    if (executionType == null) {
+      builder.clearExecutionType();
+    }
+    this.executionType = executionType;
+  }
+
+  @Override
+  public Set<String> getAllocationTags() {
+    initAllocationTags();
+    return this.allocationTags;
+  }
+
+  @Override
+  public void setAllocationTags(Set<String> allocationTags) {
+    maybeInitBuilder();
+    builder.clearAllocationTags();
+    this.allocationTags = allocationTags;
+  }
+
+  @Override
+  public ResourceSizing getResourceSizing() {
+    SchedulingRequestProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.resourceSizing != null) {
+      return this.resourceSizing;
+    }
+    if (!p.hasResourceSizing()) {
+      return null;
+    }
+    this.resourceSizing = convertFromProtoFormat(p.getResourceSizing());
+    return this.resourceSizing;
+  }
+
+  @Override
+  public void setResourceSizing(ResourceSizing resourceSizing) {
+    maybeInitBuilder();
+    if (resourceSizing == null) {
+      builder.clearResourceSizing();
+    }
+    this.resourceSizing = resourceSizing;
+  }
+
+  @Override
+  public PlacementConstraint getPlacementConstraint() {
+    SchedulingRequestProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.placementConstraint != null) {
+      return this.placementConstraint;
+    }
+    if (!p.hasPlacementConstraint()) {
+      return null;
+    }
+    this.placementConstraint =
+        convertFromProtoFormat(p.getPlacementConstraint());
+    return this.placementConstraint;
+  }
+
+  @Override
+  public void setPlacementConstraint(PlacementConstraint placementConstraint) {
+    maybeInitBuilder();
+    if (placementConstraint == null) {
+      builder.clearPlacementConstraint();
+    }
+    this.placementConstraint = placementConstraint;
+  }
+
+  private PriorityPBImpl convertFromProtoFormat(PriorityProto p) {
+    return new PriorityPBImpl(p);
+  }
+
+  private PriorityProto convertToProtoFormat(Priority p) {
+    return ((PriorityPBImpl) p).getProto();
+  }
+
+  private ExecutionTypeRequestPBImpl convertFromProtoFormat(
+      ExecutionTypeRequestProto p) {
+    return new ExecutionTypeRequestPBImpl(p);
+  }
+
+  private ExecutionTypeRequestProto convertToProtoFormat(
+      ExecutionTypeRequest p) {
+    return ((ExecutionTypeRequestPBImpl) p).getProto();
+  }
+
+  private ResourceSizingPBImpl convertFromProtoFormat(ResourceSizingProto p) {
+    return new ResourceSizingPBImpl(p);
+  }
+
+  private ResourceSizingProto convertToProtoFormat(ResourceSizing p) {
+    return ((ResourceSizingPBImpl) p).getProto();
+  }
+
+  private PlacementConstraint convertFromProtoFormat(
+      PlacementConstraintProto c) {
+    PlacementConstraintFromProtoConverter fromProtoConverter =
+        new PlacementConstraintFromProtoConverter(c);
+    return fromProtoConverter.convert();
+  }
+
+  private PlacementConstraintProto convertToProtoFormat(PlacementConstraint c) {
+    PlacementConstraintToProtoConverter toProtoConverter =
+        new PlacementConstraintToProtoConverter(c);
+    return toProtoConverter.convert();
+  }
+
+  private void initAllocationTags() {
+    if (this.allocationTags != null) {
+      return;
+    }
+    SchedulingRequestProtoOrBuilder p = viaProto ? proto : builder;
+    this.allocationTags = new HashSet<>();
+    this.allocationTags.addAll(p.getAllocationTagsList());
+  }
+}
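
The same pattern applies here; allocation tags live in a locally cached Set until getProto() replays them through addAllAllocationTags. A round-trip sketch with placeholder values:

    import java.util.Arrays;
    import java.util.HashSet;

    import org.apache.hadoop.yarn.api.records.SchedulingRequest;
    import org.apache.hadoop.yarn.api.records.impl.pb.SchedulingRequestPBImpl;
    import org.apache.hadoop.yarn.proto.YarnProtos.SchedulingRequestProto;

    public class SchedulingRequestPBImplSketch {
      public static void main(String[] args) {
        SchedulingRequestPBImpl impl = new SchedulingRequestPBImpl();
        impl.setAllocationRequestId(7L);
        impl.setAllocationTags(
            new HashSet<>(Arrays.asList("spark", "executor")));

        // Serialize, then parse back through the proto constructor.
        SchedulingRequestProto proto = impl.getProto();
        SchedulingRequest parsed = new SchedulingRequestPBImpl(proto);

        System.out.println(parsed.getAllocationTags().size());  // 2
      }
    }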




[45/49] hadoop git commit: YARN-7653. Node group support for AllocationTagsManager. (Panagiotis Garefalakis via asuresh)

Posted by as...@apache.org.
YARN-7653. Node group support for AllocationTagsManager. (Panagiotis Garefalakis via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94429e3c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94429e3c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94429e3c

Branch: refs/heads/YARN-6592
Commit: 94429e3c23501514af5bba268860325370be172d
Parents: 6062844
Author: Arun Suresh <as...@apache.org>
Authored: Fri Dec 22 07:24:37 2017 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Fri Dec 29 17:29:15 2017 -0800

----------------------------------------------------------------------
 .../server/resourcemanager/ResourceManager.java |   2 +-
 .../constraint/AllocationTagsManager.java       | 282 ++++++++++++++-----
 .../rmcontainer/TestRMContainerImpl.java        |   2 +-
 .../constraint/TestAllocationTagsManager.java   | 269 ++++++++++++------
 4 files changed, 392 insertions(+), 163 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/94429e3c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index adda465..d71f224 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -493,7 +493,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
   }
 
   protected AllocationTagsManager createAllocationTagsManager() {
-    return new AllocationTagsManager();
+    return new AllocationTagsManager(this.rmContext);
   }
   
   protected DelegationTokenRenewer createDelegationTokenRenewer() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94429e3c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
index c278606..7b0b959 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.log4j.Logger;
 
 import java.util.HashMap;
@@ -38,9 +39,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.function.LongBinaryOperator;
 
 /**
- * Support storing maps between container-tags/applications and
- * nodes. This will be required by affinity/anti-affinity implementation and
- * cardinality.
+ * In-memory mapping between applications/container-tags and nodes/racks.
+ * Required by constrained affinity/anti-affinity and cardinality placement.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -51,48 +51,54 @@ public class AllocationTagsManager {
 
   private ReentrantReadWriteLock.ReadLock readLock;
   private ReentrantReadWriteLock.WriteLock writeLock;
+  private final RMContext rmContext;
 
-  // Application's tags to node
-  private Map<ApplicationId, NodeToCountedTags> perAppMappings =
+  // Application's tags to Node
+  private Map<ApplicationId, NodeToCountedTags> perAppNodeMappings =
+      new HashMap<>();
+  // Application's tags to Rack
+  private Map<ApplicationId, NodeToCountedTags> perAppRackMappings =
       new HashMap<>();
 
   // Global tags to node mapping (used to fast return aggregated tags
   // cardinality across apps)
-  private NodeToCountedTags globalMapping = new NodeToCountedTags();
+  private NodeToCountedTags<NodeId> globalNodeMapping = new NodeToCountedTags();
+  // Global tags to Rack mapping
+  private NodeToCountedTags<String> globalRackMapping = new NodeToCountedTags();
 
   /**
-   * Store node to counted tags.
+   * Generic store that maps a key of type T to counted tags.
+   * Currently used for both NodeId-to-(tag, count) and rack-to-(tag, count).
    */
   @VisibleForTesting
-  static class NodeToCountedTags {
-    // Map<NodeId, Map<Tag, Count>>
-    private Map<NodeId, Map<String, Long>> nodeToTagsWithCount =
-        new HashMap<>();
+  static class NodeToCountedTags<T> {
+    // Map<Type, Map<Tag, Count>>
+    private Map<T, Map<String, Long>> typeToTagsWithCount = new HashMap<>();
 
     // protected by external locks
-    private void addTagsToNode(NodeId nodeId, Set<String> tags) {
-      Map<String, Long> innerMap = nodeToTagsWithCount.computeIfAbsent(nodeId,
-          k -> new HashMap<>());
+    private void addTags(T type, Set<String> tags) {
+      Map<String, Long> innerMap =
+          typeToTagsWithCount.computeIfAbsent(type, k -> new HashMap<>());
 
       for (String tag : tags) {
         Long count = innerMap.get(tag);
         if (count == null) {
           innerMap.put(tag, 1L);
-        } else{
+        } else {
           innerMap.put(tag, count + 1);
         }
       }
     }
 
     // protected by external locks
-    private void addTagToNode(NodeId nodeId, String tag) {
-      Map<String, Long> innerMap = nodeToTagsWithCount.computeIfAbsent(nodeId,
-          k -> new HashMap<>());
+    private void addTag(T type, String tag) {
+      Map<String, Long> innerMap =
+          typeToTagsWithCount.computeIfAbsent(type, k -> new HashMap<>());
 
       Long count = innerMap.get(tag);
       if (count == null) {
         innerMap.put(tag, 1L);
-      } else{
+      } else {
         innerMap.put(tag, count + 1);
       }
     }
@@ -104,17 +110,17 @@ public class AllocationTagsManager {
       } else {
         if (count <= 0) {
           LOG.warn(
-              "Trying to remove tags from node, however the count already"
+              "Trying to remove tags from node/rack, however the count already"
                   + " becomes 0 or less, it could be a potential bug.");
         }
         innerMap.remove(tag);
       }
     }
 
-    private void removeTagsFromNode(NodeId nodeId, Set<String> tags) {
-      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
+    private void removeTags(T type, Set<String> tags) {
+      Map<String, Long> innerMap = typeToTagsWithCount.get(type);
       if (innerMap == null) {
-        LOG.warn("Failed to find node=" + nodeId
+        LOG.warn("Failed to find node/rack=" + type
             + " while trying to remove tags, please double check.");
         return;
       }
@@ -124,14 +130,14 @@ public class AllocationTagsManager {
       }
 
       if (innerMap.isEmpty()) {
-        nodeToTagsWithCount.remove(nodeId);
+        typeToTagsWithCount.remove(type);
       }
     }
 
-    private void removeTagFromNode(NodeId nodeId, String tag) {
-      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
+    private void removeTag(T type, String tag) {
+      Map<String, Long> innerMap = typeToTagsWithCount.get(type);
       if (innerMap == null) {
-        LOG.warn("Failed to find node=" + nodeId
+        LOG.warn("Failed to find node/rack=" + type
             + " while trying to remove tags, please double check.");
         return;
       }
@@ -139,12 +145,12 @@ public class AllocationTagsManager {
       removeTagFromInnerMap(innerMap, tag);
 
       if (innerMap.isEmpty()) {
-        nodeToTagsWithCount.remove(nodeId);
+        typeToTagsWithCount.remove(type);
       }
     }
 
-    private long getCardinality(NodeId nodeId, String tag) {
-      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
+    private long getCardinality(T type, String tag) {
+      Map<String, Long> innerMap = typeToTagsWithCount.get(type);
       if (innerMap == null) {
         return 0;
       }
@@ -152,9 +158,9 @@ public class AllocationTagsManager {
       return value == null ? 0 : value;
     }
 
-    private long getCardinality(NodeId nodeId, Set<String> tags,
+    private long getCardinality(T type, Set<String> tags,
         LongBinaryOperator op) {
-      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
+      Map<String, Long> innerMap = typeToTagsWithCount.get(type);
       if (innerMap == null) {
         return 0;
       }
@@ -193,29 +199,40 @@ public class AllocationTagsManager {
     }
 
     private boolean isEmpty() {
-      return nodeToTagsWithCount.isEmpty();
+      return typeToTagsWithCount.isEmpty();
     }
 
     @VisibleForTesting
-    public Map<NodeId, Map<String, Long>> getNodeToTagsWithCount() {
-      return nodeToTagsWithCount;
+    public Map<T, Map<String, Long>> getTypeToTagsWithCount() {
+      return typeToTagsWithCount;
     }
   }
 
   @VisibleForTesting
-  Map<ApplicationId, NodeToCountedTags> getPerAppMappings() {
-    return perAppMappings;
+  Map<ApplicationId, NodeToCountedTags> getPerAppNodeMappings() {
+    return perAppNodeMappings;
+  }
+
+  @VisibleForTesting
+  Map<ApplicationId, NodeToCountedTags> getPerAppRackMappings() {
+    return perAppRackMappings;
+  }
+
+  @VisibleForTesting
+  NodeToCountedTags getGlobalNodeMapping() {
+    return globalNodeMapping;
   }
 
   @VisibleForTesting
-  NodeToCountedTags getGlobalMapping() {
-    return globalMapping;
+  NodeToCountedTags getGlobalRackMapping() {
+    return globalRackMapping;
   }
 
-  public AllocationTagsManager() {
+  public AllocationTagsManager(RMContext context) {
     ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
     readLock = lock.readLock();
     writeLock = lock.writeLock();
+    rmContext = context;
   }
 
   /**
@@ -243,21 +260,30 @@ public class AllocationTagsManager {
 
     writeLock.lock();
     try {
-      NodeToCountedTags perAppTagsMapping = perAppMappings.computeIfAbsent(
-          applicationId, k -> new NodeToCountedTags());
-
+      NodeToCountedTags perAppTagsMapping = perAppNodeMappings
+          .computeIfAbsent(applicationId, k -> new NodeToCountedTags());
+      NodeToCountedTags perAppRackTagsMapping = perAppRackMappings
+          .computeIfAbsent(applicationId, k -> new NodeToCountedTags());
+      // Covering test-cases where context is mocked
+      String nodeRack = (rmContext.getRMNodes() != null
+          && rmContext.getRMNodes().get(nodeId) != null)
+              ? rmContext.getRMNodes().get(nodeId).getRackName()
+              : "default-rack";
       if (useSet) {
-        perAppTagsMapping.addTagsToNode(nodeId, allocationTags);
-        globalMapping.addTagsToNode(nodeId, allocationTags);
+        perAppTagsMapping.addTags(nodeId, allocationTags);
+        perAppRackTagsMapping.addTags(nodeRack, allocationTags);
+        globalNodeMapping.addTags(nodeId, allocationTags);
+        globalRackMapping.addTags(nodeRack, allocationTags);
       } else {
-        perAppTagsMapping.addTagToNode(nodeId, applicationIdTag);
-        globalMapping.addTagToNode(nodeId, applicationIdTag);
+        perAppTagsMapping.addTag(nodeId, applicationIdTag);
+        perAppRackTagsMapping.addTag(nodeRack, applicationIdTag);
+        globalNodeMapping.addTag(nodeId, applicationIdTag);
+        globalRackMapping.addTag(nodeRack, applicationIdTag);
       }
 
       if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "Added container=" + containerId + " with tags=[" + StringUtils
-                .join(allocationTags, ",") + "]");
+        LOG.debug("Added container=" + containerId + " with tags=["
+            + StringUtils.join(allocationTags, ",") + "]");
       }
     } finally {
       writeLock.unlock();
@@ -287,27 +313,40 @@ public class AllocationTagsManager {
 
     writeLock.lock();
     try {
-      NodeToCountedTags perAppTagsMapping = perAppMappings.get(applicationId);
+      NodeToCountedTags perAppTagsMapping =
+          perAppNodeMappings.get(applicationId);
+      NodeToCountedTags perAppRackTagsMapping =
+          perAppRackMappings.get(applicationId);
       if (perAppTagsMapping == null) {
         return;
       }
-
+      // Covering test-cases where context is mocked
+      String nodeRack = (rmContext.getRMNodes() != null
+          && rmContext.getRMNodes().get(nodeId) != null)
+              ? rmContext.getRMNodes().get(nodeId).getRackName()
+              : "default-rack";
       if (useSet) {
-        perAppTagsMapping.removeTagsFromNode(nodeId, allocationTags);
-        globalMapping.removeTagsFromNode(nodeId, allocationTags);
+        perAppTagsMapping.removeTags(nodeId, allocationTags);
+        perAppRackTagsMapping.removeTags(nodeRack, allocationTags);
+        globalNodeMapping.removeTags(nodeId, allocationTags);
+        globalRackMapping.removeTags(nodeRack, allocationTags);
       } else {
-        perAppTagsMapping.removeTagFromNode(nodeId, applicationIdTag);
-        globalMapping.removeTagFromNode(nodeId, applicationIdTag);
+        perAppTagsMapping.removeTag(nodeId, applicationIdTag);
+        perAppRackTagsMapping.removeTag(nodeRack, applicationIdTag);
+        globalNodeMapping.removeTag(nodeId, applicationIdTag);
+        globalRackMapping.removeTag(nodeRack, applicationIdTag);
       }
 
       if (perAppTagsMapping.isEmpty()) {
-        perAppMappings.remove(applicationId);
+        perAppNodeMappings.remove(applicationId);
+      }
+      if (perAppRackTagsMapping.isEmpty()) {
+        perAppRackMappings.remove(applicationId);
       }
 
       if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "Removed container=" + containerId + " with tags=[" + StringUtils
-                .join(allocationTags, ",") + "]");
+        LOG.debug("Removed container=" + containerId + " with tags=["
+            + StringUtils.join(allocationTags, ",") + "]");
       }
     } finally {
       writeLock.unlock();
@@ -315,18 +354,16 @@ public class AllocationTagsManager {
   }
 
   /**
-   * Get cardinality for following conditions. External can pass-in a binary op
-   * to implement customized logic.   *
+   * Get node cardinality for a specific tag.
+   * When applicationId is null, the method returns the aggregated cardinality.
+   *
    * @param nodeId        nodeId, required.
    * @param applicationId applicationId. When null is specified, return
    *                      aggregated cardinality among all nodes.
    * @param tag           allocation tag, see
    *                      {@link SchedulingRequest#getAllocationTags()},
-   *                      When multiple tags specified. Returns cardinality
-   *                      depends on op. If a specified tag doesn't exist,
-   *                      0 will be its cardinality.
-   *                      When null/empty tags specified, all tags
-   *                      (of the node/app) will be considered.
+   *                      If a specified tag doesn't exist,
+   *                      the method returns 0.
    * @return cardinality of specified query on the node.
    * @throws InvalidAllocationTagsQueryException when illegal query
    *                                            parameter specified
@@ -338,14 +375,14 @@ public class AllocationTagsManager {
     try {
       if (nodeId == null) {
         throw new InvalidAllocationTagsQueryException(
-            "Must specify nodeId/tags/op to query cardinality");
+            "Must specify nodeId/tag to query cardinality");
       }
 
       NodeToCountedTags mapping;
       if (applicationId != null) {
-        mapping = perAppMappings.get(applicationId);
-      } else{
-        mapping = globalMapping;
+        mapping = perAppNodeMappings.get(applicationId);
+      } else {
+        mapping = globalNodeMapping;
       }
 
       if (mapping == null) {
@@ -359,11 +396,54 @@ public class AllocationTagsManager {
   }
 
   /**
+   * Get Rack cardinality for a specific tag.
+   *
+   * @param rack          rack, required.
+   * @param applicationId applicationId. When null is specified, return
+   *                      aggregated cardinality among all applications.
+   * @param tag           allocation tag, see
+   *                      {@link SchedulingRequest#getAllocationTags()},
+   *                      If a specified tag doesn't exist,
+   *                      the method returns 0.
+   * @return cardinality of specified query on the rack.
+   * @throws InvalidAllocationTagsQueryException when illegal query
+   *                                            parameter specified
+   */
+  public long getRackCardinality(String rack, ApplicationId applicationId,
+      String tag) throws InvalidAllocationTagsQueryException {
+    readLock.lock();
+
+    try {
+      if (rack == null) {
+        throw new InvalidAllocationTagsQueryException(
+            "Must specify rack/tag to query cardinality");
+      }
+
+      NodeToCountedTags mapping;
+      if (applicationId != null) {
+        mapping = perAppRackMappings.get(applicationId);
+      } else {
+        mapping = globalRackMapping;
+      }
+
+      if (mapping == null) {
+        return 0;
+      }
+
+      return mapping.getCardinality(rack, tag);
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+
+
+  /**
    * Check if given tag exists on node.
    *
    * @param nodeId        nodeId, required.
    * @param applicationId applicationId. When null is specified, return
-   *                      aggregated cardinality among all nodes.
+   *                      aggregation among all applications.
    * @param tag           allocation tag, see
    *                      {@link SchedulingRequest#getAllocationTags()},
    *                      When multiple tags specified. Returns cardinality
@@ -387,7 +467,7 @@ public class AllocationTagsManager {
    *
    * @param nodeId        nodeId, required.
    * @param applicationId applicationId. When null is specified, return
-   *                      aggregated cardinality among all nodes.
+   *                      aggregated cardinality among all applications.
    * @param tags          allocation tags, see
    *                      {@link SchedulingRequest#getAllocationTags()},
    *                      When multiple tags specified. Returns cardinality
@@ -396,7 +476,7 @@ public class AllocationTagsManager {
    *                      specified, all tags (of the node/app) will be
    *                      considered.
    * @param op            operator. Such as Long::max, Long::sum, etc. Required.
-   *                      This sparameter only take effect when #values >= 2.
+   *                      This parameter only takes effect when #values >= 2.
    * @return cardinality of specified query on the node.
    * @throws InvalidAllocationTagsQueryException when illegal query
    *                                            parameter specified
@@ -414,9 +494,9 @@ public class AllocationTagsManager {
 
       NodeToCountedTags mapping;
       if (applicationId != null) {
-        mapping = perAppMappings.get(applicationId);
-      } else{
-        mapping = globalMapping;
+        mapping = perAppNodeMappings.get(applicationId);
+      } else {
+        mapping = globalNodeMapping;
       }
 
       if (mapping == null) {
@@ -428,4 +508,52 @@ public class AllocationTagsManager {
       readLock.unlock();
     }
   }
+
+  /**
+   * Get rack cardinality for the following conditions. Callers can pass in a
+   * binary op to implement customized logic.
+   *
+   * @param rack          rack, required.
+   * @param applicationId applicationId. When null is specified, return
+   *                      aggregated cardinality among all applications.
+   * @param tags          allocation tags, see
+   *                      {@link SchedulingRequest#getAllocationTags()}.
+   *                      When multiple tags are specified, the returned
+   *                      cardinality depends on op. If a specified tag doesn't
+   *                      exist, its cardinality is 0. When null/empty tags are
+   *                      specified, all tags (of the rack/app) will be
+   *                      considered.
+   * @param op            operator, such as Long::max, Long::sum, etc. Required.
+   *                      This parameter only takes effect when #values >= 2.
+   * @return cardinality of specified query on the rack.
+   * @throws InvalidAllocationTagsQueryException when illegal query
+   *                                            parameter specified
+   */
+  public long getRackCardinalityByOp(String rack, ApplicationId applicationId,
+      Set<String> tags, LongBinaryOperator op)
+      throws InvalidAllocationTagsQueryException {
+    readLock.lock();
+
+    try {
+      if (rack == null || op == null) {
+        throw new InvalidAllocationTagsQueryException(
+            "Must specify rack/tags/op to query cardinality");
+      }
+
+      NodeToCountedTags mapping;
+      if (applicationId != null) {
+        mapping = perAppRackMappings.get(applicationId);
+      } else {
+        mapping = globalRackMapping;
+      }
+
+      if (mapping == null) {
+        return 0;
+      }
+
+      return mapping.getCardinality(rack, tags, op);
+    } finally {
+      readLock.unlock();
+    }
+  }
 }
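
For orientation, a minimal usage sketch of the new rack-level queries (the
signatures match the methods added above; rmContext, app1 and the tag values
are illustrative, not taken from the patch):

    // Count "hbase-master" containers app1 has on rack0.
    AllocationTagsManager atm = new AllocationTagsManager(rmContext);
    long perApp = atm.getRackCardinality("rack0", app1, "hbase-master");
    // Pass a null ApplicationId to aggregate across all applications.
    long global = atm.getRackCardinality("rack0", null, "hbase-master");
    // Fold several per-tag counts with a binary operator; op is only
    // consulted when two or more values are present.
    long widest = atm.getRackCardinalityByOp("rack0", app1,
        ImmutableSet.of("hbase-master", "hbase-rs"), Long::max);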

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94429e3c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
index 538d128..b927870 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
@@ -405,8 +405,8 @@ public class TestRMContainerImpl {
 
     RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
     SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class);
-    AllocationTagsManager tagsManager = new AllocationTagsManager();
     RMContext rmContext = mock(RMContext.class);
+    AllocationTagsManager tagsManager = new AllocationTagsManager(rmContext);
     when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
     when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
     when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
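
The reorder above is load-bearing: the manager now takes the RMContext at
construction (presumably to resolve node racks), so the mock must exist before
the constructor runs. Condensed:

    RMContext rmContext = mock(RMContext.class);   // must come first now
    AllocationTagsManager tagsManager = new AllocationTagsManager(rmContext);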

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94429e3c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
index 4bb2a18..0ce1614 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
@@ -20,202 +20,300 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
 
-import com.google.common.collect.ImmutableSet;
+import java.util.List;
+
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 
+import com.google.common.collect.ImmutableSet;
+
 /**
  * Test functionality of AllocationTagsManager.
  */
 public class TestAllocationTagsManager {
+  private RMContext rmContext;
+
+  @Before
+  public void setup() {
+    MockRM rm = new MockRM();
+    rm.start();
+    MockNodes.resetHostIds();
+    List<RMNode> rmNodes =
+        MockNodes.newNodes(2, 4, Resource.newInstance(4096, 4));
+    for (RMNode rmNode : rmNodes) {
+      rm.getRMContext().getRMNodes().putIfAbsent(rmNode.getNodeID(), rmNode);
+    }
+    rmContext = rm.getRMContext();
+  }
+
   @Test
   public void testAllocationTagsManagerSimpleCases()
       throws InvalidAllocationTagsQueryException {
-    AllocationTagsManager atm = new AllocationTagsManager();
+
+    AllocationTagsManager atm = new AllocationTagsManager(rmContext);
 
     /**
      * Construct test case:
-     * Node1:
+     * Node1 (rack0):
      *    container_1_1 (mapper/reducer/app_1)
      *    container_1_3 (service/app_1)
      *
-     * Node2:
+     * Node2 (rack0):
      *    container_1_2 (mapper/reducer/app_1)
      *    container_1_4 (reducer/app_1)
      *    container_2_1 (service/app_2)
      */
 
     // 3 Containers from app1
-    atm.addContainer(NodeId.fromString("node1:1234"),
+    atm.addContainer(NodeId.fromString("host1:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
         ImmutableSet.of("mapper", "reducer"));
 
-    atm.addContainer(NodeId.fromString("node2:1234"),
+    atm.addContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
         ImmutableSet.of("mapper", "reducer"));
 
-    atm.addContainer(NodeId.fromString("node1:1234"),
+    atm.addContainer(NodeId.fromString("host1:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
         ImmutableSet.of("service"));
 
-    atm.addContainer(NodeId.fromString("node2:1234"),
+    atm.addContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
         ImmutableSet.of("reducer"));
 
     // 1 Container from app2
-    atm.addContainer(NodeId.fromString("node2:1234"),
+    atm.addContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
         ImmutableSet.of("service"));
 
-    // Get Cardinality of app1 on node1, with tag "mapper"
+    // Get Node Cardinality of app1 on node1, with tag "mapper"
     Assert.assertEquals(1,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node1:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
             TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
             Long::max));
 
-    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=min
+    // Get Rack Cardinality of app1 on rack0, with tag "mapper"
+    Assert.assertEquals(2, atm.getRackCardinality("rack0",
+        TestUtils.getMockApplicationId(1), "mapper"));
+
+    // Get Node Cardinality of app1 on node2, with tag "mapper/reducer", op=min
     Assert.assertEquals(1,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1),
             ImmutableSet.of("mapper", "reducer"), Long::min));
 
-    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=max
+    // Get Node Cardinality of app1 on node2, with tag "mapper/reducer", op=max
     Assert.assertEquals(2,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1),
             ImmutableSet.of("mapper", "reducer"), Long::max));
 
-    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=sum
+    // Get Node Cardinality of app1 on node2, with tag "mapper/reducer", op=sum
     Assert.assertEquals(3,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1),
             ImmutableSet.of("mapper", "reducer"), Long::sum));
 
-    // Get Cardinality by passing single tag.
+    // Get Node Cardinality by passing single tag.
     Assert.assertEquals(1,
-        atm.getNodeCardinality(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinality(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1), "mapper"));
 
     Assert.assertEquals(2,
-        atm.getNodeCardinality(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinality(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1), "reducer"));
 
-    // Get Cardinality of app1 on node2, with tag "no_existed/reducer", op=min
+    // Get Node Cardinality of app1 on node2, with tag "no_existed/reducer",
+    // op=min
     Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1),
             ImmutableSet.of("no_existed", "reducer"), Long::min));
 
-    // Get Cardinality of app1 on node2, with tag "<applicationId>", op=max
+    // Get Node Cardinality of app1 on node2, with tag "<applicationId>", op=max
     // (Expect this returns #containers from app1 on node2)
+    Assert
+        .assertEquals(2,
+            atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
+                TestUtils.getMockApplicationId(1),
+                ImmutableSet.of(AllocationTagsNamespaces.APP_ID
+                    + TestUtils.getMockApplicationId(1).toString()),
+                Long::max));
+
+    // Get Node Cardinality of app1 on node2, with empty tag set, op=max
     Assert.assertEquals(2,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1), ImmutableSet
-                .of(AllocationTagsNamespaces.APP_ID + TestUtils
-                    .getMockApplicationId(1).toString()), Long::max));
-
-    // Get Cardinality of app1 on node2, with empty tag set, op=max
-    Assert.assertEquals(2,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::max));
 
-    // Get Cardinality of all apps on node2, with empty tag set, op=sum
-    Assert.assertEquals(7,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"), null,
-            ImmutableSet.of(), Long::sum));
+    // Get Node Cardinality of all apps on node2, with empty tag set, op=sum
+    Assert.assertEquals(7, atm.getNodeCardinalityByOp(
+        NodeId.fromString("host2:123"), null, ImmutableSet.of(), Long::sum));
 
-    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
+    // Get Node Cardinality of app_1 on node2, with empty tag set, op=sum
     Assert.assertEquals(5,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::sum));
 
-    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
+    // Get Node Cardinality of app_1 on node2, with empty tag set, op=sum
     Assert.assertEquals(2,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(2), ImmutableSet.of(), Long::sum));
 
     // Finish all containers:
-    atm.removeContainer(NodeId.fromString("node1:1234"),
+    atm.removeContainer(NodeId.fromString("host1:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
         ImmutableSet.of("mapper", "reducer"));
 
-    atm.removeContainer(NodeId.fromString("node2:1234"),
+    atm.removeContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
         ImmutableSet.of("mapper", "reducer"));
 
-    atm.removeContainer(NodeId.fromString("node1:1234"),
+    atm.removeContainer(NodeId.fromString("host1:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
         ImmutableSet.of("service"));
 
-    atm.removeContainer(NodeId.fromString("node2:1234"),
+    atm.removeContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
         ImmutableSet.of("reducer"));
 
-    atm.removeContainer(NodeId.fromString("node2:1234"),
+    atm.removeContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
         ImmutableSet.of("service"));
 
     // Expect all cardinality to be 0
     // Get Cardinality of app1 on node1, with tag "mapper"
     Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node1:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
             TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
             Long::max));
 
-    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=min
+    // Get Node Cardinality of app1 on node2, with tag "mapper/reducer", op=min
     Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1),
             ImmutableSet.of("mapper", "reducer"), Long::min));
 
-    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=max
+    // Get Node Cardinality of app1 on node2, with tag "mapper/reducer", op=max
     Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1),
             ImmutableSet.of("mapper", "reducer"), Long::max));
 
-    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=sum
+    // Get Node Cardinality of app1 on node2, with tag "mapper/reducer", op=sum
     Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1),
             ImmutableSet.of("mapper", "reducer"), Long::sum));
 
-    // Get Cardinality of app1 on node2, with tag "<applicationId>", op=max
+    // Get Node Cardinality of app1 on node2, with tag "<applicationId>", op=max
     // (Expect this returns #containers from app1 on node2)
     Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1),
             ImmutableSet.of(TestUtils.getMockApplicationId(1).toString()),
             Long::max));
 
     Assert.assertEquals(0,
-        atm.getNodeCardinality(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinality(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1),
             TestUtils.getMockApplicationId(1).toString()));
 
-    // Get Cardinality of app1 on node2, with empty tag set, op=max
+    // Get Node Cardinality of app1 on node2, with empty tag set, op=max
     Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::max));
 
-    // Get Cardinality of all apps on node2, with empty tag set, op=sum
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"), null,
-            ImmutableSet.of(), Long::sum));
+    // Get Node Cardinality of all apps on node2, with empty tag set, op=sum
+    Assert.assertEquals(0, atm.getNodeCardinalityByOp(
+        NodeId.fromString("host2:123"), null, ImmutableSet.of(), Long::sum));
 
-    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
+    // Get Node Cardinality of app_1 on node2, with empty tag set, op=sum
     Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::sum));
 
-    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
+    // Get Node Cardinality of app_2 on node2, with empty tag set, op=sum
     Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
             TestUtils.getMockApplicationId(2), ImmutableSet.of(), Long::sum));
   }
 
+
+  @Test
+  public void testAllocationTagsManagerRackMapping()
+      throws InvalidAllocationTagsQueryException {
+
+    AllocationTagsManager atm = new AllocationTagsManager(rmContext);
+
+    /**
+     * Construct Rack test case:
+     * Node1 (rack0):
+     *    container_1_1 (mapper/reducer/app_1)
+     *    container_2_4 (reducer/app_2)
+     *
+     * Node2 (rack0):
+     *    container_2_2 (mapper/reducer/app_2)
+     *    container_1_3 (service/app_1)
+     *    container_2_3 (service/app_2)
+     */
+
+    // 2 Containers from app1 and 2 from app2
+    atm.addContainer(NodeId.fromString("host1:123"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("host2:123"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 2),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("host1:123"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 4),
+        ImmutableSet.of("reducer"));
+
+    atm.addContainer(NodeId.fromString("host2:123"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
+        ImmutableSet.of("service"));
+
+    // 1 more Container from app2
+    atm.addContainer(NodeId.fromString("host2:123"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
+        ImmutableSet.of("service"));
+
+    // Get Rack Cardinality of app1 on rack0, with tag "mapper"
+    Assert.assertEquals(1, atm.getRackCardinality("rack0",
+        TestUtils.getMockApplicationId(1), "mapper"));
+
+    // Get Rack Cardinality of app2 on rack0, with tag "reducer"
+    Assert.assertEquals(2, atm.getRackCardinality("rack0",
+        TestUtils.getMockApplicationId(2), "reducer"));
+
+    // Get Rack Cardinality of all apps on rack0, with tag "reducer"
+    Assert.assertEquals(3, atm.getRackCardinality("rack0", null, "reducer"));
+
+    // Get Rack Cardinality of app_1 on rack0, with empty tag set, op=max
+    Assert.assertEquals(2, atm.getRackCardinalityByOp("rack0",
+        TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::max));
+
+    // Get Rack Cardinality of app_1 on rack0, with empty tag set, op=min
+    Assert.assertEquals(1, atm.getRackCardinalityByOp("rack0",
+        TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::min));
+
+    // Get Rack Cardinality of all apps on rack0, with empty tag set, op=max
+    Assert.assertEquals(3, atm.getRackCardinalityByOp("rack0", null,
+        ImmutableSet.of(), Long::max));
+  }
+
   @Test
   public void testAllocationTagsManagerMemoryAfterCleanup()
       throws InvalidAllocationTagsQueryException {
@@ -223,54 +321,57 @@ public class TestAllocationTagsManager {
      * Make sure YARN cleans up all memory once container/app finishes.
      */
 
-    AllocationTagsManager atm = new AllocationTagsManager();
+    AllocationTagsManager atm = new AllocationTagsManager(rmContext);
 
     // Add a bunch of containers
-    atm.addContainer(NodeId.fromString("node1:1234"),
+    atm.addContainer(NodeId.fromString("host1:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
         ImmutableSet.of("mapper", "reducer"));
 
-    atm.addContainer(NodeId.fromString("node2:1234"),
+    atm.addContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
         ImmutableSet.of("mapper", "reducer"));
 
-    atm.addContainer(NodeId.fromString("node1:1234"),
+    atm.addContainer(NodeId.fromString("host1:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
         ImmutableSet.of("service"));
 
-    atm.addContainer(NodeId.fromString("node2:1234"),
+    atm.addContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
         ImmutableSet.of("reducer"));
 
-    atm.addContainer(NodeId.fromString("node2:1234"),
+    atm.addContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
         ImmutableSet.of("service"));
 
     // Remove all these containers
-    atm.removeContainer(NodeId.fromString("node1:1234"),
+    atm.removeContainer(NodeId.fromString("host1:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
         ImmutableSet.of("mapper", "reducer"));
 
-    atm.removeContainer(NodeId.fromString("node2:1234"),
+    atm.removeContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
         ImmutableSet.of("mapper", "reducer"));
 
-    atm.removeContainer(NodeId.fromString("node1:1234"),
+    atm.removeContainer(NodeId.fromString("host1:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
         ImmutableSet.of("service"));
 
-    atm.removeContainer(NodeId.fromString("node2:1234"),
+    atm.removeContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
         ImmutableSet.of("reducer"));
 
-    atm.removeContainer(NodeId.fromString("node2:1234"),
+    atm.removeContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
         ImmutableSet.of("service"));
 
     // Check internal data structure
     Assert.assertEquals(0,
-        atm.getGlobalMapping().getNodeToTagsWithCount().size());
-    Assert.assertEquals(0, atm.getPerAppMappings().size());
+        atm.getGlobalNodeMapping().getTypeToTagsWithCount().size());
+    Assert.assertEquals(0, atm.getPerAppNodeMappings().size());
+    Assert.assertEquals(0,
+        atm.getGlobalRackMapping().getTypeToTagsWithCount().size());
+    Assert.assertEquals(0, atm.getPerAppRackMappings().size());
   }
 
   @Test
@@ -280,26 +381,26 @@ public class TestAllocationTagsManager {
      * Make sure YARN cleans up all memory once container/app finishes.
      */
 
-    AllocationTagsManager atm = new AllocationTagsManager();
+    AllocationTagsManager atm = new AllocationTagsManager(rmContext);
 
     // Add a bunch of containers
-    atm.addContainer(NodeId.fromString("node1:1234"),
+    atm.addContainer(NodeId.fromString("host1:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
         ImmutableSet.of("mapper", "reducer"));
 
-    atm.addContainer(NodeId.fromString("node2:1234"),
+    atm.addContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
         ImmutableSet.of("mapper", "reducer"));
 
-    atm.addContainer(NodeId.fromString("node1:1234"),
+    atm.addContainer(NodeId.fromString("host1:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
         ImmutableSet.of("service"));
 
-    atm.addContainer(NodeId.fromString("node2:1234"),
+    atm.addContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
         ImmutableSet.of("reducer"));
 
-    atm.addContainer(NodeId.fromString("node2:1234"),
+    atm.addContainer(NodeId.fromString("host2:123"),
         TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
         ImmutableSet.of("service"));
 
@@ -317,7 +418,7 @@ public class TestAllocationTagsManager {
     // No op
     caughtException = false;
     try {
-      atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+      atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
           TestUtils.getMockApplicationId(2), ImmutableSet.of("mapper"), null);
     } catch (InvalidAllocationTagsQueryException e) {
       caughtException = true;




[16/49] hadoop git commit: YARN-7616. Map YARN application status to Service Status more accurately. (Contributed by Gour Saha)

Posted by as...@apache.org.
YARN-7616. Map YARN application status to Service Status more accurately.  (Contributed by Gour Saha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/41b58101
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/41b58101
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/41b58101

Branch: refs/heads/YARN-6592
Commit: 41b581012a83a17db785343362c718363e13e8f5
Parents: 94a2ac6
Author: Eric Yang <ey...@apache.org>
Authored: Tue Dec 19 19:14:45 2017 -0500
Committer: Eric Yang <ey...@apache.org>
Committed: Tue Dec 19 19:14:45 2017 -0500

----------------------------------------------------------------------
 .../hadoop/yarn/service/ServiceMaster.java      | 37 ++++++++++++++
 .../hadoop/yarn/service/ServiceScheduler.java   |  4 ++
 .../yarn/service/client/ServiceClient.java      | 26 ++++++----
 .../yarn/service/component/Component.java       | 53 ++++++++++++++++++--
 .../component/instance/ComponentInstance.java   |  6 +--
 .../yarn/service/TestYarnNativeServices.java    | 34 +++++++++++--
 6 files changed, 137 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b58101/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
index 1283604..75cc9c5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager;
+import org.apache.hadoop.yarn.service.api.records.ServiceState;
 import org.apache.hadoop.yarn.service.exceptions.BadClusterStateException;
 import org.apache.hadoop.yarn.service.monitor.ServiceMonitor;
 import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
@@ -237,6 +238,7 @@ public class ServiceMaster extends CompositeService {
       SliderFileSystem fs) throws IOException {
     context.service = ServiceApiUtil
         .loadServiceFrom(fs, new Path(serviceDefPath));
+    context.service.setState(ServiceState.ACCEPTED);
     LOG.info(context.service.toString());
   }
 
@@ -257,6 +259,41 @@ public class ServiceMaster extends CompositeService {
     super.serviceStop();
   }
 
+  // This method should be called whenever there is an increment or decrement
+  // of a READY state component of a service
+  public static synchronized void checkAndUpdateServiceState(
+      ServiceScheduler scheduler, boolean isIncrement) {
+    ServiceState curState = scheduler.getApp().getState();
+    if (!isIncrement) {
+      // set it to STARTED every time a component moves out of STABLE state
+      scheduler.getApp().setState(ServiceState.STARTED);
+    } else {
+      // otherwise check the state of all components
+      boolean isStable = true;
+      for (org.apache.hadoop.yarn.service.api.records.Component comp : scheduler
+          .getApp().getComponents()) {
+        if (comp.getState() !=
+            org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE) {
+          isStable = false;
+          break;
+        }
+      }
+      if (isStable) {
+        scheduler.getApp().setState(ServiceState.STABLE);
+      } else {
+        // mark new state as started only if current state is stable, otherwise
+        // leave it as is
+        if (curState == ServiceState.STABLE) {
+          scheduler.getApp().setState(ServiceState.STARTED);
+        }
+      }
+    }
+    if (curState != scheduler.getApp().getState()) {
+      LOG.info("Service state changed from {} -> {}", curState,
+          scheduler.getApp().getState());
+    }
+  }
+
   private void printSystemEnv() {
     for (Map.Entry<String, String> envs : System.getenv().entrySet()) {
       LOG.info("{} = {}", envs.getKey(), envs.getValue());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b58101/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
index 2697050..45cdd28 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.service.api.ServiceApiConstants;
 import org.apache.hadoop.yarn.service.api.records.Service;
+import org.apache.hadoop.yarn.service.api.records.ServiceState;
 import org.apache.hadoop.yarn.service.api.records.ConfigFile;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent;
@@ -284,6 +285,9 @@ public class ServiceScheduler extends CompositeService {
     }
     registerServiceInstance(context.attemptId, app);
 
+    // Since AM has been started and registered, the service is in STARTED state
+    app.setState(ServiceState.STARTED);
+
     // recover components based on containers sent from RM
     recoverComponents(response);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b58101/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 81c56d2..d1ccc4f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -268,7 +268,8 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
       long ret = orig - Long.parseLong(newNumber.substring(1));
       if (ret < 0) {
         LOG.warn(MessageFormat.format(
-            "[COMPONENT {}]: component count goes to negative ({}{} = {}), reset it to 0.",
+            "[COMPONENT {0}]: component count goes to negative ({1}{2} = {3}),"
+                + " ignore and reset it to 0.",
             component.getName(), orig, newNumber, ret));
         ret = 0;
       }
@@ -878,18 +879,23 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
     return newTimeout;
   }
 
-  public ServiceState convertState(FinalApplicationStatus status) {
-    switch (status) {
-    case UNDEFINED:
+  public ServiceState convertState(YarnApplicationState state) {
+    switch (state) {
+    case NEW:
+    case NEW_SAVING:
+    case SUBMITTED:
+    case ACCEPTED:
       return ServiceState.ACCEPTED;
-    case FAILED:
+    case RUNNING:
+      return ServiceState.STARTED;
+    case FINISHED:
     case KILLED:
-      return ServiceState.FAILED;
-    case ENDED:
-    case SUCCEEDED:
       return ServiceState.STOPPED;
+    case FAILED:
+      return ServiceState.FAILED;
+    default:
+      return ServiceState.ACCEPTED;
     }
-    return ServiceState.ACCEPTED;
   }
 
   public String getStatusString(String appId)
@@ -917,7 +923,7 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
     ApplicationReport appReport = yarnClient.getApplicationReport(currentAppId);
     Service appSpec = new Service();
     appSpec.setName(serviceName);
-    appSpec.setState(convertState(appReport.getFinalApplicationStatus()));
+    appSpec.setState(convertState(appReport.getYarnApplicationState()));
     ApplicationTimeout lifetime =
         appReport.getApplicationTimeouts().get(ApplicationTimeoutType.LIFETIME);
     if (lifetime != null) {
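
The switch now keys off the live YarnApplicationState instead of the final
status, so a running application reports STARTED rather than falling back to
ACCEPTED. For instance, with a configured ServiceClient:

    // RUNNING -> STARTED; FINISHED/KILLED -> STOPPED; FAILED -> FAILED.
    ServiceState state = client.convertState(YarnApplicationState.RUNNING);
    // state == ServiceState.STARTED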

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b58101/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
index 9c5cbae..a84c1b1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
@@ -31,7 +31,9 @@ import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceId;
 import org.apache.hadoop.yarn.service.ContainerFailureTracker;
 import org.apache.hadoop.yarn.service.ServiceContext;
 import org.apache.hadoop.yarn.service.ServiceScheduler;
+import org.apache.hadoop.yarn.service.api.records.ServiceState;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent;
+import org.apache.hadoop.yarn.service.ServiceMaster;
 import org.apache.hadoop.yarn.service.ServiceMetrics;
 import org.apache.hadoop.yarn.service.provider.ProviderUtils;
 import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
@@ -209,6 +211,7 @@ public class Component implements EventHandler<ComponentEvent> {
         component.createNumCompInstances(delta);
         component.componentSpec.setState(
             org.apache.hadoop.yarn.service.api.records.ComponentState.FLEXING);
+        component.getScheduler().getApp().setState(ServiceState.STARTED);
         return FLEXING;
       } else if (delta < 0){
         delta = 0 - delta;
@@ -229,14 +232,11 @@ public class Component implements EventHandler<ComponentEvent> {
           component.instanceIdCounter.decrementAndGet();
           instance.destroy();
         }
-        component.componentSpec.setState(
-            org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE);
+        checkAndUpdateComponentState(component, false);
         return STABLE;
       } else {
         LOG.info("[FLEX COMPONENT " + component.getName() + "]: already has " +
             event.getDesired() + " instances, ignoring");
-        component.componentSpec.setState(
-            org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE);
         return STABLE;
       }
     }
@@ -289,7 +289,7 @@ public class Component implements EventHandler<ComponentEvent> {
 
   private static ComponentState checkIfStable(Component component) {
     // if desired == running
-    if (component.componentMetrics.containersRunning.value() == component
+    if (component.componentMetrics.containersReady.value() == component
         .getComponentSpec().getNumberOfContainers()) {
       component.componentSpec.setState(
           org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE);
@@ -301,6 +301,46 @@ public class Component implements EventHandler<ComponentEvent> {
     }
   }
 
+  // This method should be called whenever there is an increment or decrement
+  // of a READY state container of a component
+  public static synchronized void checkAndUpdateComponentState(
+      Component component, boolean isIncrement) {
+    org.apache.hadoop.yarn.service.api.records.ComponentState curState =
+        component.componentSpec.getState();
+    if (isIncrement) {
+      // check if all containers are in READY state
+      if (component.componentMetrics.containersReady
+          .value() == component.componentMetrics.containersDesired.value()) {
+        component.componentSpec.setState(
+            org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE);
+        if (curState != component.componentSpec.getState()) {
+          LOG.info("[COMPONENT {}] state changed from {} -> {}",
+              component.componentSpec.getName(), curState,
+              component.componentSpec.getState());
+        }
+        // component state change will trigger re-check of service state
+        ServiceMaster.checkAndUpdateServiceState(component.scheduler,
+            isIncrement);
+      }
+    } else {
+      // container moving out of READY state could be because of FLEX down so
+      // still need to verify the count before changing the component state
+      if (component.componentMetrics.containersReady
+          .value() < component.componentMetrics.containersDesired.value()) {
+        component.componentSpec.setState(
+            org.apache.hadoop.yarn.service.api.records.ComponentState.FLEXING);
+        if (curState != component.componentSpec.getState()) {
+          LOG.info("[COMPONENT {}] state changed from {} -> {}",
+              component.componentSpec.getName(), curState,
+              component.componentSpec.getState());
+        }
+        // component state change will trigger re-check of service state
+        ServiceMaster.checkAndUpdateServiceState(component.scheduler,
+            isIncrement);
+      }
+    }
+  }
+
   private static class ContainerCompletedTransition extends BaseTransition {
     @Override
     public void transition(Component component, ComponentEvent event) {
@@ -310,6 +350,7 @@ public class Component implements EventHandler<ComponentEvent> {
               STOP).setStatus(event.getStatus()));
       component.componentSpec.setState(
           org.apache.hadoop.yarn.service.api.records.ComponentState.FLEXING);
+      component.getScheduler().getApp().setState(ServiceState.STARTED);
     }
   }
 
@@ -472,11 +513,13 @@ public class Component implements EventHandler<ComponentEvent> {
   public void incContainersReady() {
     componentMetrics.containersReady.incr();
     scheduler.getServiceMetrics().containersReady.incr();
+    checkAndUpdateComponentState(this, true);
   }
 
   public void decContainersReady() {
     componentMetrics.containersReady.decr();
     scheduler.getServiceMetrics().containersReady.decr();
+    checkAndUpdateComponentState(this, false);
   }
 
   public int getNumReadyInstances() {
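
Readiness, not mere liveness, now gates stability: checkIfStable compares
containersReady against the desired count, and the ready counters double as
state triggers. A sketch of the propagation when one container becomes ready:

    component.incContainersReady();
    // -> checkAndUpdateComponentState(component, true):
    //      containersReady == containersDesired  => component STABLE
    // -> ServiceMaster.checkAndUpdateServiceState(scheduler, true):
    //      all components STABLE                 => service STABLE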

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b58101/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
index 31fa5c7..0e3e11b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
@@ -147,7 +147,6 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
               new ContainerStatusRetriever(compInstance.scheduler,
                   event.getContainerId(), compInstance), 0, 1,
               TimeUnit.SECONDS);
-      compInstance.component.incRunningContainers();
       long containerStartTime = System.currentTimeMillis();
       try {
         ContainerTokenIdentifier containerTokenIdentifier = BuilderUtils
@@ -171,6 +170,7 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
       compInstance.containerSpec = container;
       compInstance.getCompSpec().addContainer(container);
       compInstance.containerStartedTime = containerStartTime;
+      compInstance.component.incRunningContainers();
 
       if (compInstance.timelineServiceEnabled) {
         compInstance.serviceTimelinePublisher
@@ -183,8 +183,8 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
     @Override
     public void transition(ComponentInstance compInstance,
         ComponentInstanceEvent event) {
-      compInstance.component.incContainersReady();
       compInstance.containerSpec.setState(ContainerState.READY);
+      compInstance.component.incContainersReady();
       if (compInstance.timelineServiceEnabled) {
         compInstance.serviceTimelinePublisher
             .componentInstanceBecomeReady(compInstance.containerSpec);
@@ -196,8 +196,8 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
     @Override
     public void transition(ComponentInstance compInstance,
         ComponentInstanceEvent event) {
-      compInstance.component.decContainersReady();
       compInstance.containerSpec.setState(ContainerState.RUNNING_BUT_UNREADY);
+      compInstance.component.decContainersReady();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b58101/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
index 1c517d9..debab8b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.yarn.api.records.*;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.service.api.records.Service;
+import org.apache.hadoop.yarn.service.api.records.ServiceState;
 import org.apache.hadoop.yarn.service.api.records.Component;
 import org.apache.hadoop.yarn.service.api.records.Container;
 import org.apache.hadoop.yarn.service.api.records.ContainerState;
@@ -90,25 +91,25 @@ public class TestYarnNativeServices extends ServiceTestUtils {
     // check app.json is persisted.
     Assert.assertTrue(
         getFS().exists(new Path(appDir, exampleApp.getName() + ".json")));
-    waitForAllCompToBeReady(client, exampleApp);
+    waitForServiceToBeStable(client, exampleApp);
 
     // Flex two components, each from 2 container to 3 containers.
     flexComponents(client, exampleApp, 3L);
     // wait for flex to be completed, increase from 2 to 3 containers.
-    waitForAllCompToBeReady(client, exampleApp);
+    waitForServiceToBeStable(client, exampleApp);
     // check all instances name for each component are in sequential order.
     checkCompInstancesInOrder(client, exampleApp);
 
     // flex down to 1
     flexComponents(client, exampleApp, 1L);
-    waitForAllCompToBeReady(client, exampleApp);
+    waitForServiceToBeStable(client, exampleApp);
     checkCompInstancesInOrder(client, exampleApp);
 
     // check component dir and registry are cleaned up.
 
     // flex up again to 2
     flexComponents(client, exampleApp, 2L);
-    waitForAllCompToBeReady(client, exampleApp);
+    waitForServiceToBeStable(client, exampleApp);
     checkCompInstancesInOrder(client, exampleApp);
 
     // stop the service
@@ -145,7 +146,7 @@ public class TestYarnNativeServices extends ServiceTestUtils {
     exampleApp.addComponent(compb);
 
     client.actionCreate(exampleApp);
-    waitForAllCompToBeReady(client, exampleApp);
+    waitForServiceToBeStable(client, exampleApp);
 
     // check that containers for compa are launched before containers for compb
     checkContainerLaunchDependencies(client, exampleApp, "compa", "compb");
@@ -372,6 +373,29 @@ public class TestYarnNativeServices extends ServiceTestUtils {
     return allContainers;
   }
 
+  /**
+   * Wait until service state becomes stable. A service is stable when all
+   * requested containers of all components are running and in ready state.
+   *
+   * @param client service client used to poll the service status
+   * @param exampleApp the service whose state is awaited
+   * @throws TimeoutException if the service does not stabilize in time
+   * @throws InterruptedException if the wait is interrupted
+   */
+  private void waitForServiceToBeStable(ServiceClient client,
+      Service exampleApp) throws TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(() -> {
+      try {
+        Service retrievedApp = client.getStatus(exampleApp.getName());
+        System.out.println(retrievedApp);
+        return retrievedApp.getState() == ServiceState.STABLE;
+      } catch (Exception e) {
+        e.printStackTrace();
+        return false;
+      }
+    }, 2000, 200000);
+  }
+
   private ServiceClient createClient() throws Exception {
     ServiceClient client = new ServiceClient() {
       @Override protected Path addJarResource(String appName,




[49/49] hadoop git commit: YARN-7448. [API] Add SchedulingRequest to the AllocateRequest. (Panagiotis Garefalakis via asuresh)

Posted by as...@apache.org.
YARN-7448. [API] Add SchedulingRequest to the AllocateRequest. (Panagiotis Garefalakis via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1866f28f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1866f28f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1866f28f

Branch: refs/heads/YARN-6592
Commit: 1866f28f3859a761a1699218cc5d759d2b5952b2
Parents: 44f41ae
Author: Arun Suresh <as...@apache.org>
Authored: Fri Nov 17 10:42:43 2017 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Fri Dec 29 17:29:15 2017 -0800

----------------------------------------------------------------------
 .../api/protocolrecords/AllocateRequest.java    | 42 ++++++++++
 .../hadoop/yarn/api/records/ResourceSizing.java | 27 +++++++
 .../yarn/api/records/SchedulingRequest.java     |  1 +
 .../src/main/proto/yarn_service_protos.proto    |  1 +
 .../impl/pb/AllocateRequestPBImpl.java          | 83 ++++++++++++++++++++
 .../records/impl/pb/ResourceSizingPBImpl.java   |  2 +-
 .../impl/pb/SchedulingRequestPBImpl.java        | 16 ++++
 .../hadoop/yarn/api/TestPBImplRecords.java      | 19 +++++
 8 files changed, 190 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1866f28f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
index ae0891e..d8d2347 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.api.protocolrecords;
 
+import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience.Public;
@@ -28,6 +29,7 @@ import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.util.Records;
 
@@ -212,6 +214,32 @@ public abstract class AllocateRequest {
   public abstract void setUpdateRequests(
       List<UpdateContainerRequest> updateRequests);
 
+  /**
+   * Get the list of Scheduling requests being sent by the
+   * <code>ApplicationMaster</code>.
+   * @return list of {@link SchedulingRequest} being sent by the
+   *         <code>ApplicationMaster</code>.
+   */
+  @Public
+  @Unstable
+  public List<SchedulingRequest> getSchedulingRequests() {
+    return Collections.emptyList();
+  }
+
+  /**
+   * Set the list of Scheduling requests to inform the
+   * <code>ResourceManager</code> about the application's resource requirements
+   * (potentially including allocation tags and placement constraints).
+   * @param schedulingRequests list of <code>SchedulingRequest</code> to update
+   *          the <code>ResourceManager</code> about the application's resource
+   *          requirements.
+   */
+  @Public
+  @Unstable
+  public void setSchedulingRequests(
+      List<SchedulingRequest> schedulingRequests) {
+  }
+
   @Public
   @Unstable
   public static AllocateRequestBuilder newBuilder() {
@@ -314,6 +342,20 @@ public abstract class AllocateRequest {
     }
 
     /**
+     * Set the <code>schedulingRequests</code> of the request.
+     * @see AllocateRequest#setSchedulingRequests(List)
+     * @param schedulingRequests <code>SchedulingRequest</code> of the request
+     * @return {@link AllocateRequestBuilder}
+     */
+    @Public
+    @Unstable
+    public AllocateRequestBuilder schedulingRequests(
+        List<SchedulingRequest> schedulingRequests) {
+      allocateRequest.setSchedulingRequests(schedulingRequests);
+      return this;
+    }
+
+    /**
      * Return generated {@link AllocateRequest} object.
      * @return {@link AllocateRequest}
      */
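
A minimal sketch of an ApplicationMaster handing scheduling requests to the
RM through the new builder hook (tag names and sizing values are
illustrative):

    SchedulingRequest sr = SchedulingRequest.newBuilder()
        .allocationRequestId(1L)
        .priority(Priority.newInstance(1))
        .allocationTags(ImmutableSet.of("hbase-master"))
        .resourceSizing(
            ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)))
        .build();
    AllocateRequest request = AllocateRequest.newBuilder()
        .responseId(0).progress(0f)
        .schedulingRequests(Collections.singletonList(sr))
        .build();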

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1866f28f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java
index d82be11..8cdc63f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java
@@ -61,4 +61,31 @@ public abstract class ResourceSizing {
   @Public
   @Unstable
   public abstract void setResources(Resource resources);
+
+  @Override
+  public int hashCode() {
+    int result = getResources().hashCode();
+    result = 31 * result + getNumAllocations();
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null || getClass() != obj.getClass()) {
+      return false;
+    }
+
+    ResourceSizing that = (ResourceSizing) obj;
+
+    if (getNumAllocations() != that.getNumAllocations()) {
+      return false;
+    }
+    if (!getResources().equals(that.getResources())) {
+      return false;
+    }
+    return true;
+  }
 }
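
equals and hashCode are added so record round-trip tests (TestPBImplRecords
in the diffstat) can compare instances structurally; two sizings built from
the same values now compare equal (newInstance(int, Resource) assumed per the
records API):

    ResourceSizing a = ResourceSizing.newInstance(2, Resource.newInstance(1024, 1));
    ResourceSizing b = ResourceSizing.newInstance(2, Resource.newInstance(1024, 1));
    // a.equals(b) && a.hashCode() == b.hashCode()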

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1866f28f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java
index 47a0697..e32dd24 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java
@@ -49,6 +49,7 @@ public abstract class SchedulingRequest {
     return SchedulingRequest.newBuilder()
         .allocationRequestId(allocationRequestId).priority(priority)
         .executionType(executionType).allocationTags(allocationTags)
+        .resourceSizing(resourceSizing)
         .placementConstraintExpression(placementConstraintExpression).build();
   }
 

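The one-line fix above makes newInstance propagate resourceSizing into the builder instead of silently dropping it. A sketch of building such a request directly; tag names and sizes are illustrative:

import java.util.Collections;

import org.apache.hadoop.yarn.api.records.ExecutionType;
import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceSizing;
import org.apache.hadoop.yarn.api.records.SchedulingRequest;

public final class SchedulingRequestExample {
  // Ask for 2 containers of <1024MB, 1 vcore> tagged "hbase-rs".
  static SchedulingRequest newRequest() {
    return SchedulingRequest.newBuilder()
        .allocationRequestId(1L)
        .priority(Priority.newInstance(0))
        .executionType(ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
        .allocationTags(Collections.singleton("hbase-rs"))
        .resourceSizing(ResourceSizing.newInstance(2, Resource.newInstance(1024, 1)))
        .build();
  }
}
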
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1866f28f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
index 68e585d..e49c4e3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
@@ -91,6 +91,7 @@ message AllocateRequestProto {
   optional int32 response_id = 4;
   optional float progress = 5;
   repeated UpdateContainerRequestProto update_requests = 7;
+  repeated SchedulingRequestProto scheduling_requests = 10;
 }
 
 message NMTokenProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1866f28f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
index 0f0f571..b460044 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
@@ -29,14 +29,17 @@ import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourceBlacklistRequestPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourceRequestPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.SchedulingRequestPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.UpdateContainerRequestPBImpl;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceBlacklistRequestProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.SchedulingRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.UpdateContainerRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProtoOrBuilder;
@@ -53,6 +56,7 @@ public class AllocateRequestPBImpl extends AllocateRequest {
   private List<ResourceRequest> ask = null;
   private List<ContainerId> release = null;
   private List<UpdateContainerRequest> updateRequests = null;
+  private List<SchedulingRequest> schedulingRequests = null;
   private ResourceBlacklistRequest blacklistRequest = null;
   
   public AllocateRequestPBImpl() {
@@ -101,6 +105,9 @@ public class AllocateRequestPBImpl extends AllocateRequest {
     if (this.updateRequests != null) {
       addUpdateRequestsToProto();
     }
+    if (this.schedulingRequests != null) {
+      addSchedulingRequestsToProto();
+    }
     if (this.blacklistRequest != null) {
       builder.setBlacklistRequest(convertToProtoFormat(this.blacklistRequest));
     }
@@ -178,6 +185,23 @@ public class AllocateRequestPBImpl extends AllocateRequest {
   }
 
   @Override
+  public List<SchedulingRequest> getSchedulingRequests() {
+    initSchedulingRequests();
+    return this.schedulingRequests;
+  }
+
+  @Override
+  public void setSchedulingRequests(
+      List<SchedulingRequest> schedulingRequests) {
+    if (schedulingRequests == null) {
+      return;
+    }
+    initSchedulingRequests();
+    this.schedulingRequests.clear();
+    this.schedulingRequests.addAll(schedulingRequests);
+  }
+
+  @Override
   public ResourceBlacklistRequest getResourceBlacklistRequest() {
     AllocateRequestProtoOrBuilder p = viaProto ? proto : builder;
     if (this.blacklistRequest != null) {
@@ -261,6 +285,20 @@ public class AllocateRequestPBImpl extends AllocateRequest {
     }
   }
 
+  private void initSchedulingRequests() {
+    if (this.schedulingRequests != null) {
+      return;
+    }
+    AllocateRequestProtoOrBuilder p = viaProto ? proto : builder;
+    List<SchedulingRequestProto> list =
+        p.getSchedulingRequestsList();
+    this.schedulingRequests = new ArrayList<>();
+
+    for (SchedulingRequestProto c : list) {
+      this.schedulingRequests.add(convertFromProtoFormat(c));
+    }
+  }
+
   private void addUpdateRequestsToProto() {
     maybeInitBuilder();
     builder.clearUpdateRequests();
@@ -297,6 +335,41 @@ public class AllocateRequestPBImpl extends AllocateRequest {
     builder.addAllUpdateRequests(iterable);
   }
 
+  private void addSchedulingRequestsToProto() {
+    maybeInitBuilder();
+    builder.clearSchedulingRequests();
+    if (schedulingRequests == null) {
+      return;
+    }
+    Iterable<SchedulingRequestProto> iterable =
+        new Iterable<SchedulingRequestProto>() {
+          @Override
+          public Iterator<SchedulingRequestProto> iterator() {
+            return new Iterator<SchedulingRequestProto>() {
+
+              private Iterator<SchedulingRequest> iter =
+                  schedulingRequests.iterator();
+
+              @Override
+              public boolean hasNext() {
+                return iter.hasNext();
+              }
+
+              @Override
+              public SchedulingRequestProto next() {
+                return convertToProtoFormat(iter.next());
+              }
+
+              @Override
+              public void remove() {
+                throw new UnsupportedOperationException();
+              }
+            };
+
+          }
+        };
+    builder.addAllSchedulingRequests(iterable);
+  }
+
   @Override
   public List<ContainerId> getReleaseList() {
     initReleases();
@@ -377,6 +450,16 @@ public class AllocateRequestPBImpl extends AllocateRequest {
     return ((UpdateContainerRequestPBImpl) t).getProto();
   }
 
+  private SchedulingRequestPBImpl convertFromProtoFormat(
+      SchedulingRequestProto p) {
+    return new SchedulingRequestPBImpl(p);
+  }
+
+  private SchedulingRequestProto convertToProtoFormat(
+      SchedulingRequest t) {
+    return ((SchedulingRequestPBImpl) t).getProto();
+  }
+
   private ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) {
     return new ContainerIdPBImpl(p);
   }

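A sketch of the proto round-trip the additions above enable, mirroring what TestPBImplRecords verifies below; it assumes the default record factory hands back the PB-backed implementation so the cast holds:

import java.util.Collections;

import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateRequestPBImpl;
import org.apache.hadoop.yarn.api.records.SchedulingRequest;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProto;

public final class SchedulingRequestRoundTrip {
  // Serialize to proto and rebuild; scheduling requests survive the trip.
  static AllocateRequest roundTrip(SchedulingRequest schedReq) {
    AllocateRequest original = AllocateRequest.newBuilder()
        .schedulingRequests(Collections.singletonList(schedReq))
        .build();
    AllocateRequestProto proto = ((AllocateRequestPBImpl) original).getProto();
    return new AllocateRequestPBImpl(proto);
  }
}
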
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1866f28f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
index 05bb3bd..f98e488 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
@@ -112,6 +112,6 @@ public class ResourceSizingPBImpl extends ResourceSizing {
   }
 
   private ResourceProto convertToProtoFormat(Resource r) {
-    return ((ResourcePBImpl) r).getProto();
+    return ProtoUtils.convertToProtoFormat(r);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1866f28f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
index 7826b36..305856a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
@@ -263,4 +263,20 @@ public class SchedulingRequestPBImpl extends SchedulingRequest {
     this.allocationTags = new HashSet<>();
     this.allocationTags.addAll(p.getAllocationTagsList());
   }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null) {
+      return false;
+    }
+    // Standard PBImpl pattern: compare the underlying protos.
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1866f28f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
index c5585c2..a0b907d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
@@ -149,8 +149,10 @@ import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
 import org.apache.hadoop.yarn.api.records.ResourceTypeInfo;
 import org.apache.hadoop.yarn.api.records.ResourceUtilization;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.SerializedException;
 import org.apache.hadoop.yarn.api.records.StrictPreemptionContract;
 import org.apache.hadoop.yarn.api.records.Token;
@@ -189,7 +191,9 @@ import org.apache.hadoop.yarn.api.records.impl.pb.ResourceBlacklistRequestPBImpl
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourceOptionPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourceRequestPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ResourceSizingPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourceTypeInfoPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.SchedulingRequestPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.SerializedExceptionPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.StrictPreemptionContractPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl;
@@ -225,6 +229,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ResourceBlacklistRequestProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceOptionProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceSizingProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.SchedulingRequestProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.SerializedExceptionProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.StrictPreemptionContractProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.URLProto;
@@ -428,6 +434,8 @@ public class TestPBImplRecords extends BasePBImplRecordsTest {
     generateByNewInstance(QueueConfigurations.class);
     generateByNewInstance(CollectorInfo.class);
     generateByNewInstance(ResourceTypeInfo.class);
+    generateByNewInstance(ResourceSizing.class);
+    generateByNewInstance(SchedulingRequest.class);
   }
 
   @Test
@@ -907,6 +915,17 @@ public class TestPBImplRecords extends BasePBImplRecordsTest {
   }
 
   @Test
+  public void testResourceSizingPBImpl() throws Exception {
+    validatePBImplRecord(ResourceSizingPBImpl.class, ResourceSizingProto.class);
+  }
+
+  @Test
+  public void testSchedulingRequestPBImpl() throws Exception {
+    validatePBImplRecord(SchedulingRequestPBImpl.class,
+        SchedulingRequestProto.class);
+  }
+
+  @Test
   public void testSerializedExceptionPBImpl() throws Exception {
     validatePBImplRecord(SerializedExceptionPBImpl.class,
         SerializedExceptionProto.class);




[32/49] hadoop git commit: YARN-7542. Fix issue that causes some Running Opportunistic Containers to be recovered as PAUSED. (Sampada Dehankar via asuresh)

Posted by as...@apache.org.
YARN-7542. Fix issue that causes some Running Opportunistic Containers to be recovered as PAUSED. (Sampada Dehankar via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a55884c6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a55884c6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a55884c6

Branch: refs/heads/YARN-6592
Commit: a55884c68eb175f1c9f61771386c086bf1ee65a9
Parents: 5bf7e59
Author: Arun Suresh <as...@apache.org>
Authored: Thu Dec 28 22:20:42 2017 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Thu Dec 28 22:20:42 2017 -0800

----------------------------------------------------------------------
 .../containermanager/launcher/RecoveredContainerLaunch.java        | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a55884c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoveredContainerLaunch.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoveredContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoveredContainerLaunch.java
index a3ccf00..17ddd77 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoveredContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoveredContainerLaunch.java
@@ -72,7 +72,7 @@ public class RecoveredContainerLaunch extends ContainerLaunch {
     String containerIdStr = containerId.toString();
 
     dispatcher.getEventHandler().handle(new ContainerEvent(containerId,
-        ContainerEventType.RECOVER_PAUSED_CONTAINER));
+        ContainerEventType.CONTAINER_LAUNCHED));
 
     boolean notInterrupted = true;
     try {




[38/49] hadoop git commit: YARN-7613. Implement Basic algorithm for constraint based placement. (Panagiotis Garefalakis via asuresh)

Posted by as...@apache.org.
YARN-7613. Implement Basic algorithm for constraint based placement. (Panagiotis Garefalakis via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1c5fa65b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1c5fa65b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1c5fa65b

Branch: refs/heads/YARN-6592
Commit: 1c5fa65b94556645c0a6b3f431c2e98b811dd3ce
Parents: 47f3f64
Author: Arun Suresh <as...@apache.org>
Authored: Wed Dec 27 22:59:22 2017 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Fri Dec 29 17:29:15 2017 -0800

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     |   4 +
 .../src/main/resources/yarn-default.xml         |   8 +-
 .../rmcontainer/RMContainerImpl.java            |  10 +-
 .../constraint/AllocationTagsManager.java       | 121 ++++++++++---
 .../algorithm/DefaultPlacementAlgorithm.java    | 172 +++++++++++++++++++
 .../iterators/PopularTagsIterator.java          |  71 ++++++++
 .../algorithm/iterators/SerialIterator.java     |  53 ++++++
 .../algorithm/iterators/package-info.java       |  29 ++++
 .../constraint/algorithm/package-info.java      |  29 ++++
 .../constraint/processor/BatchedRequests.java   |  45 ++++-
 .../processor/PlacementProcessor.java           |  32 ++--
 .../processor/SamplePlacementAlgorithm.java     | 144 ----------------
 .../constraint/TestAllocationTagsManager.java   | 156 ++++++++++++-----
 .../TestBatchedRequestsIterators.java           |  82 +++++++++
 .../constraint/TestPlacementProcessor.java      |   4 +-
 15 files changed, 721 insertions(+), 239 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c5fa65b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 03c24d4..af83d8d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -534,6 +534,10 @@ public class YarnConfiguration extends Configuration {
   public static final String RM_PLACEMENT_CONSTRAINTS_ALGORITHM_CLASS =
       RM_PREFIX + "placement-constraints.algorithm.class";
 
+  /** Iterator used by the DefaultPlacementAlgorithm - default is SERIAL. */
+  public static final String RM_PLACEMENT_CONSTRAINTS_ALGORITHM_ITERATOR =
+      RM_PREFIX + "placement-constraints.algorithm.iterator";
+
   public static final String RM_PLACEMENT_CONSTRAINTS_ENABLED =
       RM_PREFIX + "placement-constraints.enabled";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c5fa65b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 0285069..62bbdb1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -145,7 +145,13 @@
   <property>
     <description>Constraint Placement Algorithm to be used.</description>
     <name>yarn.resourcemanager.placement-constraints.algorithm.class</name>
-    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor.SamplePlacementAlgorithm</value>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm.DefaultPlacementAlgorithm</value>
+  </property>
+
+  <property>
+    <description>Iterator used by the Constraint Placement Algorithm to traverse scheduling requests.</description>
+    <name>yarn.resourcemanager.placement-constraints.algorithm.iterator</name>
+    <value>SERIAL</value>
   </property>
 
   <property>

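A sketch of selecting the new iterator programmatically; the key is the YarnConfiguration constant added above, and the value must match one of the IteratorType enum names (SERIAL, POPULAR_TAGS):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public final class PlacementIteratorConfig {
  static Configuration withPopularTagsIterator() {
    Configuration conf = new YarnConfiguration();
    // Switch the request iterator from the default SERIAL to POPULAR_TAGS.
    conf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ALGORITHM_ITERATOR,
        "POPULAR_TAGS");
    return conf;
  }
}
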
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c5fa65b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index c873509..2c4ef7b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -579,9 +579,8 @@ public class RMContainerImpl implements RMContainer {
     public void transition(RMContainerImpl container, RMContainerEvent event) {
       // Notify placementManager
       container.rmContext.getAllocationTagsManager().addContainer(
-          container.getNodeId(),
-          container.getApplicationAttemptId().getApplicationId(),
-          container.getContainerId(), container.getAllocationTags());
+          container.getNodeId(), container.getContainerId(),
+          container.getAllocationTags());
 
       container.eventHandler.handle(new RMAppAttemptEvent(
           container.appAttemptId, RMAppAttemptEventType.CONTAINER_ALLOCATED));
@@ -696,9 +695,8 @@ public class RMContainerImpl implements RMContainer {
     public void transition(RMContainerImpl container, RMContainerEvent event) {
       // Notify placementManager
       container.rmContext.getAllocationTagsManager().removeContainer(
-          container.getNodeId(),
-          container.getApplicationAttemptId().getApplicationId(),
-          container.getContainerId(), container.getAllocationTags());
+          container.getNodeId(), container.getContainerId(),
+          container.getAllocationTags());
 
       RMContainerFinishedEvent finishedEvent = (RMContainerFinishedEvent) event;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c5fa65b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
index 7b0b959..4bb3e79 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
@@ -24,6 +24,7 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -54,24 +55,27 @@ public class AllocationTagsManager {
   private final RMContext rmContext;
 
   // Application's tags to Node
-  private Map<ApplicationId, NodeToCountedTags> perAppNodeMappings =
+  private Map<ApplicationId, TypeToCountedTags> perAppNodeMappings =
       new HashMap<>();
   // Application's tags to Rack
-  private Map<ApplicationId, NodeToCountedTags> perAppRackMappings =
+  private Map<ApplicationId, TypeToCountedTags> perAppRackMappings =
       new HashMap<>();
+  // Application's Temporary containers mapping
+  private Map<ApplicationId, Map<NodeId, Map<ContainerId, Set<String>>>>
+      appTempMappings = new HashMap<>();
 
   // Global tags to node mapping (used to fast return aggregated tags
   // cardinality across apps)
-  private NodeToCountedTags<NodeId> globalNodeMapping = new NodeToCountedTags();
+  private TypeToCountedTags<NodeId> globalNodeMapping = new TypeToCountedTags();
   // Global tags to Rack mapping
-  private NodeToCountedTags<String> globalRackMapping = new NodeToCountedTags();
+  private TypeToCountedTags<String> globalRackMapping = new TypeToCountedTags();
 
   /**
    * Generic store mapping type <T> to counted tags.
    * Currently used both for NodeId to Tag, Count and Rack to Tag, Count
    */
   @VisibleForTesting
-  static class NodeToCountedTags<T> {
+  static class TypeToCountedTags<T> {
     // Map<Type, Map<Tag, Count>>
     private Map<T, Map<String, Long>> typeToTagsWithCount = new HashMap<>();
 
@@ -209,25 +213,31 @@ public class AllocationTagsManager {
   }
 
   @VisibleForTesting
-  Map<ApplicationId, NodeToCountedTags> getPerAppNodeMappings() {
+  Map<ApplicationId, TypeToCountedTags> getPerAppNodeMappings() {
     return perAppNodeMappings;
   }
 
   @VisibleForTesting
-  Map<ApplicationId, NodeToCountedTags> getPerAppRackMappings() {
+  Map<ApplicationId, TypeToCountedTags> getPerAppRackMappings() {
     return perAppRackMappings;
   }
 
   @VisibleForTesting
-  NodeToCountedTags getGlobalNodeMapping() {
+  TypeToCountedTags getGlobalNodeMapping() {
     return globalNodeMapping;
   }
 
   @VisibleForTesting
-  NodeToCountedTags getGlobalRackMapping() {
+  TypeToCountedTags getGlobalRackMapping() {
     return globalRackMapping;
   }
 
+  @VisibleForTesting
+  public Map<NodeId, Map<ContainerId, Set<String>>> getAppTempMappings(
+      ApplicationId applicationId) {
+    return appTempMappings.get(applicationId);
+  }
+
   public AllocationTagsManager(RMContext context) {
     ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
     readLock = lock.readLock();
@@ -235,18 +245,52 @@ public class AllocationTagsManager {
     rmContext = context;
   }
 
+
+  /**
+   * Adds a temporary (fake) container-to-tags mapping for a node.
+   * Used by the constrained placement algorithm to keep track of containers
+   * that are currently placed on nodes but are not yet allocated.
+   * @param nodeId node the container is tentatively placed on.
+   * @param applicationId application the scheduling request belongs to.
+   * @param allocationTags allocation tags of the scheduling request.
+   */
+  public void addTempContainer(NodeId nodeId, ApplicationId applicationId,
+      Set<String> allocationTags) {
+    ContainerId tmpContainer = ContainerId.newContainerId(
+        ApplicationAttemptId.newInstance(applicationId, 1), System.nanoTime());
+
+    writeLock.lock();
+    try {
+      Map<NodeId, Map<ContainerId, Set<String>>> appTempMapping =
+          appTempMappings.computeIfAbsent(applicationId, k -> new HashMap<>());
+      Map<ContainerId, Set<String>> containerTempMapping =
+          appTempMapping.computeIfAbsent(nodeId, k -> new HashMap<>());
+      containerTempMapping.put(tmpContainer, allocationTags);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Added TEMP container=" + tmpContainer + " with tags=["
+            + StringUtils.join(allocationTags, ",") + "]");
+      }
+    } finally {
+      writeLock.unlock();
+    }
+
+    addContainer(nodeId, tmpContainer, allocationTags);
+  }
+
   /**
    * Notify container allocated on a node.
    *
    * @param nodeId         allocated node.
-   * @param applicationId  applicationId
    * @param containerId    container id.
    * @param allocationTags allocation tags, see
    *                       {@link SchedulingRequest#getAllocationTags()}
    *                       application_id will be added to allocationTags.
    */
-  public void addContainer(NodeId nodeId, ApplicationId applicationId,
-      ContainerId containerId, Set<String> allocationTags) {
+  public void addContainer(NodeId nodeId, ContainerId containerId,
+      Set<String> allocationTags) {
+    ApplicationId applicationId =
+        containerId.getApplicationAttemptId().getApplicationId();
     String applicationIdTag =
         AllocationTagsNamespaces.APP_ID + applicationId.toString();
 
@@ -260,10 +304,10 @@ public class AllocationTagsManager {
 
     writeLock.lock();
     try {
-      NodeToCountedTags perAppTagsMapping = perAppNodeMappings
-          .computeIfAbsent(applicationId, k -> new NodeToCountedTags());
-      NodeToCountedTags perAppRackTagsMapping = perAppRackMappings
-          .computeIfAbsent(applicationId, k -> new NodeToCountedTags());
+      TypeToCountedTags perAppTagsMapping = perAppNodeMappings
+          .computeIfAbsent(applicationId, k -> new TypeToCountedTags());
+      TypeToCountedTags perAppRackTagsMapping = perAppRackMappings
+          .computeIfAbsent(applicationId, k -> new TypeToCountedTags());
       // Covering test-cases where context is mocked
       String nodeRack = (rmContext.getRMNodes() != null
           && rmContext.getRMNodes().get(nodeId) != null)
@@ -294,12 +338,13 @@ public class AllocationTagsManager {
    * Notify container removed.
    *
    * @param nodeId         nodeId
-   * @param applicationId  applicationId
    * @param containerId    containerId.
    * @param allocationTags allocation tags for given container
    */
-  public void removeContainer(NodeId nodeId, ApplicationId applicationId,
+  public void removeContainer(NodeId nodeId,
       ContainerId containerId, Set<String> allocationTags) {
+    ApplicationId applicationId =
+        containerId.getApplicationAttemptId().getApplicationId();
     String applicationIdTag =
         AllocationTagsNamespaces.APP_ID + applicationId.toString();
     boolean useSet = false;
@@ -313,9 +358,9 @@ public class AllocationTagsManager {
 
     writeLock.lock();
     try {
-      NodeToCountedTags perAppTagsMapping =
+      TypeToCountedTags perAppTagsMapping =
           perAppNodeMappings.get(applicationId);
-      NodeToCountedTags perAppRackTagsMapping =
+      TypeToCountedTags perAppRackTagsMapping =
           perAppRackMappings.get(applicationId);
       if (perAppTagsMapping == null) {
         return;
@@ -354,6 +399,34 @@ public class AllocationTagsManager {
   }
 
   /**
+   * Removes the temporary containers associated with an application.
+   * Used by the placement algorithm to clean temporary tags at the end of
+   * a placement cycle.
+   * @param applicationId Application Id.
+   */
+  public void cleanTempContainers(ApplicationId applicationId) {
+
+    Map<NodeId, Map<ContainerId, Set<String>>> tempMappings =
+        appTempMappings.get(applicationId);
+    if (tempMappings != null && !tempMappings.isEmpty()) {
+      tempMappings.entrySet().stream().forEach(nodeE -> {
+        nodeE.getValue().entrySet().stream().forEach(containerE -> {
+          removeContainer(nodeE.getKey(), containerE.getKey(),
+              containerE.getValue());
+        });
+      });
+      writeLock.lock();
+      try {
+        appTempMappings.remove(applicationId);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Removed TEMP containers of app=" + applicationId);
+        }
+      } finally {
+        writeLock.unlock();
+      }
+    }
+  }
+
+  /**
    * Get Node cardinality for a specific tag.
    * When applicationId is null, method returns aggregated cardinality
    *
@@ -378,7 +451,7 @@ public class AllocationTagsManager {
             "Must specify nodeId/tag to query cardinality");
       }
 
-      NodeToCountedTags mapping;
+      TypeToCountedTags mapping;
       if (applicationId != null) {
         mapping = perAppNodeMappings.get(applicationId);
       } else {
@@ -419,7 +492,7 @@ public class AllocationTagsManager {
             "Must specify rack/tag to query cardinality");
       }
 
-      NodeToCountedTags mapping;
+      TypeToCountedTags mapping;
       if (applicationId != null) {
         mapping = perAppRackMappings.get(applicationId);
       } else {
@@ -492,7 +565,7 @@ public class AllocationTagsManager {
             "Must specify nodeId/tags/op to query cardinality");
       }
 
-      NodeToCountedTags mapping;
+      TypeToCountedTags mapping;
       if (applicationId != null) {
         mapping = perAppNodeMappings.get(applicationId);
       } else {
@@ -540,7 +613,7 @@ public class AllocationTagsManager {
             "Must specify rack/tags/op to query cardinality");
       }
 
-      NodeToCountedTags mapping;
+      TypeToCountedTags mapping;
       if (applicationId != null) {
         mapping = perAppRackMappings.get(applicationId);
       } else {

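A sketch of the temp-container lifecycle the methods above implement, assuming a suitably constructed RMContext (for example a mocked one, as in the tests):

import java.util.Collections;

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.InvalidAllocationTagsQueryException;

public final class TempTagsExample {
  static void tempContainerCycle(RMContext rmContext)
      throws InvalidAllocationTagsQueryException {
    AllocationTagsManager atm = new AllocationTagsManager(rmContext);
    NodeId node = NodeId.newInstance("host1", 1234);
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);

    // Tentatively place a tagged request on the node during a placement cycle.
    atm.addTempContainer(node, appId, Collections.singleton("hbase-m"));
    // Queries now see the temporary placement.
    long cardinality = atm.getNodeCardinality(node, appId, "hbase-m"); // 1
    // End of cycle: all temporary containers for the app are rolled back.
    atm.cleanTempContainers(appId);
  }
}
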
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c5fa65b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
new file mode 100644
index 0000000..395c156
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
@@ -0,0 +1,172 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraintTransformations;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.InvalidAllocationTagsQueryException;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithm;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmInput;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmOutput;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmOutputCollector;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.PlacedSchedulingRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor.BatchedRequests;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor.NodeCandidateSelector;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Basic placement algorithm.
+ * Supports different iterators over the batched SchedulingRequests,
+ * including: Serial and PopularTags.
+ */
+public class DefaultPlacementAlgorithm implements ConstraintPlacementAlgorithm {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DefaultPlacementAlgorithm.class);
+
+  private AllocationTagsManager tagsManager;
+  private PlacementConstraintManager constraintManager;
+  private NodeCandidateSelector nodeSelector;
+
+  @Override
+  public void init(RMContext rmContext) {
+    this.tagsManager = rmContext.getAllocationTagsManager();
+    this.constraintManager = rmContext.getPlacementConstraintManager();
+    this.nodeSelector =
+        filter -> ((AbstractYarnScheduler) (rmContext).getScheduler())
+            .getNodes(filter);
+  }
+
+  /**
+   * Checks if a request with the given tags can be placed on the given node.
+   * TODO: Method will be moved to PlacementConstraintsUtil class (YARN-7682)
+   * @param applicationId application making the request.
+   * @param allocationTags allocation tags of the scheduling request.
+   * @param nodeId node being evaluated for the placement.
+   * @param tagsManager tags manager used to query tag cardinalities.
+   * @return true if the placement satisfies the registered constraint.
+   * @throws InvalidAllocationTagsQueryException on an invalid tag query.
+   */
+  public boolean canAssign(ApplicationId applicationId,
+      Set<String> allocationTags, NodeId nodeId,
+      AllocationTagsManager tagsManager)
+      throws InvalidAllocationTagsQueryException {
+    PlacementConstraint constraint =
+        constraintManager.getConstraint(applicationId, allocationTags);
+    if (constraint == null) {
+      return true;
+    }
+    // TODO: proper transformations
+    // Currently works only for simple anti-affinity
+    // NODE scope target expressions
+    PlacementConstraintTransformations.SpecializedConstraintTransformer transformer =
+        new PlacementConstraintTransformations.SpecializedConstraintTransformer(
+            constraint);
+    PlacementConstraint transform = transformer.transform();
+    PlacementConstraint.TargetConstraint targetConstraint =
+        (PlacementConstraint.TargetConstraint) transform.getConstraintExpr();
+    // Assume a single target expression tag:
+    // this algorithm currently only handles a simple Target Constraint
+    // with a single entry in the target set (node-scope anti-affinity).
+    String targetTag = targetConstraint.getTargetExpressions().iterator().next()
+        .getTargetValues().iterator().next();
+    // TODO: Assuming anti-affinity constraint
+    long nodeCardinality =
+        tagsManager.getNodeCardinality(nodeId, applicationId, targetTag);
+    if (nodeCardinality != 0) {
+      return false;
+    }
+    // Zero occurrences of the target tag on this node: valid placement.
+    return true;
+  }
+
+  public boolean attemptPlacementOnNode(ApplicationId appId,
+      SchedulingRequest schedulingRequest, SchedulerNode schedulerNode)
+      throws InvalidAllocationTagsQueryException {
+    int numAllocs = schedulingRequest.getResourceSizing().getNumAllocations();
+    if (numAllocs > 0) {
+      if (canAssign(appId,
+          schedulingRequest.getAllocationTags(), schedulerNode.getNodeID(),
+          tagsManager)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  @Override
+  public void place(ConstraintPlacementAlgorithmInput input,
+      ConstraintPlacementAlgorithmOutputCollector collector) {
+    BatchedRequests requests = (BatchedRequests) input;
+    ConstraintPlacementAlgorithmOutput resp =
+        new ConstraintPlacementAlgorithmOutput(requests.getApplicationId());
+    List<SchedulerNode> allNodes = nodeSelector.selectNodes(null);
+
+    Iterator<SchedulingRequest> requestIterator = requests.iterator();
+    while (requestIterator.hasNext()) {
+      SchedulingRequest schedulingRequest = requestIterator.next();
+      Iterator<SchedulerNode> nodeIter = allNodes.iterator();
+      int numAllocs = schedulingRequest.getResourceSizing().getNumAllocations();
+      while (nodeIter.hasNext() && numAllocs > 0) {
+        SchedulerNode node = nodeIter.next();
+        try {
+          if (attemptPlacementOnNode(requests.getApplicationId(),
+              schedulingRequest, node)) {
+            schedulingRequest.getResourceSizing()
+                .setNumAllocations(--numAllocs);
+            PlacedSchedulingRequest placedReq =
+                new PlacedSchedulingRequest(schedulingRequest);
+            placedReq.setPlacementAttempt(requests.getPlacementAttempt());
+            placedReq.getNodes().add(node);
+            resp.getPlacedRequests().add(placedReq);
+            numAllocs =
+                schedulingRequest.getResourceSizing().getNumAllocations();
+            // Add temp-container tags for current placement cycle
+            this.tagsManager.addTempContainer(node.getNodeID(),
+                requests.getApplicationId(),
+                schedulingRequest.getAllocationTags());
+          }
+        } catch (InvalidAllocationTagsQueryException e) {
+          LOG.warn("Got exception from TagManager !", e);
+        }
+      }
+    }
+    // Add all requests whose numAllocations still > 0 to rejected list.
+    requests.getSchedulingRequests().stream()
+        .filter(sReq -> sReq.getResourceSizing().getNumAllocations() > 0)
+        .forEach(rejReq -> resp.getRejectedRequests().add(rejReq));
+    collector.collect(resp);
+    // Clean current temp-container tags
+    this.tagsManager.cleanTempContainers(requests.getApplicationId());
+  }
+}

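For context, the only constraint shape this algorithm currently evaluates is simple node-scope anti-affinity. A sketch of expressing one with the PlacementConstraints DSL; class and method names follow the YARN placement-constraints API and should be treated as illustrative:

import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetNotIn;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;

import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraints;

public final class AntiAffinityExample {
  // "No two containers tagged hbase-m may land on the same node."
  static PlacementConstraint noTwoHBaseMastersPerNode() {
    return PlacementConstraints.build(targetNotIn(NODE, allocationTag("hbase-m")));
  }
}
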
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c5fa65b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/iterators/PopularTagsIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/iterators/PopularTagsIterator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/iterators/PopularTagsIterator.java
new file mode 100644
index 0000000..ca3e351
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/iterators/PopularTagsIterator.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm.iterators;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.NoSuchElementException;
+
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+
+/**
+ * Traverses scheduling requests with the most popular tags (by count) first.
+ * Currently the count is per batch, but the TagManager could be used for a
+ * global count.
+ */
+public class PopularTagsIterator implements Iterator<SchedulingRequest> {
+
+  private final List<SchedulingRequest> schedulingRequestList;
+  private int cursor;
+
+  public PopularTagsIterator(Collection<SchedulingRequest> schedulingRequests) {
+    this.schedulingRequestList = new ArrayList<>(schedulingRequests);
+    // Most popular First
+    Collections.sort(schedulingRequestList,
+        (o1, o2) -> (int) getTagPopularity(o2) - (int) getTagPopularity(o1));
+
+    this.cursor = 0;
+  }
+
+  private long getTagPopularity(SchedulingRequest o1) {
+    long max = 0;
+    for (String tag : o1.getAllocationTags()) {
+      long count = schedulingRequestList.stream()
+          .filter(req -> req.getAllocationTags().contains(tag)).count();
+      if (count > max) {
+        max = count;
+      }
+    }
+    return max;
+  }
+
+  @Override
+  public boolean hasNext() {
+    return (cursor < schedulingRequestList.size());
+  }
+
+  @Override
+  public SchedulingRequest next() {
+    if (hasNext()) {
+      return schedulingRequestList.get(cursor++);
+    }
+    throw new NoSuchElementException();
+  }
+}

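A small sketch of the ordering this iterator yields; tagged(...) below is a hypothetical helper that builds a SchedulingRequest carrying a single allocation tag (see the builder example in the first commit above):

import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;

import org.apache.hadoop.yarn.api.records.SchedulingRequest;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm.iterators.PopularTagsIterator;

public final class PopularTagsOrdering {
  static void demo() {
    List<SchedulingRequest> batch = Arrays.asList(
        tagged("zk"),     // "zk" occurs once in this batch
        tagged("spark"),  // "spark" occurs twice
        tagged("spark"));
    Iterator<SchedulingRequest> it = new PopularTagsIterator(batch);
    // Iteration order: the two "spark" requests first, then the "zk" one.
    while (it.hasNext()) {
      System.out.println(it.next().getAllocationTags());
    }
  }

  // Hypothetical helper: builds a request with a single allocation tag.
  static SchedulingRequest tagged(String tag) {
    return SchedulingRequest.newBuilder()
        .allocationTags(Collections.singleton(tag)).build();
  }
}
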
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c5fa65b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/iterators/SerialIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/iterators/SerialIterator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/iterators/SerialIterator.java
new file mode 100644
index 0000000..68733a2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/iterators/SerialIterator.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm.iterators;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.NoSuchElementException;
+
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+
+/**
+ * Traverses scheduling requests in the same order as they arrive.
+ */
+public class SerialIterator implements Iterator<SchedulingRequest> {
+
+  private final List<SchedulingRequest> schedulingRequestList;
+  private int cursor;
+
+  public SerialIterator(Collection<SchedulingRequest> schedulingRequests) {
+    this.schedulingRequestList = new ArrayList<>(schedulingRequests);
+    this.cursor = 0;
+  }
+
+  @Override
+  public boolean hasNext() {
+    return (cursor < schedulingRequestList.size());
+  }
+
+  @Override
+  public SchedulingRequest next() {
+    if (hasNext()) {
+      return schedulingRequestList.get(cursor++);
+    }
+    throw new NoSuchElementException();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c5fa65b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/iterators/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/iterators/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/iterators/package-info.java
new file mode 100644
index 0000000..c84671e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/iterators/package-info.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package
+ * org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm.iterators
+ * contains iterators used by the constraint placement algorithms to traverse
+ * the scheduling requests of a batch.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm.iterators;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c5fa65b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/package-info.java
new file mode 100644
index 0000000..bb82077
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/package-info.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package
+ * org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm
+ * contains placement algorithm implementations for scheduling containers using
+ * placement constraints.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c5fa65b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java
index fe92d2f..8b04860 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java
@@ -21,12 +21,15 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm.iterators.PopularTagsIterator;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm.iterators.SerialIterator;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmInput;
 
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;
 
@@ -35,7 +38,8 @@ import java.util.Set;
  * to place as a batch. The placement algorithm tends to give more optimal
  * placements if more requests are batched together.
  */
-class BatchedRequests implements ConstraintPlacementAlgorithmInput {
+public class BatchedRequests
+    implements ConstraintPlacementAlgorithmInput, Iterable<SchedulingRequest> {
 
   // PlacementAlgorithmOutput attempt - the number of times the requests in this
   // batch has been placed but was rejected by the scheduler.
@@ -44,19 +48,46 @@ class BatchedRequests implements ConstraintPlacementAlgorithmInput {
   private final ApplicationId applicationId;
   private final Collection<SchedulingRequest> requests;
   private final Map<String, Set<NodeId>> blacklist = new HashMap<>();
+  private IteratorType iteratorType;
 
-  BatchedRequests(ApplicationId applicationId,
+  /**
+   * Iterator Type.
+   */
+  public enum IteratorType {
+    SERIAL,
+    POPULAR_TAGS
+  }
+
+  public BatchedRequests(IteratorType type, ApplicationId applicationId,
       Collection<SchedulingRequest> requests, int attempt) {
+    this.iteratorType = type;
     this.applicationId = applicationId;
     this.requests = requests;
     this.placementAttempt = attempt;
   }
 
   /**
+   * Exposes a SchedulingRequest Iterator that can be used to traverse
+   * requests using different heuristics, e.g. Tag Popularity.
+   * @return SchedulingRequest Iterator.
+   */
+  @Override
+  public Iterator<SchedulingRequest> iterator() {
+    switch (this.iteratorType) {
+    case SERIAL:
+      return new SerialIterator(requests);
+    case POPULAR_TAGS:
+      return new PopularTagsIterator(requests);
+    default:
+      return null;
+    }
+  }
+
+  /**
    * Get Application Id.
    * @return Application Id.
    */
-  ApplicationId getApplicationId() {
+  public ApplicationId getApplicationId() {
     return applicationId;
   }
 
@@ -73,11 +104,11 @@ class BatchedRequests implements ConstraintPlacementAlgorithmInput {
    * Add a Scheduling request to the batch.
    * @param req Scheduling Request.
    */
-  void addToBatch(SchedulingRequest req) {
+  public void addToBatch(SchedulingRequest req) {
     requests.add(req);
   }
 
-  void addToBlacklist(Set<String> tags, SchedulerNode node) {
+  public void addToBlacklist(Set<String> tags, SchedulerNode node) {
     if (tags != null && !tags.isEmpty()) {
       // We are currently assuming a single allocation tag
       // per scheduler request currently.
@@ -90,7 +121,7 @@ class BatchedRequests implements ConstraintPlacementAlgorithmInput {
    * Get placement attempt.
    * @return PlacementAlgorithmOutput placement Attempt.
    */
-  int getPlacementAttempt() {
+  public int getPlacementAttempt() {
     return placementAttempt;
   }
 
@@ -99,7 +130,7 @@ class BatchedRequests implements ConstraintPlacementAlgorithmInput {
    * @param tag Tag.
    * @return Set of blacklisted Nodes.
    */
-  Set<NodeId> getBlacklist(String tag) {
+  public Set<NodeId> getBlacklist(String tag) {
     return blacklist.getOrDefault(tag, Collections.EMPTY_SET);
   }
 }
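
A minimal usage sketch of the now-iterable BatchedRequests; appId and
requests are assumed to be in scope and handle() is a placeholder, so
this is illustrative only, not part of the commit:

    import java.util.List;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.SchedulingRequest;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor.BatchedRequests;

    void traverse(ApplicationId appId, List<SchedulingRequest> requests) {
      // SERIAL preserves the submission order of the batch.
      BatchedRequests serial = new BatchedRequests(
          BatchedRequests.IteratorType.SERIAL, appId, requests, 1);
      for (SchedulingRequest req : serial) {
        handle(req); // placeholder: process in submission order
      }
      // POPULAR_TAGS yields requests carrying the most frequent
      // allocation tags first, which can help batched placement.
      BatchedRequests popular = new BatchedRequests(
          BatchedRequests.IteratorType.POPULAR_TAGS, appId, requests, 1);
      for (SchedulingRequest req : popular) {
        handle(req); // placeholder: process in tag-popularity order
      }
    }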

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c5fa65b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java
index d613d4e..8e9c79c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java
@@ -35,8 +35,10 @@ import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm.DefaultPlacementAlgorithm;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithm;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.PlacedSchedulingRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.SchedulingResponse;
@@ -98,6 +100,7 @@ public class PlacementProcessor implements ApplicationMasterServiceProcessor {
   private Map<ApplicationId, List<SchedulingRequest>> requestsToReject =
       new ConcurrentHashMap<>();
 
+  private BatchedRequests.IteratorType iteratorType;
   private PlacementDispatcher placementDispatcher;
 
 
@@ -122,9 +125,20 @@ public class PlacementProcessor implements ApplicationMasterServiceProcessor {
     if (instances != null && !instances.isEmpty()) {
       algorithm = instances.get(0);
     } else {
-      algorithm = new SamplePlacementAlgorithm();
+      algorithm = new DefaultPlacementAlgorithm();
+    }
+    LOG.info("Placement Algorithm [{}]", algorithm.getClass().getName());
+
+    String iteratorName = ((RMContextImpl) amsContext).getYarnConfiguration()
+        .get(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ALGORITHM_ITERATOR,
+            BatchedRequests.IteratorType.SERIAL.name());
+    LOG.info("Placement Algorithm Iterator[{}]", iteratorName);
+    try {
+      iteratorType = BatchedRequests.IteratorType.valueOf(iteratorName);
+    } catch (IllegalArgumentException e) {
+      throw new YarnRuntimeException(
+          "Could not instantiate Placement Algorithm Iterator: ", e);
     }
-    LOG.info("Planning Algorithm [{}]", algorithm.getClass().getName());
 
     int algoPSize = ((RMContextImpl) amsContext).getYarnConfiguration().getInt(
         YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ALGORITHM_POOL_SIZE,
@@ -188,9 +202,8 @@ public class PlacementProcessor implements ApplicationMasterServiceProcessor {
   private void dispatchRequestsForPlacement(ApplicationAttemptId appAttemptId,
       List<SchedulingRequest> schedulingRequests) {
     if (schedulingRequests != null && !schedulingRequests.isEmpty()) {
-      this.placementDispatcher.dispatch(
-          new BatchedRequests(appAttemptId.getApplicationId(),
-              schedulingRequests, 1));
+      this.placementDispatcher.dispatch(new BatchedRequests(iteratorType,
+          appAttemptId.getApplicationId(), schedulingRequests, 1));
     }
   }
 
@@ -329,11 +342,10 @@ public class PlacementProcessor implements ApplicationMasterServiceProcessor {
       }
     }
     if (!isAdded) {
-      BatchedRequests br =
-          new BatchedRequests(schedulerResponse.getApplicationId(),
-              Collections.singleton(
-                  schedulerResponse.getSchedulingRequest()),
-              placementAttempt + 1);
+      BatchedRequests br = new BatchedRequests(iteratorType,
+          schedulerResponse.getApplicationId(),
+          Collections.singleton(schedulerResponse.getSchedulingRequest()),
+          placementAttempt + 1);
       reqsToRetry.add(br);
       br.addToBlacklist(
           schedulerResponse.getSchedulingRequest().getAllocationTags(),
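
A hedged configuration sketch for the iterator selection wired in above;
the property constant appears in the hunk, and the value string follows
BatchedRequests.IteratorType:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor.BatchedRequests;

    Configuration conf = new YarnConfiguration();
    // Default is SERIAL; an unrecognized value makes initialization fail
    // with a YarnRuntimeException, as the try/catch above shows.
    conf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ALGORITHM_ITERATOR,
        BatchedRequests.IteratorType.POPULAR_TAGS.name());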

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c5fa65b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/SamplePlacementAlgorithm.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/SamplePlacementAlgorithm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/SamplePlacementAlgorithm.java
deleted file mode 100644
index 8d49801..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/SamplePlacementAlgorithm.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor;
-
-import org.apache.hadoop.yarn.api.records.SchedulingRequest;
-import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
-import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetConstraint;
-import org.apache.hadoop.yarn.api.resource.PlacementConstraintTransformations.SpecializedConstraintTransformer;
-import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.InvalidAllocationTagsQueryException;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManager;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithm;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmInput;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmOutput;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmOutputCollector;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.PlacedSchedulingRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Sample Test algorithm. Assumes anti-affinity always
- * It also assumes the numAllocations in resource sizing is always = 1
- *
- * NOTE: This is just a sample implementation. Not be actually used
- */
-public class SamplePlacementAlgorithm implements ConstraintPlacementAlgorithm {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(SamplePlacementAlgorithm.class);
-
-  private AllocationTagsManager tagsManager;
-  private PlacementConstraintManager constraintManager;
-  private NodeCandidateSelector nodeSelector;
-
-  @Override
-  public void init(RMContext rmContext) {
-    this.tagsManager = rmContext.getAllocationTagsManager();
-    this.constraintManager = rmContext.getPlacementConstraintManager();
-    this.nodeSelector =
-        filter -> ((AbstractYarnScheduler)(rmContext)
-            .getScheduler()).getNodes(filter);
-  }
-
-  @Override
-  public void place(ConstraintPlacementAlgorithmInput input,
-      ConstraintPlacementAlgorithmOutputCollector collector) {
-    BatchedRequests requests = (BatchedRequests)input;
-    ConstraintPlacementAlgorithmOutput resp =
-        new ConstraintPlacementAlgorithmOutput(requests.getApplicationId());
-    List<SchedulerNode> allNodes = nodeSelector.selectNodes(null);
-    Map<String, List<SchedulingRequest>> tagIndexedRequests = new HashMap<>();
-    requests.getSchedulingRequests()
-        .stream()
-        .filter(r -> r.getAllocationTags() != null)
-        .forEach(
-            req -> req.getAllocationTags().forEach(
-                tag -> tagIndexedRequests.computeIfAbsent(tag,
-                    k -> new ArrayList<>()).add(req))
-        );
-    for (Map.Entry<String, List<SchedulingRequest>> entry :
-        tagIndexedRequests.entrySet()) {
-      String tag = entry.getKey();
-      PlacementConstraint constraint =
-          constraintManager.getConstraint(requests.getApplicationId(),
-              Collections.singleton(tag));
-      if (constraint != null) {
-        // Currently works only for simple anti-affinity
-        // NODE scope target expressions
-        SpecializedConstraintTransformer transformer =
-            new SpecializedConstraintTransformer(constraint);
-        PlacementConstraint transform = transformer.transform();
-        TargetConstraint targetConstraint =
-            (TargetConstraint) transform.getConstraintExpr();
-        // Assume a single target expression tag;
-        // The Sample Algorithm assumes a constraint will always be a simple
-        // Target Constraint with a single entry in the target set.
-        // As mentioned in the class javadoc - This algorithm should be
-        // used mostly for testing and validating end-2-end workflow.
-        String targetTag =
-            targetConstraint.getTargetExpressions().iterator().next()
-            .getTargetValues().iterator().next();
-        // iterate over all nodes
-        Iterator<SchedulerNode> nodeIter = allNodes.iterator();
-        List<SchedulingRequest> schedulingRequests = entry.getValue();
-        Iterator<SchedulingRequest> reqIter = schedulingRequests.iterator();
-        while (reqIter.hasNext()) {
-          SchedulingRequest sReq = reqIter.next();
-          int numAllocs = sReq.getResourceSizing().getNumAllocations();
-          while (numAllocs > 0 && nodeIter.hasNext()) {
-            SchedulerNode node = nodeIter.next();
-            long nodeCardinality = 0;
-            try {
-              nodeCardinality = tagsManager.getNodeCardinality(
-                  node.getNodeID(), requests.getApplicationId(),
-                  targetTag);
-              if (nodeCardinality == 0 &&
-                  !requests.getBlacklist(tag).contains(node.getNodeID())) {
-                numAllocs--;
-                sReq.getResourceSizing().setNumAllocations(numAllocs);
-                PlacedSchedulingRequest placedReq =
-                    new PlacedSchedulingRequest(sReq);
-                placedReq.setPlacementAttempt(requests.getPlacementAttempt());
-                placedReq.getNodes().add(node);
-                resp.getPlacedRequests().add(placedReq);
-              }
-            } catch (InvalidAllocationTagsQueryException e) {
-              LOG.warn("Got exception from TagManager !", e);
-            }
-          }
-        }
-      }
-    }
-    // Add all requests whose numAllocations still > 0 to rejected list.
-    requests.getSchedulingRequests().stream()
-        .filter(sReq -> sReq.getResourceSizing().getNumAllocations() > 0)
-        .forEach(rejReq -> resp.getRejectedRequests().add(rejReq));
-    collector.collect(resp);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c5fa65b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
index 0ce1614..f1d5663 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
@@ -75,24 +75,24 @@ public class TestAllocationTagsManager {
 
     // 3 Containers from app1
     atm.addContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        TestUtils.getMockContainerId(1, 1),
         ImmutableSet.of("mapper", "reducer"));
 
     atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        TestUtils.getMockContainerId(1, 2),
         ImmutableSet.of("mapper", "reducer"));
 
     atm.addContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
+        TestUtils.getMockContainerId(1, 3),
         ImmutableSet.of("service"));
 
     atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
+        TestUtils.getMockContainerId(1, 4),
         ImmutableSet.of("reducer"));
 
     // 1 Container from app2
     atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
+        TestUtils.getMockContainerId(2, 3),
         ImmutableSet.of("service"));
 
     // Get Node Cardinality of app1 on node1, with tag "mapper"
@@ -170,24 +170,21 @@ public class TestAllocationTagsManager {
 
     // Finish all containers:
     atm.removeContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        TestUtils.getMockContainerId(1, 1),
         ImmutableSet.of("mapper", "reducer"));
 
     atm.removeContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        TestUtils.getMockContainerId(1, 2),
         ImmutableSet.of("mapper", "reducer"));
 
     atm.removeContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
-        ImmutableSet.of("service"));
+        TestUtils.getMockContainerId(1, 3), ImmutableSet.of("service"));
 
     atm.removeContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
-        ImmutableSet.of("reducer"));
+        TestUtils.getMockContainerId(1, 4), ImmutableSet.of("reducer"));
 
     atm.removeContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
-        ImmutableSet.of("service"));
+        TestUtils.getMockContainerId(2, 3), ImmutableSet.of("service"));
 
     // Expect all cardinality to be 0
     // Get Cardinality of app1 on node1, with tag "mapper"
@@ -270,25 +267,22 @@ public class TestAllocationTagsManager {
 
     // 3 Containers from app1
     atm.addContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        TestUtils.getMockContainerId(1, 1),
         ImmutableSet.of("mapper", "reducer"));
 
     atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 2),
+        TestUtils.getMockContainerId(2, 2),
         ImmutableSet.of("mapper", "reducer"));
 
     atm.addContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 4),
-        ImmutableSet.of("reducer"));
+        TestUtils.getMockContainerId(2, 4), ImmutableSet.of("reducer"));
 
     atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
-        ImmutableSet.of("service"));
+        TestUtils.getMockContainerId(1, 3), ImmutableSet.of("service"));
 
     // 1 Container from app2
     atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
-        ImmutableSet.of("service"));
+        TestUtils.getMockContainerId(2, 3), ImmutableSet.of("service"));
 
     // Get Rack Cardinality of app1 on rack0, with tag "mapper"
     Assert.assertEquals(1, atm.getRackCardinality("rack0",
@@ -325,45 +319,39 @@ public class TestAllocationTagsManager {
 
     // Add a bunch of containers
     atm.addContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        TestUtils.getMockContainerId(1, 1),
         ImmutableSet.of("mapper", "reducer"));
 
     atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        TestUtils.getMockContainerId(1, 2),
         ImmutableSet.of("mapper", "reducer"));
 
     atm.addContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
-        ImmutableSet.of("service"));
+        TestUtils.getMockContainerId(1, 3), ImmutableSet.of("service"));
 
     atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
-        ImmutableSet.of("reducer"));
+        TestUtils.getMockContainerId(1, 4), ImmutableSet.of("reducer"));
 
     atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
-        ImmutableSet.of("service"));
+        TestUtils.getMockContainerId(2, 3), ImmutableSet.of("service"));
 
     // Remove all these containers
     atm.removeContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        TestUtils.getMockContainerId(1, 1),
         ImmutableSet.of("mapper", "reducer"));
 
     atm.removeContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        TestUtils.getMockContainerId(1, 2),
         ImmutableSet.of("mapper", "reducer"));
 
     atm.removeContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
-        ImmutableSet.of("service"));
+        TestUtils.getMockContainerId(1, 3), ImmutableSet.of("service"));
 
     atm.removeContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
-        ImmutableSet.of("reducer"));
+        TestUtils.getMockContainerId(1, 4), ImmutableSet.of("reducer"));
 
     atm.removeContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
-        ImmutableSet.of("service"));
+        TestUtils.getMockContainerId(2, 3), ImmutableSet.of("service"));
 
     // Check internal data structure
     Assert.assertEquals(0,
@@ -375,6 +363,87 @@ public class TestAllocationTagsManager {
   }
 
   @Test
+  public void testTempContainerAllocations()
+      throws InvalidAllocationTagsQueryException {
+    /**
+     * Construct both TEMP and normal containers:
+     * Node1: TEMP container_1_1 (mapper/reducer/app_1),
+     *        container_1_2 (service/app_1)
+     * Node2: container_1_3 (reducer/app_1), TEMP container_2_1 (service/app_2)
+     */
+
+    AllocationTagsManager atm = new AllocationTagsManager(rmContext);
+
+    // 3 Containers from app1
+    atm.addTempContainer(NodeId.fromString("host1:123"),
+        TestUtils.getMockApplicationId(1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("host1:123"),
+        TestUtils.getMockContainerId(1, 2), ImmutableSet.of("service"));
+
+    atm.addContainer(NodeId.fromString("host2:123"),
+        TestUtils.getMockContainerId(1, 3), ImmutableSet.of("reducer"));
+
+    // 1 Container from app2
+    atm.addTempContainer(NodeId.fromString("host2:123"),
+        TestUtils.getMockApplicationId(2), ImmutableSet.of("service"));
+
+    // Expect tag mappings to be present including temp Tags
+    Assert.assertEquals(1,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
+            Long::sum));
+
+    Assert.assertEquals(1,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of("service"),
+            Long::sum));
+
+    Assert.assertEquals(1,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
+            TestUtils.getMockApplicationId(2), ImmutableSet.of("service"),
+            Long::sum));
+
+    // Do a temp Tag cleanup on app2
+    atm.cleanTempContainers(TestUtils.getMockApplicationId(2));
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
+            TestUtils.getMockApplicationId(2), ImmutableSet.of("service"),
+            Long::sum));
+    // Expect app1 to be unaffected
+    Assert.assertEquals(1,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
+            Long::sum));
+    // Do a cleanup on app1 as well
+    atm.cleanTempContainers(TestUtils.getMockApplicationId(1));
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
+            Long::sum));
+
+    // Non temp-tags should be unaffected
+    Assert.assertEquals(1,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of("service"),
+            Long::sum));
+
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
+            TestUtils.getMockApplicationId(2), ImmutableSet.of("service"),
+            Long::sum));
+
+    // Expect app2 with no containers, and app1 with 2 containers across 2 nodes
+    Assert.assertEquals(2,
+        atm.getPerAppNodeMappings().get(TestUtils.getMockApplicationId(1))
+            .getTypeToTagsWithCount().size());
+
+    Assert.assertNull(
+        atm.getPerAppNodeMappings().get(TestUtils.getMockApplicationId(2)));
+  }
+
+  @Test
   public void testQueryCardinalityWithIllegalParameters()
       throws InvalidAllocationTagsQueryException {
     /**
@@ -385,24 +454,21 @@ public class TestAllocationTagsManager {
 
     // Add a bunch of containers
     atm.addContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        TestUtils.getMockContainerId(1, 1),
         ImmutableSet.of("mapper", "reducer"));
 
     atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        TestUtils.getMockContainerId(1, 2),
         ImmutableSet.of("mapper", "reducer"));
 
     atm.addContainer(NodeId.fromString("host1:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
-        ImmutableSet.of("service"));
+        TestUtils.getMockContainerId(1, 3), ImmutableSet.of("service"));
 
     atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
-        ImmutableSet.of("reducer"));
+        TestUtils.getMockContainerId(1, 4), ImmutableSet.of("reducer"));
 
     atm.addContainer(NodeId.fromString("host2:123"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
-        ImmutableSet.of("service"));
+        TestUtils.getMockContainerId(2, 3), ImmutableSet.of("service"));
 
     // No node-id
     boolean caughtException = false;
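
A short sketch of the temp-tag lifecycle the new test exercises; atm and
appId are assumed to be in scope, and the cardinality query may throw
InvalidAllocationTagsQueryException:

    import com.google.common.collect.ImmutableSet;
    import org.apache.hadoop.yarn.api.records.NodeId;

    // A temporary tag is keyed by application only; no ContainerId exists yet.
    atm.addTempContainer(NodeId.fromString("host1:123"), appId,
        ImmutableSet.of("mapper"));
    // Temp tags count toward cardinality queries immediately...
    long n = atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
        appId, ImmutableSet.of("mapper"), Long::sum);   // n == 1
    // ...and are dropped wholesale once the app's pending placements
    // resolve; tags added via addContainer() are left untouched.
    atm.cleanTempContainers(appId);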

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c5fa65b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestBatchedRequestsIterators.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestBatchedRequestsIterators.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestBatchedRequestsIterators.java
new file mode 100644
index 0000000..0e7b715
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestBatchedRequestsIterators.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.TestPlacementProcessor.schedulingRequest;
+
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor.BatchedRequests;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test Request Iterator.
+ */
+public class TestBatchedRequestsIterators {
+
+  @Test
+  public void testSerialIterator() throws Exception {
+    List<SchedulingRequest> schedulingRequestList =
+        Arrays.asList(schedulingRequest(1, 1, 1, 512, "foo"),
+            schedulingRequest(1, 2, 1, 512, "foo"),
+            schedulingRequest(1, 3, 1, 512, "foo"),
+            schedulingRequest(1, 4, 1, 512, "foo"));
+
+    BatchedRequests batchedRequests = new BatchedRequests(
+        BatchedRequests.IteratorType.SERIAL, null, schedulingRequestList, 1);
+
+    Iterator<SchedulingRequest> requestIterator = batchedRequests.iterator();
+    long prevAllocId = 0;
+    while (requestIterator.hasNext()) {
+      SchedulingRequest request = requestIterator.next();
+      Assert.assertTrue(request.getAllocationRequestId() > prevAllocId);
+      prevAllocId = request.getAllocationRequestId();
+    }
+  }
+
+  @Test
+  public void testPopularTagsIterator() throws Exception {
+    List<SchedulingRequest> schedulingRequestList =
+        Arrays.asList(schedulingRequest(1, 1, 1, 512, "pri", "foo"),
+            schedulingRequest(1, 2, 1, 512, "bar"),
+            schedulingRequest(1, 3, 1, 512, "foo", "pri"),
+            schedulingRequest(1, 4, 1, 512, "test"),
+            schedulingRequest(1, 5, 1, 512, "pri", "bar"));
+
+    BatchedRequests batchedRequests =
+        new BatchedRequests(BatchedRequests.IteratorType.POPULAR_TAGS, null,
+            schedulingRequestList, 1);
+
+    Iterator<SchedulingRequest> requestIterator = batchedRequests.iterator();
+    long recCount = 0;
+    while (requestIterator.hasNext()) {
+      SchedulingRequest request = requestIterator.next();
+      if (recCount < 3) {
+        Assert.assertTrue(request.getAllocationTags().contains("pri"));
+      } else {
+        Assert.assertTrue(request.getAllocationTags().contains("bar")
+            || request.getAllocationTags().contains("test"));
+      }
+      recCount++;
+    }
+  }
+}
\ No newline at end of file
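
The ordering asserted in testPopularTagsIterator can be approximated by
counting tag occurrences across the batch. A rough sketch of that
heuristic follows; it is not the committed PopularTagsIterator
implementation:

    import java.util.Comparator;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import org.apache.hadoop.yarn.api.records.SchedulingRequest;

    static void sortByTagPopularity(List<SchedulingRequest> requests) {
      Map<String, Integer> freq = new HashMap<>();
      for (SchedulingRequest r : requests) {
        for (String tag : r.getAllocationTags()) {
          freq.merge(tag, 1, Integer::sum);
        }
      }
      // Requests whose most frequent tag is most popular go first; in the
      // test above "pri" occurs three times, so its requests lead.
      requests.sort(Comparator.comparingInt((SchedulingRequest r) ->
          r.getAllocationTags().stream().mapToInt(freq::get).max().orElse(0))
          .reversed());
    }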

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c5fa65b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
index db8ae15..87dd5b7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
@@ -373,13 +373,13 @@ public class TestPlacementProcessor {
         rej.getReason());
   }
 
-  private static SchedulingRequest schedulingRequest(
+  protected static SchedulingRequest schedulingRequest(
       int priority, long allocReqId, int cores, int mem, String... tags) {
     return schedulingRequest(priority, allocReqId, cores, mem,
         ExecutionType.GUARANTEED, tags);
   }
 
-  private static SchedulingRequest schedulingRequest(
+  protected static SchedulingRequest schedulingRequest(
       int priority, long allocReqId, int cores, int mem,
       ExecutionType execType, String... tags) {
     return SchedulingRequest.newBuilder()


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[14/49] hadoop git commit: YARN-7543. Add check for max cpu limit and missing file for YARN service. (Contributed by Jian He)

Posted by as...@apache.org.
YARN-7543.  Add check for max cpu limit and missing file for YARN service.
            (Contributed by Jian He)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/989c7510
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/989c7510
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/989c7510

Branch: refs/heads/YARN-6592
Commit: 989c75109a619deeaee7461864e7cb3c289c9421
Parents: c0aeb66
Author: Eric Yang <ey...@apache.org>
Authored: Tue Dec 19 16:45:04 2017 -0500
Committer: Eric Yang <ey...@apache.org>
Committed: Tue Dec 19 16:45:04 2017 -0500

----------------------------------------------------------------------
 .../hadoop/yarn/service/utils/ServiceApiUtil.java     | 14 ++++++++++----
 .../hadoop/yarn/service/utils/ServiceUtils.java       |  4 ++++
 2 files changed, 14 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/989c7510/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
index d5ea45c..7f85c95 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
@@ -330,13 +330,19 @@ public class ServiceApiUtil {
       org.apache.hadoop.yarn.api.records.Resource maxResource,
       Service service) throws YarnException {
     for (Component component : service.getComponents()) {
-      // only handle mem now.
       long mem = Long.parseLong(component.getResource().getMemory());
       if (mem > maxResource.getMemorySize()) {
         throw new YarnException(
-            "Component " + component.getName() + " memory size (" + mem
-                + ") is larger than configured max container memory size ("
-                + maxResource.getMemorySize() + ")");
+            "Component " + component.getName() + ": specified memory size ("
+                + mem + ") is larger than configured max container memory " +
+                "size (" + maxResource.getMemorySize() + ")");
+      }
+      int cpu = component.getResource().getCpus();
+      if (cpu > maxResource.getVirtualCores()) {
+        throw new YarnException(
+            "Component " + component.getName() + ": specified number of " +
+                "virtual core (" + cpu + ") is larger than configured max " +
+                "virtual core size (" + maxResource.getVirtualCores() + ")");
       }
     }
   }
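
An illustrative failure case for the new vCores check; the component name
and the numbers below are made up:

    import org.apache.hadoop.yarn.api.records.Resource;

    // Cluster maximum container allocation, e.g. 16 GB / 48 vcores.
    Resource maxResource = Resource.newInstance(16384, 48);
    // A component spec declaring cpus = 64 now fails validation with
    //   YarnException: Component worker: specified number of virtual
    //   cores (64) is larger than configured max virtual cores (48)
    // instead of being accepted and then never scheduled.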

http://git-wip-us.apache.org/repos/asf/hadoop/blob/989c7510/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java
index 173001b..915b836 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java
@@ -411,6 +411,10 @@ public final class ServiceUtils {
         return;
       }
       for (File jarFile : listOfJars) {
+        if (!jarFile.exists()) {
+          log.debug("File does not exist, skipping: " + jarFile);
+          continue;
+        }
         LocalResource res = sliderFileSystem.submitFile(jarFile, tempPath, libDir, jarFile.getName());
         providerResources.put(libDir + "/" + jarFile.getName(), res);
       }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[33/49] hadoop git commit: HDFS-12915. Fix findbugs warning in INodeFile$HeaderFormat.getBlockLayoutRedundancy. (Contributed by Chris Douglas)

Posted by as...@apache.org.
HDFS-12915. Fix findbugs warning in INodeFile$HeaderFormat.getBlockLayoutRedundancy. (Contributed by Chris Douglas)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e3e1b8c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e3e1b8c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e3e1b8c

Branch: refs/heads/YARN-6592
Commit: 6e3e1b8cde737e4c03b0f5279cab0239e7069a72
Parents: a55884c
Author: Lei Xu <le...@apache.org>
Authored: Fri Dec 29 12:21:57 2017 -0800
Committer: Lei Xu <le...@apache.org>
Committed: Fri Dec 29 12:21:57 2017 -0800

----------------------------------------------------------------------
 .../hadoop/hdfs/server/namenode/INodeFile.java  | 71 +++++++++++++-------
 1 file changed, 46 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e3e1b8c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 3f2fb33..906a940 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -33,10 +33,11 @@ import java.util.List;
 import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -45,7 +46,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
-import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
@@ -53,11 +53,11 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.LongBitFormat;
+import org.apache.hadoop.util.StringUtils;
+import static org.apache.hadoop.io.erasurecode.ErasureCodeConstants.REPLICATION_POLICY_ID;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
-import org.apache.hadoop.util.StringUtils;
 
 /** I-node for closed file. */
 @InterfaceAudience.Private
@@ -186,28 +186,49 @@ public class INodeFile extends INodeWithAdditionalFields
      * Construct block layout redundancy based on the given BlockType,
      * replication factor and EC PolicyID.
      */
-    static long getBlockLayoutRedundancy(final BlockType blockType,
-        final Short replication, final Byte erasureCodingPolicyID) {
-      long layoutRedundancy = 0;
-      if (blockType == STRIPED) {
-        Preconditions.checkArgument(replication == null &&
-            erasureCodingPolicyID != null);
-        Preconditions.checkArgument(ErasureCodingPolicyManager.getInstance()
-                .getByID(erasureCodingPolicyID) != null,
-            "Could not find EC policy with ID 0x" + StringUtils
-                .byteToHexString(erasureCodingPolicyID));
+    static long getBlockLayoutRedundancy(BlockType blockType,
+        Short replication, Byte erasureCodingPolicyID) {
+      if (null == erasureCodingPolicyID) {
+        erasureCodingPolicyID = REPLICATION_POLICY_ID;
+      }
+      long layoutRedundancy = 0xFF & erasureCodingPolicyID;
+      switch (blockType) {
+      case STRIPED:
+        if (replication != null) {
+          throw new IllegalArgumentException(
+              "Illegal replication for STRIPED block type");
+        }
+        if (erasureCodingPolicyID == REPLICATION_POLICY_ID) {
+          throw new IllegalArgumentException(
+              "Illegal REPLICATION policy for STRIPED block type");
+        }
+        if (null == ErasureCodingPolicyManager.getInstance()
+            .getByID(erasureCodingPolicyID)) {
+          throw new IllegalArgumentException(String.format(
+                "Could not find EC policy with ID 0x%02x",
+                erasureCodingPolicyID));
+        }
+
+        // valid parameters for STRIPED
         layoutRedundancy |= BLOCK_TYPE_MASK_STRIPED;
-        // Following bitwise OR with signed byte erasureCodingPolicyID is safe
-        // as the PolicyID can never be in negative.
-        layoutRedundancy |= erasureCodingPolicyID;
-      } else {
-        Preconditions.checkArgument(erasureCodingPolicyID == null ||
-                erasureCodingPolicyID ==
-                    ErasureCodeConstants.REPLICATION_POLICY_ID);
-        Preconditions.checkArgument(replication != null && replication >= 0 &&
-            replication <= MAX_REDUNDANCY,
-            "Invalid replication value " + replication);
+        break;
+      case CONTIGUOUS:
+        if (erasureCodingPolicyID != REPLICATION_POLICY_ID) {
+          throw new IllegalArgumentException(String.format(
+              "Illegal EC policy 0x%02x for CONTIGUOUS block type",
+              erasureCodingPolicyID));
+        }
+        if (null == replication ||
+            replication < 0 || replication > MAX_REDUNDANCY) {
+          throw new IllegalArgumentException("Invalid replication value "
+              + replication);
+        }
+
+        // valid parameters for CONTIGUOUS
         layoutRedundancy |= replication;
+        break;
+      default:
+        throw new IllegalArgumentException("Unknown blockType: " + blockType);
       }
       return layoutRedundancy;
     }
@@ -599,7 +620,7 @@ public class INodeFile extends INodeWithAdditionalFields
     if (isStriped()) {
       return HeaderFormat.getECPolicyID(header);
     }
-    return ErasureCodeConstants.REPLICATION_POLICY_ID;
+    return REPLICATION_POLICY_ID;
   }
 
   /**
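
The rewritten encoding can be read as "low byte = EC policy id (or the
replication factor for CONTIGUOUS, where the policy id is 0), plus a
block-type bit". A standalone sketch; the mask position is an assumption,
only the structure comes from the hunk above:

    // Assumed bit position, for illustration only.
    static final long BLOCK_TYPE_MASK_STRIPED = 1L << 11;

    static long encode(boolean striped, byte ecPolicyId, short replication) {
      long layout = 0xFF & ecPolicyId;       // low byte: policy id
      if (striped) {
        layout |= BLOCK_TYPE_MASK_STRIPED;   // mark a striped layout
      } else {
        layout |= replication;               // policy id is 0 here
      }
      return layout;
    }

    // encode(false, (byte) 0, (short) 3) == 3
    // encode(true,  (byte) 2, (short) 0) == BLOCK_TYPE_MASK_STRIPED | 2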


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[37/49] hadoop git commit: YARN-7670. Modifications to the ResourceScheduler API to support SchedulingRequests. (asuresh)

Posted by as...@apache.org.
YARN-7670. Modifications to the ResourceScheduler API to support SchedulingRequests. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/defb1387
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/defb1387
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/defb1387

Branch: refs/heads/YARN-6592
Commit: defb13871c47a7f008f864194930c2ef4b4df659
Parents: 1ff35b7
Author: Arun Suresh <as...@apache.org>
Authored: Tue Dec 19 08:59:23 2017 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Fri Dec 29 17:29:15 2017 -0800

----------------------------------------------------------------------
 .../scheduler/AbstractYarnScheduler.java        | 18 +++++
 .../scheduler/ResourceScheduler.java            | 13 ++++
 .../scheduler/capacity/CapacityScheduler.java   | 76 ++++++++++++++++++--
 .../common/ResourceAllocationCommitter.java     | 12 +++-
 .../scheduler/common/fica/FiCaSchedulerApp.java | 19 ++---
 .../TestCapacitySchedulerAsyncScheduling.java   | 10 +--
 6 files changed, 130 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/defb1387/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index cf5e13b..1589d84 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerError;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -296,6 +297,10 @@ public abstract class AbstractYarnScheduler
     return nodeTracker.getNodes(nodeFilter);
   }
 
+  public List<N> getNodes(final NodeFilter filter) {
+    return nodeTracker.getNodes(filter);
+  }
+
   public boolean shouldContainersBeAutoUpdated() {
     return this.autoUpdateContainers;
   }
@@ -1439,4 +1444,17 @@ public abstract class AbstractYarnScheduler
       throw new IOException(e);
     }
   }
+
+  /**
+   * Default implementation. Always returns false.
+   * @param appAttempt ApplicationAttempt.
+   * @param schedulingRequest SchedulingRequest.
+   * @param schedulerNode SchedulerNode.
+   * @return Success or not.
+   */
+  @Override
+  public boolean attemptAllocationOnNode(SchedulerApplicationAttempt appAttempt,
+      SchedulingRequest schedulingRequest, SchedulerNode schedulerNode) {
+    return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/defb1387/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
index d96d625..5a56ac7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable;
 
@@ -58,4 +59,16 @@ public interface ResourceScheduler extends YarnScheduler, Recoverable {
    * @return the number of available {@link NodeId} by resource name.
    */
   List<NodeId> getNodeIds(String resourceName);
+
+  /**
+   * Attempts to allocate a SchedulerRequest on a Node.
+   * NOTE: This ignores the numAllocations in the resource sizing and tries
+   *       to allocate a SINGLE container only.
+   * @param appAttempt ApplicationAttempt.
+   * @param schedulingRequest SchedulingRequest.
+   * @param schedulerNode SchedulerNode.
+   * @return true if proposal was accepted.
+   */
+  boolean attemptAllocationOnNode(SchedulerApplicationAttempt appAttempt,
+      SchedulingRequest schedulingRequest, SchedulerNode schedulerNode);
 }
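
A hedged sketch of how a placement driver might call the new API; the
scheduler, appAttempt, request and node variables are assumed to be in
scope:

    // Attempts exactly ONE container for this request on this node,
    // regardless of the request's numAllocations, per the javadoc above.
    boolean accepted = scheduler.attemptAllocationOnNode(
        appAttempt, request, node);
    if (!accepted) {
      // The proposal was rejected (e.g. the node filled up); the caller
      // can retry on another candidate node or re-batch the request.
    }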

http://git-wip-us.apache.org/repos/asf/hadoop/blob/defb1387/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 000f59c..84273de 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueACL;
@@ -57,6 +58,7 @@ import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@@ -81,6 +83,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptE
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
@@ -98,7 +101,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication;
 
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerDynamicEditException;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesLogger;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
@@ -140,6 +145,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.Candida
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SimpleCandidateNodeSet;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AppPriorityACLsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
+import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.server.utils.Lock;
 import org.apache.hadoop.yarn.util.Clock;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
@@ -551,7 +558,7 @@ public class CapacityScheduler extends
 
           try {
             cs.writeLock.lock();
-            cs.tryCommit(cs.getClusterResource(), request);
+            cs.tryCommit(cs.getClusterResource(), request, true);
           } finally {
             cs.writeLock.unlock();
           }
@@ -2489,10 +2496,67 @@ public class CapacityScheduler extends
       resourceCommitterService.addNewCommitRequest(request);
     } else{
       // Otherwise do it sync-ly.
-      tryCommit(cluster, request);
+      tryCommit(cluster, request, true);
     }
   }
 
+  @Override
+  public boolean attemptAllocationOnNode(SchedulerApplicationAttempt appAttempt,
+      SchedulingRequest schedulingRequest, SchedulerNode schedulerNode) {
+    if (schedulingRequest.getResourceSizing() != null) {
+      if (schedulingRequest.getResourceSizing().getNumAllocations() > 1) {
+        LOG.warn("The SchedulingRequest has requested more than 1 allocation," +
+            " but only 1 will be attempted !!");
+      }
+      if (!appAttempt.isStopped()) {
+        ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode>
+            resourceCommitRequest = createResourceCommitRequest(
+            appAttempt, schedulingRequest, schedulerNode);
+        return tryCommit(getClusterResource(), resourceCommitRequest, false);
+      }
+    }
+    return false;
+  }
+
+  // This assumes a single allocation (numAllocations = 1) for the request.
+  private ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode>
+      createResourceCommitRequest(SchedulerApplicationAttempt appAttempt,
+      SchedulingRequest schedulingRequest, SchedulerNode schedulerNode) {
+    ContainerAllocationProposal<FiCaSchedulerApp, FiCaSchedulerNode> allocated =
+        null;
+    Resource resource = schedulingRequest.getResourceSizing().getResources();
+    if (Resources.greaterThan(calculator, getClusterResource(),
+        resource, Resources.none())) {
+      ContainerId cId =
+          ContainerId.newContainerId(appAttempt.getApplicationAttemptId(),
+              appAttempt.getAppSchedulingInfo().getNewContainerId());
+      Container container = BuilderUtils.newContainer(
+          cId, schedulerNode.getNodeID(), schedulerNode.getHttpAddress(),
+          resource, schedulingRequest.getPriority(), null,
+          ExecutionType.GUARANTEED,
+          schedulingRequest.getAllocationRequestId());
+      RMContainer rmContainer = new RMContainerImpl(container,
+          SchedulerRequestKey.extractFrom(container),
+          appAttempt.getApplicationAttemptId(), container.getNodeId(),
+          appAttempt.getUser(), rmContext, false);
+
+      allocated = new ContainerAllocationProposal<>(
+          getSchedulerContainer(rmContainer, true),
+          null, null, NodeType.NODE_LOCAL, NodeType.NODE_LOCAL,
+          SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY,
+          resource);
+    }
+
+    if (null != allocated) {
+      List<ContainerAllocationProposal<FiCaSchedulerApp, FiCaSchedulerNode>>
+          allocationsList = new ArrayList<>();
+      allocationsList.add(allocated);
+
+      return new ResourceCommitRequest<>(allocationsList, null, null);
+    }
+    return null;
+  }
+
   @VisibleForTesting
   public ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode>
       createResourceCommitRequest(CSAssignment csAssignment) {
@@ -2570,7 +2634,8 @@ public class CapacityScheduler extends
   }
 
   @Override
-  public void tryCommit(Resource cluster, ResourceCommitRequest r) {
+  public boolean tryCommit(Resource cluster, ResourceCommitRequest r,
+      boolean updatePending) {
     ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode> request =
         (ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode>) r;
 
@@ -2600,6 +2665,7 @@ public class CapacityScheduler extends
       LOG.debug("Try to commit allocation proposal=" + request);
     }
 
+    boolean isSuccess = false;
     if (attemptId != null) {
       FiCaSchedulerApp app = getApplicationAttempt(attemptId);
       // Required sanity check for attemptId - when async-scheduling enabled,
@@ -2607,8 +2673,9 @@ public class CapacityScheduler extends
       // and proposal queue was not be consumed in time
       if (app != null && attemptId.equals(app.getApplicationAttemptId())) {
         if (app.accept(cluster, request)) {
-          app.apply(cluster, request);
+          app.apply(cluster, request, updatePending);
           LOG.info("Allocation proposal accepted");
+          isSuccess = true;
         } else{
           LOG.info("Failed to accept allocation proposal");
         }
@@ -2619,6 +2686,7 @@ public class CapacityScheduler extends
         }
       }
     }
+    return isSuccess;
   }
 
   public int getAsyncSchedulingPendingBacklogs() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/defb1387/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/ResourceAllocationCommitter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/ResourceAllocationCommitter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/ResourceAllocationCommitter.java
index bdea97d..2e36b2e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/ResourceAllocationCommitter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/ResourceAllocationCommitter.java
@@ -25,5 +25,15 @@ import org.apache.hadoop.yarn.api.records.Resource;
  * plus global scheduling functionality
  */
 public interface ResourceAllocationCommitter {
-  void tryCommit(Resource cluster, ResourceCommitRequest proposal);
+
+  /**
+   * Try to commit the allocation proposal. This also gives the caller the
+   * option of not updating the pending resource requests on success.
+   * @param cluster Cluster Resource.
+   * @param proposal Proposal.
+   * @param updatePending Whether to decrement the pending resource requests
+   *                      when the proposal is committed successfully.
+   * @return true if the proposal was committed successfully.
+   */
+  boolean tryCommit(Resource cluster, ResourceCommitRequest proposal,
+      boolean updatePending);
 }
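
The updatePending flag threads through to FiCaSchedulerApp#apply (next hunk):
when it is false, an accepted container is still added to liveContainers and
charged to the attempt's resource usage, but appSchedulingInfo is not asked to
deduct a pending request. A hedged sketch of the two call sites, with
illustrative variable names:

    // Regular scheduling path: the proposal originates from pending
    // ResourceRequests, so a successful commit also deducts from pending.
    scheduler.tryCommit(scheduler.getClusterResource(), proposal, true);

    // SchedulingRequest path (attemptAllocationOnNode): nothing was
    // registered as a pending ResourceRequest, so skip the deduction and use
    // the new return value to learn whether the proposal was accepted.
    boolean accepted = scheduler.tryCommit(
        scheduler.getClusterResource(), proposal, false);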

http://git-wip-us.apache.org/repos/asf/hadoop/blob/defb1387/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index 776a7e9..9fda1f4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -485,8 +485,8 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
     return accepted;
   }
 
-  public void apply(Resource cluster,
-      ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode> request) {
+  public void apply(Resource cluster, ResourceCommitRequest<FiCaSchedulerApp,
+      FiCaSchedulerNode> request, boolean updatePending) {
     boolean reReservation = false;
 
     try {
@@ -531,12 +531,15 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
           liveContainers.put(containerId, rmContainer);
 
           // Deduct pending resource requests
-          ContainerRequest containerRequest = appSchedulingInfo.allocate(
-              allocation.getAllocationLocalityType(),
-              schedulerContainer.getSchedulerNode(),
-              schedulerContainer.getSchedulerRequestKey(),
-              schedulerContainer.getRmContainer().getContainer());
-          ((RMContainerImpl) rmContainer).setContainerRequest(containerRequest);
+          if (updatePending) {
+            ContainerRequest containerRequest = appSchedulingInfo.allocate(
+                allocation.getAllocationLocalityType(),
+                schedulerContainer.getSchedulerNode(),
+                schedulerContainer.getSchedulerRequestKey(),
+                schedulerContainer.getRmContainer().getContainer());
+            ((RMContainerImpl) rmContainer).setContainerRequest(
+                containerRequest);
+          }
 
           attemptResourceUsage.incUsed(schedulerContainer.getNodePartition(),
               allocation.getAllocatedOrReservedResource());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/defb1387/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
index 77596e2..6cb21d4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
@@ -259,7 +259,7 @@ public class TestCapacitySchedulerAsyncScheduling {
     reservedProposals.add(reservedForAttempt1Proposal);
     ResourceCommitRequest request =
         new ResourceCommitRequest(null, reservedProposals, null);
-    scheduler.tryCommit(scheduler.getClusterResource(), request);
+    scheduler.tryCommit(scheduler.getClusterResource(), request, true);
     Assert.assertNull("Outdated proposal should not be accepted!",
         sn2.getReservedContainer());
 
@@ -380,7 +380,7 @@ public class TestCapacitySchedulerAsyncScheduling {
           // call real apply
           try {
             cs.tryCommit((Resource) invocation.getArguments()[0],
-                (ResourceCommitRequest) invocation.getArguments()[1]);
+                (ResourceCommitRequest) invocation.getArguments()[1], true);
           } catch (Exception e) {
             e.printStackTrace();
             Assert.fail();
@@ -388,12 +388,12 @@ public class TestCapacitySchedulerAsyncScheduling {
           isChecked.set(true);
         } else {
           cs.tryCommit((Resource) invocation.getArguments()[0],
-              (ResourceCommitRequest) invocation.getArguments()[1]);
+              (ResourceCommitRequest) invocation.getArguments()[1], true);
         }
         return null;
       }
     }).when(spyCs).tryCommit(Mockito.any(Resource.class),
-        Mockito.any(ResourceCommitRequest.class));
+        Mockito.any(ResourceCommitRequest.class), Mockito.anyBoolean());
 
     spyCs.handle(new NodeUpdateSchedulerEvent(sn1.getRMNode()));
 
@@ -468,7 +468,7 @@ public class TestCapacitySchedulerAsyncScheduling {
       newProposals.add(newContainerProposal);
       ResourceCommitRequest request =
           new ResourceCommitRequest(newProposals, null, null);
-      scheduler.tryCommit(scheduler.getClusterResource(), request);
+      scheduler.tryCommit(scheduler.getClusterResource(), request, true);
     }
     // make sure node resource can't be over-allocated!
     Assert.assertTrue("Node resource is Over-allocated!",




[30/49] hadoop git commit: HADOOP-15143. NPE due to Invalid KerberosTicket in UGI. Contributed by Mukul Kumar Singh.

Posted by as...@apache.org.
HADOOP-15143. NPE due to Invalid KerberosTicket in UGI. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d31c9d8c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d31c9d8c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d31c9d8c

Branch: refs/heads/YARN-6592
Commit: d31c9d8c495794a803fb20729b5ed6b374e23eb4
Parents: 52babbb
Author: Jitendra Pandey <ji...@apache.org>
Authored: Wed Dec 27 23:17:07 2017 -0800
Committer: Jitendra Pandey <ji...@apache.org>
Committed: Wed Dec 27 23:17:07 2017 -0800

----------------------------------------------------------------------
 .../hadoop/security/UserGroupInformation.java   |  5 +-
 .../security/TestFixKerberosTicketOrder.java    | 77 ++++++++++++++++++++
 2 files changed, 81 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d31c9d8c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index f7aea31..726e811 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -1253,7 +1253,10 @@ public class UserGroupInformation {
         Object cred = iter.next();
         if (cred instanceof KerberosTicket) {
           KerberosTicket ticket = (KerberosTicket) cred;
-          if (!ticket.getServer().getName().startsWith("krbtgt")) {
+          if (ticket.isDestroyed() || ticket.getServer() == null) {
+            LOG.warn("Ticket is already destroyed, remove it.");
+            iter.remove();
+          } else if (!ticket.getServer().getName().startsWith("krbtgt")) {
             LOG.warn(
                 "The first kerberos ticket is not TGT"
                     + "(the server principal is {}), remove and destroy it.",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d31c9d8c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestFixKerberosTicketOrder.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestFixKerberosTicketOrder.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestFixKerberosTicketOrder.java
index 4b75a36..cbea393 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestFixKerberosTicketOrder.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestFixKerberosTicketOrder.java
@@ -155,4 +155,81 @@ public class TestFixKerberosTicketOrder extends KerberosSecurityTestcase {
             .filter(t -> t.getServer().getName().startsWith(server2Protocol))
             .findAny().isPresent());
   }
+
+  @Test
+  public void testWithDestroyedTGT() throws Exception {
+    UserGroupInformation ugi =
+        UserGroupInformation.loginUserFromKeytabAndReturnUGI(clientPrincipal,
+            keytabFile.getCanonicalPath());
+    ugi.doAs(new PrivilegedExceptionAction<Void>() {
+
+      @Override
+      public Void run() throws Exception {
+        SaslClient client = Sasl.createSaslClient(
+            new String[] {AuthMethod.KERBEROS.getMechanismName()},
+            clientPrincipal, server1Protocol, host, props, null);
+        client.evaluateChallenge(new byte[0]);
+        client.dispose();
+        return null;
+      }
+    });
+
+    Subject subject = ugi.getSubject();
+
+    // mark the ticket as destroyed
+    for (KerberosTicket ticket : subject
+        .getPrivateCredentials(KerberosTicket.class)) {
+      if (ticket.getServer().getName().startsWith("krbtgt")) {
+        ticket.destroy();
+        break;
+      }
+    }
+
+    ugi.fixKerberosTicketOrder();
+
+    // verify that after fixing, the tgt ticket should be removed
+    assertFalse("The first ticket is not tgt",
+        subject.getPrivateCredentials().stream()
+            .filter(c -> c instanceof KerberosTicket)
+            .map(c -> ((KerberosTicket) c).getServer().getName()).findFirst()
+            .isPresent());
+
+
+    // should fail as we send a service ticket instead of tgt to KDC.
+    intercept(SaslException.class,
+        () -> ugi.doAs(new PrivilegedExceptionAction<Void>() {
+
+          @Override
+          public Void run() throws Exception {
+            SaslClient client = Sasl.createSaslClient(
+                new String[] {AuthMethod.KERBEROS.getMechanismName()},
+                clientPrincipal, server2Protocol, host, props, null);
+            client.evaluateChallenge(new byte[0]);
+            client.dispose();
+            return null;
+          }
+        }));
+
+    // relogin to get a new ticket
+    ugi.reloginFromKeytab();
+
+    // make sure we can get new service ticket after the relogin.
+    ugi.doAs(new PrivilegedExceptionAction<Void>() {
+
+      @Override
+      public Void run() throws Exception {
+        SaslClient client = Sasl.createSaslClient(
+            new String[] {AuthMethod.KERBEROS.getMechanismName()},
+            clientPrincipal, server2Protocol, host, props, null);
+        client.evaluateChallenge(new byte[0]);
+        client.dispose();
+        return null;
+      }
+    });
+
+    assertTrue("No service ticket for " + server2Protocol + " found",
+        subject.getPrivateCredentials(KerberosTicket.class).stream()
+            .filter(t -> t.getServer().getName().startsWith(server2Protocol))
+            .findAny().isPresent());
+  }
 }
\ No newline at end of file




[26/49] hadoop git commit: HDFS-12938. TestErasureCodigCLI testAll failing consistently. (Contributed by Ajay Kumar)

Posted by as...@apache.org.
HDFS-12938. TestErasureCodigCLI testAll failing consistently. (Contributed by Ajay Kumar)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b318bed0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b318bed0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b318bed0

Branch: refs/heads/YARN-6592
Commit: b318bed01affa150d70661f263efff9a5c9422f6
Parents: c8ff0cc
Author: Lei Xu <le...@apache.org>
Authored: Thu Dec 21 10:28:24 2017 -0800
Committer: Lei Xu <le...@apache.org>
Committed: Thu Dec 21 10:28:24 2017 -0800

----------------------------------------------------------------------
 .../hadoop-hdfs/src/test/resources/testErasureCodingConf.xml     | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b318bed0/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index bd451eb..fc0c060 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -311,7 +311,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Warning: setting erasure coding policy on an non-empty directory will not automatically convert existing data to RS-6-3-1024</expected-output>
+          <expected-output>Warning: setting erasure coding policy on a non-empty directory will not automatically convert existing files to RS-6-3-1024</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -353,7 +353,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Warning: unsetting erasure coding policy on an non-empty directory will not automatically convert existing data to replicated data</expected-output>
+          <expected-output>Warning: unsetting erasure coding policy on a non-empty directory will not automatically convert existing files to replicated data</expected-output>
         </comparator>
       </comparators>
     </test>




[42/49] hadoop git commit: YARN-6595. [API] Add Placement Constraints at the application level. (Arun Suresh via kkaranasos)

Posted by as...@apache.org.
YARN-6595. [API] Add Placement Constraints at the application level. (Arun Suresh via kkaranasos)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44f41ae9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44f41ae9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44f41ae9

Branch: refs/heads/YARN-6592
Commit: 44f41ae9b3b2e749a01f90eb6d5f2667c508fb09
Parents: d41eec8
Author: Konstantinos Karanasos <kk...@apache.org>
Authored: Mon Nov 13 15:25:24 2017 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Fri Dec 29 17:29:15 2017 -0800

----------------------------------------------------------------------
 .../RegisterApplicationMasterRequest.java       |  42 ++++-
 .../yarn/api/resource/PlacementConstraint.java  | 156 +++++++++++++++++++
 .../src/main/proto/yarn_protos.proto            |   6 +
 .../src/main/proto/yarn_service_protos.proto    |   1 +
 .../RegisterApplicationMasterRequestPBImpl.java | 106 ++++++++++++-
 .../hadoop/yarn/api/BasePBImplRecordsTest.java  |  11 ++
 6 files changed, 313 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/44f41ae9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java
index 395e190..f2d537a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java
@@ -18,11 +18,16 @@
 
 package org.apache.hadoop.yarn.api.protocolrecords;
 
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
 import org.apache.hadoop.yarn.util.Records;
-
 /**
  * The request sent by the {@code ApplicationMaster} to {@code ResourceManager}
  * on registration.
@@ -132,4 +137,39 @@ public abstract class RegisterApplicationMasterRequest {
   @Public
   @Stable
   public abstract void setTrackingUrl(String trackingUrl);
+
+  /**
+   * Return all Placement Constraints specified at the Application level. The
+   * mapping is from a set of allocation tags to a
+   * <code>PlacementConstraint</code> associated with the tags, i.e., each
+   * {@link org.apache.hadoop.yarn.api.records.SchedulingRequest} that has those
+   * tags will be placed taking into account the corresponding constraint.
+   *
+   * @return A map of Placement Constraints.
+   */
+  @Public
+  @Unstable
+  public Map<Set<String>, PlacementConstraint> getPlacementConstraints() {
+    return new HashMap<>();
+  }
+
+  /**
+   * Set Placement Constraints applicable to the
+   * {@link org.apache.hadoop.yarn.api.records.SchedulingRequest}s
+   * of this application.
+   * The mapping is from a set of allocation tags to a
+   * <code>PlacementConstraint</code> associated with the tags.
+   * For example:
+   *  Map &lt;
+   *   &lt;hb_regionserver&gt; -&gt; node_anti_affinity,
+   *   &lt;hb_regionserver, hb_master&gt; -&gt; rack_affinity,
+   *   ...
+   *  &gt;
+   * @param placementConstraints Placement Constraint Mapping.
+   */
+  @Public
+  @Unstable
+  public void setPlacementConstraints(
+      Map<Set<String>, PlacementConstraint> placementConstraints) {
+  }
 }
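
A hedged usage sketch, reusing the PlacementConstraints helpers exercised in
BasePBImplRecordsTest below; host, rpcPort, trackingUrl and the tag names are
illustrative:

    import java.util.Collections;
    import java.util.Map;
    import java.util.Set;
    import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;

    // Sketch: place every SchedulingRequest tagged "hb_regionserver" on a
    // node that already holds an allocation tagged "hb_master".
    PlacementConstraint affinity =
        PlacementConstraints.build(targetIn(NODE, allocationTag("hb_master")));
    Map<Set<String>, PlacementConstraint> constraints =
        Collections.singletonMap(
            Collections.singleton("hb_regionserver"), affinity);

    RegisterApplicationMasterRequest req =
        RegisterApplicationMasterRequest.newInstance(host, rpcPort, trackingUrl);
    req.setPlacementConstraints(constraints);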

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44f41ae9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
index f0e3982..b6e851a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
@@ -54,6 +54,26 @@ public class PlacementConstraint {
     return constraintExpr;
   }
 
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (!(o instanceof PlacementConstraint)) {
+      return false;
+    }
+
+    PlacementConstraint that = (PlacementConstraint) o;
+
+    return getConstraintExpr() != null ? getConstraintExpr().equals(that
+        .getConstraintExpr()) : that.getConstraintExpr() == null;
+  }
+
+  @Override
+  public int hashCode() {
+    return getConstraintExpr() != null ? getConstraintExpr().hashCode() : 0;
+  }
+
   /**
    * Interface used to enable the elements of the constraint tree to be visited.
    */
@@ -174,6 +194,38 @@ public class PlacementConstraint {
     }
 
     @Override
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (!(o instanceof SingleConstraint)) {
+        return false;
+      }
+
+      SingleConstraint that = (SingleConstraint) o;
+
+      if (getMinCardinality() != that.getMinCardinality()) {
+        return false;
+      }
+      if (getMaxCardinality() != that.getMaxCardinality()) {
+        return false;
+      }
+      if (!getScope().equals(that.getScope())) {
+        return false;
+      }
+      return getTargetExpressions().equals(that.getTargetExpressions());
+    }
+
+    @Override
+    public int hashCode() {
+      int result = getScope().hashCode();
+      result = 31 * result + getMinCardinality();
+      result = 31 * result + getMaxCardinality();
+      result = 31 * result + getTargetExpressions().hashCode();
+      return result;
+    }
+
+    @Override
     public <T> T accept(Visitor<T> visitor) {
       return visitor.visit(this);
     }
@@ -332,6 +384,34 @@ public class PlacementConstraint {
     }
 
     @Override
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (!(o instanceof TargetConstraint)) {
+        return false;
+      }
+
+      TargetConstraint that = (TargetConstraint) o;
+
+      if (getOp() != that.getOp()) {
+        return false;
+      }
+      if (!getScope().equals(that.getScope())) {
+        return false;
+      }
+      return getTargetExpressions().equals(that.getTargetExpressions());
+    }
+
+    @Override
+    public int hashCode() {
+      int result = getOp().hashCode();
+      result = 31 * result + getScope().hashCode();
+      result = 31 * result + getTargetExpressions().hashCode();
+      return result;
+    }
+
+    @Override
     public <T> T accept(Visitor<T> visitor) {
       return visitor.visit(this);
     }
@@ -388,6 +468,34 @@ public class PlacementConstraint {
     public <T> T accept(Visitor<T> visitor) {
       return visitor.visit(this);
     }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (o == null || getClass() != o.getClass()) {
+        return false;
+      }
+
+      CardinalityConstraint that = (CardinalityConstraint) o;
+
+      if (minCardinality != that.minCardinality) {
+        return false;
+      }
+      if (maxCardinality != that.maxCardinality) {
+        return false;
+      }
+      return scope != null ? scope.equals(that.scope) : that.scope == null;
+    }
+
+    @Override
+    public int hashCode() {
+      int result = scope != null ? scope.hashCode() : 0;
+      result = 31 * result + minCardinality;
+      result = 31 * result + maxCardinality;
+      return result;
+    }
   }
 
   /**
@@ -406,6 +514,25 @@ public class PlacementConstraint {
      * @return the children of the composite constraint
      */
     public abstract List<R> getChildren();
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (o == null || getClass() != o.getClass()) {
+        return false;
+      }
+
+      return getChildren() != null ? getChildren().equals(
+          ((CompositeConstraint)o).getChildren()) :
+          ((CompositeConstraint)o).getChildren() == null;
+    }
+
+    @Override
+    public int hashCode() {
+      return getChildren() != null ? getChildren().hashCode() : 0;
+    }
   }
 
   /**
@@ -563,5 +690,34 @@ public class PlacementConstraint {
     public <T> T accept(Visitor<T> visitor) {
       return visitor.visit(this);
     }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (o == null || getClass() != o.getClass()) {
+        return false;
+      }
+
+      TimedPlacementConstraint that = (TimedPlacementConstraint) o;
+
+      if (schedulingDelay != that.schedulingDelay) {
+        return false;
+      }
+      if (constraint != null ? !constraint.equals(that.constraint) :
+          that.constraint != null) {
+        return false;
+      }
+      return delayUnit == that.delayUnit;
+    }
+
+    @Override
+    public int hashCode() {
+      int result = constraint != null ? constraint.hashCode() : 0;
+      result = 31 * result + (int) (schedulingDelay ^ (schedulingDelay >>> 32));
+      result = 31 * result + (delayUnit != null ? delayUnit.hashCode() : 0);
+      return result;
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44f41ae9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 2dbdefb..ac43d1b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -648,6 +648,12 @@ message CompositePlacementConstraintProto {
   repeated TimedPlacementConstraintProto timedChildConstraints = 3;
 }
 
+// This associates a set of allocation tags to a Placement Constraint.
+message PlacementConstraintMapEntryProto {
+  repeated string allocation_tags = 1;
+  optional PlacementConstraintProto placement_constraint = 2;
+}
+
 ////////////////////////////////////////////////////////////////////////
 ////// From reservation_protocol /////////////////////////////////////
 ////////////////////////////////////////////////////////////////////////

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44f41ae9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
index 4e97c74..68e585d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
@@ -38,6 +38,7 @@ message RegisterApplicationMasterRequestProto {
   optional string host = 1;
   optional int32 rpc_port = 2;
   optional string tracking_url = 3;
+  repeated PlacementConstraintMapEntryProto placement_constraints = 4;
 }
 
 message RegisterApplicationMasterResponseProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44f41ae9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterRequestPBImpl.java
index 037dfd9..64bee85 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterRequestPBImpl.java
@@ -21,24 +21,41 @@ package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.pb.PlacementConstraintFromProtoConverter;
+import org.apache.hadoop.yarn.api.pb.PlacementConstraintToProtoConverter;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.proto.YarnProtos;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterRequestProtoOrBuilder;
 
 import com.google.protobuf.TextFormat;
 
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 @Private
 @Unstable
-public class RegisterApplicationMasterRequestPBImpl extends RegisterApplicationMasterRequest {
-  RegisterApplicationMasterRequestProto proto = RegisterApplicationMasterRequestProto.getDefaultInstance();
-  RegisterApplicationMasterRequestProto.Builder builder = null;
+public class RegisterApplicationMasterRequestPBImpl
+    extends RegisterApplicationMasterRequest {
+  private RegisterApplicationMasterRequestProto proto =
+      RegisterApplicationMasterRequestProto.getDefaultInstance();
+  private RegisterApplicationMasterRequestProto.Builder builder = null;
+  private Map<Set<String>, PlacementConstraint> placementConstraints = null;
   boolean viaProto = false;
   
   public RegisterApplicationMasterRequestPBImpl() {
     builder = RegisterApplicationMasterRequestProto.newBuilder();
   }
 
-  public RegisterApplicationMasterRequestPBImpl(RegisterApplicationMasterRequestProto proto) {
+  public RegisterApplicationMasterRequestPBImpl(
+      RegisterApplicationMasterRequestProto proto) {
     this.proto = proto;
     viaProto = true;
   }
@@ -71,6 +88,30 @@ public class RegisterApplicationMasterRequestPBImpl extends RegisterApplicationM
   }
 
   private void mergeLocalToBuilder() {
+    if (this.placementConstraints != null) {
+      addPlacementConstraintMap();
+    }
+  }
+
+  private void addPlacementConstraintMap() {
+    maybeInitBuilder();
+    builder.clearPlacementConstraints();
+    if (this.placementConstraints == null) {
+      return;
+    }
+    List<YarnProtos.PlacementConstraintMapEntryProto> protoList =
+        new ArrayList<>();
+    for (Map.Entry<Set<String>, PlacementConstraint> entry :
+        this.placementConstraints.entrySet()) {
+      protoList.add(
+          YarnProtos.PlacementConstraintMapEntryProto.newBuilder()
+              .addAllAllocationTags(entry.getKey())
+              .setPlacementConstraint(
+                  new PlacementConstraintToProtoConverter(
+                      entry.getValue()).convert())
+              .build());
+    }
+    builder.addAllPlacementConstraints(protoList);
   }
 
   private void mergeLocalToProto() {
@@ -90,7 +131,8 @@ public class RegisterApplicationMasterRequestPBImpl extends RegisterApplicationM
 
   @Override
   public String getHost() {
-    RegisterApplicationMasterRequestProtoOrBuilder p = viaProto ? proto : builder;
+    RegisterApplicationMasterRequestProtoOrBuilder p =
+        viaProto ? proto : builder;
     return p.getHost();
   }
 
@@ -106,7 +148,8 @@ public class RegisterApplicationMasterRequestPBImpl extends RegisterApplicationM
 
   @Override
   public int getRpcPort() {
-    RegisterApplicationMasterRequestProtoOrBuilder p = viaProto ? proto : builder;
+    RegisterApplicationMasterRequestProtoOrBuilder p =
+        viaProto ? proto : builder;
     return p.getRpcPort();
   }
 
@@ -118,7 +161,8 @@ public class RegisterApplicationMasterRequestPBImpl extends RegisterApplicationM
 
   @Override
   public String getTrackingUrl() {
-    RegisterApplicationMasterRequestProtoOrBuilder p = viaProto ? proto : builder;
+    RegisterApplicationMasterRequestProtoOrBuilder p =
+        viaProto ? proto : builder;
     return p.getTrackingUrl();
   }
 
@@ -131,4 +175,50 @@ public class RegisterApplicationMasterRequestPBImpl extends RegisterApplicationM
     }
     builder.setTrackingUrl(url);
   }
-}  
+
+  private void initPlacementConstraintMap() {
+    if (this.placementConstraints != null) {
+      return;
+    }
+    RegisterApplicationMasterRequestProtoOrBuilder p =
+        viaProto ? proto : builder;
+    List<YarnProtos.PlacementConstraintMapEntryProto> pcmList =
+        p.getPlacementConstraintsList();
+    this.placementConstraints = new HashMap<>();
+    for (YarnProtos.PlacementConstraintMapEntryProto e : pcmList) {
+      this.placementConstraints.put(
+          new HashSet<>(e.getAllocationTagsList()),
+          new PlacementConstraintFromProtoConverter(
+              e.getPlacementConstraint()).convert());
+    }
+  }
+
+  @Override
+  public Map<Set<String>, PlacementConstraint> getPlacementConstraints() {
+    initPlacementConstraintMap();
+    return this.placementConstraints;
+  }
+
+  @Override
+  public void setPlacementConstraints(
+      Map<Set<String>, PlacementConstraint> constraints) {
+    maybeInitBuilder();
+    if (constraints == null) {
+      builder.clearPlacementConstraints();
+    } else {
+      removeEmptyKeys(constraints);
+    }
+    this.placementConstraints = constraints;
+  }
+
+  private void removeEmptyKeys(
+      Map<Set<String>, PlacementConstraint> constraintMap) {
+    Iterator<Set<String>> iter = constraintMap.keySet().iterator();
+    while (iter.hasNext()) {
+      Set<String> aTags = iter.next();
+      if (aTags.size() == 0) {
+        iter.remove();
+      }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44f41ae9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java
index 8694651..ebd66af 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java
@@ -22,12 +22,19 @@ import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
 import org.junit.Assert;
 
 import java.lang.reflect.*;
 import java.nio.ByteBuffer;
 import java.util.*;
 
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints
+    .PlacementTargets.allocationTag;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
+
 /**
  * Generic helper class to validate protocol records.
  */
@@ -85,6 +92,10 @@ public class BasePBImplRecordsTest {
         ByteBuffer buff = ByteBuffer.allocate(4);
         rand.nextBytes(buff.array());
         return buff;
+      } else if (type.equals(PlacementConstraint.class)) {
+        PlacementConstraint.AbstractConstraint sConstraintExpr =
+            targetIn(NODE, allocationTag("foo"));
+        ret = PlacementConstraints.build(sConstraintExpr);
       }
     } else if (type instanceof ParameterizedType) {
       ParameterizedType pt = (ParameterizedType)type;




[43/49] hadoop git commit: YARN-7612. Add Processor Framework for Rich Placement Constraints. (asuresh)

Posted by as...@apache.org.
YARN-7612. Add Processor Framework for Rich Placement Constraints. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47f3f64e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47f3f64e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47f3f64e

Branch: refs/heads/YARN-6592
Commit: 47f3f64e413cc306a2e0715c25dcba09a93542ff
Parents: 058513d
Author: Arun Suresh <as...@apache.org>
Authored: Fri Dec 22 15:51:20 2017 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Fri Dec 29 17:29:15 2017 -0800

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     |  26 ++
 .../src/main/resources/yarn-default.xml         |  30 ++
 .../ApplicationMasterService.java               |  15 +
 .../rmcontainer/RMContainerImpl.java            |   7 +-
 .../scheduler/capacity/CapacityScheduler.java   |   2 +
 .../constraint/processor/BatchedRequests.java   | 105 +++++
 .../processor/NodeCandidateSelector.java        |  38 ++
 .../processor/PlacementDispatcher.java          | 145 +++++++
 .../processor/PlacementProcessor.java           | 343 ++++++++++++++++
 .../processor/SamplePlacementAlgorithm.java     | 144 +++++++
 .../constraint/processor/package-info.java      |  29 ++
 .../yarn/server/resourcemanager/MockAM.java     |  26 ++
 .../yarn/server/resourcemanager/MockRM.java     |  14 +
 .../constraint/TestPlacementProcessor.java      | 394 +++++++++++++++++++
 14 files changed, 1316 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47f3f64e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 1b6bd0e..03c24d4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -529,6 +529,32 @@ public class YarnConfiguration extends Configuration {
   /** The class to use as the resource scheduler.*/
   public static final String RM_SCHEDULER = 
     RM_PREFIX + "scheduler.class";
+
+  /** Placement Algorithm. */
+  public static final String RM_PLACEMENT_CONSTRAINTS_ALGORITHM_CLASS =
+      RM_PREFIX + "placement-constraints.algorithm.class";
+
+  public static final String RM_PLACEMENT_CONSTRAINTS_ENABLED =
+      RM_PREFIX + "placement-constraints.enabled";
+
+  public static final boolean DEFAULT_RM_PLACEMENT_CONSTRAINTS_ENABLED = true;
+
+  public static final String RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS =
+      RM_PREFIX + "placement-constraints.retry-attempts";
+
+  public static final int DEFAULT_RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS = 3;
+
+  public static final String RM_PLACEMENT_CONSTRAINTS_ALGORITHM_POOL_SIZE =
+      RM_PREFIX + "placement-constraints.algorithm.pool-size";
+
+  public static final int DEFAULT_RM_PLACEMENT_CONSTRAINTS_ALGORITHM_POOL_SIZE =
+      1;
+
+  public static final String RM_PLACEMENT_CONSTRAINTS_SCHEDULER_POOL_SIZE =
+      RM_PREFIX + "placement-constraints.scheduler.pool-size";
+
+  public static final int DEFAULT_RM_PLACEMENT_CONSTRAINTS_SCHEDULER_POOL_SIZE =
+      1;
  
   public static final String DEFAULT_RM_SCHEDULER = 
       "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47f3f64e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index d450eca..0285069 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -131,6 +131,36 @@
   </property>
 
   <property>
+    <description>Enable Constraint Placement.</description>
+    <name>yarn.resourcemanager.placement-constraints.enabled</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>Number of times to retry the placement of rejected SchedulingRequests</description>
+    <name>yarn.resourcemanager.placement-constraints.retry-attempts</name>
+    <value>3</value>
+  </property>
+
+  <property>
+    <description>Constraint Placement Algorithm to be used.</description>
+    <name>yarn.resourcemanager.placement-constraints.algorithm.class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor.SamplePlacementAlgorithm</value>
+  </property>
+
+  <property>
+    <description>Threadpool size for the Algorithm used for placement constraint processing.</description>
+    <name>yarn.resourcemanager.placement-constraints.algorithm.pool-size</name>
+    <value>1</value>
+  </property>
+
+  <property>
+    <description>Threadpool size for the Scheduler invocation phase of placement constraint processing.</description>
+    <name>yarn.resourcemanager.placement-constraints.scheduler.pool-size</name>
+    <value>1</value>
+  </property>
+
+  <property>
     <description>
       Comma separated class names of ApplicationMasterServiceProcessor
       implementations. The processors will be applied in the order
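
Note that the Java-side default above (DEFAULT_RM_PLACEMENT_CONSTRAINTS_ENABLED
= true) differs from the false shipped in yarn-default.xml, so a stock
deployment keeps the processor off until the property is flipped. A minimal
sketch of enabling it programmatically, mirroring the entries above (the retry
value 5 is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    // Sketch: enable the placement processor and raise the retry budget.
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ENABLED, true);
    conf.setInt(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS, 5);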

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47f3f64e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
index 90c42be..aa1177d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor.PlacementProcessor;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
@@ -114,11 +115,25 @@ public class ApplicationMasterService extends AbstractService implements
         YarnConfiguration.RM_SCHEDULER_ADDRESS,
         YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS,
         YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT);
+    initializeProcessingChain(conf);
+  }
+
+  private void initializeProcessingChain(Configuration conf) {
     amsProcessingChain.init(rmContext, null);
+    boolean enablePlacementConstraints = conf.getBoolean(
+        YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ENABLED,
+        YarnConfiguration.DEFAULT_RM_PLACEMENT_CONSTRAINTS_ENABLED);
+    if (enablePlacementConstraints) {
+      amsProcessingChain.addProcessor(new PlacementProcessor());
+    }
     List<ApplicationMasterServiceProcessor> processors = getProcessorList(conf);
     if (processors != null) {
       Collections.reverse(processors);
       for (ApplicationMasterServiceProcessor p : processors) {
+        // Ensure only a single instance of PlacementProcessor is included
+        if (enablePlacementConstraints && p instanceof PlacementProcessor) {
+          continue;
+        }
         this.amsProcessingChain.addProcessor(p);
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47f3f64e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index 184cdfc..c873509 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -190,8 +190,7 @@ public class RMContainerImpl implements RMContainer {
   private boolean isExternallyAllocated;
   private SchedulerRequestKey allocatedSchedulerKey;
 
-  // TODO, set it when container allocated by scheduler (From SchedulingRequest)
-  private Set<String> allocationTags = null;
+  private volatile Set<String> allocationTags = null;
 
   public RMContainerImpl(Container container, SchedulerRequestKey schedulerKey,
       ApplicationAttemptId appAttemptId, NodeId nodeId, String user,
@@ -510,6 +509,10 @@ public class RMContainerImpl implements RMContainer {
     return allocationTags;
   }
 
+  public void setAllocationTags(Set<String> tags) {
+    this.allocationTags = tags;
+  }
+
   private static class BaseTransition implements
       SingleArcTransition<RMContainerImpl, RMContainerEvent> {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47f3f64e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index d92ce58..f03d7d1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -2539,6 +2539,8 @@ public class CapacityScheduler extends
           SchedulerRequestKey.extractFrom(container),
           appAttempt.getApplicationAttemptId(), container.getNodeId(),
           appAttempt.getUser(), rmContext, false);
+      ((RMContainerImpl)rmContainer).setAllocationTags(
+          new HashSet<>(schedulingRequest.getAllocationTags()));
 
       allocated = new ContainerAllocationProposal<>(
           getSchedulerContainer(rmContainer, true),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47f3f64e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java
new file mode 100644
index 0000000..fe92d2f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmInput;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * A grouping of Scheduling Requests which are sent to the PlacementAlgorithm
+ * to place as a batch. The placement algorithm tends to produce better
+ * placements when more requests are batched together.
+ */
+class BatchedRequests implements ConstraintPlacementAlgorithmInput {
+
+  // Placement attempt - the number of times the requests in this
+  // batch have been placed but were rejected by the scheduler.
+  private final int placementAttempt;
+
+  private final ApplicationId applicationId;
+  private final Collection<SchedulingRequest> requests;
+  private final Map<String, Set<NodeId>> blacklist = new HashMap<>();
+
+  BatchedRequests(ApplicationId applicationId,
+      Collection<SchedulingRequest> requests, int attempt) {
+    this.applicationId = applicationId;
+    this.requests = requests;
+    this.placementAttempt = attempt;
+  }
+
+  /**
+   * Get Application Id.
+   * @return Application Id.
+   */
+  ApplicationId getApplicationId() {
+    return applicationId;
+  }
+
+  /**
+   * Get Collection of SchedulingRequests in this batch.
+   * @return Collection of Scheduling Requests.
+   */
+  @Override
+  public Collection<SchedulingRequest> getSchedulingRequests() {
+    return requests;
+  }
+
+  /**
+   * Add a Scheduling request to the batch.
+   * @param req Scheduling Request.
+   */
+  void addToBatch(SchedulingRequest req) {
+    requests.add(req);
+  }
+
+  void addToBlacklist(Set<String> tags, SchedulerNode node) {
+    if (tags != null && !tags.isEmpty()) {
+      // We currently assume a single allocation tag
+      // per scheduling request.
+      blacklist.computeIfAbsent(tags.iterator().next(),
+          k -> new HashSet<>()).add(node.getNodeID());
+    }
+  }
+
+  /**
+   * Get placement attempt.
+   * @return the placement attempt number.
+   */
+  int getPlacementAttempt() {
+    return placementAttempt;
+  }
+
+  /**
+   * Get any blacklisted nodes associated with tag.
+   * @param tag Tag.
+   * @return Set of blacklisted Nodes.
+   */
+  Set<NodeId> getBlacklist(String tag) {
+    return blacklist.getOrDefault(tag, Collections.emptySet());
+  }
+}
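
A minimal usage sketch for BatchedRequests; the request and node variables
below are hypothetical placeholders, not part of this patch:

    // Sketch: batch requests for one application (placement attempt 1),
    // then blacklist a node for tag "foo" after a failed attempt.
    ApplicationId appId = ApplicationId.newInstance(0L, 1);
    BatchedRequests batch = new BatchedRequests(appId, new ArrayList<>(), 1);
    batch.addToBatch(schedulingRequest);            // hypothetical request
    batch.addToBlacklist(Collections.singleton("foo"), node); // hypothetical node
    Set<NodeId> toAvoid = batch.getBlacklist("foo"); // nodes to skip for "foo"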

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47f3f64e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/NodeCandidateSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/NodeCandidateSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/NodeCandidateSelector.java
new file mode 100644
index 0000000..4299050
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/NodeCandidateSelector.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor;
+
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeFilter;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+
+import java.util.List;
+
+/**
+ * A read-only view over the ClusterNodeTracker which exposes a single
+ * method to return a filtered list of nodes.
+ */
+public interface NodeCandidateSelector {
+
+  /**
+   * Select a list of nodes given a filter.
+   * @param filter a NodeFilter.
+   * @return List of SchedulerNodes.
+   */
+  List<SchedulerNode> selectNodes(NodeFilter filter);
+
+}
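
Since the interface has a single method, a lambda suffices; this mirrors the
wiring done in SamplePlacementAlgorithm#init later in this patch, with
'scheduler' assumed to be the AbstractYarnScheduler from the RMContext:

    // Sketch: a selector backed by the scheduler's node tracker.
    NodeCandidateSelector selector = filter -> scheduler.getNodes(filter);
    List<SchedulerNode> allNodes = selector.selectNodes(null); // null = all nodes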

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47f3f64e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementDispatcher.java
new file mode 100644
index 0000000..6a00ba8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementDispatcher.java
@@ -0,0 +1,145 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithm;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmOutput;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmOutputCollector;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.PlacedSchedulingRequest;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+/**
+ * This class initializes the Constraint Placement Algorithm. It dispatches
+ * input to the algorithm and collects output from it.
+ */
+class PlacementDispatcher implements
+    ConstraintPlacementAlgorithmOutputCollector {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(PlacementDispatcher.class);
+  private ConstraintPlacementAlgorithm algorithm;
+  private ExecutorService algorithmThreadPool;
+
+  private Map<ApplicationId, List<PlacedSchedulingRequest>>
+      placedRequests = new ConcurrentHashMap<>();
+  private Map<ApplicationId, List<SchedulingRequest>>
+      rejectedRequests = new ConcurrentHashMap<>();
+
+  public void init(RMContext rmContext,
+      ConstraintPlacementAlgorithm placementAlgorithm, int poolSize) {
+    LOG.info("Initializing Constraint Placement Planner:");
+    this.algorithm = placementAlgorithm;
+    this.algorithm.init(rmContext);
+    this.algorithmThreadPool = Executors.newFixedThreadPool(poolSize);
+  }
+
+  void dispatch(final BatchedRequests batchedRequests) {
+    final ConstraintPlacementAlgorithmOutputCollector collector = this;
+    Runnable placingTask = () -> {
+      LOG.debug("Got [{}] requests to place from application [{}].. " +
+              "Attempt count [{}]",
+          batchedRequests.getSchedulingRequests().size(),
+          batchedRequests.getApplicationId(),
+          batchedRequests.getPlacementAttempt());
+      algorithm.place(batchedRequests, collector);
+    };
+    this.algorithmThreadPool.submit(placingTask);
+  }
+
+  public List<PlacedSchedulingRequest> pullPlacedRequests(
+      ApplicationId applicationId) {
+    List<PlacedSchedulingRequest> placedReqs =
+        this.placedRequests.get(applicationId);
+    if (placedReqs != null && !placedReqs.isEmpty()) {
+      List<PlacedSchedulingRequest> retList = new ArrayList<>();
+      synchronized (placedReqs) {
+        if (placedReqs.size() > 0) {
+          retList.addAll(placedReqs);
+          placedReqs.clear();
+        }
+      }
+      return retList;
+    }
+    return Collections.emptyList();
+  }
+
+  public List<SchedulingRequest> pullRejectedRequests(
+      ApplicationId applicationId) {
+    List<SchedulingRequest> rejectedReqs =
+        this.rejectedRequests.get(applicationId);
+    if (rejectedReqs != null && !rejectedReqs.isEmpty()) {
+      List<SchedulingRequest> retList = new ArrayList<>();
+      synchronized (rejectedReqs) {
+        if (rejectedReqs.size() > 0) {
+          retList.addAll(rejectedReqs);
+          rejectedReqs.clear();
+        }
+      }
+      return retList;
+    }
+    return Collections.emptyList();
+  }
+
+  void clearApplicationState(ApplicationId applicationId) {
+    placedRequests.remove(applicationId);
+    rejectedRequests.remove(applicationId);
+  }
+
+  @Override
+  public void collect(ConstraintPlacementAlgorithmOutput placement) {
+    if (!placement.getPlacedRequests().isEmpty()) {
+      List<PlacedSchedulingRequest> processed =
+          placedRequests.computeIfAbsent(
+              placement.getApplicationId(), k -> new ArrayList<>());
+      synchronized (processed) {
+        LOG.debug(
+            "Planning Algorithm has placed for application [{}]" +
+                " the following [{}]", placement.getApplicationId(),
+            placement.getPlacedRequests());
+        for (PlacedSchedulingRequest esr :
+            placement.getPlacedRequests()) {
+          processed.add(esr);
+        }
+      }
+    }
+    if (!placement.getRejectedRequests().isEmpty()) {
+      List<SchedulingRequest> rejected =
+          rejectedRequests.computeIfAbsent(
+              placement.getApplicationId(), k -> new ArrayList<>());
+      LOG.warn(
+          "Planning Algorithm has rejected for application [{}]" +
+              " the following [{}]", placement.getApplicationId(),
+          placement.getRejectedRequests());
+      synchronized (rejected) {
+        rejected.addAll(placement.getRejectedRequests());
+      }
+    }
+  }
+}
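
A sketch of the dispatch/pull cycle that the PlacementProcessor below drives;
rmContext, algorithm, batch and appId are assumed from the surrounding code:

    // Sketch: submit a batch asynchronously, then drain results per app.
    PlacementDispatcher dispatcher = new PlacementDispatcher();
    dispatcher.init(rmContext, algorithm, 1);      // pool of one worker thread
    dispatcher.dispatch(batch);                    // runs algorithm.place(...)
    // Typically drained on the next AM heartbeat:
    List<PlacedSchedulingRequest> placed = dispatcher.pullPlacedRequests(appId);
    List<SchedulingRequest> rejected = dispatcher.pullRejectedRequests(appId);
    dispatcher.clearApplicationState(appId);       // when the application ends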

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47f3f64e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java
new file mode 100644
index 0000000..d613d4e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java
@@ -0,0 +1,343 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor;
+
+import org.apache.hadoop.yarn.ams.ApplicationMasterServiceContext;
+import org.apache.hadoop.yarn.ams.ApplicationMasterServiceProcessor;
+import org.apache.hadoop.yarn.ams.ApplicationMasterServiceUtils;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
+import org.apache.hadoop.yarn.api.records.RejectionReason;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithm;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.PlacedSchedulingRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.SchedulingResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.stream.Collectors;
+
+/**
+ * An ApplicationMasterService Processor that performs Constrained placement of
+ * Scheduling Requests. It does the following:
+ * 1. Performs all required initialization.
+ * 2. Intercepts placement constraints from the register call and adds them
+ *    to the placement constraint manager.
+ * 3. Dispatches Scheduling Requests to the Planner.
+ */
+public class PlacementProcessor implements ApplicationMasterServiceProcessor {
+
+  /**
+   * Wrapper over the SchedulingResponse that wires in the placement attempt
+   * and last attempted Node.
+   */
+  static final class Response extends SchedulingResponse {
+
+    private final int placementAttempt;
+    private final SchedulerNode attemptedNode;
+
+    private Response(boolean isSuccess, ApplicationId applicationId,
+        SchedulingRequest schedulingRequest, int placementAttempt,
+        SchedulerNode attemptedNode) {
+      super(isSuccess, applicationId, schedulingRequest);
+      this.placementAttempt = placementAttempt;
+      this.attemptedNode = attemptedNode;
+    }
+  }
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(PlacementProcessor.class);
+  private PlacementConstraintManager constraintManager;
+  private ApplicationMasterServiceProcessor nextAMSProcessor;
+
+  private AbstractYarnScheduler scheduler;
+  private ExecutorService schedulingThreadPool;
+  private int retryAttempts;
+  private Map<ApplicationId, List<BatchedRequests>> requestsToRetry =
+      new ConcurrentHashMap<>();
+  private Map<ApplicationId, List<SchedulingRequest>> requestsToReject =
+      new ConcurrentHashMap<>();
+
+  private PlacementDispatcher placementDispatcher;
+
+
+  @Override
+  public void init(ApplicationMasterServiceContext amsContext,
+      ApplicationMasterServiceProcessor nextProcessor) {
+    LOG.info("Initializing Constraint Placement Processor:");
+    this.nextAMSProcessor = nextProcessor;
+    this.constraintManager =
+        ((RMContextImpl)amsContext).getPlacementConstraintManager();
+
+    this.scheduler =
+        (AbstractYarnScheduler)((RMContextImpl)amsContext).getScheduler();
+    // Only the first class is considered, even if a comma-separated
+    // list is provided. (getInstances is used here for simplicity,
+    // since it handles instantiation and error cases correctly.)
+    List<ConstraintPlacementAlgorithm> instances =
+        ((RMContextImpl) amsContext).getYarnConfiguration().getInstances(
+            YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ALGORITHM_CLASS,
+            ConstraintPlacementAlgorithm.class);
+    ConstraintPlacementAlgorithm algorithm = null;
+    if (instances != null && !instances.isEmpty()) {
+      algorithm = instances.get(0);
+    } else {
+      algorithm = new SamplePlacementAlgorithm();
+    }
+    LOG.info("Planning Algorithm [{}]", algorithm.getClass().getName());
+
+    int algoPSize = ((RMContextImpl) amsContext).getYarnConfiguration().getInt(
+        YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ALGORITHM_POOL_SIZE,
+        YarnConfiguration.DEFAULT_RM_PLACEMENT_CONSTRAINTS_ALGORITHM_POOL_SIZE);
+    this.placementDispatcher = new PlacementDispatcher();
+    this.placementDispatcher.init(
+        ((RMContextImpl)amsContext), algorithm, algoPSize);
+    LOG.info("Planning Algorithm pool size [{}]", algoPSize);
+
+    int schedPSize = ((RMContextImpl) amsContext).getYarnConfiguration().getInt(
+        YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_SCHEDULER_POOL_SIZE,
+        YarnConfiguration.DEFAULT_RM_PLACEMENT_CONSTRAINTS_SCHEDULER_POOL_SIZE);
+    this.schedulingThreadPool = Executors.newFixedThreadPool(schedPSize);
+    LOG.info("Scheduler pool size [{}]", schedPSize);
+
+    // Number of times a request that is not satisfied by the scheduler
+    // can be retried.
+    this.retryAttempts =
+        ((RMContextImpl) amsContext).getYarnConfiguration().getInt(
+            YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS,
+            YarnConfiguration.DEFAULT_RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS);
+    LOG.info("Num retry attempts [{}]", this.retryAttempts);
+  }
+
+  @Override
+  public void registerApplicationMaster(ApplicationAttemptId appAttemptId,
+      RegisterApplicationMasterRequest request,
+      RegisterApplicationMasterResponse response)
+      throws IOException, YarnException {
+    Map<Set<String>, PlacementConstraint> appPlacementConstraints =
+        request.getPlacementConstraints();
+    processPlacementConstraints(
+        appAttemptId.getApplicationId(), appPlacementConstraints);
+    nextAMSProcessor.registerApplicationMaster(appAttemptId, request, response);
+  }
+
+  private void processPlacementConstraints(ApplicationId applicationId,
+      Map<Set<String>, PlacementConstraint> appPlacementConstraints) {
+    if (appPlacementConstraints != null && !appPlacementConstraints.isEmpty()) {
+      LOG.info("Constraints added for application [{}] against tags [{}]",
+          applicationId, appPlacementConstraints);
+      constraintManager.registerApplication(
+          applicationId, appPlacementConstraints);
+    }
+  }
+
+  @Override
+  public void allocate(ApplicationAttemptId appAttemptId,
+      AllocateRequest request, AllocateResponse response) throws YarnException {
+    List<SchedulingRequest> schedulingRequests =
+        request.getSchedulingRequests();
+    dispatchRequestsForPlacement(appAttemptId, schedulingRequests);
+    reDispatchRetryableRequests(appAttemptId);
+    schedulePlacedRequests(appAttemptId);
+
+    nextAMSProcessor.allocate(appAttemptId, request, response);
+
+    handleRejectedRequests(appAttemptId, response);
+  }
+
+  private void dispatchRequestsForPlacement(ApplicationAttemptId appAttemptId,
+      List<SchedulingRequest> schedulingRequests) {
+    if (schedulingRequests != null && !schedulingRequests.isEmpty()) {
+      this.placementDispatcher.dispatch(
+          new BatchedRequests(appAttemptId.getApplicationId(),
+              schedulingRequests, 1));
+    }
+  }
+
+  private void reDispatchRetryableRequests(ApplicationAttemptId appAttId) {
+    List<BatchedRequests> reqsToRetry =
+        this.requestsToRetry.get(appAttId.getApplicationId());
+    if (reqsToRetry != null && !reqsToRetry.isEmpty()) {
+      synchronized (reqsToRetry) {
+        for (BatchedRequests bReq: reqsToRetry) {
+          this.placementDispatcher.dispatch(bReq);
+        }
+        reqsToRetry.clear();
+      }
+    }
+  }
+
+  private void schedulePlacedRequests(ApplicationAttemptId appAttemptId) {
+    ApplicationId applicationId = appAttemptId.getApplicationId();
+    List<PlacedSchedulingRequest> placedSchedulingRequests =
+        this.placementDispatcher.pullPlacedRequests(applicationId);
+    for (PlacedSchedulingRequest placedReq : placedSchedulingRequests) {
+      SchedulingRequest sReq = placedReq.getSchedulingRequest();
+      for (SchedulerNode node : placedReq.getNodes()) {
+        final SchedulingRequest sReqClone =
+            SchedulingRequest.newInstance(sReq.getAllocationRequestId(),
+                sReq.getPriority(), sReq.getExecutionType(),
+                sReq.getAllocationTags(),
+                ResourceSizing.newInstance(
+                    sReq.getResourceSizing().getResources()),
+                sReq.getPlacementConstraint());
+        SchedulerApplicationAttempt applicationAttempt =
+            this.scheduler.getApplicationAttempt(appAttemptId);
+        Runnable task = () -> {
+          boolean success =
+              scheduler.attemptAllocationOnNode(
+                  applicationAttempt, sReqClone, node);
+          if (!success) {
+            LOG.warn("Unsuccessful allocation attempt [{}] for [{}]",
+                placedReq.getPlacementAttempt(), sReqClone);
+          }
+          handleSchedulingResponse(
+              new Response(success, applicationId, sReqClone,
+              placedReq.getPlacementAttempt(), node));
+        };
+        this.schedulingThreadPool.submit(task);
+      }
+    }
+  }
+
+  private void handleRejectedRequests(ApplicationAttemptId appAttemptId,
+      AllocateResponse response) {
+    List<SchedulingRequest> rejectedRequests =
+        this.placementDispatcher.pullRejectedRequests(
+            appAttemptId.getApplicationId());
+    if (rejectedRequests != null && !rejectedRequests.isEmpty()) {
+      LOG.warn("Following requests of [{}] were rejected by" +
+              " the PlacementAlgorithmOutput Algorithm: {}",
+          appAttemptId.getApplicationId(), rejectedRequests);
+      ApplicationMasterServiceUtils.addToRejectedSchedulingRequests(response,
+          rejectedRequests.stream()
+              .map(sr -> RejectedSchedulingRequest.newInstance(
+                  RejectionReason.COULD_NOT_PLACE_ON_NODE, sr))
+              .collect(Collectors.toList()));
+    }
+    rejectedRequests =
+        this.requestsToReject.get(appAttemptId.getApplicationId());
+    if (rejectedRequests != null && !rejectedRequests.isEmpty()) {
+      synchronized (rejectedRequests) {
+        LOG.warn("Following requests of [{}] exhausted all retry attempts " +
+                "trying to schedule on placed node: {}",
+            appAttemptId.getApplicationId(), rejectedRequests);
+        ApplicationMasterServiceUtils.addToRejectedSchedulingRequests(response,
+            rejectedRequests.stream()
+                .map(sr -> RejectedSchedulingRequest.newInstance(
+                    RejectionReason.COULD_NOT_SCHEDULE_ON_NODE, sr))
+                .collect(Collectors.toList()));
+        rejectedRequests.clear();
+      }
+    }
+  }
+
+  @Override
+  public void finishApplicationMaster(ApplicationAttemptId appAttemptId,
+      FinishApplicationMasterRequest request,
+      FinishApplicationMasterResponse response) {
+    constraintManager.unregisterApplication(appAttemptId.getApplicationId());
+    placementDispatcher.clearApplicationState(appAttemptId.getApplicationId());
+    requestsToReject.remove(appAttemptId.getApplicationId());
+    requestsToRetry.remove(appAttemptId.getApplicationId());
+    nextAMSProcessor.finishApplicationMaster(appAttemptId, request, response);
+  }
+
+  private void handleSchedulingResponse(SchedulingResponse schedulerResponse) {
+    int placementAttempt = ((Response)schedulerResponse).placementAttempt;
+    // Retry this placement as it was not successful and we are still
+    // under the max retry limit. The request is batched with other
+    // unsuccessful requests from the same application.
+    if (!schedulerResponse.isSuccess() && placementAttempt < retryAttempts) {
+      List<BatchedRequests> reqsToRetry =
+          requestsToRetry.computeIfAbsent(
+              schedulerResponse.getApplicationId(),
+              k -> new ArrayList<>());
+      synchronized (reqsToRetry) {
+        addToRetryList(schedulerResponse, placementAttempt, reqsToRetry);
+      }
+      LOG.warn("Going to retry request for application [{}] after [{}]" +
+              " attempts: [{}]", schedulerResponse.getApplicationId(),
+          placementAttempt, schedulerResponse.getSchedulingRequest());
+    } else {
+      if (!schedulerResponse.isSuccess()) {
+        LOG.warn("Not retrying request for application [{}] after [{}]" +
+                " attempts: [{}]", schedulerResponse.getApplicationId(),
+            placementAttempt, schedulerResponse.getSchedulingRequest());
+        List<SchedulingRequest> reqsToReject =
+            requestsToReject.computeIfAbsent(
+                schedulerResponse.getApplicationId(),
+                k -> new ArrayList<>());
+        synchronized (reqsToReject) {
+          reqsToReject.add(schedulerResponse.getSchedulingRequest());
+        }
+      }
+    }
+  }
+
+  private void addToRetryList(SchedulingResponse schedulerResponse,
+      int placementAttempt, List<BatchedRequests> reqsToRetry) {
+    boolean isAdded = false;
+    for (BatchedRequests br : reqsToRetry) {
+      if (br.getPlacementAttempt() == placementAttempt + 1) {
+        br.addToBatch(schedulerResponse.getSchedulingRequest());
+        br.addToBlacklist(
+            schedulerResponse.getSchedulingRequest().getAllocationTags(),
+            ((Response) schedulerResponse).attemptedNode);
+        isAdded = true;
+        break;
+      }
+    }
+    if (!isAdded) {
+      BatchedRequests br =
+          new BatchedRequests(schedulerResponse.getApplicationId(),
+              Collections.singleton(
+                  schedulerResponse.getSchedulingRequest()),
+              placementAttempt + 1);
+      reqsToRetry.add(br);
+      br.addToBlacklist(
+          schedulerResponse.getSchedulingRequest().getAllocationTags(),
+          ((Response) schedulerResponse).attemptedNode);
+    }
+  }
+}
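
The processor is tuned entirely through the configuration keys read in init()
above; a hedged sketch with illustrative values (not the defaults):

    // Sketch: configuration knobs consumed by PlacementProcessor#init.
    conf.setBoolean(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ENABLED, true);
    conf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ALGORITHM_CLASS,
        SamplePlacementAlgorithm.class.getName()); // only the first entry is used
    conf.setInt(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ALGORITHM_POOL_SIZE, 1);
    conf.setInt(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_SCHEDULER_POOL_SIZE, 1);
    conf.setInt(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS, 3);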

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47f3f64e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/SamplePlacementAlgorithm.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/SamplePlacementAlgorithm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/SamplePlacementAlgorithm.java
new file mode 100644
index 0000000..8d49801
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/SamplePlacementAlgorithm.java
@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor;
+
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraintTransformations.SpecializedConstraintTransformer;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.InvalidAllocationTagsQueryException;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithm;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmInput;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmOutput;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.ConstraintPlacementAlgorithmOutputCollector;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.PlacedSchedulingRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Sample test algorithm. Always assumes anti-affinity.
+ * It also assumes that numAllocations in the resource sizing is always 1.
+ *
+ * NOTE: This is just a sample implementation, not meant for actual use.
+ */
+public class SamplePlacementAlgorithm implements ConstraintPlacementAlgorithm {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SamplePlacementAlgorithm.class);
+
+  private AllocationTagsManager tagsManager;
+  private PlacementConstraintManager constraintManager;
+  private NodeCandidateSelector nodeSelector;
+
+  @Override
+  public void init(RMContext rmContext) {
+    this.tagsManager = rmContext.getAllocationTagsManager();
+    this.constraintManager = rmContext.getPlacementConstraintManager();
+    this.nodeSelector =
+        filter -> ((AbstractYarnScheduler)(rmContext)
+            .getScheduler()).getNodes(filter);
+  }
+
+  @Override
+  public void place(ConstraintPlacementAlgorithmInput input,
+      ConstraintPlacementAlgorithmOutputCollector collector) {
+    BatchedRequests requests = (BatchedRequests)input;
+    ConstraintPlacementAlgorithmOutput resp =
+        new ConstraintPlacementAlgorithmOutput(requests.getApplicationId());
+    List<SchedulerNode> allNodes = nodeSelector.selectNodes(null);
+    Map<String, List<SchedulingRequest>> tagIndexedRequests = new HashMap<>();
+    requests.getSchedulingRequests()
+        .stream()
+        .filter(r -> r.getAllocationTags() != null)
+        .forEach(
+            req -> req.getAllocationTags().forEach(
+                tag -> tagIndexedRequests.computeIfAbsent(tag,
+                    k -> new ArrayList<>()).add(req))
+        );
+    for (Map.Entry<String, List<SchedulingRequest>> entry :
+        tagIndexedRequests.entrySet()) {
+      String tag = entry.getKey();
+      PlacementConstraint constraint =
+          constraintManager.getConstraint(requests.getApplicationId(),
+              Collections.singleton(tag));
+      if (constraint != null) {
+        // Currently works only for simple anti-affinity
+        // NODE scope target expressions
+        SpecializedConstraintTransformer transformer =
+            new SpecializedConstraintTransformer(constraint);
+        PlacementConstraint transform = transformer.transform();
+        TargetConstraint targetConstraint =
+            (TargetConstraint) transform.getConstraintExpr();
+        // Assume a single target expression tag.
+        // The Sample Algorithm assumes a constraint will always be a simple
+        // Target Constraint with a single entry in the target set.
+        // As mentioned in the class javadoc, this algorithm should be
+        // used mostly for testing and validating the end-to-end workflow.
+        String targetTag =
+            targetConstraint.getTargetExpressions().iterator().next()
+            .getTargetValues().iterator().next();
+        // iterate over all nodes
+        Iterator<SchedulerNode> nodeIter = allNodes.iterator();
+        List<SchedulingRequest> schedulingRequests = entry.getValue();
+        Iterator<SchedulingRequest> reqIter = schedulingRequests.iterator();
+        while (reqIter.hasNext()) {
+          SchedulingRequest sReq = reqIter.next();
+          int numAllocs = sReq.getResourceSizing().getNumAllocations();
+          while (numAllocs > 0 && nodeIter.hasNext()) {
+            SchedulerNode node = nodeIter.next();
+            long nodeCardinality = 0;
+            try {
+              nodeCardinality = tagsManager.getNodeCardinality(
+                  node.getNodeID(), requests.getApplicationId(),
+                  targetTag);
+              if (nodeCardinality == 0 &&
+                  !requests.getBlacklist(tag).contains(node.getNodeID())) {
+                numAllocs--;
+                sReq.getResourceSizing().setNumAllocations(numAllocs);
+                PlacedSchedulingRequest placedReq =
+                    new PlacedSchedulingRequest(sReq);
+                placedReq.setPlacementAttempt(requests.getPlacementAttempt());
+                placedReq.getNodes().add(node);
+                resp.getPlacedRequests().add(placedReq);
+              }
+            } catch (InvalidAllocationTagsQueryException e) {
+              LOG.warn("Got exception from TagManager !", e);
+            }
+          }
+        }
+      }
+    }
+    // Add all requests whose numAllocations still > 0 to rejected list.
+    requests.getSchedulingRequests().stream()
+        .filter(sReq -> sReq.getResourceSizing().getNumAllocations() > 0)
+        .forEach(rejReq -> resp.getRejectedRequests().add(rejReq));
+    collector.collect(resp);
+  }
+}
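
The only constraint shape this sample handles is simple NODE-scope
anti-affinity on a single tag; a sketch of such a constraint, matching what
the tests below register during AM registration:

    // Sketch: anti-affinity on allocation tag "foo" at NODE scope.
    PlacementConstraint antiAffinity = PlacementConstraints.build(
        PlacementConstraints.targetNotIn(PlacementConstraints.NODE,
            PlacementConstraints.PlacementTargets.allocationTag("foo")));
    // Registered per application, keyed by the tag set:
    constraintManager.registerApplication(appId,
        Collections.singletonMap(Collections.singleton("foo"), antiAffinity));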

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47f3f64e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/package-info.java
new file mode 100644
index 0000000..7090154
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/package-info.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package o.a.h.yarn.server.resourcemanager.scheduler.constraint.processor
+ * contains classes related to scheduling containers using the placement
+ * processor.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47f3f64e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
index 12dfe18..975abe6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
@@ -21,7 +21,10 @@ package org.apache.hadoop.yarn.server.resourcemanager;
 import java.lang.reflect.UndeclaredThrowableException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -39,7 +42,9 @@ import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
@@ -57,6 +62,9 @@ public class MockAM {
   private ApplicationMasterProtocol amRMProtocol;
   private UserGroupInformation ugi;
   private volatile AllocateResponse lastResponse;
+  private Map<Set<String>, PlacementConstraint> placementConstraints =
+      new HashMap<>();
+  private List<SchedulingRequest> schedulingRequests = new ArrayList<>();
 
   private final List<ResourceRequest> requests = new ArrayList<ResourceRequest>();
   private final List<ContainerId> releases = new ArrayList<ContainerId>();
@@ -93,6 +101,16 @@ public class MockAM {
     return registerAppAttempt(true);
   }
 
+  public void addPlacementConstraint(Set<String> tags,
+      PlacementConstraint constraint) {
+    placementConstraints.put(tags, constraint);
+  }
+
+  public MockAM addSchedulingRequest(List<SchedulingRequest> reqs) {
+    schedulingRequests.addAll(reqs);
+    return this;
+  }
+
   public RegisterApplicationMasterResponse registerAppAttempt(boolean wait)
       throws Exception {
     if (wait) {
@@ -104,6 +122,9 @@ public class MockAM {
     req.setHost("");
     req.setRpcPort(1);
     req.setTrackingUrl("");
+    if (!placementConstraints.isEmpty()) {
+      req.setPlacementConstraints(this.placementConstraints);
+    }
     if (ugi == null) {
       ugi = UserGroupInformation.createRemoteUser(
           attemptId.toString());
@@ -247,12 +268,17 @@ public class MockAM {
 
   }
 
+
   public AllocateResponse allocate(
       List<ResourceRequest> resourceRequest, List<ContainerId> releases)
       throws Exception {
     final AllocateRequest req =
         AllocateRequest.newInstance(0, 0F, resourceRequest,
           releases, null);
+    if (!schedulingRequests.isEmpty()) {
+      req.setSchedulingRequests(schedulingRequests);
+      schedulingRequests.clear();
+    }
     return allocate(req);
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47f3f64e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index 19ca6d7..b32aeb5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -27,6 +27,7 @@ import java.util.Collections;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.DataOutputBuffer;
@@ -65,6 +66,7 @@ import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.Dispatcher;
@@ -1238,6 +1240,18 @@ public class MockRM extends ResourceManager {
     return am;
   }
 
+  public static MockAM launchAndRegisterAM(RMApp app, MockRM rm, MockNM nm,
+      Map<Set<String>, PlacementConstraint> constraints) throws Exception {
+    MockAM am = launchAM(app, rm, nm);
+    for (Map.Entry<Set<String>, PlacementConstraint> e :
+        constraints.entrySet()) {
+      am.addPlacementConstraint(e.getKey(), e.getValue());
+    }
+    am.registerAppAttempt();
+    rm.waitForState(app.getApplicationId(), RMAppState.RUNNING);
+    return am;
+  }
+
   public ApplicationReport getApplicationReport(ApplicationId appId)
       throws YarnException, IOException {
     ApplicationClientProtocol client = getClientRMService();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47f3f64e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
new file mode 100644
index 0000000..db8ae15
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
@@ -0,0 +1,394 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
+import org.apache.hadoop.yarn.api.records.RejectionReason;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.DrainDispatcher;
+import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static java.lang.Thread.sleep;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;
+
+/**
+ * This tests end2end workflow of the constraint placement framework.
+ */
+public class TestPlacementProcessor {
+
+  private static final int GB = 1024;
+
+  private static final Log LOG =
+      LogFactory.getLog(TestPlacementProcessor.class);
+  private MockRM rm;
+  private DrainDispatcher dispatcher;
+
+  @Before
+  public void createAndStartRM() {
+    CapacitySchedulerConfiguration csConf =
+        new CapacitySchedulerConfiguration();
+    YarnConfiguration conf = new YarnConfiguration(csConf);
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
+    conf.setBoolean(
+        YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ENABLED, true);
+    conf.setInt(
+        YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS, 1);
+    startRM(conf);
+  }
+
+  private void startRM(final YarnConfiguration conf) {
+    dispatcher = new DrainDispatcher();
+    rm = new MockRM(conf) {
+      @Override
+      protected Dispatcher createDispatcher() {
+        return dispatcher;
+      }
+    };
+    rm.start();
+  }
+
+  @After
+  public void stopRM() {
+    if (rm != null) {
+      rm.stop();
+    }
+  }
+
+  @Test(timeout = 300000)
+  public void testPlacement() throws Exception {
+    HashMap<NodeId, MockNM> nodes = new HashMap<>();
+    MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm1.getNodeId(), nm1);
+    MockNM nm2 = new MockNM("h2:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm2.getNodeId(), nm2);
+    MockNM nm3 = new MockNM("h3:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm3.getNodeId(), nm3);
+    MockNM nm4 = new MockNM("h4:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm4.getNodeId(), nm4);
+    nm1.registerNode();
+    nm2.registerNode();
+    nm3.registerNode();
+    nm4.registerNode();
+
+    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2,
+        Collections.singletonMap(
+            Collections.singleton("foo"),
+            PlacementConstraints.build(
+                PlacementConstraints.targetNotIn(NODE, allocationTag("foo")))
+        ));
+    am1.addSchedulingRequest(
+        Arrays.asList(
+            schedulingRequest(1, 1, 1, 512, "foo"),
+            schedulingRequest(1, 2, 1, 512, "foo"),
+            schedulingRequest(1, 3, 1, 512, "foo"),
+            schedulingRequest(1, 5, 1, 512, "foo"))
+    );
+    AllocateResponse allocResponse = am1.schedule(); // send the request
+    List<Container> allocatedContainers = new ArrayList<>();
+    allocatedContainers.addAll(allocResponse.getAllocatedContainers());
+
+    // kick the scheduler
+
+    while (allocatedContainers.size() < 4) {
+      nm1.nodeHeartbeat(true);
+      nm2.nodeHeartbeat(true);
+      nm3.nodeHeartbeat(true);
+      nm4.nodeHeartbeat(true);
+      LOG.info("Waiting for containers to be created for app 1...");
+      sleep(1000);
+      allocResponse = am1.schedule();
+      allocatedContainers.addAll(allocResponse.getAllocatedContainers());
+    }
+
+    Assert.assertEquals(4, allocatedContainers.size());
+    Set<NodeId> nodeIds = allocatedContainers.stream()
+        .map(x -> x.getNodeId()).collect(Collectors.toSet());
+    // Ensure unique nodes
+    Assert.assertEquals(4, nodeIds.size());
+  }
+
+  @Test(timeout = 300000)
+  public void testSchedulerRejection() throws Exception {
+    HashMap<NodeId, MockNM> nodes = new HashMap<>();
+    MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm1.getNodeId(), nm1);
+    MockNM nm2 = new MockNM("h2:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm2.getNodeId(), nm2);
+    MockNM nm3 = new MockNM("h3:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm3.getNodeId(), nm3);
+    MockNM nm4 = new MockNM("h4:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm4.getNodeId(), nm4);
+    nm1.registerNode();
+    nm2.registerNode();
+    nm3.registerNode();
+    nm4.registerNode();
+
+    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2,
+        Collections.singletonMap(
+            Collections.singleton("foo"),
+            PlacementConstraints.build(
+                PlacementConstraints.targetNotIn(NODE, allocationTag("foo")))
+        ));
+    am1.addSchedulingRequest(
+        Arrays.asList(
+            schedulingRequest(1, 1, 1, 512, "foo"),
+            schedulingRequest(1, 2, 1, 512, "foo"),
+            schedulingRequest(1, 3, 1, 512, "foo"),
+            // Ask for a container larger than the node
+            schedulingRequest(1, 4, 1, 5120, "foo"))
+    );
+    AllocateResponse allocResponse = am1.schedule(); // send the request
+    List<Container> allocatedContainers = new ArrayList<>();
+    List<RejectedSchedulingRequest> rejectedReqs = new ArrayList<>();
+    int allocCount = 1;
+    allocatedContainers.addAll(allocResponse.getAllocatedContainers());
+    rejectedReqs.addAll(allocResponse.getRejectedSchedulingRequests());
+
+    // kick the scheduler
+
+    while (allocCount < 11) {
+      nm1.nodeHeartbeat(true);
+      nm2.nodeHeartbeat(true);
+      nm3.nodeHeartbeat(true);
+      nm4.nodeHeartbeat(true);
+      LOG.info("Waiting for containers to be created for app 1...");
+      sleep(1000);
+      allocResponse = am1.schedule();
+      allocatedContainers.addAll(allocResponse.getAllocatedContainers());
+      rejectedReqs.addAll(allocResponse.getRejectedSchedulingRequests());
+      allocCount++;
+      if (rejectedReqs.size() > 0 && allocatedContainers.size() > 2) {
+        break;
+      }
+    }
+
+    Assert.assertEquals(3, allocatedContainers.size());
+    Set<NodeId> nodeIds = allocatedContainers.stream()
+        .map(x -> x.getNodeId()).collect(Collectors.toSet());
+    // Ensure unique nodes
+    Assert.assertEquals(3, nodeIds.size());
+    RejectedSchedulingRequest rej = rejectedReqs.get(0);
+    Assert.assertEquals(4, rej.getRequest().getAllocationRequestId());
+    Assert.assertEquals(RejectionReason.COULD_NOT_SCHEDULE_ON_NODE,
+        rej.getReason());
+  }
+
+  @Test(timeout = 300000)
+  public void testRePlacementAfterSchedulerRejection() throws Exception {
+    stopRM();
+    CapacitySchedulerConfiguration csConf =
+        new CapacitySchedulerConfiguration();
+    YarnConfiguration conf = new YarnConfiguration(csConf);
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
+    conf.setBoolean(
+        YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ENABLED, true);
+    conf.setInt(
+        YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS, 2);
+    startRM(conf);
+
+    HashMap<NodeId, MockNM> nodes = new HashMap<>();
+    MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm1.getNodeId(), nm1);
+    MockNM nm2 = new MockNM("h2:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm2.getNodeId(), nm2);
+    MockNM nm3 = new MockNM("h3:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm3.getNodeId(), nm3);
+    MockNM nm4 = new MockNM("h4:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm4.getNodeId(), nm4);
+    MockNM nm5 = new MockNM("h5:1234", 8192, rm.getResourceTrackerService());
+    nodes.put(nm5.getNodeId(), nm5);
+    nm1.registerNode();
+    nm2.registerNode();
+    nm3.registerNode();
+    nm4.registerNode();
+    // Do not register nm5 yet..
+
+    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2,
+        Collections.singletonMap(
+            Collections.singleton("foo"),
+            PlacementConstraints.build(
+                PlacementConstraints.targetNotIn(NODE, allocationTag("foo")))
+        ));
+    am1.addSchedulingRequest(
+        Arrays.asList(
+            schedulingRequest(1, 1, 1, 512, "foo"),
+            schedulingRequest(1, 2, 1, 512, "foo"),
+            schedulingRequest(1, 3, 1, 512, "foo"),
+            // Ask for a container larger than the node
+            schedulingRequest(1, 4, 1, 5120, "foo"))
+    );
+    AllocateResponse allocResponse = am1.schedule(); // send the request
+    List<Container> allocatedContainers = new ArrayList<>();
+    List<RejectedSchedulingRequest> rejectedReqs = new ArrayList<>();
+    int allocCount = 1;
+    allocatedContainers.addAll(allocResponse.getAllocatedContainers());
+    rejectedReqs.addAll(allocResponse.getRejectedSchedulingRequests());
+
+    // Register node5 only after first allocate - so the initial placement
+    // for the large schedReq goes to some other node..
+    nm5.registerNode();
+
+    // kick the scheduler
+    while (allocCount < 11) {
+      nm1.nodeHeartbeat(true);
+      nm2.nodeHeartbeat(true);
+      nm3.nodeHeartbeat(true);
+      nm4.nodeHeartbeat(true);
+      nm5.nodeHeartbeat(true);
+      LOG.info("Waiting for containers to be created for app 1...");
+      sleep(1000);
+      allocResponse = am1.schedule();
+      allocatedContainers.addAll(allocResponse.getAllocatedContainers());
+      rejectedReqs.addAll(allocResponse.getRejectedSchedulingRequests());
+      allocCount++;
+      if (allocatedContainers.size() > 3) {
+        break;
+      }
+    }
+
+    Assert.assertEquals(4, allocatedContainers.size());
+    Set<NodeId> nodeIds = allocatedContainers.stream()
+        .map(x -> x.getNodeId()).collect(Collectors.toSet());
+    // Ensure unique nodes
+    Assert.assertEquals(4, nodeIds.size());
+  }
+
+  @Test(timeout = 300000)
+  public void testPlacementRejection() throws Exception {
+    HashMap<NodeId, MockNM> nodes = new HashMap<>();
+    MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm1.getNodeId(), nm1);
+    MockNM nm2 = new MockNM("h2:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm2.getNodeId(), nm2);
+    MockNM nm3 = new MockNM("h3:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm3.getNodeId(), nm3);
+    MockNM nm4 = new MockNM("h4:1234", 4096, rm.getResourceTrackerService());
+    nodes.put(nm4.getNodeId(), nm4);
+    nm1.registerNode();
+    nm2.registerNode();
+    nm3.registerNode();
+    nm4.registerNode();
+
+    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2,
+        Collections.singletonMap(
+            Collections.singleton("foo"),
+            PlacementConstraints.build(
+                PlacementConstraints.targetNotIn(NODE, allocationTag("foo")))
+        ));
+    am1.addSchedulingRequest(
+        Arrays.asList(
+            schedulingRequest(1, 1, 1, 512, "foo"),
+            schedulingRequest(1, 2, 1, 512, "foo"),
+            schedulingRequest(1, 3, 1, 512, "foo"),
+            schedulingRequest(1, 4, 1, 512, "foo"),
+            // Ask for more containers than nodes
+            schedulingRequest(1, 5, 1, 512, "foo"))
+    );
+    AllocateResponse allocResponse = am1.schedule(); // send the request
+    List<Container> allocatedContainers = new ArrayList<>();
+    List<RejectedSchedulingRequest> rejectedReqs = new ArrayList<>();
+    int allocCount = 1;
+    allocatedContainers.addAll(allocResponse.getAllocatedContainers());
+    rejectedReqs.addAll(allocResponse.getRejectedSchedulingRequests());
+
+    // kick the scheduler
+
+    while (allocCount < 11) {
+      nm1.nodeHeartbeat(true);
+      nm2.nodeHeartbeat(true);
+      nm3.nodeHeartbeat(true);
+      nm4.nodeHeartbeat(true);
+      LOG.info("Waiting for containers to be created for app 1...");
+      sleep(1000);
+      allocResponse = am1.schedule();
+      allocatedContainers.addAll(allocResponse.getAllocatedContainers());
+      rejectedReqs.addAll(allocResponse.getRejectedSchedulingRequests());
+      allocCount++;
+      if (rejectedReqs.size() > 0 && allocatedContainers.size() > 3) {
+        break;
+      }
+    }
+
+    Assert.assertEquals(4, allocatedContainers.size());
+    Set<NodeId> nodeIds = allocatedContainers.stream()
+        .map(x -> x.getNodeId()).collect(Collectors.toSet());
+    // Ensure unique nodes
+    Assert.assertEquals(4, nodeIds.size());
+    RejectedSchedulingRequest rej = rejectedReqs.get(0);
+    Assert.assertEquals(RejectionReason.COULD_NOT_PLACE_ON_NODE,
+        rej.getReason());
+  }
+
+  private static SchedulingRequest schedulingRequest(
+      int priority, long allocReqId, int cores, int mem, String... tags) {
+    return schedulingRequest(priority, allocReqId, cores, mem,
+        ExecutionType.GUARANTEED, tags);
+  }
+
+  private static SchedulingRequest schedulingRequest(
+      int priority, long allocReqId, int cores, int mem,
+      ExecutionType execType, String... tags) {
+    return SchedulingRequest.newBuilder()
+        .priority(Priority.newInstance(priority))
+        .allocationRequestId(allocReqId)
+        .allocationTags(new HashSet<>(Arrays.asList(tags)))
+        .executionType(ExecutionTypeRequest.newInstance(execType, true))
+        .resourceSizing(
+            ResourceSizing.newInstance(1, Resource.newInstance(mem, cores)))
+        .build();
+  }
+}
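
For readers following these tests: below is a minimal, self-contained sketch of the anti-affinity pattern the tests exercise, using the same PlacementConstraints calls and SchedulingRequest builder that appear in the test code above. The class name AntiAffinityExample and the helper method names are illustrative only, not part of this patch.

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.yarn.api.records.ExecutionType;
import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceSizing;
import org.apache.hadoop.yarn.api.records.SchedulingRequest;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraints;

import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;

/** Illustrative sketch only; not part of the patch. */
public final class AntiAffinityExample {

  private AntiAffinityExample() {
  }

  /**
   * The constraint registered by the AM in the tests above. It reads:
   * "do not place a request tagged 'foo' on a node that already holds a
   * container tagged 'foo'" -- i.e. node-scoped anti-affinity.
   */
  static Map<Set<String>, PlacementConstraint> fooAntiAffinity() {
    return Collections.singletonMap(
        Collections.singleton("foo"),
        PlacementConstraints.build(
            PlacementConstraints.targetNotIn(NODE, allocationTag("foo"))));
  }

  /** One GUARANTEED 512MB / 1-core container carrying the "foo" tag. */
  static SchedulingRequest fooRequest(long allocationRequestId) {
    return SchedulingRequest.newBuilder()
        .priority(Priority.newInstance(1))
        .allocationRequestId(allocationRequestId)
        .allocationTags(new HashSet<>(Arrays.asList("foo")))
        .executionType(
            ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED, true))
        .resourceSizing(
            ResourceSizing.newInstance(1, Resource.newInstance(512, 1)))
        .build();
  }
}

Because each "foo" container repels every other "foo" container at NODE scope, N such requests can only be satisfied on N distinct nodes, which is exactly what the uniqueness assertions on the allocated NodeIds verify.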


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[19/49] hadoop git commit: YARN-7674. Update Timeline Reader web app address in UI2. Contributed by Sunil G.

Posted by as...@apache.org.
YARN-7674. Update Timeline Reader web app address in UI2. Contributed by Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13ad7479
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13ad7479
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13ad7479

Branch: refs/heads/YARN-6592
Commit: 13ad7479b0e35a2c2d398e28c676871d9e672dc3
Parents: a78db99
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Wed Dec 20 22:26:49 2017 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Wed Dec 20 22:28:28 2017 +0530

----------------------------------------------------------------------
 .../hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js      | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13ad7479/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
index 55f6e1b..9d63de3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
@@ -24,7 +24,7 @@ function getTimeLineURL(rmhost) {
   var url = window.location.protocol + '//' +
     (ENV.hosts.localBaseAddress? ENV.hosts.localBaseAddress + '/' : '') + rmhost;
 
-  url += '/conf?name=yarn.timeline-service.webapp.address';
+  url += '/conf?name=yarn.timeline-service.reader.webapp.address';
   Ember.Logger.log("Get Timeline Address URL: " + url);
   return url;
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[15/49] hadoop git commit: YARN-7466. Addendum patch for failing unit test. (Contributed by Chandni Singh)

Posted by as...@apache.org.
YARN-7466. Addendum patch for failing unit test. (Contributed by Chandni Singh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94a2ac6b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94a2ac6b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94a2ac6b

Branch: refs/heads/YARN-6592
Commit: 94a2ac6b719913aa698b66bf40b7ebbe6fa606da
Parents: 989c751
Author: Eric Yang <ey...@apache.org>
Authored: Tue Dec 19 18:42:27 2017 -0500
Committer: Eric Yang <ey...@apache.org>
Committed: Tue Dec 19 18:42:27 2017 -0500

----------------------------------------------------------------------
 .../java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/94a2ac6b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
index a698ecf..12dfe18 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
@@ -177,7 +177,7 @@ public class MockAM {
       List<ContainerId> releases, String labelExpression) throws Exception {
     List<ResourceRequest> reqs =
         createReq(new String[] { host }, memory, priority, numContainers,
-            labelExpression, 0L);
+            labelExpression, -1);
     return allocate(reqs, releases);
   }
   


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[17/49] hadoop git commit: YARN-7032. [ATSv2] NPE while starting hbase co-processor when HBase authorization is enabled. Contributed by Rohith Sharma K S.

Posted by as...@apache.org.
YARN-7032. [ATSv2] NPE while starting hbase co-processor when HBase authorization is enabled. Contributed by Rohith Sharma K S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d62932c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d62932c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d62932c3

Branch: refs/heads/YARN-6592
Commit: d62932c3b2fcacc81dc1f5048cdeb60fb0d38504
Parents: 41b5810
Author: Sunil G <su...@apache.org>
Authored: Wed Dec 20 11:31:15 2017 +0530
Committer: Sunil G <su...@apache.org>
Committed: Wed Dec 20 11:31:15 2017 +0530

----------------------------------------------------------------------
 .../server/timelineservice/storage/flow/FlowRunCoprocessor.java  | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62932c3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
index 359eec9..96a7cf3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
@@ -98,7 +98,9 @@ public class FlowRunCoprocessor extends BaseRegionObserver {
     if ((attributes != null) && (attributes.size() > 0)) {
       for (Map.Entry<String, byte[]> attribute : attributes.entrySet()) {
         Tag t = HBaseTimelineStorageUtils.getTagFromAttribute(attribute);
-        tags.add(t);
+        if (t != null) {
+          tags.add(t);
+        }
       }
       byte[] tagByteArray = Tag.fromList(tags);
       NavigableMap<byte[], List<Cell>> newFamilyMap = new TreeMap<>(


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[46/49] hadoop git commit: YARN-7669. API and interface modifications for placement constraint processor. (asuresh)

Posted by as...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutput.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutput.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutput.java
new file mode 100644
index 0000000..9571f0e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutput.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Encapsulates the output of the ConstraintPlacementAlgorithm. The Algorithm
+ * is free to produce multiple output objects at the end of each run, and it
+ * must use the provided ConstraintPlacementAlgorithmOutputCollector to
+ * aggregate/collect this output. This is similar to a MapReduce Mapper/Reducer,
+ * which is provided a collector to collect output.
+ */
+public class ConstraintPlacementAlgorithmOutput {
+
+  private final ApplicationId applicationId;
+
+  public ConstraintPlacementAlgorithmOutput(ApplicationId applicationId) {
+    this.applicationId = applicationId;
+  }
+
+  private final List<PlacedSchedulingRequest> placedRequests =
+      new ArrayList<>();
+
+  private final List<SchedulingRequest> rejectedRequests =
+      new ArrayList<>();
+
+  public List<PlacedSchedulingRequest> getPlacedRequests() {
+    return placedRequests;
+  }
+
+  public List<SchedulingRequest> getRejectedRequests() {
+    return rejectedRequests;
+  }
+
+  public ApplicationId getApplicationId() {
+    return applicationId;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutputCollector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutputCollector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutputCollector.java
new file mode 100644
index 0000000..131fd42
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutputCollector.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api;
+
+/**
+ * The ConstraintPlacementAlgorithm uses the
+ * ConstraintPlacementAlgorithmOutputCollector to collect any output it
+ * produces.
+ */
+public interface ConstraintPlacementAlgorithmOutputCollector {
+
+  /**
+   * Collect a ConstraintPlacementAlgorithm output.
+   * @param algorithmOutput ConstraintPlacementAlgorithm Output.
+   */
+  void collect(ConstraintPlacementAlgorithmOutput algorithmOutput);
+}
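
A minimal sketch of how the two classes above fit together: an algorithm run builds one ConstraintPlacementAlgorithmOutput per application, fills its placed/rejected lists, and hands it to the collector. The list-backed collector below and its class name are assumptions for illustration (and it is assumed to live in the same package as the interface); they are not part of this patch.

import java.util.ArrayList;
import java.util.List;

/** Illustrative collector that simply aggregates every algorithm output. */
public class ListBackedOutputCollector
    implements ConstraintPlacementAlgorithmOutputCollector {

  private final List<ConstraintPlacementAlgorithmOutput> outputs =
      new ArrayList<>();

  @Override
  public void collect(ConstraintPlacementAlgorithmOutput algorithmOutput) {
    // A production collector would push the placed requests to the
    // scheduler's commit phase and route the rejected ones back to the
    // AM; here we only aggregate them for inspection.
    outputs.add(algorithmOutput);
  }

  public List<ConstraintPlacementAlgorithmOutput> getOutputs() {
    return outputs;
  }
}

A typical run would create a ConstraintPlacementAlgorithmOutput for an ApplicationId, append PlacedSchedulingRequests for the placements it found and plain SchedulingRequests for those it could not place, then invoke collect(...) once per application.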

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/PlacedSchedulingRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/PlacedSchedulingRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/PlacedSchedulingRequest.java
new file mode 100644
index 0000000..2cd90d6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/PlacedSchedulingRequest.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api;
+
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Class to encapsulate a Placed scheduling Request.
+ * It has the original SchedulingRequest and a list of SchedulerNodes (one
+ * for each of the 'numAllocations' in the corresponding ResourceSizing
+ * object).
+ *
+ * NOTE: Clients of this class SHOULD NOT rely on the value of
+ *       resourceSizing.numAllocations and should instead use the
+ *       size of the collection returned by getNodes().
+ */
+public class PlacedSchedulingRequest {
+
+  // The number of times the Algorithm tried to place the SchedulingRequest
+  // after it was rejected by the commit phase of the Scheduler (due to some
+  // transient state of the cluster, e.g. no space left on a node, user
+  // limits etc.). The Algorithm can then try to place the request on a
+  // different node.
+  private int placementAttempt = 0;
+  private final SchedulingRequest request;
+  // One node per 'numAllocations' in the SchedulingRequest.
+  private final List<SchedulerNode> nodes = new ArrayList<>();
+
+  public PlacedSchedulingRequest(SchedulingRequest request) {
+    this.request = request;
+  }
+
+  public SchedulingRequest getSchedulingRequest() {
+    return request;
+  }
+
+  /**
+   * List of Node locations on which this Scheduling Request can be placed.
+   * The size of this list = schedulingRequest.resourceSizing.numAllocations.
+   * @return List of Scheduler nodes.
+   */
+  public List<SchedulerNode> getNodes() {
+    return nodes;
+  }
+
+  public int getPlacementAttempt() {
+    return placementAttempt;
+  }
+
+  public void setPlacementAttempt(int attempt) {
+    this.placementAttempt = attempt;
+  }
+
+  @Override
+  public String toString() {
+    return "PlacedSchedulingRequest{" +
+        "placementAttempt=" + placementAttempt +
+        ", request=" + request +
+        ", nodes=" + nodes +
+        '}';
+  }
+}
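
Per the NOTE in the javadoc above, consumers should size their work off getNodes() rather than the request's ResourceSizing. A short sketch of the consuming side follows; allocateOnNode is an assumed scheduler hook, not an API introduced by this patch.

import org.apache.hadoop.yarn.api.records.SchedulingRequest;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;

/** Illustrative consumer of a PlacedSchedulingRequest. */
class PlacedRequestConsumer {

  void commit(PlacedSchedulingRequest placed) {
    SchedulingRequest request = placed.getSchedulingRequest();
    // getNodes().size() is the authoritative allocation count -- it may
    // be smaller than resourceSizing.numAllocations if the algorithm
    // could not place every requested container.
    for (SchedulerNode node : placed.getNodes()) {
      allocateOnNode(request, node);
    }
  }

  private void allocateOnNode(SchedulingRequest request, SchedulerNode node) {
    // assumed hook: ask the scheduler to allocate one container of
    // 'request' on 'node' and commit it
  }
}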

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/SchedulingResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/SchedulingResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/SchedulingResponse.java
new file mode 100644
index 0000000..6c65d84
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/SchedulingResponse.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+
+/**
+ * This class encapsulates the response received from the ResourceScheduler's
+ * attemptAllocateOnNode method.
+ */
+public class SchedulingResponse {
+
+  private final boolean isSuccess;
+  private final ApplicationId applicationId;
+  private final SchedulingRequest schedulingRequest;
+
+  /**
+   * Create a SchedulingResponse.
+   * @param isSuccess whether the scheduler accepted the request.
+   * @param applicationId Application Id.
+   * @param schedulingRequest Scheduling Request.
+   */
+  public SchedulingResponse(boolean isSuccess, ApplicationId applicationId,
+      SchedulingRequest schedulingRequest) {
+    this.isSuccess = isSuccess;
+    this.applicationId = applicationId;
+    this.schedulingRequest = schedulingRequest;
+  }
+
+  /**
+   * Returns true if Scheduler was able to accept and commit this request.
+   * @return true if successful.
+   */
+  public boolean isSuccess() {
+    return this.isSuccess;
+  }
+
+  /**
+   * Get Application Id.
+   * @return Application Id.
+   */
+  public ApplicationId getApplicationId() {
+    return this.applicationId;
+  }
+
+  /**
+   * Get Scheduling Request.
+   * @return Scheduling Request.
+   */
+  public SchedulingRequest getSchedulingRequest() {
+    return this.schedulingRequest;
+  }
+
+}
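
To tie SchedulingResponse back to PlacedSchedulingRequest's placementAttempt field: a failed commit can be retried up to a configured bound (cf. the RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS setting exercised in the tests above). The sketch below illustrates that flow under stated assumptions; the rePlace and reject hooks are hypothetical, not part of this patch.

import org.apache.hadoop.yarn.api.records.SchedulingRequest;

/** Illustrative handling of a scheduler commit response. */
class SchedulingResponseHandler {

  private final int maxRetryAttempts;

  SchedulingResponseHandler(int maxRetryAttempts) {
    this.maxRetryAttempts = maxRetryAttempts;
  }

  void handle(SchedulingResponse response, PlacedSchedulingRequest placed) {
    if (response.isSuccess()) {
      return; // the scheduler accepted and committed the request
    }
    if (placed.getPlacementAttempt() < maxRetryAttempts) {
      // Transient failure (e.g. the node filled up between placement and
      // commit): bump the attempt counter and try a different node.
      placed.setPlacementAttempt(placed.getPlacementAttempt() + 1);
      rePlace(placed);
    } else {
      reject(response.getSchedulingRequest());
    }
  }

  private void rePlace(PlacedSchedulingRequest placed) {
    // assumed hook: re-enqueue for another run of the placement algorithm
  }

  private void reject(SchedulingRequest request) {
    // assumed hook: surface a RejectedSchedulingRequest back to the AM
  }
}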

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/package-info.java
new file mode 100644
index 0000000..01ed713
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This Package contains classes related to constrained placement of
+ * Requests.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/TestAllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/TestAllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/TestAllocationTagsManager.java
deleted file mode 100644
index 0358792..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/TestAllocationTagsManager.java
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- * *
- *  Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- * /
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.constraint;
-
-import com.google.common.collect.ImmutableSet;
-import org.apache.hadoop.yarn.api.records.NodeId;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test functionality of AllocationTagsManager.
- */
-public class TestAllocationTagsManager {
-  @Test
-  public void testAllocationTagsManagerSimpleCases()
-      throws InvalidAllocationTagsQueryException {
-    AllocationTagsManager atm = new AllocationTagsManager();
-
-    /**
-     * Construct test case:
-     * Node1:
-     *    container_1_1 (mapper/reducer/app_1)
-     *    container_1_3 (service/app_1)
-     *
-     * Node2:
-     *    container_1_2 (mapper/reducer/app_1)
-     *    container_1_4 (reducer/app_1)
-     *    container_2_1 (service/app_2)
-     */
-
-    // 3 Containers from app1
-    atm.addContainer(NodeId.fromString("node1:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
-        ImmutableSet.of("mapper", "reducer"));
-
-    atm.addContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
-        ImmutableSet.of("mapper", "reducer"));
-
-    atm.addContainer(NodeId.fromString("node1:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
-        ImmutableSet.of("service"));
-
-    atm.addContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
-        ImmutableSet.of("reducer"));
-
-    // 1 Container from app2
-    atm.addContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
-        ImmutableSet.of("service"));
-
-    // Get Cardinality of app1 on node1, with tag "mapper"
-    Assert.assertEquals(1,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node1:1234"),
-            TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
-            Long::max));
-
-    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=min
-    Assert.assertEquals(1,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1),
-            ImmutableSet.of("mapper", "reducer"), Long::min));
-
-    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=max
-    Assert.assertEquals(2,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1),
-            ImmutableSet.of("mapper", "reducer"), Long::max));
-
-    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=sum
-    Assert.assertEquals(3,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1),
-            ImmutableSet.of("mapper", "reducer"), Long::sum));
-
-    // Get Cardinality by passing single tag.
-    Assert.assertEquals(1,
-        atm.getNodeCardinality(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1), "mapper"));
-
-    Assert.assertEquals(2,
-        atm.getNodeCardinality(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1), "reducer"));
-
-    // Get Cardinality of app1 on node2, with tag "no_existed/reducer", op=min
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1),
-            ImmutableSet.of("no_existed", "reducer"), Long::min));
-
-    // Get Cardinality of app1 on node2, with tag "<applicationId>", op=max
-    // (Expect this returns #containers from app1 on node2)
-    Assert.assertEquals(2,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1), ImmutableSet
-                .of(AllocationTagsNamespaces.APP_ID + TestUtils
-                    .getMockApplicationId(1).toString()), Long::max));
-
-    // Get Cardinality of app1 on node2, with empty tag set, op=max
-    Assert.assertEquals(2,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::max));
-
-    // Get Cardinality of all apps on node2, with empty tag set, op=sum
-    Assert.assertEquals(7,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"), null,
-            ImmutableSet.of(), Long::sum));
-
-    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
-    Assert.assertEquals(5,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::sum));
-
-    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
-    Assert.assertEquals(2,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(2), ImmutableSet.of(), Long::sum));
-
-    // Finish all containers:
-    atm.removeContainer(NodeId.fromString("node1:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
-        ImmutableSet.of("mapper", "reducer"));
-
-    atm.removeContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
-        ImmutableSet.of("mapper", "reducer"));
-
-    atm.removeContainer(NodeId.fromString("node1:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
-        ImmutableSet.of("service"));
-
-    atm.removeContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
-        ImmutableSet.of("reducer"));
-
-    atm.removeContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
-        ImmutableSet.of("service"));
-
-    // Expect all cardinality to be 0
-    // Get Cardinality of app1 on node1, with tag "mapper"
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node1:1234"),
-            TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
-            Long::max));
-
-    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=min
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1),
-            ImmutableSet.of("mapper", "reducer"), Long::min));
-
-    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=max
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1),
-            ImmutableSet.of("mapper", "reducer"), Long::max));
-
-    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=sum
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1),
-            ImmutableSet.of("mapper", "reducer"), Long::sum));
-
-    // Get Cardinality of app1 on node2, with tag "<applicationId>", op=max
-    // (Expect this returns #containers from app1 on node2)
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1),
-            ImmutableSet.of(TestUtils.getMockApplicationId(1).toString()),
-            Long::max));
-
-    Assert.assertEquals(0,
-        atm.getNodeCardinality(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1),
-            TestUtils.getMockApplicationId(1).toString()));
-
-    // Get Cardinality of app1 on node2, with empty tag set, op=max
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::max));
-
-    // Get Cardinality of all apps on node2, with empty tag set, op=sum
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"), null,
-            ImmutableSet.of(), Long::sum));
-
-    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::sum));
-
-    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
-    Assert.assertEquals(0,
-        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-            TestUtils.getMockApplicationId(2), ImmutableSet.of(), Long::sum));
-  }
-
-  @Test
-  public void testAllocationTagsManagerMemoryAfterCleanup()
-      throws InvalidAllocationTagsQueryException {
-    /**
-     * Make sure YARN cleans up all memory once container/app finishes.
-     */
-
-    AllocationTagsManager atm = new AllocationTagsManager();
-
-    // Add a bunch of containers
-    atm.addContainer(NodeId.fromString("node1:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
-        ImmutableSet.of("mapper", "reducer"));
-
-    atm.addContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
-        ImmutableSet.of("mapper", "reducer"));
-
-    atm.addContainer(NodeId.fromString("node1:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
-        ImmutableSet.of("service"));
-
-    atm.addContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
-        ImmutableSet.of("reducer"));
-
-    atm.addContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
-        ImmutableSet.of("service"));
-
-    // Remove all these containers
-    atm.removeContainer(NodeId.fromString("node1:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
-        ImmutableSet.of("mapper", "reducer"));
-
-    atm.removeContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
-        ImmutableSet.of("mapper", "reducer"));
-
-    atm.removeContainer(NodeId.fromString("node1:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
-        ImmutableSet.of("service"));
-
-    atm.removeContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
-        ImmutableSet.of("reducer"));
-
-    atm.removeContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
-        ImmutableSet.of("service"));
-
-    // Check internal data structure
-    Assert.assertEquals(0,
-        atm.getGlobalMapping().getNodeToTagsWithCount().size());
-    Assert.assertEquals(0, atm.getPerAppMappings().size());
-  }
-
-  @Test
-  public void testQueryCardinalityWithIllegalParameters()
-      throws InvalidAllocationTagsQueryException {
-    /**
-     * Make sure YARN cleans up all memory once container/app finishes.
-     */
-
-    AllocationTagsManager atm = new AllocationTagsManager();
-
-    // Add a bunch of containers
-    atm.addContainer(NodeId.fromString("node1:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
-        ImmutableSet.of("mapper", "reducer"));
-
-    atm.addContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
-        ImmutableSet.of("mapper", "reducer"));
-
-    atm.addContainer(NodeId.fromString("node1:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
-        ImmutableSet.of("service"));
-
-    atm.addContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
-        ImmutableSet.of("reducer"));
-
-    atm.addContainer(NodeId.fromString("node2:1234"),
-        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
-        ImmutableSet.of("service"));
-
-    // No node-id
-    boolean caughtException = false;
-    try {
-      atm.getNodeCardinalityByOp(null, TestUtils.getMockApplicationId(2),
-          ImmutableSet.of("mapper"), Long::min);
-    } catch (InvalidAllocationTagsQueryException e) {
-      caughtException = true;
-    }
-    Assert.assertTrue("should fail because of nodeId specified",
-        caughtException);
-
-    // No op
-    caughtException = false;
-    try {
-      atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
-          TestUtils.getMockApplicationId(2), ImmutableSet.of("mapper"), null);
-    } catch (InvalidAllocationTagsQueryException e) {
-      caughtException = true;
-    }
-    Assert.assertTrue("should fail because of nodeId specified",
-        caughtException);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
index 27ff311..538d128 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java
@@ -54,7 +54,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
-import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
@@ -62,6 +61,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAt
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.junit.Assert;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
index 61a5555..e8734cc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
@@ -42,12 +42,12 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
-import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
 import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
 import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
new file mode 100644
index 0000000..4bb2a18
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestAllocationTagsManager.java
@@ -0,0 +1,328 @@
+/*
+ * *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ * /
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test functionality of AllocationTagsManager.
+ */
+public class TestAllocationTagsManager {
+  @Test
+  public void testAllocationTagsManagerSimpleCases()
+      throws InvalidAllocationTagsQueryException {
+    AllocationTagsManager atm = new AllocationTagsManager();
+
+    /**
+     * Construct test case:
+     * Node1:
+     *    container_1_1 (mapper/reducer/app_1)
+     *    container_1_3 (service/app_1)
+     *
+     * Node2:
+     *    container_1_2 (mapper/reducer/app_1)
+     *    container_1_4 (reducer/app_1)
+     *    container_2_1 (service/app_2)
+     */
+
+    // 3 Containers from app1
+    atm.addContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
+        ImmutableSet.of("service"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
+        ImmutableSet.of("reducer"));
+
+    // 1 Container from app2
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
+        ImmutableSet.of("service"));
+
+    // Get Cardinality of app1 on node1, with tag "mapper"
+    Assert.assertEquals(1,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node1:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
+            Long::max));
+
+    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=min
+    Assert.assertEquals(1,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("mapper", "reducer"), Long::min));
+
+    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=max
+    Assert.assertEquals(2,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("mapper", "reducer"), Long::max));
+
+    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=sum
+    Assert.assertEquals(3,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("mapper", "reducer"), Long::sum));
+
+    // Get Cardinality by passing single tag.
+    Assert.assertEquals(1,
+        atm.getNodeCardinality(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), "mapper"));
+
+    Assert.assertEquals(2,
+        atm.getNodeCardinality(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), "reducer"));
+
+    // Get Cardinality of app1 on node2, with tag "no_existed/reducer", op=min
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("no_existed", "reducer"), Long::min));
+
+    // Get Cardinality of app1 on node2, with tag "<applicationId>", op=max
+    // (Expect this returns #containers from app1 on node2)
+    Assert.assertEquals(2,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet
+                .of(AllocationTagsNamespaces.APP_ID + TestUtils
+                    .getMockApplicationId(1).toString()), Long::max));
+
+    // Get Cardinality of app1 on node2, with empty tag set, op=max
+    Assert.assertEquals(2,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::max));
+
+    // Get Cardinality of all apps on node2, with empty tag set, op=sum
+    Assert.assertEquals(7,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"), null,
+            ImmutableSet.of(), Long::sum));
+
+    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
+    Assert.assertEquals(5,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::sum));
+
+    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
+    Assert.assertEquals(2,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(2), ImmutableSet.of(), Long::sum));
+
+    // Finish all containers:
+    atm.removeContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.removeContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.removeContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
+        ImmutableSet.of("service"));
+
+    atm.removeContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
+        ImmutableSet.of("reducer"));
+
+    atm.removeContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
+        ImmutableSet.of("service"));
+
+    // Expect all cardinality to be 0
+    // Get Cardinality of app1 on node1, with tag "mapper"
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node1:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of("mapper"),
+            Long::max));
+
+    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=min
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("mapper", "reducer"), Long::min));
+
+    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=max
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("mapper", "reducer"), Long::max));
+
+    // Get Cardinality of app1 on node2, with tag "mapper/reducer", op=sum
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of("mapper", "reducer"), Long::sum));
+
+    // Get Cardinality of app1 on node2, with tag "<applicationId>", op=max
+    // (Expect this returns #containers from app1 on node2)
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            ImmutableSet.of(TestUtils.getMockApplicationId(1).toString()),
+            Long::max));
+
+    Assert.assertEquals(0,
+        atm.getNodeCardinality(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1),
+            TestUtils.getMockApplicationId(1).toString()));
+
+    // Get Cardinality of app1 on node2, with empty tag set, op=max
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::max));
+
+    // Get Cardinality of all apps on node2, with empty tag set, op=sum
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"), null,
+            ImmutableSet.of(), Long::sum));
+
+    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(1), ImmutableSet.of(), Long::sum));
+
+    // Get Cardinality of app_1 on node2, with empty tag set, op=sum
+    Assert.assertEquals(0,
+        atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+            TestUtils.getMockApplicationId(2), ImmutableSet.of(), Long::sum));
+  }
+
+  @Test
+  public void testAllocationTagsManagerMemoryAfterCleanup()
+      throws InvalidAllocationTagsQueryException {
+    /**
+     * Make sure YARN cleans up all memory once container/app finishes.
+     */
+
+    AllocationTagsManager atm = new AllocationTagsManager();
+
+    // Add a bunch of containers
+    atm.addContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
+        ImmutableSet.of("service"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
+        ImmutableSet.of("reducer"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
+        ImmutableSet.of("service"));
+
+    // Remove all these containers
+    atm.removeContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.removeContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.removeContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
+        ImmutableSet.of("service"));
+
+    atm.removeContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
+        ImmutableSet.of("reducer"));
+
+    atm.removeContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
+        ImmutableSet.of("service"));
+
+    // Check internal data structure
+    Assert.assertEquals(0,
+        atm.getGlobalMapping().getNodeToTagsWithCount().size());
+    Assert.assertEquals(0, atm.getPerAppMappings().size());
+  }
+
+  @Test
+  public void testQueryCardinalityWithIllegalParameters()
+      throws InvalidAllocationTagsQueryException {
+    /**
+     * Make sure queries with illegal parameters (missing node-id or op) are
+     * rejected with InvalidAllocationTagsQueryException.
+     */
+
+    AllocationTagsManager atm = new AllocationTagsManager();
+
+    // Add a bunch of containers
+    atm.addContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 1),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 2),
+        ImmutableSet.of("mapper", "reducer"));
+
+    atm.addContainer(NodeId.fromString("node1:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 3),
+        ImmutableSet.of("service"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(1), TestUtils.getMockContainerId(1, 4),
+        ImmutableSet.of("reducer"));
+
+    atm.addContainer(NodeId.fromString("node2:1234"),
+        TestUtils.getMockApplicationId(2), TestUtils.getMockContainerId(2, 3),
+        ImmutableSet.of("service"));
+
+    // No node-id
+    boolean caughtException = false;
+    try {
+      atm.getNodeCardinalityByOp(null, TestUtils.getMockApplicationId(2),
+          ImmutableSet.of("mapper"), Long::min);
+    } catch (InvalidAllocationTagsQueryException e) {
+      caughtException = true;
+    }
+    Assert.assertTrue("should fail because of nodeId specified",
+        caughtException);
+
+    // No op
+    caughtException = false;
+    try {
+      atm.getNodeCardinalityByOp(NodeId.fromString("node2:1234"),
+          TestUtils.getMockApplicationId(2), ImmutableSet.of("mapper"), null);
+    } catch (InvalidAllocationTagsQueryException e) {
+      caughtException = true;
+    }
+    Assert.assertTrue("should fail because of nodeId specified",
+        caughtException);
+  }
+}
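
The asserts above pin down the semantics of getNodeCardinalityByOp: the
per-tag counts tracked for a node are reduced with the caller-supplied
LongBinaryOperator (Long::min, Long::max, Long::sum). Below is a minimal,
standalone sketch of that reduction, assuming a plain map of tag counts; the
real manager additionally tracks an implicit application-id tag per
container, which is why the empty-tag-set sums in the test come out larger.

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;

import java.util.Map;
import java.util.Set;
import java.util.function.LongBinaryOperator;

public class CardinalitySketch {
  // Reduces the counts of the requested tags with the given operator. An
  // empty tag set means "consider every tag tracked for the node".
  static long cardinality(Map<String, Long> tagsWithCount, Set<String> tags,
      LongBinaryOperator op) {
    Iterable<String> keys = tags.isEmpty() ? tagsWithCount.keySet() : tags;
    Long result = null;
    for (String tag : keys) {
      long count = tagsWithCount.getOrDefault(tag, 0L);
      result = (result == null) ? count : op.applyAsLong(result, count);
    }
    return result == null ? 0 : result;
  }

  public static void main(String[] args) {
    // Tag counts for app_1 on node2 in the test above: mapper=1, reducer=2.
    Map<String, Long> node2App1 = ImmutableMap.of("mapper", 1L, "reducer", 2L);
    Set<String> both = ImmutableSet.of("mapper", "reducer");
    System.out.println(cardinality(node2App1, both, Long::min)); // 1
    System.out.println(cardinality(node2App1, both, Long::max)); // 2
    System.out.println(cardinality(node2App1, both, Long::sum)); // 3
  }
}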

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
index 4b902a7..db749ac 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
@@ -74,7 +74,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWri
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
-import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
@@ -93,6 +92,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.TestSchedulerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[34/49] hadoop git commit: YARN-7580. ContainersMonitorImpl logged message lacks detail when exceeding memory limits. Contributed by Wilfred Spiegelenburg.

Posted by as...@apache.org.
YARN-7580. ContainersMonitorImpl logged message lacks detail when exceeding memory limits. Contributed by Wilfred Spiegelenburg.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b82049b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b82049b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b82049b4

Branch: refs/heads/YARN-6592
Commit: b82049b4f0065b76c3eb590d57eb5bf0ebc2f204
Parents: 6e3e1b8
Author: Miklos Szegedi <sz...@apache.org>
Authored: Fri Dec 29 12:35:49 2017 -0800
Committer: Miklos Szegedi <sz...@apache.org>
Committed: Fri Dec 29 12:49:37 2017 -0800

----------------------------------------------------------------------
 .../monitor/ContainersMonitorImpl.java            | 18 +++++++++++++-----
 .../monitor/TestContainersMonitor.java            |  4 ++--
 2 files changed, 15 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b82049b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
index 48ec147..bc28646 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
@@ -650,26 +650,34 @@ public class ContainersMonitorImpl extends AbstractService implements
       if (isVmemCheckEnabled()
               && isProcessTreeOverLimit(containerId.toString(),
               currentVmemUsage, curMemUsageOfAgedProcesses, vmemLimit)) {
+        // The current usage (age=0) is always higher than the aged usage. We
+        // do not show the aged size in the message, so we base the delta on
+        // the current usage.
+        long delta = currentVmemUsage - vmemLimit;
         // Container (the root process) is still alive and overflowing
         // memory.
         // Dump the process-tree and then clean it up.
         msg = formatErrorMessage("virtual",
                 formatUsageString(currentVmemUsage, vmemLimit,
                   currentPmemUsage, pmemLimit),
-                pId, containerId, pTree);
+                pId, containerId, pTree, delta);
         isMemoryOverLimit = true;
         containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_VMEM;
       } else if (isPmemCheckEnabled()
               && isProcessTreeOverLimit(containerId.toString(),
               currentPmemUsage, curRssMemUsageOfAgedProcesses,
               pmemLimit)) {
+        // The current usage (age=0) is always higher than the aged usage. We
+        // do not show the aged size in the message, so we base the delta on
+        // the current usage.
+        long delta = currentPmemUsage - pmemLimit;
         // Container (the root process) is still alive and overflowing
         // memory.
         // Dump the process-tree and then clean it up.
         msg = formatErrorMessage("physical",
                 formatUsageString(currentVmemUsage, vmemLimit,
                   currentPmemUsage, pmemLimit),
-                pId, containerId, pTree);
+                pId, containerId, pTree, delta);
         isMemoryOverLimit = true;
         containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_PMEM;
       }
@@ -726,11 +734,11 @@ public class ContainersMonitorImpl extends AbstractService implements
      */
     private String formatErrorMessage(String memTypeExceeded,
         String usageString, String pId, ContainerId containerId,
-        ResourceCalculatorProcessTree pTree) {
+        ResourceCalculatorProcessTree pTree, long delta) {
       return
         String.format("Container [pid=%s,containerID=%s] is " +
-            "running beyond %s memory limits. ",
-            pId, containerId, memTypeExceeded) +
+            "running %dB beyond the '%S' memory limit. ",
+            pId, containerId, delta, memTypeExceeded) +
         "Current usage: " + usageString +
         ". Killing container.\n" +
         "Dump of the process-tree for " + containerId + " :\n" +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b82049b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
index 5f72a4c..412b8cd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
@@ -332,8 +332,8 @@ public class TestContainersMonitor extends BaseContainerManagerTest {
     Assert.assertEquals(ContainerExitStatus.KILLED_EXCEEDED_VMEM,
         containerStatus.getExitStatus());
     String expectedMsgPattern =
-        "Container \\[pid=" + pid + ",containerID=" + cId
-            + "\\] is running beyond virtual memory limits. Current usage: "
+        "Container \\[pid=" + pid + ",containerID=" + cId + "\\] is running "
+            + "[0-9]+B beyond the 'VIRTUAL' memory limit. Current usage: "
             + "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B physical memory used; "
             + "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B virtual memory used. "
             + "Killing container.\nDump of the process-tree for "


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[13/49] hadoop git commit: YARN-7662. [ATSv2] Define new set of configurations for reader and collectors to bind (Rohith Sharma K S via Varun Saxena)

Posted by as...@apache.org.
YARN-7662. [ATSv2] Define new set of configurations for reader and collectors to bind (Rohith Sharma K S via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0aeb666
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0aeb666
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0aeb666

Branch: refs/heads/YARN-6592
Commit: c0aeb666a4d43aac196569d9ec6768d62139d2b9
Parents: fe5b057
Author: Varun Saxena <va...@apache.org>
Authored: Tue Dec 19 22:29:24 2017 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Tue Dec 19 22:29:24 2017 +0530

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     | 45 +++++++++++++++++++-
 .../hadoop/yarn/webapp/util/WebAppUtils.java    | 39 ++++++++++++++---
 .../src/main/resources/yarn-default.xml         | 24 +++++++++++
 .../security/TestTimelineAuthFilterForV2.java   |  3 +-
 .../AbstractTimelineReaderHBaseTestBase.java    |  2 +-
 .../collector/NodeTimelineCollectorManager.java | 23 +++++++---
 .../reader/TimelineReaderServer.java            | 25 ++++++++---
 .../reader/TestTimelineReaderServer.java        |  6 +--
 .../reader/TestTimelineReaderWebServices.java   |  2 +-
 .../src/site/markdown/TimelineServiceV2.md      |  6 +--
 10 files changed, 145 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0aeb666/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index e57f988..1b6bd0e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2389,6 +2389,9 @@ public class YarnConfiguration extends Configuration {
   /**
    * Settings for timeline service v2.0.
    */
+  public static final String TIMELINE_SERVICE_READER_PREFIX =
+      TIMELINE_SERVICE_PREFIX + "reader.";
+
   public static final String TIMELINE_SERVICE_WRITER_CLASS =
       TIMELINE_SERVICE_PREFIX + "writer.class";
 
@@ -2397,7 +2400,7 @@ public class YarnConfiguration extends Configuration {
           + ".storage.HBaseTimelineWriterImpl";
 
   public static final String TIMELINE_SERVICE_READER_CLASS =
-      TIMELINE_SERVICE_PREFIX + "reader.class";
+      TIMELINE_SERVICE_READER_PREFIX + "class";
 
   public static final String DEFAULT_TIMELINE_SERVICE_READER_CLASS =
       "org.apache.hadoop.yarn.server.timelineservice.storage" +
@@ -3422,6 +3425,46 @@ public class YarnConfiguration extends Configuration {
   public static final String TIMELINE_XFS_OPTIONS =
       TIMELINE_XFS_PREFIX + "xframe-options";
 
+  /**
+   * Settings for timeline reader.
+   */
+  public static final String TIMELINE_SERVICE_READER_BIND_HOST =
+      TIMELINE_SERVICE_READER_PREFIX + "bind-host";
+
+  public static final String TIMELINE_SERVICE_READER_WEBAPP_ADDRESS =
+      TIMELINE_SERVICE_READER_PREFIX + "webapp.address";
+  public static final String DEFAULT_TIMELINE_SERVICE_READER_WEBAPP_ADDRESS =
+      DEFAULT_TIMELINE_SERVICE_WEBAPP_ADDRESS;
+
+  public static final String TIMELINE_SERVICE_READER_WEBAPP_HTTPS_ADDRESS =
+      TIMELINE_SERVICE_READER_PREFIX + "webapp.https.address";
+  public static final String
+      DEFAULT_TIMELINE_SERVICE_READER_WEBAPP_HTTPS_ADDRESS =
+      DEFAULT_TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS;
+
+  /**
+   * Collector properties are marked as Private since the collector runs as
+   * an auxiliary service.
+   */
+  public static final String TIMELINE_SERVICE_COLLECTOR_PREFIX =
+      TIMELINE_SERVICE_PREFIX + "collector.";
+
+  @Private
+  public static final String TIMELINE_SERVICE_COLLECTOR_BIND_HOST =
+      TIMELINE_SERVICE_COLLECTOR_PREFIX + "bind-host";
+
+  @Private
+  public static final String TIMELINE_SERVICE_COLLECTOR_WEBAPP_ADDRESS =
+      TIMELINE_SERVICE_COLLECTOR_PREFIX + "webapp.address";
+  public static final String DEFAULT_TIMELINE_SERVICE_COLLECTOR_WEBAPP_ADDRESS =
+      DEFAULT_TIMELINE_SERVICE_WEBAPP_ADDRESS;
+
+  @Private
+  public static final String TIMELINE_SERVICE_COLLECTOR_WEBAPP_HTTPS_ADDRESS =
+      TIMELINE_SERVICE_COLLECTOR_PREFIX + "webapp.https.address";
+  public static final String
+      DEFAULT_TIMELINE_SERVICE_COLLECTOR_WEBAPP_HTTPS_ADDRESS =
+      DEFAULT_TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS;
+
   public YarnConfiguration() {
     super();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0aeb666/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
index 446f0a1..e62bf10 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
@@ -314,16 +314,41 @@ public class WebAppUtils {
   }
 
   public static String getAHSWebAppURLWithoutScheme(Configuration conf) {
-    return getTimelineReaderWebAppURL(conf);
-  }
-
-  public static String getTimelineReaderWebAppURL(Configuration conf) {
     if (YarnConfiguration.useHttps(conf)) {
       return conf.get(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS,
-        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS);
+          YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS);
     } else {
       return conf.get(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
-        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WEBAPP_ADDRESS);
+          YarnConfiguration.DEFAULT_TIMELINE_SERVICE_WEBAPP_ADDRESS);
+    }
+  }
+
+  public static String getTimelineReaderWebAppURLWithoutScheme(
+      Configuration conf) {
+    if (YarnConfiguration.useHttps(conf)) {
+      return conf
+          .get(YarnConfiguration.TIMELINE_SERVICE_READER_WEBAPP_HTTPS_ADDRESS,
+              YarnConfiguration.
+                  DEFAULT_TIMELINE_SERVICE_READER_WEBAPP_HTTPS_ADDRESS);
+    } else {
+      return conf.get(YarnConfiguration.TIMELINE_SERVICE_READER_WEBAPP_ADDRESS,
+          YarnConfiguration.
+              DEFAULT_TIMELINE_SERVICE_READER_WEBAPP_ADDRESS);
+    }
+  }
+
+  public static String getTimelineCollectorWebAppURLWithoutScheme(
+      Configuration conf) {
+    if (YarnConfiguration.useHttps(conf)) {
+      return conf.get(
+          YarnConfiguration.TIMELINE_SERVICE_COLLECTOR_WEBAPP_HTTPS_ADDRESS,
+          YarnConfiguration.
+              DEFAULT_TIMELINE_SERVICE_COLLECTOR_WEBAPP_HTTPS_ADDRESS);
+    } else {
+      return conf
+          .get(YarnConfiguration.TIMELINE_SERVICE_COLLECTOR_WEBAPP_ADDRESS,
+              YarnConfiguration.
+                  DEFAULT_TIMELINE_SERVICE_COLLECTOR_WEBAPP_ADDRESS);
     }
   }
 
@@ -342,7 +367,7 @@ public class WebAppUtils {
       return schemePrefix + url;
     }
   }
-  
+
   public static String getRunningLogURL(
       String nodeHttpAddress, String containerId, String user) {
     if (nodeHttpAddress == null || nodeHttpAddress.isEmpty() ||

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0aeb666/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 192f62e..d450eca 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3599,4 +3599,28 @@
     <value>0,1</value>
   </property>
 
+  <property>
+    <description>The http address of the timeline reader web application.</description>
+    <name>yarn.timeline-service.reader.webapp.address</name>
+    <value>${yarn.timeline-service.webapp.address}</value>
+  </property>
+
+  <property>
+    <description>The https address of the timeline reader web application.</description>
+    <name>yarn.timeline-service.reader.webapp.https.address</name>
+    <value>${yarn.timeline-service.webapp.https.address}</value>
+  </property>
+
+  <property>
+    <description>
+      The actual address the timeline reader will bind to. If this optional
+      address is set, the reader server will bind to this address and the port
+      specified in yarn.timeline-service.reader.webapp.address. This is most
+      useful for making the service listen on all interfaces by setting it to
+      0.0.0.0.
+    </description>
+    <name>yarn.timeline-service.reader.bind-host</name>
+    <value></value>
+  </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0aeb666/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
index 75f17fb..bb511d8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
@@ -162,7 +162,8 @@ public class TestTimelineAuthFilterForV2 {
       conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
       conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
           FileSystemTimelineWriterImpl.class, TimelineWriter.class);
-      conf.set(YarnConfiguration.TIMELINE_SERVICE_BIND_HOST, "localhost");
+      conf.set(YarnConfiguration.TIMELINE_SERVICE_READER_BIND_HOST,
+          "localhost");
       conf.set(FileSystemTimelineWriterImpl.TIMELINE_SERVICE_STORAGE_DIR_ROOT,
           TEST_ROOT_DIR.getAbsolutePath());
       conf.set("hadoop.proxyuser.HTTP.hosts", "*");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0aeb666/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
index 3519c3f..471fb6c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
@@ -79,7 +79,7 @@ public abstract class AbstractTimelineReaderHBaseTestBase {
       Configuration config = util.getConfiguration();
       config.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
       config.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
-      config.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
+      config.set(YarnConfiguration.TIMELINE_SERVICE_READER_WEBAPP_ADDRESS,
           "localhost:0");
       config.set(YarnConfiguration.RM_CLUSTER_ID, "cluster1");
       config.set(YarnConfiguration.TIMELINE_SERVICE_READER_CLASS,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0aeb666/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
index 68a68f0..696f4a3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
@@ -145,10 +145,9 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
 
   private void doSecureLogin() throws IOException {
     Configuration conf = getConfig();
-    InetSocketAddress addr = NetUtils.createSocketAddr(conf.getTrimmed(
-        YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
-            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_BIND_HOST), 0,
-                YarnConfiguration.TIMELINE_SERVICE_BIND_HOST);
+    String webAppURLWithoutScheme =
+        WebAppUtils.getTimelineCollectorWebAppURLWithoutScheme(conf);
+    InetSocketAddress addr = NetUtils.createSocketAddr(webAppURLWithoutScheme);
     SecurityUtil.login(conf, YarnConfiguration.TIMELINE_SERVICE_KEYTAB,
         YarnConfiguration.TIMELINE_SERVICE_PRINCIPAL, addr.getHostName());
   }
@@ -277,8 +276,20 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
         initializers, defaultInitializers, tokenMgrService);
     TimelineServerUtils.setTimelineFilters(
         conf, initializers, defaultInitializers);
-    String bindAddress = conf.get(YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
-        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_BIND_HOST) + ":0";
+
+    String bindAddress = null;
+    String host =
+        conf.getTrimmed(YarnConfiguration.TIMELINE_SERVICE_COLLECTOR_BIND_HOST);
+    if (host == null || host.isEmpty()) {
+      // if collector bind-host is not set, fall back to
+      // timeline-service.bind-host to maintain compatibility
+      bindAddress =
+          conf.get(YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
+              YarnConfiguration.DEFAULT_TIMELINE_SERVICE_BIND_HOST) + ":0";
+    } else {
+      bindAddress = host + ":0";
+    }
+
     try {
       HttpServer2.Builder builder = new HttpServer2.Builder()
           .setName("timeline")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0aeb666/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
index 5c049ea..3cc24ea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer2;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.HttpCrossOriginFilterInitializer;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.service.CompositeService;
@@ -63,6 +64,8 @@ public class TimelineReaderServer extends CompositeService {
 
   private HttpServer2 readerWebServer;
   private TimelineReaderManager timelineReaderManager;
+  private String webAppURLWithoutScheme;
+
 
   public TimelineReaderServer() {
     super(TimelineReaderServer.class.getName());
@@ -73,10 +76,10 @@ public class TimelineReaderServer extends CompositeService {
     if (!YarnConfiguration.timelineServiceV2Enabled(conf)) {
       throw new YarnException("timeline service v.2 is not enabled");
     }
-    InetSocketAddress bindAddr = conf.getSocketAddr(
-        YarnConfiguration.TIMELINE_SERVICE_ADDRESS,
-            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ADDRESS,
-                YarnConfiguration.DEFAULT_TIMELINE_SERVICE_PORT);
+    webAppURLWithoutScheme =
+        WebAppUtils.getTimelineReaderWebAppURLWithoutScheme(conf);
+    InetSocketAddress bindAddr =
+        NetUtils.createSocketAddr(webAppURLWithoutScheme);
     // Login from keytab if security is enabled.
     try {
       SecurityUtil.login(conf, YarnConfiguration.TIMELINE_SERVICE_KEYTAB,
@@ -170,9 +173,17 @@ public class TimelineReaderServer extends CompositeService {
   private void startTimelineReaderWebApp() {
     Configuration conf = getConfig();
     addFilters(conf);
-    String bindAddress = WebAppUtils.getWebAppBindURL(conf,
-        YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
-        WebAppUtils.getTimelineReaderWebAppURL(conf));
+
+    String hostProperty = YarnConfiguration.TIMELINE_SERVICE_READER_BIND_HOST;
+    String host = conf.getTrimmed(hostProperty);
+    if (host == null || host.isEmpty()) {
+      // if reader bind-host is not set, fall back to timeline-service.bind-host
+      // to maintain compatibility
+      hostProperty = YarnConfiguration.TIMELINE_SERVICE_BIND_HOST;
+    }
+    String bindAddress = WebAppUtils
+        .getWebAppBindURL(conf, hostProperty, webAppURLWithoutScheme);
+
     LOG.info("Instantiating TimelineReaderWebApp at " + bindAddress);
     try {
       HttpServer2.Builder builder = new HttpServer2.Builder()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0aeb666/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderServer.java
index bb96f37..6fc46cc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderServer.java
@@ -37,7 +37,7 @@ public class TestTimelineReaderServer {
     Configuration config = new YarnConfiguration();
     config.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
     config.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
-    config.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
+    config.set(YarnConfiguration.TIMELINE_SERVICE_READER_WEBAPP_ADDRESS,
         "localhost:0");
     config.setClass(YarnConfiguration.TIMELINE_SERVICE_READER_CLASS,
         FileSystemTimelineReaderImpl.class, TimelineReader.class);
@@ -61,7 +61,7 @@ public class TestTimelineReaderServer {
     Configuration conf = new YarnConfiguration();
     conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
     conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
-    conf.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
+    conf.set(YarnConfiguration.TIMELINE_SERVICE_READER_WEBAPP_ADDRESS,
         "localhost:0");
     conf.set(YarnConfiguration.TIMELINE_SERVICE_READER_CLASS,
         Object.class.getName());
@@ -75,7 +75,7 @@ public class TestTimelineReaderServer {
     Configuration conf = new YarnConfiguration();
     conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
     conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
-    conf.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
+    conf.set(YarnConfiguration.TIMELINE_SERVICE_READER_WEBAPP_ADDRESS,
         "localhost:0");
     conf.set(YarnConfiguration.TIMELINE_SERVICE_READER_CLASS,
         nonexistentTimelineReaderClass);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0aeb666/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServices.java
index f760834..03939ad 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServices.java
@@ -84,7 +84,7 @@ public class TestTimelineReaderWebServices {
       Configuration config = new YarnConfiguration();
       config.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
       config.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
-      config.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
+      config.set(YarnConfiguration.TIMELINE_SERVICE_READER_WEBAPP_ADDRESS,
           "localhost:0");
       config.set(YarnConfiguration.RM_CLUSTER_ID, "cluster1");
       config.setClass(YarnConfiguration.TIMELINE_SERVICE_READER_CLASS,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0aeb666/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
index 7c51ce0..022f76d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
@@ -133,9 +133,9 @@ New configuration parameters that are introduced with v.2 are marked bold.
 | Configuration Property | Description |
 |:---- |:---- |
 | `yarn.timeline-service.hostname` | The hostname of the Timeline service web application. Defaults to `0.0.0.0` |
-| `yarn.timeline-service.address` | Address for the Timeline server to start the RPC server. Defaults to `${yarn.timeline-service.hostname}:10200`. |
-| `yarn.timeline-service.webapp.address` | The http address of the Timeline service web application. Defaults to `${yarn.timeline-service.hostname}:8188`. |
-| `yarn.timeline-service.webapp.https.address` | The https address of the Timeline service web application. Defaults to `${yarn.timeline-service.hostname}:8190`. |
+| `yarn.timeline-service.reader.webapp.address` | The http address of the Timeline Reader web application. Defaults to `${yarn.timeline-service.hostname}:8188`. |
+| `yarn.timeline-service.reader.webapp.https.address` | The https address of the Timeline Reader web application. Defaults to `${yarn.timeline-service.hostname}:8190`. |
+| `yarn.timeline-service.reader.bind-host` | The actual address the timeline reader will bind to. If this optional address is set, the reader server will bind to this address and the port specified in yarn.timeline-service.reader.webapp.address. This is most useful for making the service listen on all interfaces by setting it to 0.0.0.0. |
 | **`yarn.timeline-service.hbase.configuration.file`** | Optional URL to an hbase-site.xml configuration file to be used to connect to the timeline-service hbase cluster. If empty or not specified, the HBase configuration will be loaded from the classpath. When specified, the values in this file override those found on the classpath. Defaults to `null`. |
 | **`yarn.timeline-service.writer.flush-interval-seconds`** | The setting that controls how often the timeline collector flushes the timeline writer. Defaults to `60`. |
 | **`yarn.timeline-service.app-collector.linger-period.ms`** | Time period for which the application collector remains alive in the NM after the application master container finishes. Defaults to `1000` (1 second). |
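
The bind-host fallback documented in the yarn.timeline-service.reader.bind-host
row mirrors what TimelineReaderServer does above; a minimal sketch of that
resolution order, using only keys from this patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ReaderBindHostSketch {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    String host =
        conf.getTrimmed(YarnConfiguration.TIMELINE_SERVICE_READER_BIND_HOST);
    // Fall back to the generic timeline-service bind-host when the new
    // reader-specific key is unset, to stay compatible with old configs.
    String hostProperty = (host == null || host.isEmpty())
        ? YarnConfiguration.TIMELINE_SERVICE_BIND_HOST
        : YarnConfiguration.TIMELINE_SERVICE_READER_BIND_HOST;
    System.out.println("binding host taken from: " + hostProperty);
  }
}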


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[10/49] hadoop git commit: Add 2.8.3 release jdiff files.

Posted by as...@apache.org.
Add 2.8.3 release jdiff files.

(cherry picked from commit c89f99aade575ab1f6a9836df719cda272293d90)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7f8caf5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7f8caf5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7f8caf5

Branch: refs/heads/YARN-6592
Commit: a7f8caf58ed47574861d455a9d9e1521e35c10b9
Parents: 25a36b7
Author: Junping Du <ju...@apache.org>
Authored: Mon Dec 18 22:30:32 2017 -0800
Committer: Junping Du <ju...@apache.org>
Committed: Mon Dec 18 22:39:01 2017 -0800

----------------------------------------------------------------------
 .../jdiff/Apache_Hadoop_Common_2.8.3.xml        | 38433 +++++++++++++++++
 .../jdiff/Apache_Hadoop_HDFS_2.8.3.xml          |   312 +
 .../Apache_Hadoop_MapReduce_Common_2.8.3.xml    |   113 +
 .../Apache_Hadoop_MapReduce_Core_2.8.3.xml      | 27495 ++++++++++++
 .../Apache_Hadoop_MapReduce_JobClient_2.8.3.xml |    16 +
 .../jdiff/Apache_Hadoop_YARN_Client_2.8.3.xml   |  2316 +
 .../jdiff/Apache_Hadoop_YARN_Common_2.8.3.xml   |  2665 ++
 .../Apache_Hadoop_YARN_Server_Common_2.8.3.xml  |   829 +
 8 files changed, 72179 insertions(+)
----------------------------------------------------------------------



---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[39/49] hadoop git commit: YARN-6596. Introduce Placement Constraint Manager module. (Konstantinos Karanasos via asuresh)

Posted by as...@apache.org.
YARN-6596. Introduce Placement Constraint Manager module. (Konstantinos Karanasos via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/058513de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/058513de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/058513de

Branch: refs/heads/YARN-6592
Commit: 058513de7db6ca3ca9432e479f2d89b420a56a79
Parents: 94429e3
Author: Arun Suresh <as...@apache.org>
Authored: Fri Dec 22 13:26:30 2017 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Fri Dec 29 17:29:15 2017 -0800

----------------------------------------------------------------------
 .../resourcemanager/RMActiveServiceContext.java |  15 +
 .../yarn/server/resourcemanager/RMContext.java  |   6 +
 .../server/resourcemanager/RMContextImpl.java   |  13 +
 .../server/resourcemanager/ResourceManager.java |  13 +
 .../MemoryPlacementConstraintManager.java       | 282 +++++++++++++++++++
 .../constraint/PlacementConstraintManager.java  | 151 ++++++++++
 .../PlacementConstraintManagerService.java      |  93 ++++++
 .../scheduler/constraint/package-info.java      |  29 ++
 .../TestPlacementConstraintManagerService.java  | 182 ++++++++++++
 9 files changed, 784 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/058513de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
index 4d0c230..06a1d00 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAlloca
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.distributed.QueueLimitCalculator;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
 import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
@@ -109,6 +110,7 @@ public class RMActiveServiceContext {
   private RMAppLifetimeMonitor rmAppLifetimeMonitor;
   private QueueLimitCalculator queueLimitCalculator;
   private AllocationTagsManager allocationTagsManager;
+  private PlacementConstraintManager placementConstraintManager;
 
   public RMActiveServiceContext() {
     queuePlacementManager = new PlacementManager();
@@ -413,6 +415,19 @@ public class RMActiveServiceContext {
 
   @Private
   @Unstable
+  public PlacementConstraintManager getPlacementConstraintManager() {
+    return placementConstraintManager;
+  }
+
+  @Private
+  @Unstable
+  public void setPlacementConstraintManager(
+      PlacementConstraintManager placementConstraintManager) {
+    this.placementConstraintManager = placementConstraintManager;
+  }
+
+  @Private
+  @Unstable
   public RMDelegatedNodeLabelsUpdater getRMDelegatedNodeLabelsUpdater() {
     return rmDelegatedNodeLabelsUpdater;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/058513de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
index 00da108..eb91a31 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAlloca
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.distributed.QueueLimitCalculator;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
@@ -171,4 +172,9 @@ public interface RMContext extends ApplicationMasterServiceContext {
   AllocationTagsManager getAllocationTagsManager();
 
   void setAllocationTagsManager(AllocationTagsManager allocationTagsManager);
+
+  PlacementConstraintManager getPlacementConstraintManager();
+
+  void setPlacementConstraintManager(
+      PlacementConstraintManager placementConstraintManager);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/058513de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
index da50ef8..0b6be72 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.distributed.QueueLimitCalculator;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
 import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
@@ -516,6 +517,18 @@ public class RMContextImpl implements RMContext {
   }
 
   @Override
+  public PlacementConstraintManager getPlacementConstraintManager() {
+    return activeServiceContext.getPlacementConstraintManager();
+  }
+
+  @Override
+  public void setPlacementConstraintManager(
+      PlacementConstraintManager placementConstraintManager) {
+    activeServiceContext
+        .setPlacementConstraintManager(placementConstraintManager);
+  }
+
+  @Override
   public RMDelegatedNodeLabelsUpdater getRMDelegatedNodeLabelsUpdater() {
     return activeServiceContext.getRMDelegatedNodeLabelsUpdater();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/058513de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index d71f224..2396b94 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -94,6 +94,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.MemoryPlacementConstraintManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManagerService;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer;
@@ -495,6 +497,12 @@ public class ResourceManager extends CompositeService implements Recoverable {
   protected AllocationTagsManager createAllocationTagsManager() {
     return new AllocationTagsManager(this.rmContext);
   }
+
+  protected PlacementConstraintManagerService
+      createPlacementConstraintManager() {
+    // Use the in-memory Placement Constraint Manager.
+    return new MemoryPlacementConstraintManager();
+  }
   
   protected DelegationTokenRenewer createDelegationTokenRenewer() {
     return new DelegationTokenRenewer();
@@ -618,6 +626,11 @@ public class ResourceManager extends CompositeService implements Recoverable {
           createAllocationTagsManager();
       rmContext.setAllocationTagsManager(allocationTagsManager);
 
+      PlacementConstraintManagerService placementConstraintManager =
+          createPlacementConstraintManager();
+      addService(placementConstraintManager);
+      rmContext.setPlacementConstraintManager(placementConstraintManager);
+
       RMDelegatedNodeLabelsUpdater delegatedNodeLabelsUpdater =
           createRMDelegatedNodeLabelsUpdater();
       if (delegatedNodeLabelsUpdater != null) {

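Since createPlacementConstraintManager() is a protected factory method, a
deployment can swap in a different constraint store by subclassing the
ResourceManager. A minimal sketch; CustomResourceManager and
ZKPlacementConstraintManager are hypothetical names, not part of this patch:

    // Hypothetical RM subclass; only the factory method is overridden.
    public class CustomResourceManager extends ResourceManager {
      @Override
      protected PlacementConstraintManagerService
          createPlacementConstraintManager() {
        // Any PlacementConstraintManagerService subclass works here; this
        // patch itself only ships the in-memory implementation.
        return new ZKPlacementConstraintManager(); // hypothetical store
      }
    }
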
http://git-wip-us.apache.org/repos/asf/hadoop/blob/058513de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/MemoryPlacementConstraintManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/MemoryPlacementConstraintManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/MemoryPlacementConstraintManager.java
new file mode 100644
index 0000000..ceff6f6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/MemoryPlacementConstraintManager.java
@@ -0,0 +1,282 @@
+/*
+ *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * In memory implementation of the {@link PlacementConstraintManagerService}.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class MemoryPlacementConstraintManager
+    extends PlacementConstraintManagerService {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(MemoryPlacementConstraintManager.class);
+
+  private ReentrantReadWriteLock.ReadLock readLock;
+  private ReentrantReadWriteLock.WriteLock writeLock;
+
+  /**
+   * Stores the global constraints that will be manipulated by the cluster
+   * admin. The key of each entry is the tag that will enable the corresponding
+   * constraint.
+   */
+  private Map<String, PlacementConstraint> globalConstraints;
+  /**
+   * Stores the constraints for each application, along with the allocation tags
+   * that will enable each of the constraints for a given application.
+   */
+  private Map<ApplicationId, Map<String, PlacementConstraint>> appConstraints;
+
+  public MemoryPlacementConstraintManager() {
+    this.globalConstraints = new HashMap<>();
+    this.appConstraints = new HashMap<>();
+    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+    readLock = lock.readLock();
+    writeLock = lock.writeLock();
+  }
+
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    super.serviceInit(conf);
+  }
+
+  @Override
+  public void registerApplication(ApplicationId appId,
+      Map<Set<String>, PlacementConstraint> constraintMap) {
+    // Check if app already exists. If not, prepare its constraint map.
+    Map<String, PlacementConstraint> constraintsForApp = new HashMap<>();
+    try {
+      readLock.lock();
+      if (appConstraints.get(appId) != null) {
+        LOG.warn("Application {} has already been registered.", appId);
+        return;
+      }
+      // Go over each sourceTag-constraint pair, validate it, and add it to the
+      // constraint map for this app.
+      for (Map.Entry<Set<String>, PlacementConstraint> entry : constraintMap
+          .entrySet()) {
+        Set<String> sourceTags = entry.getKey();
+        PlacementConstraint constraint = entry.getValue();
+        if (validateConstraint(sourceTags, constraint)) {
+          String sourceTag = getValidSourceTag(sourceTags);
+          constraintsForApp.put(sourceTag, constraint);
+        }
+      }
+    } finally {
+      readLock.unlock();
+    }
+
+    if (constraintsForApp.isEmpty()) {
+      LOG.info("Application {} was registered, but no constraints were added.",
+          appId);
+    }
+    // Update appConstraints; guard against a concurrent duplicate register.
+    try {
+      writeLock.lock();
+      appConstraints.putIfAbsent(appId, constraintsForApp);
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  @Override
+  public void addConstraint(ApplicationId appId, Set<String> sourceTags,
+      PlacementConstraint placementConstraint, boolean replace) {
+    try {
+      writeLock.lock();
+      Map<String, PlacementConstraint> constraintsForApp =
+          appConstraints.get(appId);
+      if (constraintsForApp == null) {
+        LOG.info("Cannot add constraint to application {}, as it has not "
+            + "been registered yet.", appId);
+        return;
+      }
+
+      addConstraintToMap(constraintsForApp, sourceTags, placementConstraint,
+          replace);
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  @Override
+  public void addGlobalConstraint(Set<String> sourceTags,
+      PlacementConstraint placementConstraint, boolean replace) {
+    try {
+      writeLock.lock();
+      addConstraintToMap(globalConstraints, sourceTags, placementConstraint,
+          replace);
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  /**
+   * Helper method that adds a constraint to a map for a given source tag.
+   * Assumes there is already a lock on the constraint map.
+   *
+   * @param constraintMap constraint map to which the constraint will be added
+   * @param sourceTags the source tags that will enable this constraint
+   * @param placementConstraint the new constraint to be added
+   * @param replace if true, an existing constraint for these sourceTags will be
+   *          replaced with the new one
+   */
+  private void addConstraintToMap(
+      Map<String, PlacementConstraint> constraintMap, Set<String> sourceTags,
+      PlacementConstraint placementConstraint, boolean replace) {
+    if (validateConstraint(sourceTags, placementConstraint)) {
+      String sourceTag = getValidSourceTag(sourceTags);
+      if (constraintMap.get(sourceTag) == null || replace) {
+        if (replace) {
+          LOG.info("Replacing the constraint associated with tag {} with {}.",
+              sourceTag, placementConstraint);
+        }
+        constraintMap.put(sourceTag, placementConstraint);
+      } else {
+        LOG.info("Constraint {} will not be added. There is already a "
+                + "constraint associated with tag {}.",
+            placementConstraint, sourceTag);
+      }
+    }
+  }
+
+  @Override
+  public Map<Set<String>, PlacementConstraint> getConstraints(
+      ApplicationId appId) {
+    try {
+      readLock.lock();
+      if (appConstraints.get(appId) == null) {
+        LOG.info("Application {} is not registered in the Placement "
+            + "Constraint Manager.", appId);
+        return null;
+      }
+
+      // Copy to a new map and return an unmodifiable version of it.
+      // Each key of the map is a set with a single source tag.
+      Map<Set<String>, PlacementConstraint> constraintMap =
+          appConstraints.get(appId).entrySet().stream()
+              .collect(Collectors.toMap(
+                  e -> Stream.of(e.getKey()).collect(Collectors.toSet()),
+                  e -> e.getValue()));
+
+      return Collections.unmodifiableMap(constraintMap);
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  @Override
+  public PlacementConstraint getConstraint(ApplicationId appId,
+      Set<String> sourceTags) {
+    if (!validateSourceTags(sourceTags)) {
+      return null;
+    }
+    String sourceTag = getValidSourceTag(sourceTags);
+    try {
+      readLock.lock();
+      if (appConstraints.get(appId) == null) {
+        LOG.info("Application {} is not registered in the Placement "
+            + "Constraint Manager.", appId);
+        return null;
+      }
+      // TODO: Merge this constraint with the global one for this tag, if one
+      // exists.
+      return appConstraints.get(appId).get(sourceTag);
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  @Override
+  public PlacementConstraint getGlobalConstraint(Set<String> sourceTags) {
+    if (!validateSourceTags(sourceTags)) {
+      return null;
+    }
+    String sourceTag = getValidSourceTag(sourceTags);
+    try {
+      readLock.lock();
+      return globalConstraints.get(sourceTag);
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  @Override
+  public void unregisterApplication(ApplicationId appId) {
+    try {
+      writeLock.lock();
+      appConstraints.remove(appId);
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  @Override
+  public void removeGlobalConstraint(Set<String> sourceTags) {
+    if (!validateSourceTags(sourceTags)) {
+      return;
+    }
+    String sourceTag = getValidSourceTag(sourceTags);
+    try {
+      writeLock.lock();
+      globalConstraints.remove(sourceTag);
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  @Override
+  public int getNumRegisteredApplications() {
+    try {
+      readLock.lock();
+      return appConstraints.size();
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  @Override
+  public int getNumGlobalConstraints() {
+    try {
+      readLock.lock();
+      return globalConstraints.size();
+    } finally {
+      readLock.unlock();
+    }
+  }
+}

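For orientation, a minimal usage sketch of the manager above (imports
elided; NODE, targetIn and allocationTag are static imports from
PlacementConstraints, and the app id and tags are illustrative values):

    PlacementConstraintManager pcm = new MemoryPlacementConstraintManager();
    ApplicationId appId =
        ApplicationId.newInstance(System.currentTimeMillis(), 1);

    // Register the app with one constraint, enabled by the "spark" tag.
    Map<Set<String>, PlacementConstraint> constraints = new HashMap<>();
    constraints.put(Collections.singleton("spark"),
        PlacementConstraints.build(targetIn(NODE, allocationTag("hbase-m"))));
    pcm.registerApplication(appId, constraints);

    // Look up the constraint that the "spark" tag enables, then clean up.
    PlacementConstraint c =
        pcm.getConstraint(appId, Collections.singleton("spark"));
    pcm.unregisterApplication(appId);
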
http://git-wip-us.apache.org/repos/asf/hadoop/blob/058513de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintManager.java
new file mode 100644
index 0000000..7725d0d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintManager.java
@@ -0,0 +1,151 @@
+/*
+ *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+
+/**
+ * Interface for storing and retrieving placement constraints (see
+ * {@link PlacementConstraint}).
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public interface PlacementConstraintManager {
+
+  /**
+   * Register all placement constraints of an application.
+   *
+   * @param appId the application ID
+   * @param constraintMap the map of allocation tags to constraints for this
+   *          application
+   */
+  void registerApplication(ApplicationId appId,
+      Map<Set<String>, PlacementConstraint> constraintMap);
+
+  /**
+   * Add a placement constraint for a given application and a given set of
+   * (source) allocation tags. The constraint will be used on Scheduling
+   * Requests that carry this set of allocation tags.
+   * TODO: Support merge and not only replace when adding a constraint.
+   *
+   * @param appId the application ID
+   * @param sourceTags the set of allocation tags that will enable this
+   *          constraint
+   * @param placementConstraint the constraint
+   * @param replace if true, an existing constraint for these tags will be
+   *          replaced by the given one
+   */
+  void addConstraint(ApplicationId appId, Set<String> sourceTags,
+      PlacementConstraint placementConstraint, boolean replace);
+
+  /**
+   * Add a placement constraint that will be used globally. These constraints
+   * are added by the cluster administrator.
+   * TODO: Support merge and not only replace when adding a constraint.
+   *
+   * @param sourceTags the allocation tags that will enable this constraint
+   * @param placementConstraint the constraint
+   * @param replace if true, an existing constraint for these tags will be
+   *          replaced by the given one
+   */
+  void addGlobalConstraint(Set<String> sourceTags,
+      PlacementConstraint placementConstraint, boolean replace);
+
+  /**
+   * Retrieve all constraints for a given application, along with the allocation
+   * tags that enable each constraint.
+   *
+   * @param appId the application ID
+   * @return the constraints for this application with the associated tags
+   */
+  Map<Set<String>, PlacementConstraint> getConstraints(ApplicationId appId);
+
+  /**
+   * Retrieve the placement constraint that is associated with a set of
+   * allocation tags for a given application.
+   *
+   * @param appId the application ID
+   * @param sourceTags the allocation tags that enable this constraint
+   * @return the constraint
+   */
+  PlacementConstraint getConstraint(ApplicationId appId,
+      Set<String> sourceTags);
+
+  /**
+   * Retrieve a global constraint that is associated with a given set of
+   * allocation tags.
+   *
+   * @param sourceTags the allocation tags that enable this constraint
+   * @return the constraint
+   */
+  PlacementConstraint getGlobalConstraint(Set<String> sourceTags);
+
+  /**
+   * Remove the constraints that correspond to a given application.
+   *
+   * @param appId the application that will be removed.
+   */
+  void unregisterApplication(ApplicationId appId);
+
+  /**
+   * Remove a global constraint that is associated with the given allocation
+   * tags.
+   *
+   * @param sourceTags the allocation tags
+   */
+  void removeGlobalConstraint(Set<String> sourceTags);
+
+  /**
+   * Returns the number of currently registered applications in the Placement
+   * Constraint Manager.
+   *
+   * @return number of registered applications.
+   */
+  int getNumRegisteredApplications();
+
+  /**
+   * Returns the number of global constraints registered in the Placement
+   * Constraint Manager.
+   *
+   * @return number of global constraints.
+   */
+  int getNumGlobalConstraints();
+
+  /**
+   * Validate a placement constraint and the set of allocation tags that will
+   * enable it.
+   *
+   * @param sourceTags the associated allocation tags
+   * @param placementConstraint the constraint
+   * @return true if constraint and tags are valid
+   */
+  default boolean validateConstraint(Set<String> sourceTags,
+      PlacementConstraint placementConstraint) {
+    return true;
+  }
+
+}

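The global-constraint half of the interface mirrors the per-application
calls; a short sketch against the in-memory implementation (static imports
from PlacementConstraints assumed, tag and cardinality values illustrative):

    PlacementConstraintManager pcm = new MemoryPlacementConstraintManager();
    Set<String> zkTag = Collections.singleton("zk");

    // A cluster admin installs a cluster-wide constraint for the "zk" tag;
    // replace=true overwrites any constraint already bound to the tag.
    pcm.addGlobalConstraint(zkTag,
        PlacementConstraints.build(
            targetCardinality(RACK, 2, 10, allocationTag("zk"))),
        true);
    PlacementConstraint global = pcm.getGlobalConstraint(zkTag);
    pcm.removeGlobalConstraint(zkTag);
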
http://git-wip-us.apache.org/repos/asf/hadoop/blob/058513de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintManagerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintManagerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintManagerService.java
new file mode 100644
index 0000000..967f251
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintManagerService.java
@@ -0,0 +1,93 @@
+/*
+ *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+
+/**
+ * An abstract service implementing the {@link PlacementConstraintManager} interface.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public abstract class PlacementConstraintManagerService extends AbstractService
+    implements PlacementConstraintManager {
+
+  protected static final Log LOG =
+      LogFactory.getLog(PlacementConstraintManagerService.class);
+
+  private PlacementConstraintManager placementConstraintManager = null;
+
+  public PlacementConstraintManagerService() {
+    super(PlacementConstraintManagerService.class.getName());
+  }
+
+  @Override
+  public boolean validateConstraint(Set<String> sourceTags,
+      PlacementConstraint placementConstraint) {
+    if (!validateSourceTags(sourceTags)) {
+      return false;
+    }
+    // TODO: Perform actual validation of the constraint (in YARN-6621).
+    // TODO: Perform satisfiability check for constraint.
+    return true;
+  }
+
+  /**
+   * Validates whether the allocation tags that will enable a constraint have
+   * the expected format. At the moment we support a single allocation tag per
+   * constraint.
+   *
+   * @param sourceTags the source allocation tags
+   * @return true if the tags have the expected format
+   */
+  protected boolean validateSourceTags(Set<String> sourceTags) {
+    if (sourceTags.isEmpty()) {
+      LOG.warn("A placement constraint cannot be associated with an empty "
+          + "set of tags.");
+      return false;
+    }
+    if (sourceTags.size() > 1) {
+      LOG.warn("Only a single tag can be associated with a placement "
+          + "constraint currently.");
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * This method will return a single allocation tag. It should be called after
+   * validating the tags by calling {@link #validateSourceTags}.
+   *
+   * @param sourceTags the source allocation tags
+   * @return the single source tag
+   */
+  protected String getValidSourceTag(Set<String> sourceTags) {
+    return sourceTags.iterator().next();
+  }
+
+}

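To make the single-tag restriction concrete, a sketch of how
validateConstraint() classifies inputs via validateSourceTags() (pcm is any
concrete PlacementConstraintManagerService, c any PlacementConstraint):

    pcm.validateConstraint(Collections.emptySet(), c);          // false: no tag
    pcm.validateConstraint(Collections.singleton("spark"), c);  // true
    pcm.validateConstraint(                                     // false: >1 tag
        new HashSet<>(Arrays.asList("hbase-m", "hbase-sec")), c);
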
http://git-wip-us.apache.org/repos/asf/hadoop/blob/058513de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/package-info.java
new file mode 100644
index 0000000..cbb7a55
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/package-info.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint
+ * contains classes related to scheduling containers using placement
+ * constraints.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/058513de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintManagerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintManagerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintManagerService.java
new file mode 100644
index 0000000..abcab1a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementConstraintManagerService.java
@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.RACK;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetCardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetNotIn;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.nodeAttribute;
+
+import java.util.AbstractMap.SimpleEntry;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Unit tests for {@link PlacementConstraintManagerService}.
+ */
+public class TestPlacementConstraintManagerService {
+
+  private PlacementConstraintManagerService pcm;
+
+  protected PlacementConstraintManagerService createPCM() {
+    return new MemoryPlacementConstraintManager();
+  }
+
+  private ApplicationId appId1, appId2;
+  private PlacementConstraint c1, c2, c3, c4;
+  private Set<String> sourceTag1, sourceTag2, sourceTag3, sourceTag4;
+  private Map<Set<String>, PlacementConstraint> constraintMap1, constraintMap2;
+
+  @Before
+  public void before() {
+    this.pcm = createPCM();
+
+    // Build appIDs, constraints, source tags, and constraint map.
+    long ts = System.currentTimeMillis();
+    appId1 = BuilderUtils.newApplicationId(ts, 123);
+    appId2 = BuilderUtils.newApplicationId(ts, 234);
+
+    c1 = PlacementConstraints.build(targetIn(NODE, allocationTag("hbase-m")));
+    c2 = PlacementConstraints.build(targetIn(RACK, allocationTag("hbase-rs")));
+    c3 = PlacementConstraints
+        .build(targetNotIn(NODE, nodeAttribute("java", "1.8")));
+    c4 = PlacementConstraints
+        .build(targetCardinality(RACK, 2, 10, allocationTag("zk")));
+
+    sourceTag1 = new HashSet<>(Arrays.asList("spark"));
+    sourceTag2 = new HashSet<>(Arrays.asList("zk"));
+    sourceTag3 = new HashSet<>(Arrays.asList("storm"));
+    sourceTag4 = new HashSet<>(Arrays.asList("hbase-m", "hbase-sec"));
+
+    constraintMap1 = Stream
+        .of(new SimpleEntry<>(sourceTag1, c1),
+            new SimpleEntry<>(sourceTag2, c2))
+        .collect(Collectors.toMap(SimpleEntry::getKey, SimpleEntry::getValue));
+
+    constraintMap2 = Stream.of(new SimpleEntry<>(sourceTag3, c4))
+        .collect(Collectors.toMap(SimpleEntry::getKey, SimpleEntry::getValue));
+  }
+
+  @Test
+  public void testRegisterUnregisterApps() {
+    Assert.assertEquals(0, pcm.getNumRegisteredApplications());
+
+    // Register two applications.
+    pcm.registerApplication(appId1, constraintMap1);
+    Assert.assertEquals(1, pcm.getNumRegisteredApplications());
+    Map<Set<String>, PlacementConstraint> constrMap =
+        pcm.getConstraints(appId1);
+    Assert.assertNotNull(constrMap);
+    Assert.assertEquals(2, constrMap.size());
+    Assert.assertNotNull(constrMap.get(sourceTag1));
+    Assert.assertNotNull(constrMap.get(sourceTag2));
+
+    pcm.registerApplication(appId2, constraintMap2);
+    Assert.assertEquals(2, pcm.getNumRegisteredApplications());
+    constrMap = pcm.getConstraints(appId2);
+    Assert.assertNotNull(constrMap);
+    Assert.assertEquals(1, constrMap.size());
+    Assert.assertNotNull(constrMap.get(sourceTag3));
+    Assert.assertNull(constrMap.get(sourceTag2));
+
+    // Try to register the same app again.
+    pcm.registerApplication(appId2, constraintMap1);
+    Assert.assertEquals(2, pcm.getNumRegisteredApplications());
+
+    // Unregister appId1.
+    pcm.unregisterApplication(appId1);
+    Assert.assertEquals(1, pcm.getNumRegisteredApplications());
+    Assert.assertNull(pcm.getConstraints(appId1));
+    Assert.assertNotNull(pcm.getConstraints(appId2));
+  }
+
+  @Test
+  public void testAddConstraint() {
+    // Cannot add constraint to unregistered app.
+    Assert.assertEquals(0, pcm.getNumRegisteredApplications());
+    pcm.addConstraint(appId1, sourceTag1, c1, false);
+    Assert.assertEquals(0, pcm.getNumRegisteredApplications());
+
+    // Register application.
+    pcm.registerApplication(appId1, new HashMap<>());
+    Assert.assertEquals(1, pcm.getNumRegisteredApplications());
+    Assert.assertEquals(0, pcm.getConstraints(appId1).size());
+
+    // Add two constraints.
+    pcm.addConstraint(appId1, sourceTag1, c1, false);
+    pcm.addConstraint(appId1, sourceTag2, c3, false);
+    Assert.assertEquals(2, pcm.getConstraints(appId1).size());
+
+    // Constraint for sourceTag1 should not be replaced.
+    pcm.addConstraint(appId1, sourceTag1, c2, false);
+    Assert.assertEquals(2, pcm.getConstraints(appId1).size());
+    Assert.assertEquals(c1, pcm.getConstraint(appId1, sourceTag1));
+    Assert.assertNotEquals(c2, pcm.getConstraint(appId1, sourceTag1));
+
+    // Now c2 should replace c1 for sourceTag1.
+    pcm.addConstraint(appId1, sourceTag1, c2, true);
+    Assert.assertEquals(2, pcm.getConstraints(appId1).size());
+    Assert.assertEquals(c2, pcm.getConstraint(appId1, sourceTag1));
+  }
+
+  @Test
+  public void testGlobalConstraints() {
+    Assert.assertEquals(0, pcm.getNumGlobalConstraints());
+    pcm.addGlobalConstraint(sourceTag1, c1, false);
+    Assert.assertEquals(1, pcm.getNumGlobalConstraints());
+    Assert.assertNotNull(pcm.getGlobalConstraint(sourceTag1));
+
+    // Constraint for sourceTag1 should not be replaced.
+    pcm.addGlobalConstraint(sourceTag1, c2, false);
+    Assert.assertEquals(1, pcm.getNumGlobalConstraints());
+    Assert.assertEquals(c1, pcm.getGlobalConstraint(sourceTag1));
+    Assert.assertNotEquals(c2, pcm.getGlobalConstraint(sourceTag1));
+
+    // Now c2 should replace c1 for sourceTag1.
+    pcm.addGlobalConstraint(sourceTag1, c2, true);
+    Assert.assertEquals(1, pcm.getNumGlobalConstraints());
+    Assert.assertEquals(c2, pcm.getGlobalConstraint(sourceTag1));
+
+    pcm.removeGlobalConstraint(sourceTag1);
+    Assert.assertEquals(0, pcm.getNumGlobalConstraints());
+  }
+
+  @Test
+  public void testValidateConstraint() {
+    // At the moment we only disallow multiple source tags to be associated with
+    // a constraint. TODO: More tests to be added for YARN-6621.
+    Assert.assertTrue(pcm.validateConstraint(sourceTag1, c1));
+    Assert.assertFalse(pcm.validateConstraint(sourceTag4, c1));
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[47/49] hadoop git commit: YARN-7669. API and interface modifications for placement constraint processor. (asuresh)

Posted by as...@apache.org.
YARN-7669. API and interface modifications for placement constraint processor. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6062844e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6062844e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6062844e

Branch: refs/heads/YARN-6592
Commit: 6062844ed0b53e4c53c008b3dfb1a576dec60f34
Parents: b6181cd
Author: Arun Suresh <as...@apache.org>
Authored: Tue Dec 19 22:47:46 2017 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Fri Dec 29 17:29:15 2017 -0800

----------------------------------------------------------------------
 .../yarn/ams/ApplicationMasterServiceUtils.java |  16 +
 .../api/protocolrecords/AllocateResponse.java   |  23 +
 .../api/records/RejectedSchedulingRequest.java  |  70 +++
 .../yarn/api/records/RejectionReason.java       |  44 ++
 .../src/main/proto/yarn_protos.proto            |  10 +
 .../src/main/proto/yarn_service_protos.proto    |   1 +
 .../impl/pb/AllocateResponsePBImpl.java         |  85 ++++
 .../yarn/api/records/impl/pb/ProtoUtils.java    |  16 +
 .../pb/RejectedSchedulingRequestPBImpl.java     | 148 +++++++
 .../records/impl/pb/ResourceSizingPBImpl.java   |   8 +
 .../impl/pb/SchedulingRequestPBImpl.java        |  11 +
 .../hadoop/yarn/api/TestPBImplRecords.java      |   2 +
 .../resourcemanager/RMActiveServiceContext.java |   2 +-
 .../yarn/server/resourcemanager/RMContext.java  |   2 +-
 .../server/resourcemanager/RMContextImpl.java   |   2 +-
 .../server/resourcemanager/ResourceManager.java |   2 +-
 .../constraint/AllocationTagsManager.java       | 431 -------------------
 .../constraint/AllocationTagsNamespaces.java    |  31 --
 .../InvalidAllocationTagsQueryException.java    |  35 --
 .../constraint/AllocationTagsManager.java       | 431 +++++++++++++++++++
 .../constraint/AllocationTagsNamespaces.java    |  31 ++
 .../InvalidAllocationTagsQueryException.java    |  35 ++
 .../api/ConstraintPlacementAlgorithm.java       |  43 ++
 .../api/ConstraintPlacementAlgorithmInput.java  |  32 ++
 .../api/ConstraintPlacementAlgorithmOutput.java |  58 +++
 ...traintPlacementAlgorithmOutputCollector.java |  32 ++
 .../constraint/api/PlacedSchedulingRequest.java |  79 ++++
 .../constraint/api/SchedulingResponse.java      |  70 +++
 .../scheduler/constraint/api/package-info.java  |  28 ++
 .../constraint/TestAllocationTagsManager.java   | 328 --------------
 .../rmcontainer/TestRMContainerImpl.java        |   2 +-
 .../scheduler/capacity/TestUtils.java           |   2 +-
 .../constraint/TestAllocationTagsManager.java   | 328 ++++++++++++++
 .../scheduler/fifo/TestFifoScheduler.java       |   2 +-
 34 files changed, 1608 insertions(+), 832 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceUtils.java
index 476da8b..8bdfaf3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceUtils.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.ams;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
+import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerError;
 import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 
@@ -86,4 +87,19 @@ public final class ApplicationMasterServiceUtils {
     }
     allocateResponse.setAllocatedContainers(allocatedContainers);
   }
+
+  /**
+   * Add rejected Scheduling Requests to {@link AllocateResponse}.
+   * @param allocateResponse Allocate Response.
+   * @param rejectedRequests Rejected SchedulingRequests.
+   */
+  public static void addToRejectedSchedulingRequests(
+      AllocateResponse allocateResponse,
+      List<RejectedSchedulingRequest> rejectedRequests) {
+    if (allocateResponse.getRejectedSchedulingRequests() != null
+        && !allocateResponse.getRejectedSchedulingRequests().isEmpty()) {
+      rejectedRequests.addAll(allocateResponse.getRejectedSchedulingRequests());
+    }
+    allocateResponse.setRejectedSchedulingRequests(rejectedRequests);
+  }
 }

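A sketch of how the helper above is meant to be used when assembling a
response on the RM side (schedulingRequest is assumed to be an existing
SchedulingRequest; Records.newRecord returns the PB-backed implementation):

    AllocateResponse response = Records.newRecord(AllocateResponse.class);
    List<RejectedSchedulingRequest> rejected = new ArrayList<>();
    rejected.add(RejectedSchedulingRequest.newInstance(
        RejectionReason.COULD_NOT_PLACE_ON_NODE, schedulingRequest));
    // Appends any rejections the response already carries, then sets the
    // combined list back on the response.
    ApplicationMasterServiceUtils.addToRejectedSchedulingRequests(
        response, rejected);
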
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
index 655c6dc..52c30e2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.api.protocolrecords;
 
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -35,6 +36,7 @@ import org.apache.hadoop.yarn.api.records.NMToken;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.PreemptionMessage;
 import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.UpdateContainerError;
@@ -410,6 +412,27 @@ public abstract class AllocateResponse {
   public abstract void setContainersFromPreviousAttempts(
       List<Container> containersFromPreviousAttempt);
 
+  /**
+   * Get a list of all SchedulingRequests that the RM has rejected between
+   * this allocate call and the previous one.
+   * @return List of RejectedSchedulingRequests.
+   */
+  @Public
+  @Unstable
+  public List<RejectedSchedulingRequest> getRejectedSchedulingRequests() {
+    return Collections.emptyList();
+  }
+
+  /**
+   * Add a list of rejected SchedulingRequests to the AllocateResponse.
+   * @param rejectedRequests List of Rejected Scheduling Requests.
+   */
+  @Private
+  @Unstable
+  public void setRejectedSchedulingRequests(
+      List<RejectedSchedulingRequest> rejectedRequests) {
+  }
+
   @Private
   @Unstable
   public static AllocateResponseBuilder newBuilder() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/RejectedSchedulingRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/RejectedSchedulingRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/RejectedSchedulingRequest.java
new file mode 100644
index 0000000..6e2d95b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/RejectedSchedulingRequest.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * This encapsulates a Rejected SchedulingRequest. It contains the offending
+ * Scheduling Request along with the reason for rejection.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public abstract class RejectedSchedulingRequest {
+
+  /**
+   * Create new RejectedSchedulingRequest.
+   * @param reason Rejection Reason.
+   * @param request Rejected Scheduling Request.
+   * @return RejectedSchedulingRequest.
+   */
+  public static RejectedSchedulingRequest newInstance(RejectionReason reason,
+      SchedulingRequest request) {
+    RejectedSchedulingRequest instance =
+        Records.newRecord(RejectedSchedulingRequest.class);
+    instance.setReason(reason);
+    instance.setRequest(request);
+    return instance;
+  }
+
+  /**
+   * Get Rejection Reason.
+   * @return Rejection reason.
+   */
+  public abstract RejectionReason getReason();
+
+  /**
+   * Set Rejection Reason.
+   * @param reason Rejection Reason.
+   */
+  public abstract void setReason(RejectionReason reason);
+
+  /**
+   * Get the Rejected Scheduling Request.
+   * @return SchedulingRequest.
+   */
+  public abstract SchedulingRequest getRequest();
+
+  /**
+   * Set the SchedulingRequest.
+   * @param request SchedulingRequest.
+   */
+  public abstract void setRequest(SchedulingRequest request);
+}

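On the AM side these records surface through the new AllocateResponse
getter; a minimal consumption sketch (response is the value returned by
allocate(), and LOG is an assumed slf4j logger):

    for (RejectedSchedulingRequest rejected :
        response.getRejectedSchedulingRequests()) {
      LOG.warn("Scheduling request {} was rejected: {}",
          rejected.getRequest().getAllocationRequestId(),
          rejected.getReason());
    }
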
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/RejectionReason.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/RejectionReason.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/RejectionReason.java
new file mode 100644
index 0000000..afbc2ed
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/RejectionReason.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Reason for rejecting a Scheduling Request.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public enum RejectionReason {
+  /**
+   * This is used to indicate a possible constraint violation. For example,
+   * the App requested anti-affinity across 5 container requests, but only 4
+   * nodes exist. Another example: tag A has affinity with tag B and tag B
+   * has affinity with tag C, but tag A has anti-affinity with tag C, all at
+   * rack scope - and only 1 rack exists. Essentially, all situations where
+   * the Algorithm cannot assign a Node to the SchedulingRequest.
+   */
+  COULD_NOT_PLACE_ON_NODE,
+  /**
+   * This is used to indicate that the Algorithm has placed a Scheduling
+   * Request at a node, but the commit failed (for example, because the
+   * Queue has no capacity). This can be a transient situation.
+   */
+  COULD_NOT_SCHEDULE_ON_NODE
+}

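Since the first reason is structural and the second transient, an AM would
typically branch on the reason. Continuing the consumption sketch above
(both handler methods are hypothetical):

    switch (rejected.getReason()) {
    case COULD_NOT_PLACE_ON_NODE:
      // No node can satisfy the constraint as written: relax or drop it.
      relaxConstraintAndResubmit(rejected.getRequest()); // hypothetical
      break;
    case COULD_NOT_SCHEDULE_ON_NODE:
      // Transient (e.g. the queue is momentarily full): retry as-is.
      resubmitLater(rejected.getRequest()); // hypothetical
      break;
    default:
      break;
    }
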
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index ac43d1b..df83861 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -424,6 +424,16 @@ enum AMCommandProto {
   AM_SHUTDOWN = 2;
 }
 
+enum RejectionReasonProto {
+  RRP_COULD_NOT_PLACE_ON_NODE = 1;
+  RRP_COULD_NOT_SCHEDULE_ON_NODE = 2;
+}
+
+message RejectedSchedulingRequestProto {
+  required RejectionReasonProto reason = 1;
+  required SchedulingRequestProto request = 2;
+}
+
 message PreemptionMessageProto {
   optional StrictPreemptionContractProto strictContract = 1;
   optional PreemptionContractProto contract = 2;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
index e49c4e3..92a65ad 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
@@ -120,6 +120,7 @@ message AllocateResponseProto {
   repeated UpdateContainerErrorProto update_errors = 15;
   repeated UpdatedContainerProto updated_containers = 16;
   repeated ContainerProto containers_from_previous_attempts = 17;
+  repeated RejectedSchedulingRequestProto rejected_scheduling_requests = 18;
 }
 
 enum SchedulerResourceTypes {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
index 5ca1e73..3ab5563 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.api.records.NMToken;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.PreemptionMessage;
 import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.UpdateContainerError;
@@ -47,9 +48,11 @@ import org.apache.hadoop.yarn.api.records.impl.pb.NodeReportPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionMessagePBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.PriorityPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
+import org.apache.hadoop.yarn.api.records.impl.pb.RejectedSchedulingRequestPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.UpdatedContainerPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos;
 import org.apache.hadoop.yarn.proto.YarnProtos.CollectorInfoProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProto;
@@ -81,6 +84,7 @@ public class AllocateResponsePBImpl extends AllocateResponse {
 
   private List<NodeReport> updatedNodes = null;
   private List<UpdateContainerError> updateErrors = null;
+  private List<RejectedSchedulingRequest> rejectedRequests = null;
   private PreemptionMessage preempt;
   private Token amrmToken = null;
   private Priority appPriority = null;
@@ -140,6 +144,13 @@ public class AllocateResponsePBImpl extends AllocateResponse {
           getContainerStatusProtoIterable(this.completedContainersStatuses);
       builder.addAllCompletedContainerStatuses(iterable);
     }
+    if (this.rejectedRequests != null) {
+      builder.clearRejectedSchedulingRequests();
+      Iterable<YarnProtos.RejectedSchedulingRequestProto> iterable =
+          getRejectedSchedulingRequestsProtoIterable(
+              this.rejectedRequests);
+      builder.addAllRejectedSchedulingRequests(iterable);
+    }
     if (this.updatedNodes != null) {
       builder.clearUpdatedNodes();
       Iterable<NodeReportProto> iterable =
@@ -471,6 +482,24 @@ public class AllocateResponsePBImpl extends AllocateResponse {
     containersFromPreviousAttempts.addAll(containers);
   }
 
+  @Override
+  public synchronized List<RejectedSchedulingRequest>
+      getRejectedSchedulingRequests() {
+    initRejectedRequestsList();
+    return this.rejectedRequests;
+  }
+
+  @Override
+  public synchronized void setRejectedSchedulingRequests(
+      List<RejectedSchedulingRequest> rejectedReqs) {
+    if (rejectedReqs == null) {
+      return;
+    }
+    initRejectedRequestsList();
+    this.rejectedRequests.clear();
+    this.rejectedRequests.addAll(rejectedReqs);
+  }
+
   private synchronized void initLocalUpdatedContainerList() {
     if (this.updatedContainers != null) {
       return;
@@ -528,6 +557,20 @@ public class AllocateResponsePBImpl extends AllocateResponse {
     }
   }
 
+  private synchronized void initRejectedRequestsList() {
+    if (this.rejectedRequests != null) {
+      return;
+    }
+    AllocateResponseProtoOrBuilder p = viaProto ? proto : builder;
+    List<YarnProtos.RejectedSchedulingRequestProto> list =
+        p.getRejectedSchedulingRequestsList();
+    rejectedRequests = new ArrayList<>();
+
+    for (YarnProtos.RejectedSchedulingRequestProto c : list) {
+      rejectedRequests.add(convertFromProtoFormat(c));
+    }
+  }
+
   private synchronized void initLocalNewNMTokenList() {
     if (nmTokens != null) {
       return;
@@ -712,6 +755,38 @@ public class AllocateResponsePBImpl extends AllocateResponse {
       }
     };
   }
+
+  private synchronized Iterable<YarnProtos.RejectedSchedulingRequestProto>
+      getRejectedSchedulingRequestsProtoIterable(
+      final List<RejectedSchedulingRequest> rejectedReqsList) {
+    maybeInitBuilder();
+    return new Iterable<YarnProtos.RejectedSchedulingRequestProto>() {
+      @Override
+      public Iterator<YarnProtos.RejectedSchedulingRequestProto> iterator() {
+        return new Iterator<YarnProtos.RejectedSchedulingRequestProto>() {
+
+          private Iterator<RejectedSchedulingRequest> iter =
+              rejectedReqsList.iterator();
+
+          @Override
+          public synchronized boolean hasNext() {
+            return iter.hasNext();
+          }
+
+          @Override
+          public synchronized YarnProtos.RejectedSchedulingRequestProto next() {
+            return convertToProtoFormat(iter.next());
+          }
+
+          @Override
+          public synchronized void remove() {
+            throw new UnsupportedOperationException();
+
+          }
+        };
+      }
+    };
+  }
   
   private synchronized Iterable<NodeReportProto>
   getNodeReportProtoIterable(
@@ -808,6 +883,16 @@ public class AllocateResponsePBImpl extends AllocateResponse {
     return ((ContainerStatusPBImpl)t).getProto();
   }
 
+  private synchronized RejectedSchedulingRequestPBImpl convertFromProtoFormat(
+      YarnProtos.RejectedSchedulingRequestProto p) {
+    return new RejectedSchedulingRequestPBImpl(p);
+  }
+
+  private synchronized YarnProtos.RejectedSchedulingRequestProto
+      convertToProtoFormat(RejectedSchedulingRequest t) {
+    return ((RejectedSchedulingRequestPBImpl)t).getProto();
+  }
+
   private synchronized ResourcePBImpl convertFromProtoFormat(ResourceProto p) {
     return new ResourcePBImpl(p);
   }
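
The accessors above follow the usual PBImpl pattern: the local list is
materialized lazily from the proto and merged back on getProto(). A minimal,
hypothetical sketch of how an ApplicationMaster could consume the new field
(the allocateResponse and LOG variables are assumed to exist in the caller
and are not part of this patch):

    for (RejectedSchedulingRequest rejected :
        allocateResponse.getRejectedSchedulingRequests()) {
      SchedulingRequest request = rejected.getRequest();
      LOG.warn("SchedulingRequest with allocationRequestId="
          + request.getAllocationRequestId()
          + " was rejected, reason=" + rejected.getReason());
      // The AM can resubmit with relaxed constraints or fail fast here.
    }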

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
index 168d864..76e86ad 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.NodeUpdateType;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueState;
+import org.apache.hadoop.yarn.api.records.RejectionReason;
 import org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
@@ -233,6 +234,21 @@ public class ProtoUtils {
   }
 
   /*
+   * RejectionReason
+   */
+  private static final String REJECTION_REASON_PREFIX = "RRP_";
+  public static YarnProtos.RejectionReasonProto convertToProtoFormat(
+      RejectionReason e) {
+    return YarnProtos.RejectionReasonProto
+        .valueOf(REJECTION_REASON_PREFIX + e.name());
+  }
+  public static RejectionReason convertFromProtoFormat(
+      YarnProtos.RejectionReasonProto e) {
+    return RejectionReason.valueOf(e.name()
+        .replace(REJECTION_REASON_PREFIX, ""));
+  }
+
+  /*
    * ByteBuffer
    */
   public static ByteBuffer convertFromProtoFormat(ByteString byteString) {
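
As with the other enum converters in this class, the mapping adds or strips a
fixed prefix so the record enum and the proto enum can share value names. A
small round-trip sketch; it iterates values() generically because the
RejectionReason constants themselves are not shown in this patch:

    for (RejectionReason reason : RejectionReason.values()) {
      YarnProtos.RejectionReasonProto p =
          ProtoUtils.convertToProtoFormat(reason);
      // Proto names carry the "RRP_" prefix, i.e. RRP_<NAME>.
      assert p.name().equals("RRP_" + reason.name());
      assert ProtoUtils.convertFromProtoFormat(p) == reason;
    }

Note that String.replace("RRP_", "") removes every occurrence of the
substring, not just a leading prefix; that is safe only as long as no enum
name contains "RRP_" elsewhere.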

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/RejectedSchedulingRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/RejectedSchedulingRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/RejectedSchedulingRequestPBImpl.java
new file mode 100644
index 0000000..ed78551
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/RejectedSchedulingRequestPBImpl.java
@@ -0,0 +1,148 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import com.google.protobuf.TextFormat;
+import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
+import org.apache.hadoop.yarn.api.records.RejectionReason;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.proto.YarnProtos;
+
+/**
+ * Implementation of RejectedSchedulingRequest.
+ */
+public class RejectedSchedulingRequestPBImpl extends RejectedSchedulingRequest {
+
+  private YarnProtos.RejectedSchedulingRequestProto proto =
+      YarnProtos.RejectedSchedulingRequestProto.getDefaultInstance();
+  private YarnProtos.RejectedSchedulingRequestProto.Builder builder = null;
+  private boolean viaProto = false;
+  private SchedulingRequest request;
+
+  public RejectedSchedulingRequestPBImpl() {
+    builder = YarnProtos.RejectedSchedulingRequestProto.newBuilder();
+  }
+
+  public RejectedSchedulingRequestPBImpl(
+      YarnProtos.RejectedSchedulingRequestProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  public synchronized YarnProtos.RejectedSchedulingRequestProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null) {
+      return false;
+    }
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return TextFormat.shortDebugString(getProto());
+  }
+
+  private synchronized void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private synchronized void mergeLocalToBuilder() {
+    if (this.request != null) {
+      builder.setRequest(convertToProtoFormat(this.request));
+    }
+  }
+  private synchronized void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = YarnProtos.RejectedSchedulingRequestProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  @Override
+  public synchronized RejectionReason getReason() {
+    YarnProtos.RejectedSchedulingRequestProtoOrBuilder p =
+        viaProto ? proto : builder;
+    if (!p.hasReason()) {
+      return null;
+    }
+    return ProtoUtils.convertFromProtoFormat(p.getReason());
+  }
+
+  @Override
+  public synchronized void setReason(RejectionReason reason) {
+    maybeInitBuilder();
+    if (reason == null) {
+      builder.clearReason();
+      return;
+    }
+    builder.setReason(ProtoUtils.convertToProtoFormat(reason));
+  }
+
+  @Override
+  public synchronized SchedulingRequest getRequest() {
+    YarnProtos.RejectedSchedulingRequestProtoOrBuilder p =
+        viaProto ? proto : builder;
+    if (this.request != null) {
+      return this.request;
+    }
+    if (!p.hasRequest()) {
+      return null;
+    }
+    this.request = convertFromProtoFormat(p.getRequest());
+    return this.request;
+  }
+
+  @Override
+  public synchronized void setRequest(SchedulingRequest req) {
+    maybeInitBuilder();
+    if (null == req) {
+      builder.clearRequest();
+    }
+    this.request = req;
+  }
+
+  private synchronized YarnProtos.SchedulingRequestProto convertToProtoFormat(
+      SchedulingRequest r) {
+    return ((SchedulingRequestPBImpl)r).getProto();
+  }
+
+  private synchronized SchedulingRequestPBImpl convertFromProtoFormat(
+      YarnProtos.SchedulingRequestProto p) {
+    return new SchedulingRequestPBImpl(p);
+  }
+}
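
A minimal sketch of the record/proto round trip this class implements. Field
values are illustrative, and the no-arg SchedulingRequestPBImpl constructor
is assumed to behave like the other PBImpl records:

    RejectedSchedulingRequestPBImpl rejected =
        new RejectedSchedulingRequestPBImpl();
    rejected.setReason(RejectionReason.values()[0]);
    rejected.setRequest(new SchedulingRequestPBImpl());

    YarnProtos.RejectedSchedulingRequestProto proto = rejected.getProto();
    RejectedSchedulingRequest restored =
        new RejectedSchedulingRequestPBImpl(proto);

    // equals() compares the underlying protos, so the copies match.
    assert restored.equals(rejected);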

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
index f98e488..4054837 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceSizingPBImpl.java
@@ -114,4 +114,12 @@ public class ResourceSizingPBImpl extends ResourceSizing {
   private ResourceProto convertToProtoFormat(Resource r) {
     return ProtoUtils.convertToProtoFormat(r);
   }
+
+  @Override
+  public String toString() {
+    return "ResourceSizingPBImpl{" +
+        "numAllocations=" + getNumAllocations() +
+        ", resources=" + getResources() +
+        '}';
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
index 305856a..1f86043 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/SchedulingRequestPBImpl.java
@@ -279,4 +279,15 @@ public class SchedulingRequestPBImpl extends SchedulingRequest {
     }
     return false;
   }
+
+  @Override
+  public String toString() {
+    return "SchedulingRequestPBImpl{" +
+        "priority=" + getPriority() +
+        ", allocationReqId=" + getAllocationRequestId() +
+        ", executionType=" + getExecutionType() +
+        ", allocationTags=" + getAllocationTags() +
+        ", resourceSizing=" + getResourceSizing() +
+        '}';
+  }
 }
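
With this change the key fields of a request show up directly in scheduler
logs. An illustrative sketch, assuming the usual SchedulingRequest setters
(setAllocationRequestId, setAllocationTags); the printed form is approximate:

    SchedulingRequest req = new SchedulingRequestPBImpl();
    req.setAllocationRequestId(42L);
    req.setAllocationTags(Collections.singleton("hbase-master"));

    // Prints something like:
    // SchedulingRequestPBImpl{priority=null, allocationReqId=42,
    //     executionType=null, allocationTags=[hbase-master],
    //     resourceSizing=null}
    System.out.println(req);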

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
index a0b907d..ae80910 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
@@ -138,6 +138,7 @@ import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.QueueState;
 import org.apache.hadoop.yarn.api.records.QueueStatistics;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
 import org.apache.hadoop.yarn.api.records.ReservationAllocationState;
 import org.apache.hadoop.yarn.api.records.ReservationDefinition;
 import org.apache.hadoop.yarn.api.records.ReservationId;
@@ -436,6 +437,7 @@ public class TestPBImplRecords extends BasePBImplRecordsTest {
     generateByNewInstance(ResourceTypeInfo.class);
     generateByNewInstance(ResourceSizing.class);
     generateByNewInstance(SchedulingRequest.class);
+    generateByNewInstance(RejectedSchedulingRequest.class);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
index 6ee3a4c..4d0c230 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMDelegatedNodeLabelsUpdater;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager;
-import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.NullRMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem;
@@ -43,6 +42,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.monitor.RMAppLifetime
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.distributed.QueueLimitCalculator;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
 import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
index 62899d9..00da108 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWri
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMDelegatedNodeLabelsUpdater;
-import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem;
@@ -44,6 +43,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAlloca
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.distributed.QueueLimitCalculator;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
 import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
index 315fdc1..da50ef8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWri
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMDelegatedNodeLabelsUpdater;
-import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem;
@@ -50,6 +49,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAlloca
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.distributed.QueueLimitCalculator;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
 import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 8d1000e..adda465 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -70,7 +70,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.metrics.TimelineServiceV1Pu
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.TimelineServiceV2Publisher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMDelegatedNodeLabelsUpdater;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
-import org.apache.hadoop.yarn.server.resourcemanager.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.NullRMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState;
@@ -94,6 +93,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsManager.java
deleted file mode 100644
index b67fab9..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsManager.java
+++ /dev/null
@@ -1,431 +0,0 @@
-/*
- * *
- *  Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- * /
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.constraint;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.NodeId;
-import org.apache.hadoop.yarn.api.records.SchedulingRequest;
-import org.apache.log4j.Logger;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.function.LongBinaryOperator;
-
-/**
- * Support storing maps between container-tags/applications and
- * nodes. This is required by the affinity/anti-affinity implementation and
- * by cardinality constraints.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class AllocationTagsManager {
-
-  private static final Logger LOG = Logger.getLogger(
-      AllocationTagsManager.class);
-
-  private ReentrantReadWriteLock.ReadLock readLock;
-  private ReentrantReadWriteLock.WriteLock writeLock;
-
-  // Application's tags to node
-  private Map<ApplicationId, NodeToCountedTags> perAppMappings =
-      new HashMap<>();
-
-  // Global tags-to-node mapping (used to quickly return aggregated tag
-  // cardinality across apps)
-  private NodeToCountedTags globalMapping = new NodeToCountedTags();
-
-  /**
-   * Store node to counted tags.
-   */
-  @VisibleForTesting
-  static class NodeToCountedTags {
-    // Map<NodeId, Map<Tag, Count>>
-    private Map<NodeId, Map<String, Long>> nodeToTagsWithCount =
-        new HashMap<>();
-
-    // protected by external locks
-    private void addTagsToNode(NodeId nodeId, Set<String> tags) {
-      Map<String, Long> innerMap = nodeToTagsWithCount.computeIfAbsent(nodeId,
-          k -> new HashMap<>());
-
-      for (String tag : tags) {
-        Long count = innerMap.get(tag);
-        if (count == null) {
-          innerMap.put(tag, 1L);
-        } else{
-          innerMap.put(tag, count + 1);
-        }
-      }
-    }
-
-    // protected by external locks
-    private void addTagToNode(NodeId nodeId, String tag) {
-      Map<String, Long> innerMap = nodeToTagsWithCount.computeIfAbsent(nodeId,
-          k -> new HashMap<>());
-
-      Long count = innerMap.get(tag);
-      if (count == null) {
-        innerMap.put(tag, 1L);
-      } else{
-        innerMap.put(tag, count + 1);
-      }
-    }
-
-    private void removeTagFromInnerMap(Map<String, Long> innerMap, String tag) {
-      Long count = innerMap.get(tag);
-      if (count > 1) {
-        innerMap.put(tag, count - 1);
-      } else {
-        if (count <= 0) {
-          LOG.warn(
-              "Trying to remove tags from node, but the count is already"
-                  + " 0 or less; this could be a bug.");
-        }
-        innerMap.remove(tag);
-      }
-    }
-
-    private void removeTagsFromNode(NodeId nodeId, Set<String> tags) {
-      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
-      if (innerMap == null) {
-        LOG.warn("Failed to find node=" + nodeId
-            + " while trying to remove tags, please double check.");
-        return;
-      }
-
-      for (String tag : tags) {
-        removeTagFromInnerMap(innerMap, tag);
-      }
-
-      if (innerMap.isEmpty()) {
-        nodeToTagsWithCount.remove(nodeId);
-      }
-    }
-
-    private void removeTagFromNode(NodeId nodeId, String tag) {
-      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
-      if (innerMap == null) {
-        LOG.warn("Failed to find node=" + nodeId
-            + " while trying to remove tags, please double check.");
-        return;
-      }
-
-      removeTagFromInnerMap(innerMap, tag);
-
-      if (innerMap.isEmpty()) {
-        nodeToTagsWithCount.remove(nodeId);
-      }
-    }
-
-    private long getCardinality(NodeId nodeId, String tag) {
-      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
-      if (innerMap == null) {
-        return 0;
-      }
-      Long value = innerMap.get(tag);
-      return value == null ? 0 : value;
-    }
-
-    private long getCardinality(NodeId nodeId, Set<String> tags,
-        LongBinaryOperator op) {
-      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
-      if (innerMap == null) {
-        return 0;
-      }
-
-      long returnValue = 0;
-      boolean firstTag = true;
-
-      if (tags != null && !tags.isEmpty()) {
-        for (String tag : tags) {
-          Long value = innerMap.get(tag);
-          if (value == null) {
-            value = 0L;
-          }
-
-          if (firstTag) {
-            returnValue = value;
-            firstTag = false;
-            continue;
-          }
-
-          returnValue = op.applyAsLong(returnValue, value);
-        }
-      } else {
-        // Similar to the branch above, but iterate only the values for
-        // better performance
-        for (long value : innerMap.values()) {
-          // For the first value, we will not apply op
-          if (firstTag) {
-            returnValue = value;
-            firstTag = false;
-            continue;
-          }
-          returnValue = op.applyAsLong(returnValue, value);
-        }
-      }
-      return returnValue;
-    }
-
-    private boolean isEmpty() {
-      return nodeToTagsWithCount.isEmpty();
-    }
-
-    @VisibleForTesting
-    public Map<NodeId, Map<String, Long>> getNodeToTagsWithCount() {
-      return nodeToTagsWithCount;
-    }
-  }
-
-  @VisibleForTesting
-  Map<ApplicationId, NodeToCountedTags> getPerAppMappings() {
-    return perAppMappings;
-  }
-
-  @VisibleForTesting
-  NodeToCountedTags getGlobalMapping() {
-    return globalMapping;
-  }
-
-  public AllocationTagsManager() {
-    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
-    readLock = lock.readLock();
-    writeLock = lock.writeLock();
-  }
-
-  /**
-   * Notify container allocated on a node.
-   *
-   * @param nodeId         allocated node.
-   * @param applicationId  applicationId
-   * @param containerId    container id.
-   * @param allocationTags allocation tags, see
-   *                       {@link SchedulingRequest#getAllocationTags()}
-   *                       application_id will be added to allocationTags.
-   */
-  public void addContainer(NodeId nodeId, ApplicationId applicationId,
-      ContainerId containerId, Set<String> allocationTags) {
-    String applicationIdTag =
-        AllocationTagsNamespaces.APP_ID + applicationId.toString();
-
-    boolean useSet = false;
-    if (allocationTags != null && !allocationTags.isEmpty()) {
-      // Copy before editing it.
-      allocationTags = new HashSet<>(allocationTags);
-      allocationTags.add(applicationIdTag);
-      useSet = true;
-    }
-
-    writeLock.lock();
-    try {
-      NodeToCountedTags perAppTagsMapping = perAppMappings.computeIfAbsent(
-          applicationId, k -> new NodeToCountedTags());
-
-      if (useSet) {
-        perAppTagsMapping.addTagsToNode(nodeId, allocationTags);
-        globalMapping.addTagsToNode(nodeId, allocationTags);
-      } else {
-        perAppTagsMapping.addTagToNode(nodeId, applicationIdTag);
-        globalMapping.addTagToNode(nodeId, applicationIdTag);
-      }
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "Added container=" + containerId + " with tags=[" + StringUtils
-                .join(allocationTags, ",") + "]");
-      }
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  /**
-   * Notify container removed.
-   *
-   * @param nodeId         nodeId
-   * @param applicationId  applicationId
-   * @param containerId    containerId.
-   * @param allocationTags allocation tags for given container
-   */
-  public void removeContainer(NodeId nodeId, ApplicationId applicationId,
-      ContainerId containerId, Set<String> allocationTags) {
-    String applicationIdTag =
-        AllocationTagsNamespaces.APP_ID + applicationId.toString();
-    boolean useSet = false;
-
-    if (allocationTags != null && !allocationTags.isEmpty()) {
-      // Copy before editing it.
-      allocationTags = new HashSet<>(allocationTags);
-      allocationTags.add(applicationIdTag);
-      useSet = true;
-    }
-
-    writeLock.lock();
-    try {
-      NodeToCountedTags perAppTagsMapping = perAppMappings.get(applicationId);
-      if (perAppTagsMapping == null) {
-        return;
-      }
-
-      if (useSet) {
-        perAppTagsMapping.removeTagsFromNode(nodeId, allocationTags);
-        globalMapping.removeTagsFromNode(nodeId, allocationTags);
-      } else {
-        perAppTagsMapping.removeTagFromNode(nodeId, applicationIdTag);
-        globalMapping.removeTagFromNode(nodeId, applicationIdTag);
-      }
-
-      if (perAppTagsMapping.isEmpty()) {
-        perAppMappings.remove(applicationId);
-      }
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "Removed container=" + containerId + " with tags=[" + StringUtils
-                .join(allocationTags, ",") + "]");
-      }
-    } finally {
-      writeLock.unlock();
-    }
-  }
-
-  /**
-   * Get the cardinality of a single allocation tag on a node.
-   *
-   * @param nodeId        nodeId, required.
-   * @param applicationId applicationId. When null is specified, the
-   *                      aggregated cardinality across all applications
-   *                      is returned.
-   * @param tag           allocation tag, see
-   *                      {@link SchedulingRequest#getAllocationTags()}.
-   *                      If the tag doesn't exist on the node, its
-   *                      cardinality is 0.
-   * @return cardinality of specified query on the node.
-   * @throws InvalidAllocationTagsQueryException when illegal query
-   *                                            parameter specified
-   */
-  public long getNodeCardinality(NodeId nodeId, ApplicationId applicationId,
-      String tag) throws InvalidAllocationTagsQueryException {
-    readLock.lock();
-
-    try {
-      if (nodeId == null) {
-        throw new InvalidAllocationTagsQueryException(
-            "Must specify nodeId/tags/op to query cardinality");
-      }
-
-      NodeToCountedTags mapping;
-      if (applicationId != null) {
-        mapping = perAppMappings.get(applicationId);
-      } else{
-        mapping = globalMapping;
-      }
-
-      if (mapping == null) {
-        return 0;
-      }
-
-      return mapping.getCardinality(nodeId, tag);
-    } finally {
-      readLock.unlock();
-    }
-  }
-
-  /**
-   * Check if given tag exists on node.
-   *
-   * @param nodeId        nodeId, required.
-   * @param applicationId applicationId. When null is specified, the tag is
-   *                      checked across all applications on the node.
-   * @param tag           allocation tag, see
-   *                      {@link SchedulingRequest#getAllocationTags()}.
-   * @return true if the given tag exists on the node, false otherwise.
-   * @throws InvalidAllocationTagsQueryException when illegal query
-   *                                            parameter specified
-   */
-  public boolean allocationTagExistsOnNode(NodeId nodeId,
-      ApplicationId applicationId, String tag)
-      throws InvalidAllocationTagsQueryException {
-    return getNodeCardinality(nodeId, applicationId, tag) > 0;
-  }
-
-  /**
-   * Get cardinality for the following conditions. Callers can pass in a
-   * binary op to implement customized logic.
-   *
-   * @param nodeId        nodeId, required.
-   * @param applicationId applicationId. When null is specified, the
-   *                      aggregated cardinality across all applications
-   *                      is returned.
-   * @param tags          allocation tags, see
-   *                      {@link SchedulingRequest#getAllocationTags()}.
-   *                      When multiple tags are specified, the returned
-   *                      cardinality depends on op. If a specified tag
-   *                      doesn't exist, its cardinality is 0. When
-   *                      null/empty tags are specified, all tags (of the
-   *                      node/app) are considered.
-   * @param op            operator, such as Long::max or Long::sum. Required.
-   *                      This parameter only takes effect when two or more
-   *                      values are combined.
-   * @return cardinality of specified query on the node.
-   * @throws InvalidAllocationTagsQueryException when illegal query
-   *                                            parameter specified
-   */
-  public long getNodeCardinalityByOp(NodeId nodeId, ApplicationId applicationId,
-      Set<String> tags, LongBinaryOperator op)
-      throws InvalidAllocationTagsQueryException {
-    readLock.lock();
-
-    try {
-      if (nodeId == null || op == null) {
-        throw new InvalidAllocationTagsQueryException(
-            "Must specify nodeId/tags/op to query cardinality");
-      }
-
-      NodeToCountedTags mapping;
-      if (applicationId != null) {
-        mapping = perAppMappings.get(applicationId);
-      } else{
-        mapping = globalMapping;
-      }
-
-      if (mapping == null) {
-        return 0;
-      }
-
-      return mapping.getCardinality(nodeId, tags, op);
-    } finally {
-      readLock.unlock();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsNamespaces.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsNamespaces.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsNamespaces.java
deleted file mode 100644
index 893ff1c..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/AllocationTagsNamespaces.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * *
- *  Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- * /
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.constraint;
-
-/**
- * Predefined namespaces for tags.
- *
- * Same as the namespace of resource types. Namespaces of placement tags start
- * with a letter and end with "/".
- */
-public class AllocationTagsNamespaces {
-  public static final String APP_ID = "yarn_app_id/";
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/InvalidAllocationTagsQueryException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/InvalidAllocationTagsQueryException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/InvalidAllocationTagsQueryException.java
deleted file mode 100644
index 5519e39..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/constraint/InvalidAllocationTagsQueryException.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * *
- *  Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- * /
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.constraint;
-
-import org.apache.hadoop.yarn.exceptions.YarnException;
-
-/**
- * Exception when invalid parameter specified to do placement tags related
- * queries.
- */
-public class InvalidAllocationTagsQueryException extends YarnException {
-  private static final long serialVersionUID = 12312831974894L;
-
-  public InvalidAllocationTagsQueryException(String msg) {
-    super(msg);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
new file mode 100644
index 0000000..c278606
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
@@ -0,0 +1,431 @@
+/*
+ * *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ * /
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.log4j.Logger;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.function.LongBinaryOperator;
+
+/**
+ * Support storing maps between container-tags/applications and
+ * nodes. This is required by the affinity/anti-affinity implementation and
+ * by cardinality constraints.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class AllocationTagsManager {
+
+  private static final Logger LOG = Logger.getLogger(
+      AllocationTagsManager.class);
+
+  private ReentrantReadWriteLock.ReadLock readLock;
+  private ReentrantReadWriteLock.WriteLock writeLock;
+
+  // Application's tags to node
+  private Map<ApplicationId, NodeToCountedTags> perAppMappings =
+      new HashMap<>();
+
+  // Global tags-to-node mapping (used to quickly return aggregated tag
+  // cardinality across apps)
+  private NodeToCountedTags globalMapping = new NodeToCountedTags();
+
+  /**
+   * Store node to counted tags.
+   */
+  @VisibleForTesting
+  static class NodeToCountedTags {
+    // Map<NodeId, Map<Tag, Count>>
+    private Map<NodeId, Map<String, Long>> nodeToTagsWithCount =
+        new HashMap<>();
+
+    // protected by external locks
+    private void addTagsToNode(NodeId nodeId, Set<String> tags) {
+      Map<String, Long> innerMap = nodeToTagsWithCount.computeIfAbsent(nodeId,
+          k -> new HashMap<>());
+
+      for (String tag : tags) {
+        Long count = innerMap.get(tag);
+        if (count == null) {
+          innerMap.put(tag, 1L);
+        } else{
+          innerMap.put(tag, count + 1);
+        }
+      }
+    }
+
+    // protected by external locks
+    private void addTagToNode(NodeId nodeId, String tag) {
+      Map<String, Long> innerMap = nodeToTagsWithCount.computeIfAbsent(nodeId,
+          k -> new HashMap<>());
+
+      Long count = innerMap.get(tag);
+      if (count == null) {
+        innerMap.put(tag, 1L);
+      } else{
+        innerMap.put(tag, count + 1);
+      }
+    }
+
+    private void removeTagFromInnerMap(Map<String, Long> innerMap, String tag) {
+      Long count = innerMap.get(tag);
+      if (count > 1) {
+        innerMap.put(tag, count - 1);
+      } else {
+        if (count <= 0) {
+          LOG.warn(
+              "Trying to remove tags from node, but the count is already"
+                  + " 0 or less; this could be a bug.");
+        }
+        innerMap.remove(tag);
+      }
+    }
+
+    private void removeTagsFromNode(NodeId nodeId, Set<String> tags) {
+      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
+      if (innerMap == null) {
+        LOG.warn("Failed to find node=" + nodeId
+            + " while trying to remove tags, please double check.");
+        return;
+      }
+
+      for (String tag : tags) {
+        removeTagFromInnerMap(innerMap, tag);
+      }
+
+      if (innerMap.isEmpty()) {
+        nodeToTagsWithCount.remove(nodeId);
+      }
+    }
+
+    private void removeTagFromNode(NodeId nodeId, String tag) {
+      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
+      if (innerMap == null) {
+        LOG.warn("Failed to find node=" + nodeId
+            + " while trying to remove tags, please double check.");
+        return;
+      }
+
+      removeTagFromInnerMap(innerMap, tag);
+
+      if (innerMap.isEmpty()) {
+        nodeToTagsWithCount.remove(nodeId);
+      }
+    }
+
+    private long getCardinality(NodeId nodeId, String tag) {
+      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
+      if (innerMap == null) {
+        return 0;
+      }
+      Long value = innerMap.get(tag);
+      return value == null ? 0 : value;
+    }
+
+    private long getCardinality(NodeId nodeId, Set<String> tags,
+        LongBinaryOperator op) {
+      Map<String, Long> innerMap = nodeToTagsWithCount.get(nodeId);
+      if (innerMap == null) {
+        return 0;
+      }
+
+      long returnValue = 0;
+      boolean firstTag = true;
+
+      if (tags != null && !tags.isEmpty()) {
+        for (String tag : tags) {
+          Long value = innerMap.get(tag);
+          if (value == null) {
+            value = 0L;
+          }
+
+          if (firstTag) {
+            returnValue = value;
+            firstTag = false;
+            continue;
+          }
+
+          returnValue = op.applyAsLong(returnValue, value);
+        }
+      } else {
+        // Similar to the branch above, but iterate only the values for
+        // better performance
+        for (long value : innerMap.values()) {
+          // For the first value, we will not apply op
+          if (firstTag) {
+            returnValue = value;
+            firstTag = false;
+            continue;
+          }
+          returnValue = op.applyAsLong(returnValue, value);
+        }
+      }
+      return returnValue;
+    }
+
+    private boolean isEmpty() {
+      return nodeToTagsWithCount.isEmpty();
+    }
+
+    @VisibleForTesting
+    public Map<NodeId, Map<String, Long>> getNodeToTagsWithCount() {
+      return nodeToTagsWithCount;
+    }
+  }
+
+  @VisibleForTesting
+  Map<ApplicationId, NodeToCountedTags> getPerAppMappings() {
+    return perAppMappings;
+  }
+
+  @VisibleForTesting
+  NodeToCountedTags getGlobalMapping() {
+    return globalMapping;
+  }
+
+  public AllocationTagsManager() {
+    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+    readLock = lock.readLock();
+    writeLock = lock.writeLock();
+  }
+
+  /**
+   * Notify container allocated on a node.
+   *
+   * @param nodeId         allocated node.
+   * @param applicationId  applicationId
+   * @param containerId    container id.
+   * @param allocationTags allocation tags, see
+   *                       {@link SchedulingRequest#getAllocationTags()}
+   *                       application_id will be added to allocationTags.
+   */
+  public void addContainer(NodeId nodeId, ApplicationId applicationId,
+      ContainerId containerId, Set<String> allocationTags) {
+    String applicationIdTag =
+        AllocationTagsNamespaces.APP_ID + applicationId.toString();
+
+    boolean useSet = false;
+    if (allocationTags != null && !allocationTags.isEmpty()) {
+      // Copy before editing it.
+      allocationTags = new HashSet<>(allocationTags);
+      allocationTags.add(applicationIdTag);
+      useSet = true;
+    }
+
+    writeLock.lock();
+    try {
+      NodeToCountedTags perAppTagsMapping = perAppMappings.computeIfAbsent(
+          applicationId, k -> new NodeToCountedTags());
+
+      if (useSet) {
+        perAppTagsMapping.addTagsToNode(nodeId, allocationTags);
+        globalMapping.addTagsToNode(nodeId, allocationTags);
+      } else {
+        perAppTagsMapping.addTagToNode(nodeId, applicationIdTag);
+        globalMapping.addTagToNode(nodeId, applicationIdTag);
+      }
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(
+            "Added container=" + containerId + " with tags=[" + StringUtils
+                .join(allocationTags, ",") + "]");
+      }
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  /**
+   * Notify container removed.
+   *
+   * @param nodeId         nodeId
+   * @param applicationId  applicationId
+   * @param containerId    containerId.
+   * @param allocationTags allocation tags for given container
+   */
+  public void removeContainer(NodeId nodeId, ApplicationId applicationId,
+      ContainerId containerId, Set<String> allocationTags) {
+    String applicationIdTag =
+        AllocationTagsNamespaces.APP_ID + applicationId.toString();
+    boolean useSet = false;
+
+    if (allocationTags != null && !allocationTags.isEmpty()) {
+      // Copy before editing it.
+      allocationTags = new HashSet<>(allocationTags);
+      allocationTags.add(applicationIdTag);
+      useSet = true;
+    }
+
+    writeLock.lock();
+    try {
+      NodeToCountedTags perAppTagsMapping = perAppMappings.get(applicationId);
+      if (perAppTagsMapping == null) {
+        return;
+      }
+
+      if (useSet) {
+        perAppTagsMapping.removeTagsFromNode(nodeId, allocationTags);
+        globalMapping.removeTagsFromNode(nodeId, allocationTags);
+      } else {
+        perAppTagsMapping.removeTagFromNode(nodeId, applicationIdTag);
+        globalMapping.removeTagFromNode(nodeId, applicationIdTag);
+      }
+
+      if (perAppTagsMapping.isEmpty()) {
+        perAppMappings.remove(applicationId);
+      }
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(
+            "Removed container=" + containerId + " with tags=[" + StringUtils
+                .join(allocationTags, ",") + "]");
+      }
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  /**
+   * Get the cardinality of a single allocation tag on a node.
+   *
+   * @param nodeId        nodeId, required.
+   * @param applicationId applicationId. When null is specified, the
+   *                      aggregated cardinality across all applications
+   *                      is returned.
+   * @param tag           allocation tag, see
+   *                      {@link SchedulingRequest#getAllocationTags()}.
+   *                      If the tag doesn't exist on the node, its
+   *                      cardinality is 0.
+   * @return cardinality of specified query on the node.
+   * @throws InvalidAllocationTagsQueryException when illegal query
+   *                                            parameter specified
+   */
+  public long getNodeCardinality(NodeId nodeId, ApplicationId applicationId,
+      String tag) throws InvalidAllocationTagsQueryException {
+    readLock.lock();
+
+    try {
+      if (nodeId == null) {
+        throw new InvalidAllocationTagsQueryException(
+            "Must specify nodeId/tags/op to query cardinality");
+      }
+
+      NodeToCountedTags mapping;
+      if (applicationId != null) {
+        mapping = perAppMappings.get(applicationId);
+      } else{
+        mapping = globalMapping;
+      }
+
+      if (mapping == null) {
+        return 0;
+      }
+
+      return mapping.getCardinality(nodeId, tag);
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  /**
+   * Check if given tag exists on node.
+   *
+   * @param nodeId        nodeId, required.
+   * @param applicationId applicationId. When null is specified, the tag is
+   *                      checked across all applications on the node.
+   * @param tag           allocation tag, see
+   *                      {@link SchedulingRequest#getAllocationTags()}.
+   * @return true if the given tag exists on the node, false otherwise.
+   * @throws InvalidAllocationTagsQueryException when illegal query
+   *                                            parameter specified
+   */
+  public boolean allocationTagExistsOnNode(NodeId nodeId,
+      ApplicationId applicationId, String tag)
+      throws InvalidAllocationTagsQueryException {
+    return getNodeCardinality(nodeId, applicationId, tag) > 0;
+  }
+
+  /**
+   * Get cardinality for the following conditions. Callers can pass in a
+   * binary op to implement customized logic.
+   *
+   * @param nodeId        nodeId, required.
+   * @param applicationId applicationId. When null is specified, the
+   *                      aggregated cardinality across all applications
+   *                      is returned.
+   * @param tags          allocation tags, see
+   *                      {@link SchedulingRequest#getAllocationTags()}.
+   *                      When multiple tags are specified, the returned
+   *                      cardinality depends on op. If a specified tag
+   *                      doesn't exist, its cardinality is 0. When
+   *                      null/empty tags are specified, all tags (of the
+   *                      node/app) are considered.
+   * @param op            operator, such as Long::max or Long::sum. Required.
+   *                      This parameter only takes effect when two or more
+   *                      values are combined.
+   * @return cardinality of specified query on the node.
+   * @throws InvalidAllocationTagsQueryException when illegal query
+   *                                            parameter specified
+   */
+  public long getNodeCardinalityByOp(NodeId nodeId, ApplicationId applicationId,
+      Set<String> tags, LongBinaryOperator op)
+      throws InvalidAllocationTagsQueryException {
+    readLock.lock();
+
+    try {
+      if (nodeId == null || op == null) {
+        throw new InvalidAllocationTagsQueryException(
+            "Must specify nodeId/tags/op to query cardinality");
+      }
+
+      NodeToCountedTags mapping;
+      if (applicationId != null) {
+        mapping = perAppMappings.get(applicationId);
+      } else{
+        mapping = globalMapping;
+      }
+
+      if (mapping == null) {
+        return 0;
+      }
+
+      return mapping.getCardinality(nodeId, tags, op);
+    } finally {
+      readLock.unlock();
+    }
+  }
+}
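A hedged usage sketch for the new query API (illustrative only, not part of
this patch; assumes an initialized AllocationTagsManager "atm", valid
NodeId/ApplicationId values, and Guava's ImmutableSet on the classpath):

    // Per-application cardinality of one tag on a node:
    long perApp = atm.getNodeCardinality(nodeId, appId, "hbase-master");
    // A null ApplicationId falls back to the global mapping, i.e. the
    // tag count across all applications on that node:
    long global = atm.getNodeCardinality(nodeId, null, "hbase-master");
    // Aggregate several tags with a caller-supplied binary operator:
    long busiest = atm.getNodeCardinalityByOp(nodeId, appId,
        ImmutableSet.of("hbase-master", "hbase-rs"), Long::max);
    if (atm.allocationTagExistsOnNode(nodeId, appId, "hbase-master")) {
      // e.g. reject the node when evaluating an anti-affinity constraint
    }
    // Each call can throw the checked InvalidAllocationTagsQueryException;
    // see the handling sketch after that class below.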

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsNamespaces.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsNamespaces.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsNamespaces.java
new file mode 100644
index 0000000..43fcfe5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsNamespaces.java
@@ -0,0 +1,31 @@
+/*
+ * *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ * /
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+/**
+ * Predefined namespaces for allocation tags.
+ *
+ * Similar to the namespaces of resource types, namespaces of placement tags
+ * start with letters and end with "/".
+ */
+public class AllocationTagsNamespaces {
+  public static final String APP_ID = "yarn_app_id/";
+}
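A hedged illustration of composing a namespaced tag (the concatenation
pattern is an assumption for illustration, not defined by this patch):

    String nsTag = AllocationTagsNamespaces.APP_ID + appId.toString();
    // e.g. "yarn_app_id/application_1514508667506_0001"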

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/InvalidAllocationTagsQueryException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/InvalidAllocationTagsQueryException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/InvalidAllocationTagsQueryException.java
new file mode 100644
index 0000000..29483a2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/InvalidAllocationTagsQueryException.java
@@ -0,0 +1,35 @@
+/*
+ * *
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ * /
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+import org.apache.hadoop.yarn.exceptions.YarnException;
+
+/**
+ * Exception thrown when an invalid parameter is specified for
+ * placement-tag related queries.
+ */
+public class InvalidAllocationTagsQueryException extends YarnException {
+  private static final long serialVersionUID = 12312831974894L;
+
+  public InvalidAllocationTagsQueryException(String msg) {
+    super(msg);
+  }
+}
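A minimal handling sketch for the checked exception (illustrative; the
recovery action is an assumption, not part of the patch):

    try {
      long sum = atm.getNodeCardinalityByOp(nodeId, appId, tags, Long::sum);
    } catch (InvalidAllocationTagsQueryException e) {
      // thrown when nodeId or op is null; in this sketch, rethrow as an
      // unchecked scheduler error
      throw new YarnRuntimeException(e);
    }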

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithm.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithm.java
new file mode 100644
index 0000000..2651663
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithm.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api;
+
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+
+/**
+ * Interface for a Constraint Placement Algorithm. The only contract is that
+ * it should be initialized with the RMContext before use.
+ */
+public interface ConstraintPlacementAlgorithm {
+
+  /**
+   * Initialize the Algorithm.
+   * @param rmContext RMContext.
+   */
+  void init(RMContext rmContext);
+
+  /**
+   * The Algorithm is expected to compute the placement of the provided
+   * ConstraintPlacementAlgorithmInput and use the collector to aggregate
+   * any output.
+   * @param algorithmInput Input to the Algorithm.
+   * @param collector Collector for output of algorithm.
+   */
+  void place(ConstraintPlacementAlgorithmInput algorithmInput,
+      ConstraintPlacementAlgorithmOutputCollector collector);
+}
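A hedged skeleton of an implementation, to show the intended call order
(the class name and no-op behaviour are hypothetical, not part of the patch):

    // Hypothetical no-op implementation, for illustration only.
    public class NoopPlacementAlgorithm implements ConstraintPlacementAlgorithm {
      private RMContext rmContext;

      @Override
      public void init(RMContext rmContext) {
        // called once, before any place() invocation
        this.rmContext = rmContext;
      }

      @Override
      public void place(ConstraintPlacementAlgorithmInput algorithmInput,
          ConstraintPlacementAlgorithmOutputCollector collector) {
        // a real algorithm would compute placements for
        // algorithmInput.getSchedulingRequests() and push the results
        // into the collector; a no-op emits nothing
      }
    }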

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6062844e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmInput.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmInput.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmInput.java
new file mode 100644
index 0000000..74572b8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmInput.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api;
+
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+
+import java.util.Collection;
+
+/**
+ * This encapsulates an input to the Constraint Placement Algorithm. At the
+ * very least it must consist of a collection of SchedulingRequests.
+ */
+public interface ConstraintPlacementAlgorithmInput {
+
+  Collection<SchedulingRequest> getSchedulingRequests();
+
+}
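A minimal sketch of an input implementation (hypothetical class name, not
part of the patch; assumes the imports shown in the interface above):

    // Hypothetical: the simplest possible algorithm input.
    public class SimpleAlgorithmInput implements ConstraintPlacementAlgorithmInput {
      private final Collection<SchedulingRequest> requests;

      public SimpleAlgorithmInput(Collection<SchedulingRequest> requests) {
        this.requests = requests;
      }

      @Override
      public Collection<SchedulingRequest> getSchedulingRequests() {
        return requests;
      }
    }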




[20/49] hadoop git commit: HADOOP-14965. S3a input stream "normal" fadvise mode to be adaptive

Posted by as...@apache.org.
HADOOP-14965. S3a input stream "normal" fadvise mode to be adaptive


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ba491ff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ba491ff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ba491ff

Branch: refs/heads/YARN-6592
Commit: 1ba491ff907fc5d2618add980734a3534e2be098
Parents: 13ad747
Author: Steve Loughran <st...@apache.org>
Authored: Wed Dec 20 18:25:33 2017 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Wed Dec 20 18:25:33 2017 +0000

----------------------------------------------------------------------
 .../apache/hadoop/fs/s3a/S3AInputStream.java    | 28 +++++++++++++++++---
 .../hadoop/fs/s3a/S3AInstrumentation.java       | 13 +++++++++
 .../src/site/markdown/tools/hadoop-aws/index.md | 13 ++++++++-
 .../scale/ITestS3AInputStreamPerformance.java   |  6 ++++-
 4 files changed, 54 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ba491ff/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
index 7e6d640..0074143 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
@@ -83,7 +83,7 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead {
   private final S3AInstrumentation.InputStreamStatistics streamStatistics;
   private S3AEncryptionMethods serverSideEncryptionAlgorithm;
   private String serverSideEncryptionKey;
-  private final S3AInputPolicy inputPolicy;
+  private S3AInputPolicy inputPolicy;
   private long readahead = Constants.DEFAULT_READAHEAD_RANGE;
   private final Invoker invoker;
 
@@ -139,12 +139,22 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead {
     this.serverSideEncryptionAlgorithm =
         s3Attributes.getServerSideEncryptionAlgorithm();
     this.serverSideEncryptionKey = s3Attributes.getServerSideEncryptionKey();
-    this.inputPolicy = inputPolicy;
+    setInputPolicy(inputPolicy);
     setReadahead(readahead);
     this.invoker = invoker;
   }
 
   /**
+   * Set/update the input policy of the stream.
+   * This updates the stream statistics.
+   * @param inputPolicy new input policy.
+   */
+  private void setInputPolicy(S3AInputPolicy inputPolicy) {
+    this.inputPolicy = inputPolicy;
+    streamStatistics.inputPolicySet(inputPolicy.ordinal());
+  }
+
+  /**
    * Opens up the stream at specified target position and for given length.
    *
    * @param reason reason for reopen
@@ -162,8 +172,9 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead {
     contentRangeFinish = calculateRequestLimit(inputPolicy, targetPos,
         length, contentLength, readahead);
     LOG.debug("reopen({}) for {} range[{}-{}], length={}," +
-        " streamPosition={}, nextReadPosition={}",
-        uri, reason, targetPos, contentRangeFinish, length,  pos, nextReadPos);
+        " streamPosition={}, nextReadPosition={}, policy={}",
+        uri, reason, targetPos, contentRangeFinish, length,  pos, nextReadPos,
+        inputPolicy);
 
     long opencount = streamStatistics.streamOpened();
     GetObjectRequest request = new GetObjectRequest(bucket, key)
@@ -274,6 +285,12 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead {
     } else if (diff < 0) {
       // backwards seek
       streamStatistics.seekBackwards(diff);
+      // if the stream is in "Normal" mode, switch to random IO at this
+      // point, as it is indicative of columnar format IO
+      if (inputPolicy.equals(S3AInputPolicy.Normal)) {
+        LOG.info("Switching to Random IO seek policy");
+        setInputPolicy(S3AInputPolicy.Random);
+      }
     } else {
       // targetPos == pos
       if (remainingInCurrentRequest() > 0) {
@@ -443,6 +460,7 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead {
       try {
         // close or abort the stream
         closeStream("close() operation", this.contentRangeFinish, false);
+        LOG.debug("Statistics of stream {}\n{}", key, streamStatistics);
         // this is actually a no-op
         super.close();
       } finally {
@@ -713,6 +731,8 @@ public class S3AInputStream extends FSInputStream implements CanSetReadahead {
       break;
 
     case Normal:
+      // normal is considered sequential until a backwards seek switches
+      // it to 'Random'
     default:
       rangeLimit = contentLength;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ba491ff/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
index 0fbcc00..d843347 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
@@ -667,6 +667,8 @@ public class S3AInstrumentation implements Closeable, MetricsSource {
     public long readsIncomplete;
     public long bytesReadInClose;
     public long bytesDiscardedInAbort;
+    public long policySetCount;
+    public long inputPolicy;
 
     private InputStreamStatistics() {
     }
@@ -783,6 +785,15 @@ public class S3AInstrumentation implements Closeable, MetricsSource {
     }
 
     /**
+     * The input policy has been switched.
+     * @param updatedPolicy enum value of new policy.
+     */
+    public void inputPolicySet(int updatedPolicy) {
+      policySetCount++;
+      inputPolicy = updatedPolicy;
+    }
+
+    /**
      * String operator describes all the current statistics.
      * <b>Important: there are no guarantees as to the stability
      * of this value.</b>
@@ -813,6 +824,8 @@ public class S3AInstrumentation implements Closeable, MetricsSource {
       sb.append(", ReadsIncomplete=").append(readsIncomplete);
       sb.append(", BytesReadInClose=").append(bytesReadInClose);
       sb.append(", BytesDiscardedInAbort=").append(bytesDiscardedInAbort);
+      sb.append(", InputPolicy=").append(inputPolicy);
+      sb.append(", InputPolicySetCount=").append(policySetCount);
       sb.append('}');
       return sb.toString();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ba491ff/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index fbcd54a..7eebf5c 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -1553,7 +1553,18 @@ backward seeks.
 
 *"normal" (default)*
 
-This is currently the same as "sequential", though it may evolve in future.
+The "Normal" policy starts off reading a file  in "sequential" mode,
+but if the caller seeks backwards in the stream, it switches from
+sequential to "random".
+
+This policy effectively recognizes the initial read pattern of columnar
+storage formats (e.g. Apache ORC and Apache Parquet), which seek to the end
+of a file, read in index data and then seek backwards to selectively read
+columns. The first seeks may be expensive compared to the random policy,
+however the overall process is much less expensive than either sequentially
+reading through a file with the "random" policy, or reading columnar data
+with the "sequential" policy. When the exact format/recommended
+seek policy of the data is known in advance, declaring that policy
+explicitly may still perform slightly better than this adaptive default.
 
 *"random"*
 

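For readers wanting to try the policies, a hedged configuration sketch
(bucket and file names are placeholders; fs.s3a.experimental.input.fadvise
is the existing fadvise property):

    Configuration conf = new Configuration();
    conf.set("fs.s3a.experimental.input.fadvise", "normal");  // the default
    FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
    Path p = new Path("/data/part-0.orc");
    long len = fs.getFileStatus(p).getLen();
    try (FSDataInputStream in = fs.open(p)) {
      in.seek(len - 16);  // footer read; stream is still in "sequential" mode
      in.seek(0);         // first backwards seek switches the stream to random IO
    }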
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ba491ff/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java
index 83ab210..efd96c4 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java
@@ -427,7 +427,11 @@ public class ITestS3AInputStreamPerformance extends S3AScaleTestBase {
     long expectedOpenCount = RANDOM_IO_SEQUENCE.length;
     executeRandomIO(S3AInputPolicy.Normal, expectedOpenCount);
     assertEquals("streams aborted in " + streamStatistics,
-        4, streamStatistics.aborted);
+        1, streamStatistics.aborted);
+    assertEquals("policy changes in " + streamStatistics,
+        2, streamStatistics.policySetCount);
+    assertEquals("input policy in " + streamStatistics,
+        S3AInputPolicy.Random.ordinal(), streamStatistics.inputPolicy);
   }
 
   /**




[31/49] hadoop git commit: HDFS-9023. When NN is not able to identify DN for replication, reason behind it can be logged.

Posted by as...@apache.org.
HDFS-9023. When NN is not able to identify DN for replication, reason behind it can be logged.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5bf7e594
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5bf7e594
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5bf7e594

Branch: refs/heads/YARN-6592
Commit: 5bf7e594d7d54e5295fe4240c3d60c08d4755ab7
Parents: d31c9d8
Author: Xiao Chen <xi...@apache.org>
Authored: Thu Dec 28 11:52:49 2017 -0800
Committer: Xiao Chen <xi...@apache.org>
Committed: Thu Dec 28 11:54:08 2017 -0800

----------------------------------------------------------------------
 .../BlockPlacementPolicyDefault.java            | 74 +++++++++++++++++---
 .../blockmanagement/DatanodeDescriptor.java     |  2 +-
 2 files changed, 64 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5bf7e594/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index b925feb..a37cda4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -62,6 +62,28 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
         }
       };
 
+  private static final ThreadLocal<HashMap<NodeNotChosenReason, Integer>>
+      CHOOSE_RANDOM_REASONS = ThreadLocal
+      .withInitial(() -> new HashMap<NodeNotChosenReason, Integer>());
+
+  private enum NodeNotChosenReason {
+    NOT_IN_SERVICE("the node isn't in service"),
+    NODE_STALE("the node is stale"),
+    NODE_TOO_BUSY("the node is too busy"),
+    TOO_MANY_NODES_ON_RACK("the rack has too many chosen nodes"),
+    NOT_ENOUGH_STORAGE_SPACE("not enough storage space to place the block");
+
+    private final String text;
+
+    NodeNotChosenReason(final String logText) {
+      text = logText;
+    }
+
+    private String getText() {
+      return text;
+    }
+  }
+
   protected boolean considerLoad; 
   protected double considerLoadFactor;
   private boolean preferLocalNode;
@@ -711,6 +733,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
       builder.setLength(0);
       builder.append("[");
     }
+    CHOOSE_RANDOM_REASONS.get().clear();
     boolean badTarget = false;
     DatanodeStorageInfo firstChosen = null;
     while (numOfReplicas > 0) {
@@ -781,14 +804,24 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     }
     if (numOfReplicas>0) {
       String detail = enableDebugLogging;
-      if (LOG.isDebugEnabled()) {
-        if (badTarget && builder != null) {
-          detail = builder.toString();
+      if (LOG.isDebugEnabled() && builder != null) {
+        detail = builder.toString();
+        if (badTarget) {
           builder.setLength(0);
         } else {
+          if (detail.length() > 1) {
+            // only log if there's more than "[", which is always appended at
+            // the beginning of this method.
+            LOG.debug(detail);
+          }
           detail = "";
         }
       }
+      final HashMap<NodeNotChosenReason, Integer> reasonMap =
+          CHOOSE_RANDOM_REASONS.get();
+      if (!reasonMap.isEmpty()) {
+        LOG.info("Not enough replicas was chosen. Reason:{}", reasonMap);
+      }
       throw new NotEnoughReplicasException(detail);
     }
     
@@ -834,19 +867,38 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     if (storage != null) {
       results.add(storage);
     } else {
-      logNodeIsNotChosen(dnd, "no good storage to place the block ");
+      logNodeIsNotChosen(dnd, NodeNotChosenReason.NOT_ENOUGH_STORAGE_SPACE,
+          " for storage type " + storageType);
     }
     return storage;
   }
 
   private static void logNodeIsNotChosen(DatanodeDescriptor node,
-      String reason) {
+      NodeNotChosenReason reason) {
+    logNodeIsNotChosen(node, reason, null);
+  }
+
+  private static void logNodeIsNotChosen(DatanodeDescriptor node,
+      NodeNotChosenReason reason, String reasonDetails) {
+    assert reason != null;
     if (LOG.isDebugEnabled()) {
       // build the error message for later use.
       debugLoggingBuilder.get()
           .append("\n  Datanode ").append(node)
-          .append(" is not chosen since ").append(reason).append(".");
+          .append(" is not chosen since ").append(reason.getText());
+      if (reasonDetails != null) {
+        debugLoggingBuilder.get().append(" ").append(reasonDetails);
+      }
+      debugLoggingBuilder.get().append(".");
+    }
+    // always populate reason map to log high level reasons.
+    final HashMap<NodeNotChosenReason, Integer> reasonMap =
+        CHOOSE_RANDOM_REASONS.get();
+    Integer base = reasonMap.get(reason);
+    if (base == null) {
+      base = 0;
     }
+    reasonMap.put(reason, base + 1);
   }
 
   /**
@@ -868,13 +920,13 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
                          boolean avoidStaleNodes) {
     // check if the node is (being) decommissioned
     if (!node.isInService()) {
-      logNodeIsNotChosen(node, "the node isn't in service.");
+      logNodeIsNotChosen(node, NodeNotChosenReason.NOT_IN_SERVICE);
       return false;
     }
 
     if (avoidStaleNodes) {
       if (node.isStale(this.staleInterval)) {
-        logNodeIsNotChosen(node, "the node is stale ");
+        logNodeIsNotChosen(node, NodeNotChosenReason.NODE_STALE);
         return false;
       }
     }
@@ -885,8 +937,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
           stats.getInServiceXceiverAverage();
       final int nodeLoad = node.getXceiverCount();
       if (nodeLoad > maxLoad) {
-        logNodeIsNotChosen(node, "the node is too busy (load: " + nodeLoad
-            + " > " + maxLoad + ") ");
+        logNodeIsNotChosen(node, NodeNotChosenReason.NODE_TOO_BUSY,
+            "(load: " + nodeLoad + " > " + maxLoad + ")");
         return false;
       }
     }
@@ -901,7 +953,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
       }
     }
     if (counter > maxTargetPerRack) {
-      logNodeIsNotChosen(node, "the rack has too many chosen nodes ");
+      logNodeIsNotChosen(node, NodeNotChosenReason.TOO_MANY_NODES_ON_RACK);
       return false;
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5bf7e594/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index fc58708..618bc13 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -764,7 +764,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
       }
     }
     if (requiredSize > remaining - scheduledSize) {
-      LOG.debug(
+      BlockPlacementPolicy.LOG.debug(
           "The node {} does not have enough {} space (required={},"
           + " scheduled={}, remaining={}).",
           this, t, requiredSize, scheduledSize, remaining);
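With this change, an operator diagnosing placement failures sees a single
aggregated INFO line per failed attempt instead of having to enable per-node
debug logging; the reason counts below are illustrative, not real output:

    INFO ... Not enough replicas were chosen. Reason: {NODE_TOO_BUSY=3, TOO_MANY_NODES_ON_RACK=1}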




[24/49] hadoop git commit: HADOOP-15113. NPE in S3A getFileStatus: null instrumentation on using closed instance. Contributed by Steve Loughran.

Posted by as...@apache.org.
HADOOP-15113. NPE in S3A getFileStatus: null instrumentation on using closed instance.
Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef450df4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef450df4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef450df4

Branch: refs/heads/YARN-6592
Commit: ef450df443f1dea1c52082cf281f25db7141972f
Parents: d2d8f4a
Author: Steve Loughran <st...@apache.org>
Authored: Thu Dec 21 14:15:53 2017 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Thu Dec 21 14:15:53 2017 +0000

----------------------------------------------------------------------
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 53 ++++++++---
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  3 +
 .../apache/hadoop/fs/s3a/ITestS3AClosedFS.java  | 92 ++++++++++++++++++++
 3 files changed, 135 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef450df4/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 9431f17..f461c9e 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -187,6 +187,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   private long readAhead;
   private S3AInputPolicy inputPolicy;
   private final AtomicBoolean closed = new AtomicBoolean(false);
+  private volatile boolean isClosed = false;
   private MetadataStore metadataStore;
   private boolean allowAuthoritative;
 
@@ -678,7 +679,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
    */
   public FSDataInputStream open(Path f, int bufferSize)
       throws IOException {
-
+    checkNotClosed();
     LOG.debug("Opening '{}' for reading; input policy = {}", f, inputPolicy);
     final FileStatus fileStatus = getFileStatus(f);
     if (fileStatus.isDirectory()) {
@@ -722,6 +723,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   public FSDataOutputStream create(Path f, FsPermission permission,
       boolean overwrite, int bufferSize, short replication, long blockSize,
       Progressable progress) throws IOException {
+    checkNotClosed();
     final Path path = qualify(f);
     String key = pathToKey(path);
     FileStatus status = null;
@@ -871,7 +873,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
     Path dst = qualify(dest);
 
     LOG.debug("Rename path {} to {}", src, dst);
-    incrementStatistic(INVOCATION_RENAME);
+    entryPoint(INVOCATION_RENAME);
 
     String srcKey = pathToKey(src);
     String dstKey = pathToKey(dst);
@@ -1098,6 +1100,17 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   }
 
   /**
+   * Entry point to an operation.
+   * Increments the statistic; verifies the FS is active.
+   * @param operation The operation to increment
+   * @throws IOException if the filesystem is closed
+   */
+  protected void entryPoint(Statistic operation) throws IOException {
+    checkNotClosed();
+    incrementStatistic(operation);
+  }
+
+  /**
    * Increment a statistic by 1.
    * @param statistic The operation to increment
    */
@@ -1660,6 +1673,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   @Retries.RetryTranslated
   public boolean delete(Path f, boolean recursive) throws IOException {
     try {
+      checkNotClosed();
       return innerDelete(innerGetFileStatus(f, true), recursive);
     } catch (FileNotFoundException e) {
       LOG.debug("Couldn't delete {} - does not exist", f);
@@ -1838,7 +1852,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
     Path path = qualify(f);
     String key = pathToKey(path);
     LOG.debug("List status for path: {}", path);
-    incrementStatistic(INVOCATION_LIST_STATUS);
+    entryPoint(INVOCATION_LIST_STATUS);
 
     List<FileStatus> result;
     final FileStatus fileStatus =  getFileStatus(path);
@@ -1981,7 +1995,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
       throws IOException, FileAlreadyExistsException, AmazonClientException {
     Path f = qualify(p);
     LOG.debug("Making directory: {}", f);
-    incrementStatistic(INVOCATION_MKDIRS);
+    entryPoint(INVOCATION_MKDIRS);
     FileStatus fileStatus;
     List<Path> metadataStoreDirs = null;
     if (hasMetadataStore()) {
@@ -2058,7 +2072,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   @Retries.RetryTranslated
   S3AFileStatus innerGetFileStatus(final Path f,
       boolean needEmptyDirectoryFlag) throws IOException {
-    incrementStatistic(INVOCATION_GET_FILE_STATUS);
+    entryPoint(INVOCATION_GET_FILE_STATUS);
     final Path path = qualify(f);
     String key = pathToKey(path);
     LOG.debug("Getting path status for {}  ({})", path, key);
@@ -2319,7 +2333,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   private void innerCopyFromLocalFile(boolean delSrc, boolean overwrite,
       Path src, Path dst)
       throws IOException, FileAlreadyExistsException, AmazonClientException {
-    incrementStatistic(INVOCATION_COPY_FROM_LOCAL_FILE);
+    entryPoint(INVOCATION_COPY_FROM_LOCAL_FILE);
     LOG.debug("Copying local file from {} to {}", src, dst);
 
     // Since we have a local file, we don't need to stream into a temporary file
@@ -2418,6 +2432,8 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
       // already closed
       return;
     }
+    isClosed = true;
+    LOG.debug("Filesystem {} is closed", uri);
     try {
       super.close();
     } finally {
@@ -2435,6 +2451,17 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   }
 
   /**
+   * Verify that the filesystem is still open. Non-blocking; this gives
+   * the last state of the volatile {@link #isClosed} field.
+   * @throws IOException if the filesystem is closed.
+   */
+  private void checkNotClosed() throws IOException {
+    if (isClosed) {
+      throw new IOException(uri + ": " + E_FS_CLOSED);
+    }
+  }
+
+  /**
    * Override getCanonicalServiceName because we don't support token in S3A.
    */
   @Override
@@ -2860,7 +2887,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
    */
   @Override
   public FileStatus[] globStatus(Path pathPattern) throws IOException {
-    incrementStatistic(INVOCATION_GLOB_STATUS);
+    entryPoint(INVOCATION_GLOB_STATUS);
     return super.globStatus(pathPattern);
   }
 
@@ -2871,7 +2898,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   @Override
   public FileStatus[] globStatus(Path pathPattern, PathFilter filter)
       throws IOException {
-    incrementStatistic(INVOCATION_GLOB_STATUS);
+    entryPoint(INVOCATION_GLOB_STATUS);
     return super.globStatus(pathPattern, filter);
   }
 
@@ -2881,7 +2908,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
    */
   @Override
   public boolean exists(Path f) throws IOException {
-    incrementStatistic(INVOCATION_EXISTS);
+    entryPoint(INVOCATION_EXISTS);
     return super.exists(f);
   }
 
@@ -2892,7 +2919,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   @Override
   @SuppressWarnings("deprecation")
   public boolean isDirectory(Path f) throws IOException {
-    incrementStatistic(INVOCATION_IS_DIRECTORY);
+    entryPoint(INVOCATION_IS_DIRECTORY);
     return super.isDirectory(f);
   }
 
@@ -2903,7 +2930,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   @Override
   @SuppressWarnings("deprecation")
   public boolean isFile(Path f) throws IOException {
-    incrementStatistic(INVOCATION_IS_FILE);
+    entryPoint(INVOCATION_IS_FILE);
     return super.isFile(f);
   }
 
@@ -2948,7 +2975,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
 
   private RemoteIterator<LocatedFileStatus> innerListFiles(Path f, boolean
       recursive, Listing.FileStatusAcceptor acceptor) throws IOException {
-    incrementStatistic(INVOCATION_LIST_FILES);
+    entryPoint(INVOCATION_LIST_FILES);
     Path path = qualify(f);
     LOG.debug("listFiles({}, {})", path, recursive);
     try {
@@ -3033,7 +3060,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f,
       final PathFilter filter)
       throws FileNotFoundException, IOException {
-    incrementStatistic(INVOCATION_LIST_LOCATED_STATUS);
+    entryPoint(INVOCATION_LIST_LOCATED_STATUS);
     Path path = qualify(f);
     LOG.debug("listLocatedStatus({}, {}", path, filter);
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef450df4/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
index 2457217..6d66739 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
@@ -89,6 +89,9 @@ public final class S3AUtils {
       "is abstract and therefore cannot be created";
   static final String ENDPOINT_KEY = "Endpoint";
 
+  /** Filesystem is closed; kept here to keep the errors close. */
+  static final String E_FS_CLOSED = "FileSystem is closed!";
+
   /**
    * Core property for provider path. Duplicated here for consistent
    * code across Hadoop version: {@value}.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef450df4/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClosedFS.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClosedFS.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClosedFS.java
new file mode 100644
index 0000000..6e81452
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClosedFS.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a;
+
+import java.io.IOException;
+
+import org.junit.Test;
+
+import org.apache.hadoop.fs.Path;
+
+import static org.apache.hadoop.test.LambdaTestUtils.*;
+import static org.apache.hadoop.fs.s3a.S3AUtils.E_FS_CLOSED;
+
+/**
+ * Tests of a closed S3A FileSystem; just make sure
+ * that basic file Ops fail meaningfully.
+ */
+public class ITestS3AClosedFS extends AbstractS3ATestBase {
+
+  private Path root = new Path("/");
+
+  @Override
+  public void setup() throws Exception {
+    super.setup();
+    root = getFileSystem().makeQualified(new Path("/"));
+    getFileSystem().close();
+  }
+
+  @Override
+  public void teardown()  {
+    // no op, as the FS is closed
+  }
+
+  @Test
+  public void testClosedGetFileStatus() throws Exception {
+    intercept(IOException.class, E_FS_CLOSED,
+        () -> getFileSystem().getFileStatus(root));
+  }
+
+  @Test
+  public void testClosedListStatus() throws Exception {
+    intercept(IOException.class, E_FS_CLOSED,
+        () -> getFileSystem().listStatus(root));
+  }
+
+  @Test
+  public void testClosedListFile() throws Exception {
+    intercept(IOException.class, E_FS_CLOSED,
+        () -> getFileSystem().listFiles(root, false));
+  }
+
+  @Test
+  public void testClosedListLocatedStatus() throws Exception {
+    intercept(IOException.class, E_FS_CLOSED,
+        () -> getFileSystem().listLocatedStatus(root));
+  }
+
+  @Test
+  public void testClosedCreate() throws Exception {
+    intercept(IOException.class, E_FS_CLOSED,
+        () -> getFileSystem().create(path("to-create")).close());
+  }
+
+  @Test
+  public void testClosedDelete() throws Exception {
+    intercept(IOException.class, E_FS_CLOSED,
+        () ->  getFileSystem().delete(path("to-delete"), false));
+  }
+
+  @Test
+  public void testClosedOpen() throws Exception {
+    intercept(IOException.class, E_FS_CLOSED,
+        () ->  getFileSystem().open(path("to-open")));
+  }
+
+}
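A hedged sketch of what callers observe after this change (bucket name is
a placeholder):

    FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
    fs.close();
    fs.getFileStatus(new Path("/"));
    // now fails fast with:
    // java.io.IOException: s3a://example-bucket: FileSystem is closed!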




[04/49] hadoop git commit: Add 2.8.3 release jdiff files.

Posted by as...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f8caf5/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.8.3.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.8.3.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.8.3.xml
new file mode 100644
index 0000000..6826c25
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.8.3.xml
@@ -0,0 +1,2665 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Tue Dec 05 05:29:28 UTC 2017 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop YARN Common 2.8.3"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-annotations.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/jdiff.jar -verbose -classpath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/classes:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-2.8.3.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/commons-net/commons-net/3.1/commons-net-3.1.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/maven/org/mortbay/jetty/jetty-sslengine/6.1.26/jetty-sslengine-6.1.2
 6.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/maven/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/maven/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/maven/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/maven/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/maven/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/maven/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/maven/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.8.3.jar:/maven/com/nimbusds/nimbus-jose-jwt/3.9/nimbus-jose-jwt-3.9.jar:/maven/net/jcip/jcip-annotations/1.0/jcip-annota
 tions-1.0.jar:/maven/net/minidev/json-smart/1.1.1/json-smart-1.1.1.jar:/maven/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/maven/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/maven/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/maven/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/maven/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/maven/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/htrace/htrace-core4/4.0.1-incubating/htrace-core4-4.0.1-incubating.jar:/maven/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/maven/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/build/source/hadoop-yarn-project/h
 adoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-2.8.3.jar:/maven/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/maven/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/maven/javax/activation/activation/1.1/activation-1.1.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/maven/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/maven/com/sun/jersey/jersey-client/1.9/jersey-client-1.9.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson
 -xc-1.9.13.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.8.3.jar:/usr/lib/jvm/java-7-openjdk-amd64/lib/tools.jar:/maven/com/google/inject/extensions/guice-servlet/3.0/guice-servlet-3.0.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/com/google/inject/guice/3.0/guice-3.0.jar:/maven/javax/inject/javax.inject/1/javax.inject-1.jar:/maven/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/maven/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/maven/asm/asm/3.2/asm-3.2.jar:/maven/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1
 /jaxb-impl-2.2.3-1.jar:/maven/com/sun/jersey/contribs/jersey-guice/1.9/jersey-guice-1.9.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar -sourcepath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java -apidir /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/site/jdiff/xml -apiname Apache Hadoop YARN Common 2.8.3 -->
+<package name="org.apache.hadoop.yarn">
+  <!-- start class org.apache.hadoop.yarn.ContainerLogAppender -->
+  <class name="ContainerLogAppender" extends="org.apache.log4j.FileAppender"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Flushable"/>
+    <constructor name="ContainerLogAppender"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="activateOptions"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="append"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="event" type="org.apache.log4j.spi.LoggingEvent"/>
+    </method>
+    <method name="flush"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getContainerLogDir" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Getter/Setter methods for log4j.]]>
+      </doc>
+    </method>
+    <method name="setContainerLogDir"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerLogDir" type="java.lang.String"/>
+    </method>
+    <method name="getContainerLogFile" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setContainerLogFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerLogFile" type="java.lang.String"/>
+    </method>
+    <method name="getTotalLogFileSize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setTotalLogFileSize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="logSize" type="long"/>
+    </method>
+    <doc>
+    <![CDATA[A simple log4j-appender for container's logs.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.ContainerLogAppender -->
+  <!-- start class org.apache.hadoop.yarn.ContainerRollingLogAppender -->
+  <class name="ContainerRollingLogAppender" extends="org.apache.log4j.RollingFileAppender"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Flushable"/>
+    <constructor name="ContainerRollingLogAppender"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="activateOptions"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="flush"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getContainerLogDir" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Getter/Setter methods for log4j.]]>
+      </doc>
+    </method>
+    <method name="setContainerLogDir"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerLogDir" type="java.lang.String"/>
+    </method>
+    <method name="getContainerLogFile" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setContainerLogFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerLogFile" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[A simple log4j-appender for container's logs.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.ContainerRollingLogAppender -->
+  <!-- start class org.apache.hadoop.yarn.YarnUncaughtExceptionHandler -->
+  <class name="YarnUncaughtExceptionHandler" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Thread.UncaughtExceptionHandler"/>
+    <constructor name="YarnUncaughtExceptionHandler"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="uncaughtException"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="t" type="java.lang.Thread"/>
+      <param name="e" type="java.lang.Throwable"/>
+    </method>
+    <doc>
+    <![CDATA[This class is intended to be installed by calling 
+ {@link Thread#setDefaultUncaughtExceptionHandler(UncaughtExceptionHandler)}
+ in the main entry point.  It is intended to try and cleanly shut down
+ programs using the Yarn Event framework.
+ 
+ Note: Right now it will only shut down the program if an Error is caught, but
+ not any other exception.  Anything else is just logged.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.YarnUncaughtExceptionHandler -->
+</package>
+<package name="org.apache.hadoop.yarn.api">
+</package>
+<package name="org.apache.hadoop.yarn.client">
+  <!-- start class org.apache.hadoop.yarn.client.AHSProxy -->
+  <class name="AHSProxy" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AHSProxy"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createAHSProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="ahsAddress" type="java.net.InetSocketAddress"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="rmAddress" type="java.net.InetSocketAddress"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.AHSProxy -->
+  <!-- start class org.apache.hadoop.yarn.client.ClientRMProxy -->
+  <class name="ClientRMProxy" extends="org.apache.hadoop.yarn.client.RMProxy"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="createRMProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="configuration" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a proxy to the ResourceManager for the specified protocol.
+ @param configuration Configuration with all the required information.
+ @param protocol Client protocol for which proxy is being requested.
+ @param <T> Type of proxy.
+ @return Proxy to the ResourceManager for the specified client protocol.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getRMDelegationTokenService" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get the token service name to be used for RMDelegationToken. Depending
+ on whether HA is enabled or not, this method generates the appropriate
+ service name as a comma-separated list of service addresses.
+
+ @param conf Configuration corresponding to the cluster we need the
+             RMDelegationToken for
+ @return the service name for the RMDelegationToken]]>
+      </doc>
+    </method>
+    <method name="getAMRMTokenService" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getTokenService" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="address" type="java.lang.String"/>
+      <param name="defaultAddr" type="java.lang.String"/>
+      <param name="defaultPort" type="int"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.ClientRMProxy -->
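+  <!-- Usage sketch: creating a client-side proxy to the ResourceManager with
+       createRMProxy. ApplicationClientProtocol is one protocol commonly used
+       here; treat the snippet as a sketch, not canonical usage:
+
+         Configuration conf = new YarnConfiguration();
+         // createRMProxy throws IOException; handle or declare it.
+         ApplicationClientProtocol rmClient =
+             ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class);
+         // rmClient can now be used to submit and query applications.
+  -->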
+  <!-- start class org.apache.hadoop.yarn.client.NMProxy -->
+  <class name="NMProxy" extends="org.apache.hadoop.yarn.client.ServerProxy"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NMProxy"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createNMProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="rpc" type="org.apache.hadoop.yarn.ipc.YarnRPC"/>
+      <param name="serverAddress" type="java.net.InetSocketAddress"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.NMProxy -->
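+  <!-- Usage sketch: creating a retrying proxy to a NodeManager. The protocol
+       class is a common choice and the address is a hypothetical value:
+
+         UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+         YarnRPC rpc = YarnRPC.create(conf);
+         InetSocketAddress nmAddr =
+             NetUtils.createSocketAddr("nm-host:45454");  // hypothetical address
+         ContainerManagementProtocol proxy = NMProxy.createNMProxy(
+             conf, ContainerManagementProtocol.class, ugi, rpc, nmAddr);
+  -->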
+  <!-- start class org.apache.hadoop.yarn.client.RMProxy -->
+  <class name="RMProxy" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RMProxy"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createRMProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="This method is deprecated and is not used by YARN internally any more.
+ To create a proxy to the RM, use ClientRMProxy#createRMProxy or
+ ServerRMProxy#createRMProxy.
+
+ Create a proxy to the ResourceManager at the specified address.">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="rmAddress" type="java.net.InetSocketAddress"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[@deprecated
+ This method is deprecated and is not used by YARN internally any more.
+ To create a proxy to the RM, use ClientRMProxy#createRMProxy or
+ ServerRMProxy#createRMProxy.
+
+ Create a proxy to the ResourceManager at the specified address.
+
+ @param conf Configuration to generate retry policy
+ @param protocol Protocol for the proxy
+ @param rmAddress Address of the ResourceManager
+ @param <T> Type information of the proxy
+ @return Proxy to the RM
+ @throws IOException]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.RMProxy -->
+  <!-- start class org.apache.hadoop.yarn.client.ServerProxy -->
+  <class name="ServerProxy" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ServerProxy"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createRetryPolicy" return="org.apache.hadoop.io.retry.RetryPolicy"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="maxWaitTimeStr" type="java.lang.String"/>
+      <param name="defMaxWaitTime" type="long"/>
+      <param name="connectRetryIntervalStr" type="java.lang.String"/>
+      <param name="defRetryInterval" type="long"/>
+    </method>
+    <method name="createRetriableProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="user" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="rpc" type="org.apache.hadoop.yarn.ipc.YarnRPC"/>
+      <param name="serverAddress" type="java.net.InetSocketAddress"/>
+      <param name="retryPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.ServerProxy -->
+</package>
+<package name="org.apache.hadoop.yarn.client.api">
+  <!-- start class org.apache.hadoop.yarn.client.api.TimelineClient -->
+  <class name="TimelineClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Flushable"/>
+    <constructor name="TimelineClient" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createTimelineClient" return="org.apache.hadoop.yarn.client.api.TimelineClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a timeline client. The current UGI at the time the user
+ initializes the client will be used for the put and delegation token
+ operations. The current user may use {@link UserGroupInformation#doAs} to
+ construct and initialize a timeline client as another user, if the following
+ operations are supposed to be conducted by that user.
+
+ @return a timeline client]]>
+      </doc>
+    </method>
+    <method name="putEntities" return="org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entities" type="org.apache.hadoop.yarn.api.records.timeline.TimelineEntity[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Send the information of a number of conceptual entities to the timeline
+ server. It is a blocking API. The method will not return until it gets the
+ response from the timeline server.
+ </p>
+ 
+ @param entities
+          the collection of {@link TimelineEntity}
+ @return the error information if the sent entities are not correctly stored
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="putEntities" return="org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <param name="groupId" type="org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId"/>
+      <param name="entities" type="org.apache.hadoop.yarn.api.records.timeline.TimelineEntity[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Send the information of a number of conceptual entities to the timeline
+ server. It is a blocking API. The method will not return until it gets the
+ response from the timeline server.
+
+ This API is only for timeline service v1.5
+ </p>
+
+ @param appAttemptId {@link ApplicationAttemptId}
+ @param groupId {@link TimelineEntityGroupId}
+ @param entities
+          the collection of {@link TimelineEntity}
+ @return the error information if the sent entities are not correctly stored
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="putDomain"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="domain" type="org.apache.hadoop.yarn.api.records.timeline.TimelineDomain"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Send the information of a domain to the timeline server. It is a
+ blocking API. The method will not return until it gets the response from
+ the timeline server.
+ </p>
+ 
+ @param domain
+          a {@link TimelineDomain} object
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="putDomain"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <param name="domain" type="org.apache.hadoop.yarn.api.records.timeline.TimelineDomain"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Send the information of a domain to the timeline server. It is a
+ blocking API. The method will not return until it gets the response from
+ the timeline server.
+
+ This API is only for timeline service v1.5
+ </p>
+
+ @param domain
+          a {@link TimelineDomain} object
+ @param appAttemptId {@link ApplicationAttemptId}
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="renewer" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a delegation token so as to be able to talk to the timeline server in a
+ secure way.
+ </p>
+ 
+ @param renewer
+          Address of the renewer who can renew these tokens when needed by
+          securely talking to the timeline server
+ @return a delegation token ({@link Token}) that can be used to talk to the
+         timeline server
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="renewDelegationToken" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="timelineDT" type="org.apache.hadoop.security.token.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Renew a timeline delegation token.
+ </p>
+ 
+ @param timelineDT
+          the delegation token to renew
+ @return the new expiration time
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="cancelDelegationToken"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="timelineDT" type="org.apache.hadoop.security.token.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Cancel a timeline delegation token.
+ </p>
+ 
+ @param timelineDT
+          the delegation token to cancel
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A client library that can be used to post information to the timeline
+ server in terms of a number of conceptual entities.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.TimelineClient -->
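+  <!-- Usage sketch: posting a timeline entity with the blocking putEntities
+       call described above. The entity type and id are hypothetical values:
+
+         TimelineClient client = TimelineClient.createTimelineClient();
+         client.init(conf);
+         client.start();
+         try {
+           TimelineEntity entity = new TimelineEntity();
+           entity.setEntityType("MY_APP");              // hypothetical type
+           entity.setEntityId("my_app_entity_1");       // hypothetical id
+           entity.setStartTime(System.currentTimeMillis());
+           // Blocks until the timeline server responds; the response carries
+           // error information for entities that were not stored correctly.
+           TimelinePutResponse response = client.putEntities(entity);
+         } finally {
+           client.stop();
+         }
+  -->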
+</package>
+<package name="org.apache.hadoop.yarn.client.api.impl">
+</package>
+<package name="org.apache.hadoop.yarn.event">
+  <!-- start class org.apache.hadoop.yarn.event.AbstractEvent -->
+  <class name="AbstractEvent" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.event.Event"/>
+    <constructor name="AbstractEvent" type="TYPE"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AbstractEvent" type="TYPE, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getTimestamp" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getType" return="TYPE"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Parent class of all the events. All events extend this class.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.event.AbstractEvent -->
+  <!-- start class org.apache.hadoop.yarn.event.AsyncDispatcher -->
+  <class name="AsyncDispatcher" extends="org.apache.hadoop.service.AbstractService"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.event.Dispatcher"/>
+    <constructor name="AsyncDispatcher"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AsyncDispatcher" type="java.util.concurrent.BlockingQueue"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="serviceInit"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="serviceStart"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="setDrainEventsOnStop"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="serviceStop"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="dispatch"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="event" type="org.apache.hadoop.yarn.event.Event"/>
+    </method>
+    <method name="register"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="eventType" type="java.lang.Class"/>
+      <param name="handler" type="org.apache.hadoop.yarn.event.EventHandler"/>
+    </method>
+    <method name="getEventHandler" return="org.apache.hadoop.yarn.event.EventHandler"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isEventThreadWaiting" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="isDrained" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <field name="eventDispatchers" type="java.util.Map"
+      transient="false" volatile="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Dispatches {@link Event}s in a separate thread. Currently only a single
+ thread does that. Potentially there could be multiple channels for each event
+ type class, and a thread pool could be used to dispatch the events.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.event.AsyncDispatcher -->
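+  <!-- Usage sketch: registering a handler and dispatching an event through
+       AsyncDispatcher. JobEventType and JobEvent are hypothetical types
+       defined only for this illustration:
+
+         enum JobEventType { JOB_STARTED }
+
+         class JobEvent extends AbstractEvent<JobEventType> {
+           JobEvent(JobEventType type) { super(type); }
+         }
+
+         AsyncDispatcher dispatcher = new AsyncDispatcher();
+         dispatcher.init(new Configuration());
+         dispatcher.start();
+         // Route all events of JobEventType to one handler.
+         dispatcher.register(JobEventType.class,
+             new EventHandler<JobEvent>() {
+               @Override
+               public void handle(JobEvent event) {
+                 System.out.println("handled " + event.getType());
+               }
+             });
+         // Events are queued and handled on the dispatcher's own thread.
+         dispatcher.getEventHandler().handle(
+             new JobEvent(JobEventType.JOB_STARTED));
+         dispatcher.stop();
+  -->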
+  <!-- start interface org.apache.hadoop.yarn.event.Dispatcher -->
+  <interface name="Dispatcher"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getEventHandler" return="org.apache.hadoop.yarn.event.EventHandler"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="register"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="eventType" type="java.lang.Class"/>
+      <param name="handler" type="org.apache.hadoop.yarn.event.EventHandler"/>
+    </method>
+    <field name="DISPATCHER_EXIT_ON_ERROR_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_DISPATCHER_EXIT_ON_ERROR" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Event Dispatcher interface. It dispatches events to registered 
+ event handlers based on event types.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.event.Dispatcher -->
+  <!-- start interface org.apache.hadoop.yarn.event.Event -->
+  <interface name="Event"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getType" return="TYPE"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getTimestamp" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Interface defining events api.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.event.Event -->
+  <!-- start interface org.apache.hadoop.yarn.event.EventHandler -->
+  <interface name="EventHandler"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="handle"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="event" type="T"/>
+    </method>
+    <doc>
+    <![CDATA[Interface for handling events of type T
+
+ @param <T> parameterized event of type T]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.event.EventHandler -->
+</package>
+<package name="org.apache.hadoop.yarn.factories">
+</package>
+<package name="org.apache.hadoop.yarn.factory.providers">
+</package>
+<package name="org.apache.hadoop.yarn.logaggregation">
+  <!-- start class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat -->
+  <class name="AggregatedLogFormat" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AggregatedLogFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat -->
+  <!-- start class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey -->
+  <class name="AggregatedLogFormat.LogKey" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="AggregatedLogFormat.LogKey"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AggregatedLogFormat.LogKey" type="org.apache.hadoop.yarn.api.records.ContainerId"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AggregatedLogFormat.LogKey" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey -->
+  <!-- start class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader -->
+  <class name="AggregatedLogFormat.LogReader" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AggregatedLogFormat.LogReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="getApplicationOwner" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns the owner of the application.
+ 
+ @return the application owner.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAcls" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns ACLs for the application. An empty map is returned if no ACLs are
+ found.
+ 
+ @return a map of the Application ACLs.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="next" return="java.io.DataInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Read the next key and return the value-stream.
+ 
+ @param key
+ @return the valueStream if there are more keys, or null otherwise.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readAcontainerLogs"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="writer" type="java.io.Writer"/>
+      <param name="logUploadedTime" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Writes all logs for a single container to the provided writer.
+ @param valueStream
+ @param writer
+ @param logUploadedTime
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readAcontainerLogs"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="writer" type="java.io.Writer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Writes all logs for a single container to the provided writer.
+ @param valueStream
+ @param writer
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readAContainerLogsForALogType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="out" type="java.io.PrintStream"/>
+      <param name="logUploadedTime" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Keep calling this until you get an {@link EOFException}, to read logs of
+ all types for a single container.
+ 
+ @param valueStream
+ @param out
+ @param logUploadedTime
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readAContainerLogsForALogType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="out" type="java.io.PrintStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Keep calling this until you get an {@link EOFException}, to read logs of
+ all types for a single container.
+ 
+ @param valueStream
+ @param out
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readContainerLogsForALogType" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="out" type="java.io.PrintStream"/>
+      <param name="logUploadedTime" type="long"/>
+      <param name="logType" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Keep calling this until you get an {@link EOFException}, to read logs of
+ the specific types for a single container.
+ @param valueStream
+ @param out
+ @param logUploadedTime
+ @param logType
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader -->
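+  <!-- Usage sketch: iterating an aggregated log file with LogReader. The
+       aggregatedLogPath value is an assumption for illustration:
+
+         AggregatedLogFormat.LogReader reader =
+             new AggregatedLogFormat.LogReader(conf, aggregatedLogPath);
+         try {
+           AggregatedLogFormat.LogKey key = new AggregatedLogFormat.LogKey();
+           DataInputStream valueStream = reader.next(key);
+           while (valueStream != null) {
+             System.out.println("Logs for container: " + key);
+             try {
+               // Per the javadoc above, keep calling until EOFException to
+               // print every log type for this container.
+               while (true) {
+                 AggregatedLogFormat.LogReader.readAContainerLogsForALogType(
+                     valueStream, System.out);
+               }
+             } catch (EOFException e) {
+               // Finished this container's logs; move on to the next key.
+             }
+             key = new AggregatedLogFormat.LogKey();
+             valueStream = reader.next(key);
+           }
+         } finally {
+           reader.close();
+         }
+  -->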
+</package>
+<package name="org.apache.hadoop.yarn.nodelabels">
+</package>
+<package name="org.apache.hadoop.yarn.nodelabels.event">
+</package>
+<package name="org.apache.hadoop.yarn.security">
+  <!-- start class org.apache.hadoop.yarn.security.AMRMTokenIdentifier -->
+  <class name="AMRMTokenIdentifier" extends="org.apache.hadoop.security.token.TokenIdentifier"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AMRMTokenIdentifier"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AMRMTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getKind" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getKeyId" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.AMRMTokenIdentifierProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="KIND_NAME" type="org.apache.hadoop.io.Text"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[AMRMTokenIdentifier is the TokenIdentifier to be used by
+ ApplicationMasters to authenticate to the ResourceManager.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.AMRMTokenIdentifier -->
+  <!-- start class org.apache.hadoop.yarn.security.AMRMTokenSelector -->
+  <class name="AMRMTokenSelector" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.security.token.TokenSelector"/>
+    <constructor name="AMRMTokenSelector"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="selectToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="service" type="org.apache.hadoop.io.Text"/>
+      <param name="tokens" type="java.util.Collection"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.AMRMTokenSelector -->
+  <!-- start class org.apache.hadoop.yarn.security.ContainerManagerSecurityInfo -->
+  <class name="ContainerManagerSecurityInfo" extends="org.apache.hadoop.security.SecurityInfo"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerManagerSecurityInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getKerberosInfo" return="org.apache.hadoop.security.KerberosInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getTokenInfo" return="org.apache.hadoop.security.token.TokenInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.ContainerManagerSecurityInfo -->
+  <!-- start class org.apache.hadoop.yarn.security.ContainerTokenIdentifier -->
+  <class name="ContainerTokenIdentifier" extends="org.apache.hadoop.security.token.TokenIdentifier"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ContainerId, java.lang.String, java.lang.String, org.apache.hadoop.yarn.api.records.Resource, long, int, long, org.apache.hadoop.yarn.api.records.Priority, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ContainerId, java.lang.String, java.lang.String, org.apache.hadoop.yarn.api.records.Resource, long, int, long, org.apache.hadoop.yarn.api.records.Priority, long, org.apache.hadoop.yarn.api.records.LogAggregationContext"
+      static="false" final="false" visibility="public"
+      deprecated="Use one of the other constructors instead.">
+      <doc>
+      <![CDATA[Creates an instance.
+
+ @param appSubmitter appSubmitter
+ @param containerID container ID
+ @param creationTime creation time
+ @param expiryTimeStamp expiry timestamp
+ @param hostName hostname
+ @param logAggregationContext log aggregation context
+ @param masterKeyId master key ID
+ @param priority priority
+ @param r resource needed by the container
+ @param rmIdentifier ResourceManager identifier
+ @deprecated Use one of the other constructors instead.]]>
+      </doc>
+    </constructor>
+    <constructor name="ContainerTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ContainerId, java.lang.String, java.lang.String, org.apache.hadoop.yarn.api.records.Resource, long, int, long, org.apache.hadoop.yarn.api.records.Priority, long, org.apache.hadoop.yarn.api.records.LogAggregationContext, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ContainerId, java.lang.String, java.lang.String, org.apache.hadoop.yarn.api.records.Resource, long, int, long, org.apache.hadoop.yarn.api.records.Priority, long, org.apache.hadoop.yarn.api.records.LogAggregationContext, java.lang.String, org.apache.hadoop.yarn.server.api.ContainerType"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ContainerId, int, java.lang.String, java.lang.String, org.apache.hadoop.yarn.api.records.Resource, long, int, long, org.apache.hadoop.yarn.api.records.Priority, long, org.apache.hadoop.yarn.api.records.LogAggregationContext, java.lang.String, org.apache.hadoop.yarn.server.api.ContainerType"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerTokenIdentifier"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default constructor needed by RPC layer/SecretManager.]]>
+      </doc>
+    </constructor>
+    <method name="getContainerID" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getApplicationSubmitter" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getNmHostAddress" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getResource" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getExpiryTimeStamp" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getMasterKeyId" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getPriority" return="org.apache.hadoop.yarn.api.records.Priority"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getCreationTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getRMIdentifier" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the RMIdentifier of the RM in which containers are allocated.
+ @return the RMIdentifier]]>
+      </doc>
+    </method>
+    <method name="getContainerType" return="org.apache.hadoop.yarn.server.api.ContainerType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the ContainerType of the container to allocate.
+ @return ContainerType]]>
+      </doc>
+    </method>
+    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.ContainerTokenIdentifierProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getLogAggregationContext" return="org.apache.hadoop.yarn.api.records.LogAggregationContext"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getKind" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getVersion" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the Container version
+ @return container version]]>
+      </doc>
+    </method>
+    <method name="getNodeLabelExpression" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the node-label-expression in the original ResourceRequest.]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="KIND" type="org.apache.hadoop.io.Text"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[TokenIdentifier for a container. Encodes {@link ContainerId},
+ the {@link Resource} needed by the container, and the target NM's host-address.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.ContainerTokenIdentifier -->
+  <!-- start class org.apache.hadoop.yarn.security.ContainerTokenSelector -->
+  <class name="ContainerTokenSelector" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.security.token.TokenSelector"/>
+    <constructor name="ContainerTokenSelector"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="selectToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="service" type="org.apache.hadoop.io.Text"/>
+      <param name="tokens" type="java.util.Collection"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.ContainerTokenSelector -->
+  <!-- start class org.apache.hadoop.yarn.security.NMTokenIdentifier -->
+  <class name="NMTokenIdentifier" extends="org.apache.hadoop.security.token.TokenIdentifier"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NMTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId, org.apache.hadoop.yarn.api.records.NodeId, java.lang.String, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="NMTokenIdentifier"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default constructor needed by the RPC layer/SecretManager.]]>
+      </doc>
+    </constructor>
+    <method name="getApplicationAttemptId" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getNodeId" return="org.apache.hadoop.yarn.api.records.NodeId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getApplicationSubmitter" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getKeyId" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getKind" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.NMTokenIdentifierProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="KIND" type="org.apache.hadoop.io.Text"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.NMTokenIdentifier -->
+  <!-- start class org.apache.hadoop.yarn.security.SchedulerSecurityInfo -->
+  <class name="SchedulerSecurityInfo" extends="org.apache.hadoop.security.SecurityInfo"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SchedulerSecurityInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getKerberosInfo" return="org.apache.hadoop.security.KerberosInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getTokenInfo" return="org.apache.hadoop.security.token.TokenInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.SchedulerSecurityInfo -->
+</package>
+<package name="org.apache.hadoop.yarn.security.admin">
+  <!-- start class org.apache.hadoop.yarn.security.admin.AdminSecurityInfo -->
+  <class name="AdminSecurityInfo" extends="org.apache.hadoop.security.SecurityInfo"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AdminSecurityInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getKerberosInfo" return="org.apache.hadoop.security.KerberosInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getTokenInfo" return="org.apache.hadoop.security.token.TokenInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.admin.AdminSecurityInfo -->
+</package>
+<package name="org.apache.hadoop.yarn.security.client">
+  <!-- start class org.apache.hadoop.yarn.security.client.BaseClientToAMTokenSecretManager -->
+  <class name="BaseClientToAMTokenSecretManager" extends="org.apache.hadoop.security.token.SecretManager"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="BaseClientToAMTokenSecretManager"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[A base {@link SecretManager} for AMs to extend and use to validate
+ client-to-AM tokens issued to clients by the RM using the underlying master
+ key shared by the RM with the AMs on their launch. All the methods are
+ called by either Hadoop RPC or YARN, so this class exists strictly to be
+ inherited/extended and registered with Hadoop RPC.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.BaseClientToAMTokenSecretManager -->
+  <!-- start class org.apache.hadoop.yarn.security.client.ClientRMSecurityInfo -->
+  <class name="ClientRMSecurityInfo" extends="org.apache.hadoop.security.SecurityInfo"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ClientRMSecurityInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getKerberosInfo" return="org.apache.hadoop.security.KerberosInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getTokenInfo" return="org.apache.hadoop.security.token.TokenInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.ClientRMSecurityInfo -->
+  <!-- start class org.apache.hadoop.yarn.security.client.ClientTimelineSecurityInfo -->
+  <class name="ClientTimelineSecurityInfo" extends="org.apache.hadoop.security.SecurityInfo"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ClientTimelineSecurityInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getKerberosInfo" return="org.apache.hadoop.security.KerberosInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getTokenInfo" return="org.apache.hadoop.security.token.TokenInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.ClientTimelineSecurityInfo -->
+  <!-- start class org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier -->
+  <class name="ClientToAMTokenIdentifier" extends="org.apache.hadoop.security.token.TokenIdentifier"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ClientToAMTokenIdentifier"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ClientToAMTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getApplicationAttemptID" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getClientName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.ClientToAMTokenIdentifierProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getKind" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="KIND_NAME" type="org.apache.hadoop.io.Text"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier -->
+  <!-- start class org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager -->
+  <class name="ClientToAMTokenSecretManager" extends="org.apache.hadoop.yarn.security.client.BaseClientToAMTokenSecretManager"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ClientToAMTokenSecretManager" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId, byte[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="retrievePassword" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="identifier" type="org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier"/>
+      <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
+    </method>
+    <method name="getMasterKey" return="javax.crypto.SecretKey"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptID" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+    </method>
+    <method name="setMasterKey"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="byte[]"/>
+    </method>
+    <doc>
+    <![CDATA[A simple {@link SecretManager} for AMs to validate client-to-AM tokens
+ issued to clients by the RM using the underlying master key shared by the RM
+ with the AMs on their launch. All the methods are called by either Hadoop
+ RPC or YARN, so this class exists strictly to be inherited/extended and
+ registered with Hadoop RPC.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager -->
+  <!-- start class org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier -->
+  <class name="RMDelegationTokenIdentifier" extends="org.apache.hadoop.yarn.security.client.YARNDelegationTokenIdentifier"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RMDelegationTokenIdentifier"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="RMDelegationTokenIdentifier" type="org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new delegation token identifier
+ @param owner the effective username of the token owner
+ @param renewer the username of the renewer
+ @param realUser the real username of the token owner]]>
+      </doc>
+    </constructor>
+    <method name="getKind" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="KIND_NAME" type="org.apache.hadoop.io.Text"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Delegation Token Identifier that identifies the delegation tokens from the 
+ Resource Manager.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier -->
+  <!-- start class org.apache.hadoop.yarn.security.client.RMDelegationTokenSelector -->
+  <class name="RMDelegationTokenSelector" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.security.token.TokenSelector"/>
+    <constructor name="RMDelegationTokenSelector"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="selectToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="service" type="org.apache.hadoop.io.Text"/>
+      <param name="tokens" type="java.util.Collection"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.RMDelegationTokenSelector -->
+  <!-- start class org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier -->
+  <class name="TimelineDelegationTokenIdentifier" extends="org.apache.hadoop.yarn.security.client.YARNDelegationTokenIdentifier"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TimelineDelegationTokenIdentifier"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="TimelineDelegationTokenIdentifier" type="org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new timeline delegation token identifier
+
+ @param owner the effective username of the token owner
+ @param renewer the username of the renewer
+ @param realUser the real username of the token owner]]>
+      </doc>
+    </constructor>
+    <method name="getKind" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="KIND_NAME" type="org.apache.hadoop.io.Text"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier -->
+  <!-- start class org.apache.hadoop.yarn.security.client.TimelineDelegationTokenSelector -->
+  <class name="TimelineDelegationTokenSelector" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.security.token.TokenSelector"/>
+    <constructor name="TimelineDelegationTokenSelector"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="selectToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="service" type="org.apache.hadoop.io.Text"/>
+      <param name="tokens" type="java.util.Collection"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.TimelineDelegationTokenSelector -->
+</package>
+<package name="org.apache.hadoop.yarn.server.api">
+</package>
+<package name="org.apache.hadoop.yarn.server.api.impl.pb.client">
+</package>
+<package name="org.apache.hadoop.yarn.server.api.impl.pb.service">
+</package>
+<package name="org.apache.hadoop.yarn.sharedcache">
+  <!-- start interface org.apache.hadoop.yarn.sharedcache.SharedCacheChecksum -->
+  <interface name="SharedCacheChecksum"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="computeChecksum" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Calculate the checksum of the passed input stream.
+
+ @param in <code>InputStream</code> to be checksummed
+ @return the message digest of the input stream
+ @throws IOException if the stream cannot be read]]>
+      </doc>
+    </method>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.sharedcache.SharedCacheChecksum -->
+  <!-- start class org.apache.hadoop.yarn.sharedcache.SharedCacheChecksumFactory -->
+  <class name="SharedCacheChecksumFactory" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SharedCacheChecksumFactory"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getChecksum" return="org.apache.hadoop.yarn.sharedcache.SharedCacheChecksum"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get a new <code>SharedCacheChecksum</code> object based on the configurable
+ algorithm implementation
+ (see <code>yarn.sharedcache.checksum.algo.impl</code>)
+
+ @return <code>SharedCacheChecksum</code> object]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.sharedcache.SharedCacheChecksumFactory -->
+</package>
+<package name="org.apache.hadoop.yarn.state">
+  <!-- start class org.apache.hadoop.yarn.state.InvalidStateTransitionException -->
+  <class name="InvalidStateTransitionException" extends="org.apache.hadoop.yarn.state.InvalidStateTransitonException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="InvalidStateTransitionException" type="java.lang.Enum, java.lang.Enum"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[The exception thrown when an invalid state transition is attempted.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.state.InvalidStateTransitionException -->
+  <!-- start class org.apache.hadoop.yarn.state.InvalidStateTransitonException -->
+  <class name="InvalidStateTransitonException" extends="org.apache.hadoop.yarn.exceptions.YarnRuntimeException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="Use {@link InvalidStateTransitionException} instead.">
+    <constructor name="InvalidStateTransitonException" type="java.lang.Enum, java.lang.Enum"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getCurrentState" return="java.lang.Enum"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getEvent" return="java.lang.Enum"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[@deprecated Use {@link InvalidStateTransitionException} instead.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.state.InvalidStateTransitonException -->
+  <!-- start interface org.apache.hadoop.yarn.state.MultipleArcTransition -->
+  <interface name="MultipleArcTransition"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="transition" return="STATE"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="operand" type="OPERAND"/>
+      <param name="event" type="EVENT"/>
+      <doc>
+      <![CDATA[Transition hook.
+ @return the postState. Post state must be one of the 
+                      valid post states registered in StateMachine.
+ @param operand the entity attached to the FSM, whose internal 
+                state may change.
+ @param event causal event]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Hook for Transition.
+ The post state is decided by the Transition hook and must be one of the
+ valid post states registered in the StateMachine.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.state.MultipleArcTransition -->
+  <!-- start interface org.apache.hadoop.yarn.state.SingleArcTransition -->
+  <interface name="SingleArcTransition"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="transition"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="operand" type="OPERAND"/>
+      <param name="event" type="EVENT"/>
+      <doc>
+      <![CDATA[Transition hook.
+ 
+ @param operand the entity attached to the FSM, whose internal 
+                state may change.
+ @param event causal event]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Hook for Transition. This leads the state machine to move to
+ the post state as registered in the state machine.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.state.SingleArcTransition -->
+  <!-- start interface org.apache.hadoop.yarn.state.StateMachine -->
+  <interface name="StateMachine"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not

<TRUNCATED>
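
The listing truncates inside the org.apache.hadoop.yarn.state package. For the two
transition hooks documented just above, a rough sketch of how they differ in
practice; the Job/JobEvent/JobState types here are hypothetical illustrations, not
part of the API listing:

    import org.apache.hadoop.yarn.state.MultipleArcTransition;
    import org.apache.hadoop.yarn.state.SingleArcTransition;

    enum JobState { RUNNING, FAILED }        // hypothetical post states
    class JobEvent { boolean recoverable; }  // hypothetical causal event
    class Job { JobState state; }            // hypothetical FSM operand

    // Single arc: the post state was fixed when the transition was
    // registered; the hook only applies side effects to the operand.
    class StartTransition implements SingleArcTransition<Job, JobEvent> {
      @Override
      public void transition(Job operand, JobEvent event) {
        // mutate the operand's internal state here
      }
    }

    // Multiple arcs: the hook itself decides which of the registered
    // post states applies, and returns it.
    class ErrorTransition
        implements MultipleArcTransition<Job, JobEvent, JobState> {
      @Override
      public JobState transition(Job operand, JobEvent event) {
        return event.recoverable ? JobState.RUNNING : JobState.FAILED;
      }
    }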

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[28/49] hadoop git commit: HDFS-12959. Fix TestOpenFilesWithSnapshot redundant configurations.

Posted by as...@apache.org.
HDFS-12959. Fix TestOpenFilesWithSnapshot redundant configurations.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76e664e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76e664e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76e664e9

Branch: refs/heads/YARN-6592
Commit: 76e664e931bf0784620b69bc588bd51cf2a024e6
Parents: 826507c
Author: Manoj Govindassamy <ma...@apache.org>
Authored: Thu Dec 21 15:47:15 2017 -0800
Committer: Manoj Govindassamy <ma...@apache.org>
Committed: Thu Dec 21 15:47:15 2017 -0800

----------------------------------------------------------------------
 .../server/namenode/snapshot/TestOpenFilesWithSnapshot.java   | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76e664e9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
index be118a3..17082a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
@@ -66,7 +66,8 @@ public class TestOpenFilesWithSnapshot {
   public void setup() throws IOException {
     conf.setBoolean(
         DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES, true);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+    cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
     conf.set("dfs.blocksize", "1048576");
     fs = cluster.getFileSystem();
   }
@@ -252,8 +253,6 @@ public class TestOpenFilesWithSnapshot {
    */
   @Test (timeout = 120000)
   public void testPointInTimeSnapshotCopiesForOpenFiles() throws Exception {
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES,
-        true);
     // Construct the directory tree
     final Path level0A = new Path("/level_0_A");
     final Path level0B = new Path("/level_0_B");
@@ -738,8 +737,6 @@ public class TestOpenFilesWithSnapshot {
    */
   @Test (timeout = 120000)
   public void testOpenFilesSnapChecksumWithTrunkAndAppend() throws Exception {
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES,
-        true);
     // Construct the directory tree
     final Path dir = new Path("/A/B/C");
     fs.mkdirs(dir);
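
The removals above are safe because JUnit runs the @Before method ahead of every
@Test, so a flag set once in setup() is already in effect inside each test and
repeating it per test does nothing. A minimal sketch of the pattern, with names
mirroring the test class above and REPLICATION assumed to be 3:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.junit.Before;

    public class SnapshotTestPattern {  // hypothetical stand-in for the test class
      private static final int REPLICATION = 3;  // assumed value
      private final Configuration conf = new HdfsConfiguration();
      private MiniDFSCluster cluster;
      private DistributedFileSystem fs;

      @Before
      public void setup() throws IOException {
        // runs before every @Test, so this flag need not be repeated per test
        conf.setBoolean(
            DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES, true);
        cluster =
            new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
        fs = cluster.getFileSystem();
      }
    }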


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[11/49] hadoop git commit: HDFS-12937. RBF: Add more unit tests for router admin commands. Contributed by Yiqun Lin.

Posted by as...@apache.org.
HDFS-12937. RBF: Add more unit tests for router admin commands. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e040c97b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e040c97b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e040c97b

Branch: refs/heads/YARN-6592
Commit: e040c97b7743469f363eeae52c8abcf4fe7c65d5
Parents: a7f8caf
Author: Yiqun Lin <yq...@apache.org>
Authored: Tue Dec 19 15:31:34 2017 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Tue Dec 19 15:31:34 2017 +0800

----------------------------------------------------------------------
 .../hdfs/tools/federation/RouterAdmin.java      |   4 +-
 .../federation/router/TestRouterAdminCLI.java   | 121 ++++++++++++++++++-
 2 files changed, 120 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e040c97b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
index a91a602..fd961f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -132,11 +132,11 @@ public class RouterAdmin extends Configured implements Tool {
     try {
       if ("-add".equals(cmd)) {
         if (addMount(argv, i)) {
-          System.err.println("Successfuly added mount point " + argv[i]);
+          System.out.println("Successfuly added mount point " + argv[i]);
         }
       } else if ("-rm".equals(cmd)) {
         if (removeMount(argv[i])) {
-          System.err.println("Successfully removed mount point " + argv[i]);
+          System.out.println("Successfully removed mount point " + argv[i]);
         }
       } else if ("-ls".equals(cmd)) {
         if (argv.length > 1) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e040c97b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
index 3882b8b..9e82967 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
@@ -18,16 +18,20 @@
 package org.apache.hadoop.hdfs.server.federation.router;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
 import java.net.InetSocketAddress;
+import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.RouterContext;
 import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
 import org.apache.hadoop.hdfs.server.federation.store.impl.MountTableStoreImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
@@ -36,6 +40,7 @@ import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.hdfs.tools.federation.RouterAdmin;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ToolRunner;
+import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -83,12 +88,123 @@ public class TestRouterAdminCLI {
   }
 
   @AfterClass
-  public static void tearDown() {
+  public static void tearDownCluster() {
     cluster.stopRouter(routerContext);
     cluster.shutdown();
     cluster = null;
   }
 
+  @After
+  public void tearDown() {
+    // set back system out
+    System.setOut(OLD_OUT);
+  }
+
+  @Test
+  public void testAddMountTable() throws Exception {
+    String nsId = "ns0";
+    String src = "/test-addmounttable";
+    String dest = "/addmounttable";
+    String[] argv = new String[] {"-add", src, nsId, dest};
+    Assert.assertEquals(0, ToolRunner.run(admin, argv));
+
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest
+        .newInstance(src);
+    GetMountTableEntriesResponse getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+    MountTable mountTable = getResponse.getEntries().get(0);
+
+    List<RemoteLocation> destinations = mountTable.getDestinations();
+    assertEquals(1, destinations.size());
+
+    assertEquals(src, mountTable.getSourcePath());
+    assertEquals(nsId, destinations.get(0).getNameserviceId());
+    assertEquals(dest, destinations.get(0).getDest());
+    assertFalse(mountTable.isReadOnly());
+
+    // test mount table update behavior
+    dest = dest + "-new";
+    argv = new String[] {"-add", src, nsId, dest, "-readonly"};
+    Assert.assertEquals(0, ToolRunner.run(admin, argv));
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+
+    getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+    mountTable = getResponse.getEntries().get(0);
+    assertEquals(2, mountTable.getDestinations().size());
+    assertEquals(nsId, mountTable.getDestinations().get(1).getNameserviceId());
+    assertEquals(dest, mountTable.getDestinations().get(1).getDest());
+    assertTrue(mountTable.isReadOnly());
+  }
+
+  @Test
+  public void testListMountTable() throws Exception {
+    String nsId = "ns0";
+    String src = "/test-lsmounttable";
+    String dest = "/lsmounttable";
+    String[] argv = new String[] {"-add", src, nsId, dest};
+    assertEquals(0, ToolRunner.run(admin, argv));
+
+    // re-set system out for testing
+    System.setOut(new PrintStream(out));
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    argv = new String[] {"-ls", src};
+    assertEquals(0, ToolRunner.run(admin, argv));
+    assertTrue(out.toString().contains(src));
+
+    out.reset();
+    GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest
+        .newInstance("/");
+    GetMountTableEntriesResponse getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+
+    // Test the ls command without an input path; it will list
+    // the mount table entries under the root path.
+    argv = new String[] {"-ls"};
+    assertEquals(0, ToolRunner.run(admin, argv));
+    assertTrue(out.toString().contains(src));
+    String outStr = out.toString();
+    // verify if all the mount table are listed
+    for(MountTable entry: getResponse.getEntries()) {
+      assertTrue(outStr.contains(entry.getSourcePath()));
+    }
+  }
+
+  @Test
+  public void testRemoveMountTable() throws Exception {
+    String nsId = "ns0";
+    String src = "/test-rmmounttable";
+    String dest = "/rmmounttable";
+    String[] argv = new String[] {"-add", src, nsId, dest};
+    assertEquals(0, ToolRunner.run(admin, argv));
+
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest
+        .newInstance(src);
+    GetMountTableEntriesResponse getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+    // ensure mount table added successfully
+    MountTable mountTable = getResponse.getEntries().get(0);
+    assertEquals(src, mountTable.getSourcePath());
+
+    argv = new String[] {"-rm", src};
+    assertEquals(0, ToolRunner.run(admin, argv));
+
+    stateStore.loadCache(MountTableStoreImpl.class, true);
+    getResponse = client.getMountTableManager()
+        .getMountTableEntries(getRequest);
+    assertEquals(0, getResponse.getEntries().size());
+
+    // remove an invalid mount table
+    String invalidPath = "/invalid";
+    System.setOut(new PrintStream(out));
+    argv = new String[] {"-rm", invalidPath};
+    assertEquals(0, ToolRunner.run(admin, argv));
+    assertTrue(out.toString().contains(
+        "Cannot remove mount point " + invalidPath));
+  }
+
   @Test
   public void testMountTableDefaultACL() throws Exception {
     String[] argv = new String[] {"-add", "/testpath0", "ns0", "/testdir0"};
@@ -140,8 +256,7 @@ public class TestRouterAdminCLI {
     assertEquals(0, ToolRunner.run(admin, argv));
     verifyExecutionResult("/testpath2-3", true, 0, 0);
 
-    // set back system out and login user
-    System.setOut(OLD_OUT);
+    // set back login user
     remoteUser = UserGroupInformation.createRemoteUser(superUser);
     UserGroupInformation.setLoginUser(remoteUser);
   }
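
The tests above drive RouterAdmin the same way a caller would outside of tests:
through ToolRunner with CLI-style arguments. A minimal sketch, assuming the
Configuration already carries the address of a running router's admin service, and
with "/data" and "ns0" as illustrative values:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.tools.federation.RouterAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public class RouterAdminExample {  // hypothetical driver class
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();  // assumed to reach the router
        RouterAdmin admin = new RouterAdmin(conf);
        // add a mount point, then list it back
        int rc = ToolRunner.run(admin,
            new String[] {"-add", "/data", "ns0", "/data"});
        if (rc == 0) {
          rc = ToolRunner.run(admin, new String[] {"-ls", "/data"});
        }
        System.exit(rc);
      }
    }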


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[25/49] hadoop git commit: HADOOP-13282. S3 blob etags to be made visible in S3A status/getFileChecksum() calls. Contributed by Steve Loughran

Posted by as...@apache.org.
HADOOP-13282. S3 blob etags to be made visible in S3A status/getFileChecksum() calls.
Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c8ff0cc3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c8ff0cc3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c8ff0cc3

Branch: refs/heads/YARN-6592
Commit: c8ff0cc304f07bf793192291e0611b2fb4bcc4e3
Parents: ef450df
Author: Steve Loughran <st...@apache.org>
Authored: Thu Dec 21 14:58:58 2017 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Thu Dec 21 14:58:58 2017 +0000

----------------------------------------------------------------------
 .../apache/hadoop/fs/store/EtagChecksum.java    |  90 +++++++++++++
 .../apache/hadoop/fs/store/package-info.java    |  28 ++++
 .../hadoop/fs/store/TestEtagChecksum.java       |  85 ++++++++++++
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  40 ++++++
 .../hadoop/fs/s3a/ITestS3AMiscOperations.java   | 133 ++++++++++++++++---
 5 files changed, 359 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8ff0cc3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/EtagChecksum.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/EtagChecksum.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/EtagChecksum.java
new file mode 100644
index 0000000..cc29f1b
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/EtagChecksum.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.store;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+
+import org.apache.hadoop.fs.FileChecksum;
+
+/**
+ * An etag as a checksum.
+ * Consider these suitable for checking if an object has changed, but
+ * not suitable for comparing two different objects for equivalence,
+ * especially between object stores.
+ */
+public class EtagChecksum extends FileChecksum {
+
+  /** The algorithm name: {@value}. */
+  private static final String ETAG = "etag";
+
+  /**
+   * Etag string.
+   */
+  private String eTag = "";
+
+  /**
+   * Create with an empty etag.
+   */
+  public EtagChecksum() {
+  }
+
+  /**
+   * Create with a string etag.
+   * @param eTag etag
+   */
+  public EtagChecksum(String eTag) {
+    this.eTag = eTag;
+  }
+
+  @Override
+  public String getAlgorithmName() {
+    return ETAG;
+  }
+
+  @Override
+  public int getLength() {
+    return eTag.getBytes(StandardCharsets.UTF_8).length;
+  }
+
+  @Override
+  public byte[] getBytes() {
+    return eTag != null
+        ? eTag.getBytes(StandardCharsets.UTF_8)
+        : new byte[0];
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    out.writeUTF(eTag != null ? eTag : "");
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    eTag = in.readUTF();
+  }
+
+  @Override
+  public String toString() {
+    return "etag: \"" + eTag  + '"';
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8ff0cc3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/package-info.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/package-info.java
new file mode 100644
index 0000000..ebe1db4
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package is for classes to be shared across object stores; for internal
+ * use within the hadoop-* modules only. No stability guarantees.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.fs.store;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8ff0cc3/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/store/TestEtagChecksum.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/store/TestEtagChecksum.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/store/TestEtagChecksum.java
new file mode 100644
index 0000000..ef9613f
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/store/TestEtagChecksum.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.store;
+
+import java.io.IOException;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+
+/**
+ * Unit test of etag operations.
+ */
+public class TestEtagChecksum extends Assert {
+
+  private final EtagChecksum empty1 = tag("");
+  private final EtagChecksum empty2 = tag("");
+  private final EtagChecksum valid1 = tag("valid");
+  private final EtagChecksum valid2 = tag("valid");
+
+  @Test
+  public void testEmptyTagsEqual() {
+    assertEquals(empty1, empty2);
+  }
+
+  @Test
+  public void testEmptyTagRoundTrip() throws Throwable {
+    assertEquals(empty1, roundTrip(empty1));
+  }
+
+  @Test
+  public void testValidTagsEqual() {
+    assertEquals(valid1, valid2);
+  }
+
+  @Test
+  public void testValidTagRoundTrip() throws Throwable {
+    assertEquals(valid1, roundTrip(valid1));
+  }
+
+  @Test
+  public void testValidAndEmptyTagsDontMatch() {
+    assertNotEquals(valid1, empty1);
+    assertNotEquals(valid1, tag("other valid one"));
+  }
+
+  @Test
+  public void testDifferentTagsDontMatch() {
+    assertNotEquals(valid1, tag("other valid one"));
+  }
+
+  private EtagChecksum tag(String t) {
+    return new EtagChecksum(t);
+  }
+
+  private EtagChecksum roundTrip(EtagChecksum tag) throws IOException {
+    try (DataOutputBuffer dob = new DataOutputBuffer();
+         DataInputBuffer dib = new DataInputBuffer()) {
+      tag.write(dob);
+      dib.reset(dob.getData(), dob.getLength());
+      EtagChecksum t2 = new EtagChecksum();
+      t2.readFields(dib);
+      return t2;
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8ff0cc3/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index f461c9e..a8147ed 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -112,6 +112,7 @@ import org.apache.hadoop.fs.s3a.s3guard.PathMetadata;
 import org.apache.hadoop.fs.s3a.s3guard.S3Guard;
 import org.apache.hadoop.fs.s3native.S3xLoginHelper;
 import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.fs.store.EtagChecksum;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.BlockingThreadPoolExecutorService;
 import org.apache.hadoop.util.Progressable;
@@ -539,6 +540,14 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   }
 
   /**
+   * Get the encryption algorithm of this endpoint.
+   * @return the encryption algorithm.
+   */
+  public S3AEncryptionMethods getServerSideEncryptionAlgorithm() {
+    return serverSideEncryptionAlgorithm;
+  }
+
+  /**
    * Demand create the directory allocator, then create a temporary file.
    * {@link LocalDirAllocator#createTmpFileForWrite(String, long, Configuration)}.
    *  @param pathStr prefix for the temporary file
@@ -1069,6 +1078,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
    * @throws IOException IO and object access problems.
    */
   @VisibleForTesting
+  @Retries.RetryRaw
   public ObjectMetadata getObjectMetadata(Path path) throws IOException {
     return getObjectMetadata(pathToKey(path));
   }
@@ -2935,6 +2945,36 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   }
 
   /**
+   * Get the etag of an object at the path via a HEAD request and return
+   * it as a checksum object. This has whatever guarantees about
+   * equivalence the S3 implementation offers.
+   * <ol>
+   *   <li>If a tag has not changed, consider the object unchanged.</li>
+   *   <li>Two tags being different does not imply the data is different.</li>
+   * </ol>
+   * Different S3 implementations may offer different guarantees.
+   * @param f The file path
+   * @param length The length of the file range for checksum calculation
+   * @return The EtagChecksum or null if checksums are not supported.
+   * @throws IOException IO failure
+   * @see <a href="http://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html">Common Response Headers</a>
+   */
+
+  public EtagChecksum getFileChecksum(Path f, final long length)
+      throws IOException {
+    Preconditions.checkArgument(length >= 0);
+    Path path = qualify(f);
+    LOG.debug("getFileChecksum({})", path);
+    return once("getFileChecksum", path.toString(),
+        () -> {
+          // this always does a full HEAD to the object
+          ObjectMetadata headers = getObjectMetadata(path);
+          String eTag = headers.getETag();
+          return eTag != null ? new EtagChecksum(eTag) : null;
+        });
+  }
+
+  /**
    * {@inheritDoc}.
    *
    * This implementation is optimized for S3, which can do a bulk listing

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8ff0cc3/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java
index 869d64c..ddf2529 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java
@@ -18,21 +18,24 @@
 
 package org.apache.hadoop.fs.s3a;
 
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.apache.hadoop.test.LambdaTestUtils;
+import java.io.ByteArrayInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 
 import com.amazonaws.services.s3.model.ObjectMetadata;
 import com.amazonaws.services.s3.model.PutObjectRequest;
-import com.amazonaws.services.s3.model.PutObjectResult;
+import org.junit.Assume;
 import org.junit.Test;
 
-import java.io.ByteArrayInputStream;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.concurrent.Callable;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.store.EtagChecksum;
+import org.apache.hadoop.test.LambdaTestUtils;
+
+import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
 
 /**
  * Tests of the S3A FileSystem which don't have a specific home and can share
@@ -40,6 +43,8 @@ import java.util.concurrent.Callable;
  */
 public class ITestS3AMiscOperations extends AbstractS3ATestBase {
 
+  private static final byte[] HELLO = "hello".getBytes(StandardCharsets.UTF_8);
+
   @Test
   public void testCreateNonRecursiveSuccess() throws IOException {
     Path shouldWork = path("nonrecursivenode");
@@ -58,7 +63,7 @@ public class ITestS3AMiscOperations extends AbstractS3ATestBase {
   @Test(expected = FileAlreadyExistsException.class)
   public void testCreateNonRecursiveParentIsFile() throws IOException {
     Path parent = path("/file.txt");
-    ContractTestUtils.touch(getFileSystem(), parent);
+    touch(getFileSystem(), parent);
     createNonRecursive(new Path(parent, "fail"));
   }
 
@@ -73,12 +78,7 @@ public class ITestS3AMiscOperations extends AbstractS3ATestBase {
         new ByteArrayInputStream("PUT".getBytes()),
         metadata);
     LambdaTestUtils.intercept(IllegalStateException.class,
-        new Callable<PutObjectResult>() {
-          @Override
-          public PutObjectResult call() throws Exception {
-            return fs.putObjectDirect(put);
-          }
-        });
+        () -> fs.putObjectDirect(put));
     assertPathDoesNotExist("put object was created", path);
   }
 
@@ -87,4 +87,103 @@ public class ITestS3AMiscOperations extends AbstractS3ATestBase {
         (short) 3, (short) 4096,
         null);
   }
+
+  /**
+   * Touch a path, return the full path.
+   * @param name relative name
+   * @return the path
+   * @throws IOException IO failure
+   */
+  Path touchFile(String name) throws IOException {
+    Path path = path(name);
+    touch(getFileSystem(), path);
+    return path;
+  }
+
+  /**
+   * Create a file with the data, return the path.
+   * @param name relative name
+   * @param data data to write
+   * @return the path
+   * @throws IOException IO failure
+   */
+  Path mkFile(String name, byte[] data) throws IOException {
+    final Path f = path(name);
+    createFile(getFileSystem(), f, true, data);
+    return f;
+  }
+
+  /**
+   * The assumption here is that 0-byte files uploaded in a single PUT
+   * always have the same checksum, including stores with encryption.
+   * @throws Throwable on a failure
+   */
+  @Test
+  public void testEmptyFileChecksums() throws Throwable {
+    final S3AFileSystem fs = getFileSystem();
+    Path file1 = touchFile("file1");
+    EtagChecksum checksum1 = fs.getFileChecksum(file1, 0);
+    LOG.info("Checksum for {}: {}", file1, checksum1);
+    assertNotNull("file 1 checksum", checksum1);
+    assertNotEquals("file 1 checksum", 0, checksum1.getLength());
+    assertEquals("checksums", checksum1,
+        fs.getFileChecksum(touchFile("file2"), 0));
+  }
+
+  /**
+   * Verify that different file contents have different
+   * checksums, and that they aren't the same as the empty file.
+   * @throws Throwable failure
+   */
+  @Test
+  public void testNonEmptyFileChecksums() throws Throwable {
+    final S3AFileSystem fs = getFileSystem();
+    final Path file3 = mkFile("file3", HELLO);
+    final EtagChecksum checksum1 = fs.getFileChecksum(file3, 0);
+    assertNotNull("file 3 checksum", checksum1);
+    final Path file4 = touchFile("file4");
+    final EtagChecksum checksum2 = fs.getFileChecksum(file4, 0);
+    assertNotEquals("checksums", checksum1, checksum2);
+    // overwrite
+    createFile(fs, file4, true,
+        "hello, world".getBytes(StandardCharsets.UTF_8));
+    assertNotEquals(checksum2, fs.getFileChecksum(file4, 0));
+  }
+
+  /**
+   * Verify that on an unencrypted store, the checksums of two non-empty
+   * (single PUT) files are the same if the data is the same.
+   * This will fail if the bucket has S3 default encryption enabled.
+   * @throws Throwable failure
+   */
+  @Test
+  public void testNonEmptyFileChecksumsUnencrypted() throws Throwable {
+    Assume.assumeTrue(encryptionAlgorithm().equals(S3AEncryptionMethods.NONE));
+    final S3AFileSystem fs = getFileSystem();
+    final EtagChecksum checksum1 =
+        fs.getFileChecksum(mkFile("file5", HELLO), 0);
+    assertNotNull("file 3 checksum", checksum1);
+    assertEquals("checksums", checksum1,
+        fs.getFileChecksum(mkFile("file6", HELLO), 0));
+  }
+
+  private S3AEncryptionMethods encryptionAlgorithm() {
+    return getFileSystem().getServerSideEncryptionAlgorithm();
+  }
+
+  @Test
+  public void testNegativeLength() throws Throwable {
+    LambdaTestUtils.intercept(IllegalArgumentException.class,
+        () -> getFileSystem().getFileChecksum(mkFile("negative", HELLO), -1));
+  }
+
+  @Test
+  public void testLengthPastEOF() throws Throwable {
+    final S3AFileSystem fs = getFileSystem();
+    Path f = mkFile("file5", HELLO);
+    assertEquals(
+        fs.getFileChecksum(f, HELLO.length),
+        fs.getFileChecksum(f, HELLO.length * 2));
+  }
+
 }
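
Taken together, the new API lets a client probe whether an object has changed
without reading it. A minimal sketch, assuming an illustrative s3a URI, and keeping
in mind the javadoc caveat above that differing etags do not prove the bytes differ:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.s3a.S3AFileSystem;
    import org.apache.hadoop.fs.store.EtagChecksum;

    public class EtagProbe {  // hypothetical driver class
      public static void main(String[] args) throws Exception {
        Path path = new Path("s3a://example-bucket/data.csv");  // assumed URI
        S3AFileSystem fs =
            (S3AFileSystem) path.getFileSystem(new Configuration());
        EtagChecksum before = fs.getFileChecksum(path, 0);
        // ... the object may be overwritten elsewhere in the meantime ...
        EtagChecksum after = fs.getFileChecksum(path, 0);
        // equal tags: treat the object as unchanged
        System.out.println("unchanged: "
            + (before != null && before.equals(after)));
      }
    }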


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[40/49] hadoop git commit: YARN-6593. [API] Introduce Placement Constraint object. (Konstantinos Karanasos via wangda)

Posted by as...@apache.org.
YARN-6593. [API] Introduce Placement Constraint object. (Konstantinos Karanasos via wangda)

Change-Id: Id00edb7185fdf01cce6e40f920cac3585f8cbe9c


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/276a62d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/276a62d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/276a62d1

Branch: refs/heads/YARN-6592
Commit: 276a62d170866fc7d328a1a3fd41d64cabdec49f
Parents: 7467e8f
Author: Wangda Tan <wa...@apache.org>
Authored: Thu Aug 3 14:03:55 2017 -0700
Committer: Arun Suresh <as...@apache.org>
Committed: Fri Dec 29 17:29:15 2017 -0800

----------------------------------------------------------------------
 .../yarn/api/resource/PlacementConstraint.java  | 567 +++++++++++++++++++
 .../yarn/api/resource/PlacementConstraints.java | 286 ++++++++++
 .../hadoop/yarn/api/resource/package-info.java  |  23 +
 .../src/main/proto/yarn_protos.proto            |  55 ++
 .../api/resource/TestPlacementConstraints.java  | 106 ++++
 .../PlacementConstraintFromProtoConverter.java  | 116 ++++
 .../pb/PlacementConstraintToProtoConverter.java | 174 ++++++
 .../apache/hadoop/yarn/api/pb/package-info.java |  23 +
 .../yarn/api/records/impl/pb/ProtoUtils.java    |  27 +
 .../PlacementConstraintTransformations.java     | 209 +++++++
 .../hadoop/yarn/api/resource/package-info.java  |  23 +
 .../TestPlacementConstraintPBConversion.java    | 195 +++++++
 .../TestPlacementConstraintTransformations.java | 183 ++++++
 13 files changed, 1987 insertions(+)
----------------------------------------------------------------------
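
Before the full diff, a minimal sketch of the new object model in use, built only
from the constructors this patch adds; the "rack" scope string and the "zk"
allocation tag mirror the example given in the SingleConstraint javadoc below:

    import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression.TargetType;

    public class ZkAffinityExample {  // hypothetical helper class
      // "place within a rack that has between 2 and 10 allocations tagged zk"
      public static PlacementConstraint zkRackConstraint() {
        TargetExpression zkTag =
            new TargetExpression(TargetType.ALLOCATION_TAG, null, "zk");
        return new SingleConstraint("rack", 2, 10, zkTag).build();
      }
    }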


http://git-wip-us.apache.org/repos/asf/hadoop/blob/276a62d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
new file mode 100644
index 0000000..f0e3982
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
@@ -0,0 +1,567 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.resource;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+
+/**
+ * {@code PlacementConstraint} represents a placement constraint for a resource
+ * allocation.
+ */
+@Public
+@Unstable
+public class PlacementConstraint {
+
+  /**
+   * The constraint expression tree.
+   */
+  private AbstractConstraint constraintExpr;
+
+  public PlacementConstraint(AbstractConstraint constraintExpr) {
+    this.constraintExpr = constraintExpr;
+  }
+
+  /**
+   * Get the constraint expression of the placement constraint.
+   *
+   * @return the constraint expression
+   */
+  public AbstractConstraint getConstraintExpr() {
+    return constraintExpr;
+  }
+
+  /**
+   * Interface used to enable the elements of the constraint tree to be visited.
+   */
+  @Private
+  public interface Visitable {
+    /**
+     * Visitor pattern.
+     *
+     * @param visitor visitor to be used
+     * @param <T> defines the type that the visitor will use and the return type
+     *          of the accept.
+     * @return the result of visiting a given object.
+     */
+    <T> T accept(Visitor<T> visitor);
+
+  }
+
+  /**
+   * Visitor API for a constraint tree.
+   *
+   * @param <T> determines the return type of the visit methods.
+   */
+  @Private
+  public interface Visitor<T> {
+    T visit(SingleConstraint constraint);
+
+    T visit(TargetExpression target);
+
+    T visit(TargetConstraint constraint);
+
+    T visit(CardinalityConstraint constraint);
+
+    T visit(And constraint);
+
+    T visit(Or constraint);
+
+    T visit(DelayedOr constraint);
+
+    T visit(TimedPlacementConstraint constraint);
+  }
+
+  /**
+   * Abstract class that acts as the superclass of all placement constraint
+   * classes.
+   */
+  public abstract static class AbstractConstraint implements Visitable {
+    public PlacementConstraint build() {
+      return new PlacementConstraint(this);
+    }
+  }
+
+  static final String NODE_SCOPE = "node";
+  static final String RACK_SCOPE = "rack";
+
+  /**
+   * Consider a set of nodes N that belongs to the scope specified in the
+   * constraint. If the target expressions are satisfied at least minCardinality
+   * times and at most maxCardinality times in the node set N, then the
+   * constraint is satisfied.
+   *
+   * For example, a constraint of the form {@code {RACK, 2, 10,
+   * allocationTag("zk")}}, requires an allocation to be placed within a rack
+   * that has at least 2 and at most 10 other allocations with tag "zk".
+   */
+  public static class SingleConstraint extends AbstractConstraint {
+    private String scope;
+    private int minCardinality;
+    private int maxCardinality;
+    private Set<TargetExpression> targetExpressions;
+
+    public SingleConstraint(String scope, int minCardinality,
+        int maxCardinality, Set<TargetExpression> targetExpressions) {
+      this.scope = scope;
+      this.minCardinality = minCardinality;
+      this.maxCardinality = maxCardinality;
+      this.targetExpressions = targetExpressions;
+    }
+
+    public SingleConstraint(String scope, int minC, int maxC,
+        TargetExpression... targetExpressions) {
+      this(scope, minC, maxC, new HashSet<>(Arrays.asList(targetExpressions)));
+    }
+
+    /**
+     * Get the scope of the constraint.
+     *
+     * @return the scope of the constraint
+     */
+    public String getScope() {
+      return scope;
+    }
+
+    /**
+     * Get the minimum cardinality of the constraint.
+     *
+     * @return the minimum cardinality of the constraint
+     */
+    public int getMinCardinality() {
+      return minCardinality;
+    }
+
+    /**
+     * Get the maximum cardinality of the constraint.
+     *
+     * @return the maximum cardinality of the constraint
+     */
+    public int getMaxCardinality() {
+      return maxCardinality;
+    }
+
+    /**
+     * Get the target expressions of the constraint.
+     *
+     * @return the set of target expressions
+     */
+    public Set<TargetExpression> getTargetExpressions() {
+      return targetExpressions;
+    }
+
+    @Override
+    public <T> T accept(Visitor<T> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  /**
+   * Class representing the target expressions that are used in placement
+   * constraints. They might refer to expressions on node attributes, allocation
+   * tags, or be self-targets (referring to the allocation to which the
+   * constraint is attached).
+   */
+  public static class TargetExpression implements Visitable {
+    /**
+     * Enum specifying the type of the target expression.
+     */
+    public enum TargetType {
+      NODE_ATTRIBUTE, ALLOCATION_TAG, SELF
+    }
+
+    private TargetType targetType;
+    private String targetKey;
+    private Set<String> targetValues;
+
+    public TargetExpression(TargetType targetType, String targetKey,
+        Set<String> targetValues) {
+      this.targetType = targetType;
+      this.targetKey = targetKey;
+      this.targetValues = targetValues;
+    }
+
+    public TargetExpression(TargetType targetType) {
+      this(targetType, null, new HashSet<>());
+    }
+
+    public TargetExpression(TargetType targetType, String targetKey,
+        String... targetValues) {
+      this(targetType, targetKey, new HashSet<>(Arrays.asList(targetValues)));
+    }
+
+    /**
+     * Get the type of the target expression.
+     *
+     * @return the type of the target expression
+     */
+    public TargetType getTargetType() {
+      return targetType;
+    }
+
+    /**
+     * Get the key of the target expression.
+     *
+     * @return the key of the target expression
+     */
+    public String getTargetKey() {
+      return targetKey;
+    }
+
+    /**
+     * Get the set of values of the target expression.
+     *
+     * @return the set of values of the target expression
+     */
+    public Set<String> getTargetValues() {
+      return targetValues;
+    }
+
+    @Override
+    public int hashCode() {
+      int result = targetType != null ? targetType.hashCode() : 0;
+      result = 31 * result + (targetKey != null ? targetKey.hashCode() : 0);
+      result =
+          31 * result + (targetValues != null ? targetValues.hashCode() : 0);
+      return result;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (o == null) {
+        return false;
+      }
+      if (!(o instanceof TargetExpression)) {
+        return false;
+      }
+
+      TargetExpression that = (TargetExpression) o;
+      if (targetType != that.targetType) {
+        return false;
+      }
+      if (targetKey != null ? !targetKey.equals(that.targetKey)
+          : that.targetKey != null) {
+        return false;
+      }
+      return targetValues != null ? targetValues.equals(that.targetValues)
+          : that.targetValues == null;
+    }
+
+    @Override
+    public <T> T accept(Visitor<T> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  /**
+   * Class that represents a target constraint. Such a constraint requires an
+   * allocation to be placed within a scope that satisfies some specified
+   * expressions on node attributes and allocation tags.
+   *
+   * It is a specialized version of the {@link SingleConstraint}, where the
+   * minimum and the maximum cardinalities take specific values based on the
+   * {@link TargetOperator} used.
+   */
+  public static class TargetConstraint extends AbstractConstraint {
+    enum TargetOperator {
+      IN, NOT_IN
+    }
+
+    private TargetOperator op;
+    private String scope;
+    private Set<TargetExpression> targetExpressions;
+
+    public TargetConstraint(TargetOperator op, String scope,
+        Set<TargetExpression> targetExpressions) {
+      this.op = op;
+      this.scope = scope;
+      this.targetExpressions = targetExpressions;
+    }
+
+    /**
+     * Get the target operator of the constraint.
+     *
+     * @return the target operator
+     */
+    public TargetOperator getOp() {
+      return op;
+    }
+
+    /**
+     * Get the scope of the constraint.
+     *
+     * @return the scope of the constraint
+     */
+    public String getScope() {
+      return scope;
+    }
+
+    /**
+     * Get the set of target expressions.
+     *
+     * @return the set of target expressions
+     */
+    public Set<TargetExpression> getTargetExpressions() {
+      return targetExpressions;
+    }
+
+    @Override
+    public <T> T accept(Visitor<T> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  /**
+   * Class that represents a cardinality constraint. Such a constraint limits
+   * the number of allocations within a given scope to some minimum and maximum
+   * values.
+   *
+   * It is a specialized version of the {@link SingleConstraint}, where the
+   * target is self (i.e., the allocation to which the constraint is attached).
+   */
+  public static class CardinalityConstraint extends AbstractConstraint {
+    private String scope;
+    private int minCardinality;
+    private int maxCardinality;
+
+    public CardinalityConstraint(String scope, int minCardinality,
+        int maxCardinality) {
+      this.scope = scope;
+      this.minCardinality = minCardinality;
+      this.maxCardinality = maxCardinality;
+    }
+
+    /**
+     * Get the scope of the constraint.
+     *
+     * @return the scope of the constraint
+     */
+    public String getScope() {
+      return scope;
+    }
+
+    /**
+     * Get the minimum cardinality of the constraint.
+     *
+     * @return the minimum cardinality of the constraint
+     */
+    public int getMinCardinality() {
+      return minCardinality;
+    }
+
+    /**
+     * Get the maximum cardinality of the constraint.
+     *
+     * @return the maximum cardinality of the constraint
+     */
+    public int getMaxCardinality() {
+      return maxCardinality;
+    }
+
+    @Override
+    public <T> T accept(Visitor<T> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  /**
+   * Class that represents composite constraints, which comprise other
+   * constraints, forming a constraint tree.
+   *
+   * @param <R> the type of constraints that are used as children of the
+   *          specific composite constraint
+   */
+  public abstract static class CompositeConstraint<R extends Visitable>
+      extends AbstractConstraint {
+
+    /**
+     * Get the children of this composite constraint.
+     *
+     * @return the children of the composite constraint
+     */
+    public abstract List<R> getChildren();
+  }
+
+  /**
+   * Class that represents a composite constraint that is a conjunction of other
+   * constraints.
+   */
+  public static class And extends CompositeConstraint<AbstractConstraint> {
+    private List<AbstractConstraint> children;
+
+    public And(List<AbstractConstraint> children) {
+      this.children = children;
+    }
+
+    public And(AbstractConstraint... children) {
+      this(Arrays.asList(children));
+    }
+
+    @Override
+    public List<AbstractConstraint> getChildren() {
+      return children;
+    }
+
+    @Override
+    public <T> T accept(Visitor<T> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  /**
+   * Class that represents a composite constraint that is a disjunction of other
+   * constraints.
+   */
+  public static class Or extends CompositeConstraint<AbstractConstraint> {
+    private List<AbstractConstraint> children;
+
+    public Or(List<AbstractConstraint> children) {
+      this.children = children;
+    }
+
+    public Or(AbstractConstraint... children) {
+      this(Arrays.asList(children));
+    }
+
+    @Override
+    public List<AbstractConstraint> getChildren() {
+      return children;
+    }
+
+    @Override
+    public <T> T accept(Visitor<T> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  /**
+   * Class that represents a composite constraint that comprises a list of timed
+   * placement constraints (see {@link TimedPlacementConstraint}). The scheduler
+   * should try to satisfy first the first timed child constraint within the
+   * specified time window. If this is not possible, it should attempt to
+   * satisfy the second, and so on.
+   */
+  public static class DelayedOr
+      extends CompositeConstraint<TimedPlacementConstraint> {
+    private List<TimedPlacementConstraint> children = new ArrayList<>();
+
+    public DelayedOr(List<TimedPlacementConstraint> children) {
+      this.children = children;
+    }
+
+    public DelayedOr(TimedPlacementConstraint... children) {
+      this(Arrays.asList(children));
+    }
+
+    @Override
+    public List<TimedPlacementConstraint> getChildren() {
+      return children;
+    }
+
+    @Override
+    public <T> T accept(Visitor<T> visitor) {
+      return visitor.visit(this);
+    }
+  }
+
+  /**
+   * Represents a timed placement constraint that has to be satisfied within a
+   * time window.
+   */
+  public static class TimedPlacementConstraint implements Visitable {
+    /**
+     * The unit of scheduling delay.
+     */
+    public enum DelayUnit {
+      MILLISECONDS, OPPORTUNITIES
+    }
+
+    private AbstractConstraint constraint;
+    private long schedulingDelay;
+    private DelayUnit delayUnit;
+
+    public TimedPlacementConstraint(AbstractConstraint constraint,
+        long schedulingDelay, DelayUnit delayUnit) {
+      this.constraint = constraint;
+      this.schedulingDelay = schedulingDelay;
+      this.delayUnit = delayUnit;
+    }
+
+    public TimedPlacementConstraint(AbstractConstraint constraint,
+        long schedulingDelay) {
+      this(constraint, schedulingDelay, DelayUnit.MILLISECONDS);
+    }
+
+    public TimedPlacementConstraint(AbstractConstraint constraint) {
+      this(constraint, Long.MAX_VALUE, DelayUnit.MILLISECONDS);
+    }
+
+    /**
+     * Get the constraint that has to be satisfied within the time window.
+     *
+     * @return the constraint to be satisfied
+     */
+    public AbstractConstraint getConstraint() {
+      return constraint;
+    }
+
+    /**
+     * Sets the constraint that has to be satisfied within the time window.
+     *
+     * @param constraint the constraint to be satisfied
+     */
+    public void setConstraint(AbstractConstraint constraint) {
+      this.constraint = constraint;
+    }
+
+    /**
+     * Get the scheduling delay value that determines the time window within
+     * which the constraint has to be satisfied.
+     *
+     * @return the value of the scheduling delay
+     */
+    public long getSchedulingDelay() {
+      return schedulingDelay;
+    }
+
+    /**
+     * The unit of the scheduling delay.
+     *
+     * @return the unit of the delay
+     */
+    public DelayUnit getDelayUnit() {
+      return delayUnit;
+    }
+
+    @Override
+    public <T> T accept(Visitor<T> visitor) {
+      return visitor.visit(this);
+    }
+  }
+}

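For illustration, a minimal sketch of how the classes above compose into a
constraint tree when the constructors are used directly. The class name
PlacementConstraintSketch, the scope literal "node" (the value of NODE_SCOPE),
and the tag "hbase-m" are assumptions for this example, not part of the patch:

import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression.TargetType;

// Illustrative sketch, not part of this patch.
public class PlacementConstraintSketch {
  public static void main(String[] args) {
    // Affinity: the node must already host at least one allocation tagged
    // "hbase-m".
    SingleConstraint affinity = new SingleConstraint("node", 1,
        Integer.MAX_VALUE,
        new TargetExpression(TargetType.ALLOCATION_TAG, null, "hbase-m"));
    // Cardinality: at most 3 allocations per node (self-target).
    SingleConstraint cardinality = new SingleConstraint("node", 0, 3,
        new TargetExpression(TargetType.SELF));
    // Conjunction of the two, wrapped into a PlacementConstraint.
    PlacementConstraint pc = new And(affinity, cardinality).build();
    System.out.println(pc.getConstraintExpr());
  }
}
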
http://git-wip-us.apache.org/repos/asf/hadoop/blob/276a62d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
new file mode 100644
index 0000000..8e84280
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
@@ -0,0 +1,286 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.resource;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.DelayedOr;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.Or;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression.TargetType;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TimedPlacementConstraint;
+
+/**
+ * This class contains various static methods for the applications to create
+ * placement constraints (see also {@link PlacementConstraint}).
+ */
+@Public
+@Unstable
+public final class PlacementConstraints {
+
+  // Suppresses default constructor, ensuring non-instantiability.
+  private PlacementConstraints() {
+  }
+
+  // Creation of simple constraints.
+
+  public static final String NODE = PlacementConstraint.NODE_SCOPE;
+  public static final String RACK = PlacementConstraint.RACK_SCOPE;
+
+  /**
+   * Creates a constraint that requires allocations to be placed on nodes that
+   * satisfy all target expressions within the given scope (e.g., node or rack).
+   *
+   * For example, {@code targetIn(RACK, allocationTag("hbase-m"))}, allows
+   * allocations on nodes that belong to a rack that has at least one
+   * allocation with tag "hbase-m".
+   *
+   * @param scope the scope within which the target expressions should be
+   *          satisfied
+   * @param targetExpressions the expressions that need to be satisfied within
+   *          the scope
+   * @return the resulting placement constraint
+   */
+  public static AbstractConstraint targetIn(String scope,
+      TargetExpression... targetExpressions) {
+    return new SingleConstraint(scope, 1, Integer.MAX_VALUE, targetExpressions);
+  }
+
+  /**
+   * Creates a constraint that requires allocations to be placed on nodes that
+   * belong to a scope (e.g., node or rack) that does not satisfy any of the
+   * target expressions.
+   *
+   * @param scope the scope within which the target expressions should not be
+   *          satisfied
+   * @param targetExpressions the expressions that must not be satisfied within
+   *          the scope
+   * @return the resulting placement constraint
+   */
+  public static AbstractConstraint targetNotIn(String scope,
+      TargetExpression... targetExpressions) {
+    return new SingleConstraint(scope, 0, 0, targetExpressions);
+  }
+
+  /**
+   * Creates a constraint that restricts the number of allocations within a
+   * given scope (e.g., node or rack).
+   *
+   * For example, {@code cardinality(NODE, 3, 10)}, restricts the number of
+   * allocations per node to be no less than 3 and no more than 10.
+   *
+   * @param scope the scope of the constraint
+   * @param minCardinality determines the minimum number of allocations within
+   *          the scope
+   * @param maxCardinality determines the maximum number of allocations within
+   *          the scope
+   * @return the resulting placement constraint
+   */
+  public static AbstractConstraint cardinality(String scope, int minCardinality,
+      int maxCardinality) {
+    return new SingleConstraint(scope, minCardinality, maxCardinality,
+        PlacementTargets.self());
+  }
+
+  /**
+   * Similar to {@link #cardinality(String, int, int)}, but determines only the
+   * minimum cardinality (the maximum cardinality is unbounded).
+   *
+   * @param scope the scope of the constraint
+   * @param minCardinality determines the minimum number of allocations within
+   *          the scope
+   * @return the resulting placement constraint
+   */
+  public static AbstractConstraint minCardinality(String scope,
+      int minCardinality) {
+    return cardinality(scope, minCardinality, Integer.MAX_VALUE);
+  }
+
+  /**
+   * Similar to {@link #cardinality(String, int, int)}, but determines only the
+   * maximum cardinality (the minimum can be as low as 0).
+   *
+   * @param scope the scope of the constraint
+   * @param maxCardinality determines the maximum number of allocations within
+   *          the scope
+   * @return the resulting placement constraint
+   */
+  public static AbstractConstraint maxCardinality(String scope,
+      int maxCardinality) {
+    return cardinality(scope, 0, maxCardinality);
+  }
+
+  /**
+   * This constraint generalizes the cardinality and target constraints.
+   *
+   * Consider a set of nodes N that belongs to the scope specified in the
+   * constraint. If the target expressions are satisfied at least minCardinality
+   * times and at most maxCardinality times in the node set N, then the
+   * constraint is satisfied.
+   *
+   * For example, {@code targetCardinality(RACK, 2, 10, allocationTag("zk"))},
+   * requires an allocation to be placed within a rack that has at least 2 and
+   * at most 10 other allocations with tag "zk".
+   *
+   * @param scope the scope of the constraint
+   * @param minCardinality the minimum number of times the target expressions
+   *          have to be satisfied within the given scope
+   * @param maxCardinality the maximum number of times the target expressions
+   *          have to be satisfied within the given scope
+   * @param targetExpressions the target expressions
+   * @return the resulting placement constraint
+   */
+  public static AbstractConstraint targetCardinality(String scope,
+      int minCardinality, int maxCardinality,
+      TargetExpression... targetExpressions) {
+    return new SingleConstraint(scope, minCardinality, maxCardinality,
+        targetExpressions);
+  }
+
+  // Creation of target expressions to be used in simple constraints.
+
+  /**
+   * Class with static methods for constructing target expressions to be used in
+   * placement constraints.
+   */
+  public static class PlacementTargets {
+
+    /**
+     * Constructs a target expression on a node attribute. It is satisfied if
+     * the specified node attribute has one of the specified values.
+     *
+     * @param attributeKey the name of the node attribute
+     * @param attributeValues the set of values that the attribute should take
+     *          values from
+     * @return the resulting expression on the node attribute
+     */
+    public static TargetExpression nodeAttribute(String attributeKey,
+        String... attributeValues) {
+      return new TargetExpression(TargetType.NODE_ATTRIBUTE, attributeKey,
+          attributeValues);
+    }
+
+    /**
+     * Constructs a target expression on an allocation tag. It is satisfied if
+     * there are allocations with one of the given tags.
+     *
+     * @param allocationTags the set of allocation tags, at least one of which
+     *          the target allocations should carry
+     * @return the resulting expression on the allocation tags
+     */
+    public static TargetExpression allocationTag(String... allocationTags) {
+      return new TargetExpression(TargetType.ALLOCATION_TAG, null,
+          allocationTags);
+    }
+
+    /**
+     * Constructs the default target expression, whose target is the
+     * allocation that specifies the constraint (the self-target).
+     *
+     * @return the self-target
+     */
+    public static TargetExpression self() {
+      return new TargetExpression(TargetType.SELF);
+    }
+  }
+
+  // Creation of compound constraints.
+
+  /**
+   * A conjunction of constraints.
+   *
+   * @param children the children constraints that should all be satisfied
+   * @return the resulting placement constraint
+   */
+  public static And and(AbstractConstraint... children) {
+    return new And(children);
+  }
+
+  /**
+   * A disjunction of constraints.
+   *
+   * @param children the children constraints, one of which should be satisfied
+   * @return the resulting placement constraint
+   */
+  public static Or or(AbstractConstraint... children) {
+    return new Or(children);
+  }
+
+  /**
+   * Creates a composite constraint that includes a list of timed placement
+   * constraints. The scheduler should first try to satisfy the first timed
+   * child constraint within the specified time window. If this is not possible,
+   * it should attempt to satisfy the second, and so on.
+   *
+   * @param children the timed children constraints
+   * @return the resulting composite constraint
+   */
+  public static DelayedOr delayedOr(TimedPlacementConstraint... children) {
+    return new DelayedOr(children);
+  }
+
+  // Creation of timed constraints to be used in a DELAYED_OR constraint.
+
+  /**
+   * Creates a placement constraint that has to be satisfied within a time
+   * window.
+   *
+   * @param constraint the placement constraint
+   * @param delay the length of the time window within which the constraint has
+   *          to be satisfied
+   * @param timeUnit the unit of time of the time window
+   * @return the resulting timed placement constraint
+   */
+  public static TimedPlacementConstraint timedClockConstraint(
+      AbstractConstraint constraint, long delay, TimeUnit timeUnit) {
+    return new TimedPlacementConstraint(constraint, timeUnit.toMillis(delay),
+        TimedPlacementConstraint.DelayUnit.MILLISECONDS);
+  }
+
+  /**
+   * Creates a placement constraint that has to be satisfied within a number of
+   * placement opportunities (invocations of the scheduler).
+   *
+   * @param constraint the placement constraint
+   * @param delay the number of scheduling opportunities within which the
+   *          constraint has to be satisfied
+   * @return the resulting timed placement constraint
+   */
+  public static TimedPlacementConstraint timedOpportunitiesConstraint(
+      AbstractConstraint constraint, long delay) {
+    return new TimedPlacementConstraint(constraint, delay,
+        TimedPlacementConstraint.DelayUnit.OPPORTUNITIES);
+  }
+
+  /**
+   * Creates a {@link PlacementConstraint} given a constraint expression.
+   *
+   * @param constraintExpr the constraint expression
+   * @return the placement constraint
+   */
+  public static PlacementConstraint build(AbstractConstraint constraintExpr) {
+    return constraintExpr.build();
+  }
+
+}

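The same kind of tree is more concise through the static helpers above; a
minimal usage sketch (class name and tag values are illustrative assumptions):

import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.RACK;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.and;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.maxCardinality;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;

import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraints;

// Illustrative sketch, not part of this patch.
public class PlacementConstraintsSketch {
  public static void main(String[] args) {
    // Rack affinity to "spark" allocations, combined with a per-node cap of 3.
    PlacementConstraint pc = PlacementConstraints.build(
        and(targetIn(RACK, allocationTag("spark")),
            maxCardinality(NODE, 3)));
    System.out.println(pc.getConstraintExpr());
  }
}
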
http://git-wip-us.apache.org/repos/asf/hadoop/blob/276a62d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
new file mode 100644
index 0000000..660dc02
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * API related to resources.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.yarn.api.resource;
+import org.apache.hadoop.classification.InterfaceAudience;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/276a62d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 3a9662b..968b75e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -578,6 +578,61 @@ enum SignalContainerCommandProto {
   FORCEFUL_SHUTDOWN = 3;
 }
 
+////////////////////////////////////////////////////////////////////////
+////// Placement constraints ///////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+
+message PlacementConstraintProto {
+  optional SimplePlacementConstraintProto simpleConstraint = 1;
+  optional CompositePlacementConstraintProto compositeConstraint = 2;
+}
+
+message SimplePlacementConstraintProto {
+  required string scope = 1;
+  repeated PlacementConstraintTargetProto targetExpressions = 2;
+  optional int32 minCardinality = 3;
+  optional int32 maxCardinality = 4;
+}
+
+message PlacementConstraintTargetProto {
+  enum TargetType {
+    NODE_ATTRIBUTE = 1;
+    ALLOCATION_TAG = 2;
+    SELF = 3;
+  }
+
+  required TargetType targetType = 1;
+  optional string targetKey = 2;
+  repeated string targetValues = 3;
+}
+
+message TimedPlacementConstraintProto {
+  enum DelayUnit {
+    MILLISECONDS = 1;
+    OPPORTUNITIES = 2;
+  }
+
+  required PlacementConstraintProto placementConstraint = 1;
+  required int64 schedulingDelay = 2;
+  optional DelayUnit delayUnit = 3 [ default = MILLISECONDS ];
+}
+
+message CompositePlacementConstraintProto {
+  enum CompositeType {
+    // All children constraints have to be satisfied.
+    AND = 1;
+    // One of the children constraints has to be satisfied.
+    OR = 2;
+    // Attempt to satisfy the first child constraint for delays[0] units (e.g.,
+    // millisec or heartbeats). If this fails, try to satisfy the second child
+    // constraint for delays[1] units and so on.
+    DELAYED_OR = 3;
+  }
+
+  required CompositeType compositeType = 1;
+  repeated PlacementConstraintProto childConstraints = 2;
+  repeated TimedPlacementConstraintProto timedChildConstraints = 3;
+}
 
 ////////////////////////////////////////////////////////////////////////
 ////// From reservation_protocol /////////////////////////////////////

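Assuming the usual protobuf-java code generation for the messages above (the
builder method names follow from the field names and are the same ones the
converters later in this patch call), a simple constraint proto could be
assembled directly:

import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto;
import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintTargetProto;
import org.apache.hadoop.yarn.proto.YarnProtos.SimplePlacementConstraintProto;

// Illustrative sketch, not part of this patch.
public class PlacementConstraintProtoSketch {
  public static void main(String[] args) {
    // Target: allocations carrying the tag "zk".
    PlacementConstraintTargetProto target =
        PlacementConstraintTargetProto.newBuilder()
            .setTargetType(
                PlacementConstraintTargetProto.TargetType.ALLOCATION_TAG)
            .addTargetValues("zk")
            .build();
    // Scope "rack", with cardinality between 2 and 10.
    SimplePlacementConstraintProto simple =
        SimplePlacementConstraintProto.newBuilder()
            .setScope("rack")
            .setMinCardinality(2)
            .setMaxCardinality(10)
            .addTargetExpressions(target)
            .build();
    PlacementConstraintProto proto = PlacementConstraintProto.newBuilder()
        .setSimpleConstraint(simple)
        .build();
    System.out.println(proto);
  }
}
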
http://git-wip-us.apache.org/repos/asf/hadoop/blob/276a62d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraints.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraints.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraints.java
new file mode 100644
index 0000000..e25d477
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraints.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.resource;
+
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.RACK;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.and;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.maxCardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetCardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetNotIn;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.nodeAttribute;
+
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression.TargetType;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test class for the various static methods in
+ * {@link org.apache.hadoop.yarn.api.resource.PlacementConstraints}.
+ */
+public class TestPlacementConstraints {
+
+  @Test
+  public void testNodeAffinityToTag() {
+    AbstractConstraint constraintExpr =
+        targetIn(NODE, allocationTag("hbase-m"));
+
+    SingleConstraint sConstraint = (SingleConstraint) constraintExpr;
+    Assert.assertEquals(NODE, sConstraint.getScope());
+    Assert.assertEquals(1, sConstraint.getMinCardinality());
+    Assert.assertEquals(Integer.MAX_VALUE, sConstraint.getMaxCardinality());
+
+    Assert.assertEquals(1, sConstraint.getTargetExpressions().size());
+    TargetExpression tExpr =
+        sConstraint.getTargetExpressions().iterator().next();
+    Assert.assertNull(tExpr.getTargetKey());
+    Assert.assertEquals(TargetType.ALLOCATION_TAG, tExpr.getTargetType());
+    Assert.assertEquals(1, tExpr.getTargetValues().size());
+    Assert.assertEquals("hbase-m", tExpr.getTargetValues().iterator().next());
+
+    PlacementConstraint constraint = PlacementConstraints.build(constraintExpr);
+    Assert.assertNotNull(constraint.getConstraintExpr());
+  }
+
+  @Test
+  public void testNodeAntiAffinityToAttribute() {
+    AbstractConstraint constraintExpr =
+        targetNotIn(NODE, nodeAttribute("java", "1.8"));
+
+    SingleConstraint sConstraint = (SingleConstraint) constraintExpr;
+    Assert.assertEquals(NODE, sConstraint.getScope());
+    Assert.assertEquals(0, sConstraint.getMinCardinality());
+    Assert.assertEquals(0, sConstraint.getMaxCardinality());
+
+    Assert.assertEquals(1, sConstraint.getTargetExpressions().size());
+    TargetExpression tExpr =
+        sConstraint.getTargetExpressions().iterator().next();
+    Assert.assertEquals("java", tExpr.getTargetKey());
+    Assert.assertEquals(TargetType.NODE_ATTRIBUTE, tExpr.getTargetType());
+    Assert.assertEquals(1, tExpr.getTargetValues().size());
+    Assert.assertEquals("1.8", tExpr.getTargetValues().iterator().next());
+  }
+
+  @Test
+  public void testAndConstraint() {
+    AbstractConstraint constraintExpr =
+        and(targetIn(RACK, allocationTag("spark")), maxCardinality(NODE, 3),
+            targetCardinality(RACK, 2, 10, allocationTag("zk")));
+
+    And andExpr = (And) constraintExpr;
+    Assert.assertEquals(3, andExpr.getChildren().size());
+    SingleConstraint sConstr = (SingleConstraint) andExpr.getChildren().get(0);
+    TargetExpression tExpr = sConstr.getTargetExpressions().iterator().next();
+    Assert.assertEquals("spark", tExpr.getTargetValues().iterator().next());
+
+    sConstr = (SingleConstraint) andExpr.getChildren().get(1);
+    Assert.assertEquals(0, sConstr.getMinCardinality());
+    Assert.assertEquals(3, sConstr.getMaxCardinality());
+
+    sConstr = (SingleConstraint) andExpr.getChildren().get(2);
+    Assert.assertEquals(2, sConstr.getMinCardinality());
+    Assert.assertEquals(10, sConstr.getMaxCardinality());
+  }
+}

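The tests above do not exercise DelayedOr; a sketch of how one could be built
with the helpers (the constraint choices and delay values are illustrative
assumptions):

import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.delayedOr;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.maxCardinality;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetNotIn;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.timedClockConstraint;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.timedOpportunitiesConstraint;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.yarn.api.resource.PlacementConstraint.DelayedOr;

// Illustrative sketch, not part of this patch.
public class DelayedOrSketch {
  public static void main(String[] args) {
    // Insist on node anti-affinity to "hbase-m" for 10 minutes; afterwards
    // fall back to a plain per-node cap, tried for 5 scheduling
    // opportunities.
    DelayedOr constraint = delayedOr(
        timedClockConstraint(
            targetNotIn(NODE, allocationTag("hbase-m")), 10,
            TimeUnit.MINUTES),
        timedOpportunitiesConstraint(maxCardinality(NODE, 3), 5));
    System.out.println(constraint.getChildren().size());
  }
}
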
http://git-wip-us.apache.org/repos/asf/hadoop/blob/276a62d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintFromProtoConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintFromProtoConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintFromProtoConverter.java
new file mode 100644
index 0000000..926b6fa
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintFromProtoConverter.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.pb;
+
+import static org.apache.hadoop.yarn.proto.YarnProtos.CompositePlacementConstraintProto.CompositeType.AND;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.DelayedOr;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.Or;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TimedPlacementConstraint;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.proto.YarnProtos.CompositePlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintTargetProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.SimplePlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.TimedPlacementConstraintProto;
+
+/**
+ * {@code PlacementConstraintFromProtoConverter} generates an
+ * {@link PlacementConstraint.AbstractConstraint} given a
+ * {@link PlacementConstraintProto}.
+ */
+@Private
+public class PlacementConstraintFromProtoConverter {
+
+  private PlacementConstraintProto constraintProto;
+
+  public PlacementConstraintFromProtoConverter(
+      PlacementConstraintProto constraintProto) {
+    this.constraintProto = constraintProto;
+  }
+
+  public PlacementConstraint convert() {
+    return new PlacementConstraint(convert(constraintProto));
+  }
+
+  private AbstractConstraint convert(PlacementConstraintProto proto) {
+    return proto.hasSimpleConstraint() ? convert(proto.getSimpleConstraint())
+        : convert(proto.getCompositeConstraint());
+  }
+
+  private SingleConstraint convert(SimplePlacementConstraintProto proto) {
+    Set<TargetExpression> targets = new HashSet<>();
+    for (PlacementConstraintTargetProto tp : proto.getTargetExpressionsList()) {
+      targets.add(convert(tp));
+    }
+
+    return new SingleConstraint(proto.getScope(), proto.getMinCardinality(),
+        proto.getMaxCardinality(), targets);
+  }
+
+  private TargetExpression convert(PlacementConstraintTargetProto proto) {
+    return new TargetExpression(
+        ProtoUtils.convertFromProtoFormat(proto.getTargetType()),
+        proto.hasTargetKey() ? proto.getTargetKey() : null,
+        new HashSet<>(proto.getTargetValuesList()));
+  }
+
+  private AbstractConstraint convert(CompositePlacementConstraintProto proto) {
+    switch (proto.getCompositeType()) {
+    case AND:
+    case OR:
+      List<AbstractConstraint> children = new ArrayList<>();
+      for (PlacementConstraintProto cp : proto.getChildConstraintsList()) {
+        children.add(convert(cp));
+      }
+      return (proto.getCompositeType() == AND) ? new And(children)
+          : new Or(children);
+    case DELAYED_OR:
+      List<TimedPlacementConstraint> tChildren = new ArrayList<>();
+      for (TimedPlacementConstraintProto cp : proto
+          .getTimedChildConstraintsList()) {
+        tChildren.add(convert(cp));
+      }
+      return new DelayedOr(tChildren);
+    default:
+      throw new YarnRuntimeException(
+          "Encountered unexpected type of composite constraint.");
+    }
+  }
+
+  private TimedPlacementConstraint convert(
+      TimedPlacementConstraintProto proto) {
+    AbstractConstraint pConstraint = convert(proto.getPlacementConstraint());
+
+    return new TimedPlacementConstraint(pConstraint, proto.getSchedulingDelay(),
+        ProtoUtils.convertFromProtoFormat(proto.getDelayUnit()));
+  }
+}

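A minimal sketch of converting a wire-level proto back into the API form,
assuming a proto assembled with the generated builders (the min = max = 0
encoding mirrors the anti-affinity form used by targetNotIn):

import org.apache.hadoop.yarn.api.pb.PlacementConstraintFromProtoConverter;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto;
import org.apache.hadoop.yarn.proto.YarnProtos.SimplePlacementConstraintProto;

// Illustrative sketch, not part of this patch.
public class FromProtoSketch {
  public static void main(String[] args) {
    PlacementConstraintProto proto = PlacementConstraintProto.newBuilder()
        .setSimpleConstraint(SimplePlacementConstraintProto.newBuilder()
            .setScope("node")
            .setMinCardinality(0)
            .setMaxCardinality(0)
            .build())
        .build();
    PlacementConstraint pc =
        new PlacementConstraintFromProtoConverter(proto).convert();
    System.out.println(pc.getConstraintExpr());
  }
}
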
http://git-wip-us.apache.org/repos/asf/hadoop/blob/276a62d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintToProtoConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintToProtoConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintToProtoConverter.java
new file mode 100644
index 0000000..7816e18
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintToProtoConverter.java
@@ -0,0 +1,174 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.pb;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.CardinalityConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.CompositeConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.DelayedOr;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.Or;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TimedPlacementConstraint;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.proto.YarnProtos.CompositePlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.CompositePlacementConstraintProto.CompositeType;
+import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintTargetProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.SimplePlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.TimedPlacementConstraintProto;
+
+import com.google.protobuf.GeneratedMessage;
+
+/**
+ * {@code PlacementConstraintToProtoConverter} generates a
+ * {@link PlacementConstraintProto} given a
+ * {@link PlacementConstraint.AbstractConstraint}.
+ */
+@Private
+public class PlacementConstraintToProtoConverter
+    implements PlacementConstraint.Visitor<GeneratedMessage> {
+
+  private PlacementConstraint placementConstraint;
+
+  public PlacementConstraintToProtoConverter(
+      PlacementConstraint placementConstraint) {
+    this.placementConstraint = placementConstraint;
+  }
+
+  public PlacementConstraintProto convert() {
+    return (PlacementConstraintProto) placementConstraint.getConstraintExpr()
+        .accept(this);
+  }
+
+  @Override
+  public GeneratedMessage visit(SingleConstraint constraint) {
+    SimplePlacementConstraintProto.Builder sb =
+        SimplePlacementConstraintProto.newBuilder();
+
+    if (constraint.getScope() != null) {
+      sb.setScope(constraint.getScope());
+    }
+    sb.setMinCardinality(constraint.getMinCardinality());
+    sb.setMaxCardinality(constraint.getMaxCardinality());
+    if (constraint.getTargetExpressions() != null) {
+      for (TargetExpression target : constraint.getTargetExpressions()) {
+        sb.addTargetExpressions(
+            (PlacementConstraintTargetProto) target.accept(this));
+      }
+    }
+    SimplePlacementConstraintProto sProto = sb.build();
+
+    // Wrap around PlacementConstraintProto object.
+    PlacementConstraintProto.Builder pb = PlacementConstraintProto.newBuilder();
+    pb.setSimpleConstraint(sProto);
+    return pb.build();
+  }
+
+  @Override
+  public GeneratedMessage visit(TargetExpression target) {
+    PlacementConstraintTargetProto.Builder tb =
+        PlacementConstraintTargetProto.newBuilder();
+
+    tb.setTargetType(ProtoUtils.convertToProtoFormat(target.getTargetType()));
+    if (target.getTargetKey() != null) {
+      tb.setTargetKey(target.getTargetKey());
+    }
+    if (target.getTargetValues() != null) {
+      tb.addAllTargetValues(target.getTargetValues());
+    }
+    return tb.build();
+  }
+
+  @Override
+  public GeneratedMessage visit(TargetConstraint constraint) {
+    throw new YarnRuntimeException("Unexpected TargetConstraint found.");
+  }
+
+  @Override
+  public GeneratedMessage visit(CardinalityConstraint constraint) {
+    throw new YarnRuntimeException("Unexpected CardinalityConstraint found.");
+  }
+
+  private GeneratedMessage visitAndOr(
+      CompositeConstraint<AbstractConstraint> composite, CompositeType type) {
+    CompositePlacementConstraintProto.Builder cb =
+        CompositePlacementConstraintProto.newBuilder();
+
+    cb.setCompositeType(type);
+
+    for (AbstractConstraint c : composite.getChildren()) {
+      cb.addChildConstraints((PlacementConstraintProto) c.accept(this));
+    }
+    CompositePlacementConstraintProto cProto = cb.build();
+
+    // Wrap around PlacementConstraintProto object.
+    PlacementConstraintProto.Builder pb = PlacementConstraintProto.newBuilder();
+    pb.setCompositeConstraint(cProto);
+    return pb.build();
+  }
+
+  @Override
+  public GeneratedMessage visit(And constraint) {
+    return visitAndOr(constraint, CompositeType.AND);
+  }
+
+  @Override
+  public GeneratedMessage visit(Or constraint) {
+    return visitAndOr(constraint, CompositeType.OR);
+  }
+
+  @Override
+  public GeneratedMessage visit(DelayedOr constraint) {
+    CompositePlacementConstraintProto.Builder cb =
+        CompositePlacementConstraintProto.newBuilder();
+
+    cb.setCompositeType(CompositeType.DELAYED_OR);
+
+    for (TimedPlacementConstraint c : constraint.getChildren()) {
+      cb.addTimedChildConstraints(
+          (TimedPlacementConstraintProto) c.accept(this));
+    }
+    CompositePlacementConstraintProto cProto = cb.build();
+
+    // Wrap around PlacementConstraintProto object.
+    PlacementConstraintProto.Builder pb = PlacementConstraintProto.newBuilder();
+    pb.setCompositeConstraint(cProto);
+    return pb.build();
+  }
+
+  @Override
+  public GeneratedMessage visit(TimedPlacementConstraint constraint) {
+    TimedPlacementConstraintProto.Builder tb =
+        TimedPlacementConstraintProto.newBuilder();
+
+    tb.setDelayUnit(ProtoUtils.convertToProtoFormat(constraint.getDelayUnit()));
+    tb.setSchedulingDelay(constraint.getSchedulingDelay());
+    tb.setPlacementConstraint(
+        (PlacementConstraintProto) constraint.getConstraint().accept(this));
+
+    return tb.build();
+  }
+}

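Together with the from-proto converter earlier in the patch, this enables a
full round trip; a minimal sketch (the constraint values are illustrative):

import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;

import org.apache.hadoop.yarn.api.pb.PlacementConstraintFromProtoConverter;
import org.apache.hadoop.yarn.api.pb.PlacementConstraintToProtoConverter;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto;

// Illustrative sketch, not part of this patch.
public class RoundTripSketch {
  public static void main(String[] args) {
    PlacementConstraint original = PlacementConstraints.build(
        targetIn(NODE, allocationTag("hbase-m")));
    // API form -> proto -> API form.
    PlacementConstraintProto proto =
        new PlacementConstraintToProtoConverter(original).convert();
    PlacementConstraint restored =
        new PlacementConstraintFromProtoConverter(proto).convert();
    System.out.println(restored.getConstraintExpr());
  }
}
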
http://git-wip-us.apache.org/repos/asf/hadoop/blob/276a62d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/package-info.java
new file mode 100644
index 0000000..18da80f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * API related to protobuf objects that are not backed by PBImpl classes.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.yarn.api.pb;
+import org.apache.hadoop.classification.InterfaceAudience;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/276a62d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
index f3e665b..168d864 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
@@ -56,6 +56,8 @@ import org.apache.hadoop.yarn.api.records.UpdateContainerError;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TimedPlacementConstraint;
 import org.apache.hadoop.yarn.proto.YarnProtos;
 import org.apache.hadoop.yarn.proto.YarnProtos.AMCommandProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAccessTypeProto;
@@ -70,10 +72,12 @@ import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceVisibilityProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.LogAggregationStatusProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.NodeStateProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintTargetProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.QueueACLProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.QueueStateProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ReservationRequestInterpreterProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.TimedPlacementConstraintProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationAttemptStateProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationStateProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerRetryPolicyProto;
@@ -507,6 +511,29 @@ public class ProtoUtils {
     }
     return ret;
   }
+
+  public static PlacementConstraintTargetProto.TargetType convertToProtoFormat(
+      TargetExpression.TargetType t) {
+    return PlacementConstraintTargetProto.TargetType.valueOf(t.name());
+  }
+
+  public static TargetExpression.TargetType convertFromProtoFormat(
+      PlacementConstraintTargetProto.TargetType t) {
+    return TargetExpression.TargetType.valueOf(t.name());
+  }
+
+  /*
+   * TimedPlacementConstraint.DelayUnit
+   */
+  public static TimedPlacementConstraintProto.DelayUnit convertToProtoFormat(
+      TimedPlacementConstraint.DelayUnit u) {
+    return TimedPlacementConstraintProto.DelayUnit.valueOf(u.name());
+  }
+
+  public static TimedPlacementConstraint.DelayUnit convertFromProtoFormat(
+      TimedPlacementConstraintProto.DelayUnit u) {
+    return TimedPlacementConstraint.DelayUnit.valueOf(u.name());
+  }
 }
 
 

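The new ProtoUtils helpers rely on the API and proto enums sharing value
names; a quick sketch of the round trip (the class name is an illustrative
assumption):

import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression.TargetType;
import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintTargetProto;

// Illustrative sketch, not part of this patch.
public class EnumConversionSketch {
  public static void main(String[] args) {
    // Each conversion is a valueOf lookup on the shared enum value name.
    PlacementConstraintTargetProto.TargetType protoType =
        ProtoUtils.convertToProtoFormat(TargetType.ALLOCATION_TAG);
    TargetType apiType = ProtoUtils.convertFromProtoFormat(protoType);
    System.out.println(protoType + " -> " + apiType);
  }
}
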
http://git-wip-us.apache.org/repos/asf/hadoop/blob/276a62d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java
new file mode 100644
index 0000000..e9eda6f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraintTransformations.java
@@ -0,0 +1,209 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.resource;
+
+import java.util.ListIterator;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.CardinalityConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.CompositeConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.DelayedOr;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.Or;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetConstraint.TargetOperator;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TimedPlacementConstraint;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+
+/**
+ * This class contains inner classes that define transformation on a
+ * {@link PlacementConstraint} expression.
+ */
+@Private
+public class PlacementConstraintTransformations {
+
+  /**
+   * The default implementation of the {@link PlacementConstraint.Visitor} that
+   * does a traversal of the constraint tree, performing no action for the leaf
+   * constraints.
+   */
+  public static class AbstractTransformer
+      implements PlacementConstraint.Visitor<AbstractConstraint> {
+
+    private PlacementConstraint placementConstraint;
+
+    public AbstractTransformer(PlacementConstraint placementConstraint) {
+      this.placementConstraint = placementConstraint;
+    }
+
+    /**
+     * This method performs the transformation of the
+     * {@link #placementConstraint}.
+     *
+     * @return the transformed placement constraint.
+     */
+    public PlacementConstraint transform() {
+      AbstractConstraint constraintExpr =
+          placementConstraint.getConstraintExpr();
+
+      // Visit the constraint tree to perform the transformation.
+      constraintExpr = constraintExpr.accept(this);
+
+      return new PlacementConstraint(constraintExpr);
+    }
+
+    @Override
+    public AbstractConstraint visit(SingleConstraint constraint) {
+      // Do nothing.
+      return constraint;
+    }
+
+    @Override
+    public AbstractConstraint visit(TargetExpression expression) {
+      // Do nothing.
+      return null;
+    }
+
+    @Override
+    public AbstractConstraint visit(TargetConstraint constraint) {
+      // Do nothing.
+      return constraint;
+    }
+
+    @Override
+    public AbstractConstraint visit(CardinalityConstraint constraint) {
+      // Do nothing.
+      return constraint;
+    }
+
+    private AbstractConstraint visitAndOr(
+        CompositeConstraint<AbstractConstraint> constraint) {
+      for (ListIterator<AbstractConstraint> iter =
+          constraint.getChildren().listIterator(); iter.hasNext();) {
+        AbstractConstraint child = iter.next();
+        child = child.accept(this);
+        iter.set(child);
+      }
+      return constraint;
+    }
+
+    @Override
+    public AbstractConstraint visit(And constraint) {
+      return visitAndOr(constraint);
+    }
+
+    @Override
+    public AbstractConstraint visit(Or constraint) {
+      return visitAndOr(constraint);
+    }
+
+    @Override
+    public AbstractConstraint visit(DelayedOr constraint) {
+      constraint.getChildren().forEach(
+          child -> child.setConstraint(child.getConstraint().accept(this)));
+      return constraint;
+    }
+
+    @Override
+    public AbstractConstraint visit(TimedPlacementConstraint constraint) {
+      // Do nothing.
+      return null;
+    }
+  }
+
+  /**
+   * Visits a {@link PlacementConstraint} tree and substitutes each
+   * {@link TargetConstraint} and {@link CardinalityConstraint} with an
+   * equivalent {@link SingleConstraint}.
+   */
+  public static class SingleConstraintTransformer extends AbstractTransformer {
+
+    public SingleConstraintTransformer(PlacementConstraint constraint) {
+      super(constraint);
+    }
+
+    @Override
+    public AbstractConstraint visit(TargetConstraint constraint) {
+      AbstractConstraint newConstraint;
+      if (constraint.getOp() == TargetOperator.IN) {
+        newConstraint = new SingleConstraint(constraint.getScope(), 1,
+            Integer.MAX_VALUE, constraint.getTargetExpressions());
+      } else if (constraint.getOp() == TargetOperator.NOT_IN) {
+        newConstraint = new SingleConstraint(constraint.getScope(), 0, 0,
+            constraint.getTargetExpressions());
+      } else {
+        throw new YarnRuntimeException(
+            "Encountered unexpected type of constraint target operator: "
+                + constraint.getOp());
+      }
+      return newConstraint;
+    }
+
+    @Override
+    public AbstractConstraint visit(CardinalityConstraint constraint) {
+      return new SingleConstraint(constraint.getScope(),
+          constraint.getMinCardinality(), constraint.getMaxCardinality(),
+          new TargetExpression(TargetExpression.TargetType.SELF));
+    }
+  }
+
+  /**
+   * Visits a {@link PlacementConstraint} tree and, whenever possible,
+   * substitutes each {@link SingleConstraint} with a {@link TargetConstraint}
+   * or a {@link CardinalityConstraint}. When such a substitution is not
+   * possible, we keep the original {@link SingleConstraint}.
+   */
+  public static class SpecializedConstraintTransformer
+      extends AbstractTransformer {
+
+    public SpecializedConstraintTransformer(PlacementConstraint constraint) {
+      super(constraint);
+    }
+
+    @Override
+    public AbstractConstraint visit(SingleConstraint constraint) {
+      AbstractConstraint transformedConstraint = constraint;
+      // Check if it is a cardinality constraint.
+      if (constraint.getTargetExpressions().size() == 1) {
+        TargetExpression targetExpr =
+            constraint.getTargetExpressions().iterator().next();
+        if (targetExpr.getTargetType() == TargetExpression.TargetType.SELF) {
+          transformedConstraint = new CardinalityConstraint(
+              constraint.getScope(), constraint.getMinCardinality(),
+              constraint.getMaxCardinality());
+        }
+      }
+      // Check if it is a target constraint.
+      if (constraint.getMinCardinality() == 1
+          && constraint.getMaxCardinality() == Integer.MAX_VALUE) {
+        transformedConstraint = new TargetConstraint(TargetOperator.IN,
+            constraint.getScope(), constraint.getTargetExpressions());
+      } else if (constraint.getMinCardinality() == 0
+          && constraint.getMaxCardinality() == 0) {
+        transformedConstraint = new TargetConstraint(TargetOperator.NOT_IN,
+            constraint.getScope(), constraint.getTargetExpressions());
+      }
+
+      return transformedConstraint;
+    }
+  }
+}
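
For readers skimming the patch, a minimal sketch of how the two transformers
above are meant to be driven (class and builder names are taken from this
patch; the constraint built here is illustrative):

    import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraintTransformations.SingleConstraintTransformer;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraintTransformations.SpecializedConstraintTransformer;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;

    public class TransformerSketch {
      public static void main(String[] args) {
        // Illustrative constraint: place on nodes that carry the "hbase-m" tag.
        PlacementConstraint constraint =
            PlacementConstraints.build(targetIn(NODE, allocationTag("hbase-m")));

        // Rewrite SingleConstraints into the specialized forms where an
        // equivalent TargetConstraint or CardinalityConstraint exists.
        PlacementConstraint specialized =
            new SpecializedConstraintTransformer(constraint).transform();

        // Rewrite the specialized forms back into SingleConstraints.
        PlacementConstraint general =
            new SingleConstraintTransformer(specialized).transform();
      }
    }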

http://git-wip-us.apache.org/repos/asf/hadoop/blob/276a62d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
new file mode 100644
index 0000000..660dc02
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/resource/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * API related to resources.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.yarn.api.resource;
+import org.apache.hadoop.classification.InterfaceAudience;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/276a62d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPlacementConstraintPBConversion.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPlacementConstraintPBConversion.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPlacementConstraintPBConversion.java
new file mode 100644
index 0000000..bd245e2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPlacementConstraintPBConversion.java
@@ -0,0 +1,195 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api;
+
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.RACK;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.cardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.maxCardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.or;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetCardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;
+
+import java.util.Iterator;
+
+import org.apache.hadoop.yarn.api.pb.PlacementConstraintFromProtoConverter;
+import org.apache.hadoop.yarn.api.pb.PlacementConstraintToProtoConverter;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.Or;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
+import org.apache.hadoop.yarn.proto.YarnProtos.CompositePlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.CompositePlacementConstraintProto.CompositeType;
+import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.SimplePlacementConstraintProto;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test class for {@link PlacementConstraintToProtoConverter} and
+ * {@link PlacementConstraintFromProtoConverter}.
+ */
+public class TestPlacementConstraintPBConversion {
+
+  @Test
+  public void testTargetConstraintProtoConverter() {
+    AbstractConstraint sConstraintExpr =
+        targetIn(NODE, allocationTag("hbase-m"));
+    Assert.assertTrue(sConstraintExpr instanceof SingleConstraint);
+    SingleConstraint single = (SingleConstraint) sConstraintExpr;
+    PlacementConstraint sConstraint =
+        PlacementConstraints.build(sConstraintExpr);
+
+    // Convert to proto.
+    PlacementConstraintToProtoConverter toProtoConverter =
+        new PlacementConstraintToProtoConverter(sConstraint);
+    PlacementConstraintProto protoConstraint = toProtoConverter.convert();
+
+    Assert.assertTrue(protoConstraint.hasSimpleConstraint());
+    Assert.assertFalse(protoConstraint.hasCompositeConstraint());
+    SimplePlacementConstraintProto sProto =
+        protoConstraint.getSimpleConstraint();
+    Assert.assertEquals(single.getScope(), sProto.getScope());
+    Assert.assertEquals(single.getMinCardinality(), sProto.getMinCardinality());
+    Assert.assertEquals(single.getMaxCardinality(), sProto.getMaxCardinality());
+    Assert.assertEquals(single.getTargetExpressions().size(),
+        sProto.getTargetExpressionsList().size());
+
+    // Convert from proto.
+    PlacementConstraintFromProtoConverter fromProtoConverter =
+        new PlacementConstraintFromProtoConverter(protoConstraint);
+    PlacementConstraint newConstraint = fromProtoConverter.convert();
+
+    AbstractConstraint newConstraintExpr = newConstraint.getConstraintExpr();
+    Assert.assertTrue(newConstraintExpr instanceof SingleConstraint);
+    SingleConstraint newSingle = (SingleConstraint) newConstraintExpr;
+    Assert.assertEquals(single.getScope(), newSingle.getScope());
+    Assert.assertEquals(single.getMinCardinality(),
+        newSingle.getMinCardinality());
+    Assert.assertEquals(single.getMaxCardinality(),
+        newSingle.getMaxCardinality());
+    Assert.assertEquals(single.getTargetExpressions(),
+        newSingle.getTargetExpressions());
+  }
+
+  @Test
+  public void testCardinalityConstraintProtoConverter() {
+    AbstractConstraint sConstraintExpr = cardinality(RACK, 3, 10);
+    Assert.assertTrue(sConstraintExpr instanceof SingleConstraint);
+    SingleConstraint single = (SingleConstraint) sConstraintExpr;
+    PlacementConstraint sConstraint =
+        PlacementConstraints.build(sConstraintExpr);
+
+    // Convert to proto.
+    PlacementConstraintToProtoConverter toProtoConverter =
+        new PlacementConstraintToProtoConverter(sConstraint);
+    PlacementConstraintProto protoConstraint = toProtoConverter.convert();
+
+    compareSimpleConstraintToProto(single, protoConstraint);
+
+    // Convert from proto.
+    PlacementConstraintFromProtoConverter fromProtoConverter =
+        new PlacementConstraintFromProtoConverter(protoConstraint);
+    PlacementConstraint newConstraint = fromProtoConverter.convert();
+
+    AbstractConstraint newConstraintExpr = newConstraint.getConstraintExpr();
+    Assert.assertTrue(newConstraintExpr instanceof SingleConstraint);
+    SingleConstraint newSingle = (SingleConstraint) newConstraintExpr;
+    compareSimpleConstraints(single, newSingle);
+  }
+
+  @Test
+  public void testCompositeConstraintProtoConverter() {
+    AbstractConstraint constraintExpr =
+        or(targetIn(RACK, allocationTag("spark")), maxCardinality(NODE, 3),
+            targetCardinality(RACK, 2, 10, allocationTag("zk")));
+    Assert.assertTrue(constraintExpr instanceof Or);
+    PlacementConstraint constraint = PlacementConstraints.build(constraintExpr);
+    Or orExpr = (Or) constraintExpr;
+
+    // Convert to proto.
+    PlacementConstraintToProtoConverter toProtoConverter =
+        new PlacementConstraintToProtoConverter(constraint);
+    PlacementConstraintProto protoConstraint = toProtoConverter.convert();
+
+    Assert.assertFalse(protoConstraint.hasSimpleConstraint());
+    Assert.assertTrue(protoConstraint.hasCompositeConstraint());
+    CompositePlacementConstraintProto cProto =
+        protoConstraint.getCompositeConstraint();
+
+    Assert.assertEquals(CompositeType.OR, cProto.getCompositeType());
+    Assert.assertEquals(3, cProto.getChildConstraintsCount());
+    Assert.assertEquals(0, cProto.getTimedChildConstraintsCount());
+    Iterator<AbstractConstraint> orChildren = orExpr.getChildren().iterator();
+    Iterator<PlacementConstraintProto> orProtoChildren =
+        cProto.getChildConstraintsList().iterator();
+    while (orChildren.hasNext() && orProtoChildren.hasNext()) {
+      AbstractConstraint orChild = orChildren.next();
+      PlacementConstraintProto orProtoChild = orProtoChildren.next();
+      compareSimpleConstraintToProto((SingleConstraint) orChild, orProtoChild);
+    }
+
+    // Convert from proto.
+    PlacementConstraintFromProtoConverter fromProtoConverter =
+        new PlacementConstraintFromProtoConverter(protoConstraint);
+    PlacementConstraint newConstraint = fromProtoConverter.convert();
+
+    AbstractConstraint newConstraintExpr = newConstraint.getConstraintExpr();
+    Assert.assertTrue(newConstraintExpr instanceof Or);
+    Or newOrExpr = (Or) newConstraintExpr;
+    Assert.assertEquals(3, newOrExpr.getChildren().size());
+    orChildren = orExpr.getChildren().iterator();
+    Iterator<AbstractConstraint> newOrChildren =
+        newOrExpr.getChildren().iterator();
+    while (orChildren.hasNext() && newOrChildren.hasNext()) {
+      AbstractConstraint orChild = orChildren.next();
+      AbstractConstraint newOrChild = newOrChildren.next();
+      compareSimpleConstraints((SingleConstraint) orChild,
+          (SingleConstraint) newOrChild);
+    }
+  }
+
+  private void compareSimpleConstraintToProto(SingleConstraint constraint,
+      PlacementConstraintProto proto) {
+    Assert.assertTrue(proto.hasSimpleConstraint());
+    Assert.assertFalse(proto.hasCompositeConstraint());
+    SimplePlacementConstraintProto sProto = proto.getSimpleConstraint();
+    Assert.assertEquals(constraint.getScope(), sProto.getScope());
+    Assert.assertEquals(constraint.getMinCardinality(),
+        sProto.getMinCardinality());
+    Assert.assertEquals(constraint.getMaxCardinality(),
+        sProto.getMaxCardinality());
+    Assert.assertEquals(constraint.getTargetExpressions().size(),
+        sProto.getTargetExpressionsList().size());
+  }
+
+  private void compareSimpleConstraints(SingleConstraint single,
+      SingleConstraint newSingle) {
+    Assert.assertEquals(single.getScope(), newSingle.getScope());
+    Assert.assertEquals(single.getMinCardinality(),
+        newSingle.getMinCardinality());
+    Assert.assertEquals(single.getMaxCardinality(),
+        newSingle.getMaxCardinality());
+    Assert.assertEquals(single.getTargetExpressions(),
+        newSingle.getTargetExpressions());
+  }
+
+}
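
The round trip exercised by the test above boils down to the following
pattern (converter and builder names are from this patch; the constraint is
illustrative):

    import org.apache.hadoop.yarn.api.pb.PlacementConstraintFromProtoConverter;
    import org.apache.hadoop.yarn.api.pb.PlacementConstraintToProtoConverter;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
    import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
    import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.RACK;
    import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.cardinality;

    public class ProtoRoundTripSketch {
      public static void main(String[] args) {
        // Illustrative constraint: between 3 and 10 allocations per rack.
        PlacementConstraint constraint =
            PlacementConstraints.build(cardinality(RACK, 3, 10));

        // PlacementConstraint -> protobuf, e.g. before shipping it in an RPC.
        PlacementConstraintProto proto =
            new PlacementConstraintToProtoConverter(constraint).convert();

        // protobuf -> PlacementConstraint on the receiving side.
        PlacementConstraint restored =
            new PlacementConstraintFromProtoConverter(proto).convert();
      }
    }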

http://git-wip-us.apache.org/repos/asf/hadoop/blob/276a62d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintTransformations.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintTransformations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintTransformations.java
new file mode 100644
index 0000000..1763735
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintTransformations.java
@@ -0,0 +1,183 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.resource;
+
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.RACK;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.cardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.maxCardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.or;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetCardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.CardinalityConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.Or;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetConstraint;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetConstraint.TargetOperator;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraintTransformations.SingleConstraintTransformer;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraintTransformations.SpecializedConstraintTransformer;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test class for {@link PlacementConstraintTransformations}.
+ */
+public class TestPlacementConstraintTransformations {
+
+  @Test
+  public void testTargetConstraint() {
+    AbstractConstraint sConstraintExpr =
+        targetIn(NODE, allocationTag("hbase-m"));
+    Assert.assertTrue(sConstraintExpr instanceof SingleConstraint);
+    PlacementConstraint sConstraint =
+        PlacementConstraints.build(sConstraintExpr);
+
+    // Transform from SingleConstraint to the specialized TargetConstraint.
+    SpecializedConstraintTransformer specTransformer =
+        new SpecializedConstraintTransformer(sConstraint);
+    PlacementConstraint tConstraint = specTransformer.transform();
+
+    AbstractConstraint tConstraintExpr = tConstraint.getConstraintExpr();
+    Assert.assertTrue(tConstraintExpr instanceof TargetConstraint);
+
+    SingleConstraint single = (SingleConstraint) sConstraintExpr;
+    TargetConstraint target = (TargetConstraint) tConstraintExpr;
+    Assert.assertEquals(single.getScope(), target.getScope());
+    Assert.assertEquals(TargetOperator.IN, target.getOp());
+    Assert.assertEquals(single.getTargetExpressions(),
+        target.getTargetExpressions());
+
+    // Transform from the specialized TargetConstraint back to SingleConstraint.
+    SingleConstraintTransformer singleTransformer =
+        new SingleConstraintTransformer(tConstraint);
+    sConstraint = singleTransformer.transform();
+
+    sConstraintExpr = sConstraint.getConstraintExpr();
+    Assert.assertTrue(sConstraintExpr instanceof SingleConstraint);
+
+    single = (SingleConstraint) sConstraintExpr;
+    Assert.assertEquals(target.getScope(), single.getScope());
+    Assert.assertEquals(1, single.getMinCardinality());
+    Assert.assertEquals(Integer.MAX_VALUE, single.getMaxCardinality());
+    Assert.assertEquals(single.getTargetExpressions(),
+        target.getTargetExpressions());
+  }
+
+  @Test
+  public void testCardinalityConstraint() {
+    AbstractConstraint sConstraintExpr = cardinality(RACK, 3, 10);
+    Assert.assertTrue(sConstraintExpr instanceof SingleConstraint);
+    PlacementConstraint sConstraint =
+        PlacementConstraints.build(sConstraintExpr);
+
+    // Transform from SingleConstraint to the specialized CardinalityConstraint.
+    SpecializedConstraintTransformer specTransformer =
+        new SpecializedConstraintTransformer(sConstraint);
+    PlacementConstraint cConstraint = specTransformer.transform();
+
+    AbstractConstraint cConstraintExpr = cConstraint.getConstraintExpr();
+    Assert.assertTrue(cConstraintExpr instanceof CardinalityConstraint);
+
+    SingleConstraint single = (SingleConstraint) sConstraintExpr;
+    CardinalityConstraint cardinality = (CardinalityConstraint) cConstraintExpr;
+    Assert.assertEquals(single.getScope(), cardinality.getScope());
+    Assert.assertEquals(single.getMinCardinality(),
+        cardinality.getMinCardinality());
+    Assert.assertEquals(single.getMaxCardinality(),
+        cardinality.getMaxCardinality());
+
+    // Transform from the specialized CardinalityConstraint back to SingleConstraint.
+    SingleConstraintTransformer singleTransformer =
+        new SingleConstraintTransformer(cConstraint);
+    sConstraint = singleTransformer.transform();
+
+    sConstraintExpr = sConstraint.getConstraintExpr();
+    Assert.assertTrue(sConstraintExpr instanceof SingleConstraint);
+
+    single = (SingleConstraint) sConstraintExpr;
+    Assert.assertEquals(cardinality.getScope(), single.getScope());
+    Assert.assertEquals(cardinality.getMinCardinality(),
+        single.getMinCardinality());
+    Assert.assertEquals(cardinality.getMaxCardinality(),
+        single.getMaxCardinality());
+    Assert.assertEquals(new HashSet<>(Arrays.asList(PlacementTargets.self())),
+        single.getTargetExpressions());
+  }
+
+  @Test
+  public void testTargetCardinalityConstraint() {
+    AbstractConstraint constraintExpr =
+        targetCardinality(RACK, 3, 10, allocationTag("zk"));
+    Assert.assertTrue(constraintExpr instanceof SingleConstraint);
+    PlacementConstraint constraint = PlacementConstraints.build(constraintExpr);
+
+    // Apply transformation. Should be a no-op.
+    SpecializedConstraintTransformer specTransformer =
+        new SpecializedConstraintTransformer(constraint);
+    PlacementConstraint newConstraint = specTransformer.transform();
+
+    // The constraint expression should be the same.
+    Assert.assertEquals(constraintExpr, newConstraint.getConstraintExpr());
+  }
+
+  @Test
+  public void testCompositeConstraint() {
+    AbstractConstraint constraintExpr =
+        or(targetIn(RACK, allocationTag("spark")), maxCardinality(NODE, 3),
+            targetCardinality(RACK, 2, 10, allocationTag("zk")));
+    Assert.assertTrue(constraintExpr instanceof Or);
+    PlacementConstraint constraint = PlacementConstraints.build(constraintExpr);
+    Or orExpr = (Or) constraintExpr;
+    for (AbstractConstraint child : orExpr.getChildren()) {
+      Assert.assertTrue(child instanceof SingleConstraint);
+    }
+
+    // Apply transformation. Should transform target and cardinality constraints
+    // included in the composite constraint to specialized ones.
+    SpecializedConstraintTransformer specTransformer =
+        new SpecializedConstraintTransformer(constraint);
+    PlacementConstraint specConstraint = specTransformer.transform();
+
+    Or specOrExpr = (Or) specConstraint.getConstraintExpr();
+    List<AbstractConstraint> specChildren = specOrExpr.getChildren();
+    Assert.assertEquals(3, specChildren.size());
+    Assert.assertTrue(specChildren.get(0) instanceof TargetConstraint);
+    Assert.assertTrue(specChildren.get(1) instanceof CardinalityConstraint);
+    Assert.assertTrue(specChildren.get(2) instanceof SingleConstraint);
+
+    // Transform the specialized constraints back to SingleConstraints.
+    SingleConstraintTransformer singleTransformer =
+        new SingleConstraintTransformer(specConstraint);
+    PlacementConstraint simConstraint = singleTransformer.transform();
+    AbstractConstraint simConstraintExpr = simConstraint.getConstraintExpr();
+    Assert.assertTrue(simConstraintExpr instanceof Or);
+    Or simOrExpr = (Or) simConstraintExpr;
+    for (AbstractConstraint child : simOrExpr.getChildren()) {
+      Assert.assertTrue(child instanceof SingleConstraint);
+    }
+  }
+
+}




[09/49] hadoop git commit: Add 2.8.3 release jdiff files.

Posted by as...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f8caf5/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.8.3.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.8.3.xml b/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.8.3.xml
new file mode 100644
index 0000000..bd7e69c
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.8.3.xml
@@ -0,0 +1,38433 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Tue Dec 05 05:00:41 UTC 2017 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop Common 2.8.3"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-common-project/hadoop-common/target/hadoop-annotations.jar:/build/source/hadoop-common-project/hadoop-common/target/jdiff.jar -verbose -classpath /build/source/hadoop-common-project/hadoop-common/target/classes:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.8.3.jar:/usr/lib/jvm/java-7-openjdk-amd64/lib/tools.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/commons-net/commons-
 net/3.1/commons-net-3.1.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/maven/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/maven/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/maven/org/mortbay/jetty/jetty-sslengine/6.1.26/jetty-sslengine-6.1.26.jar:/maven/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/maven/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/maven/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/maven/javax/activation/activation/1.1/activation-1.1.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/maven/asm/asm/3.2/
 asm-3.2.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/maven/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/maven/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/maven/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/maven/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/maven/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/maven/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy
 -java/1.0.4.1/snappy-java-1.0.4.1.jar:/maven/org/apache/ant/ant/1.8.1/ant-1.8.1.jar:/maven/org/apache/ant/ant-launcher/1.8.1/ant-launcher-1.8.1.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.8.3.jar:/maven/com/nimbusds/nimbus-jose-jwt/3.9/nimbus-jose-jwt-3.9.jar:/maven/net/jcip/jcip-annotations/1.0/jcip-annotations-1.0.jar:/maven/net/minidev/json-smart/1.1.1/json-smart-1.1.1.jar:/maven/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/maven/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/maven/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/maven/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/maven/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/maven/org/apache/directory/api/api-
 i18n/1.0.0-M20/api-i18n-1.0.0-M20.jar:/maven/org/apache/directory/api/api-ldap-model/1.0.0-M20/api-ldap-model-1.0.0-M20.jar:/maven/org/apache/mina/mina-core/2.0.0-M5/mina-core-2.0.0-M5.jar:/maven/net/sf/ehcache/ehcache-core/2.4.4/ehcache-core-2.4.4.jar:/maven/antlr/antlr/2.7.7/antlr-2.7.7.jar:/maven/org/apache/directory/api/api-asn1-ber/1.0.0-M20/api-asn1-ber-1.0.0-M20.jar:/maven/com/jcraft/jsch/0.1.54/jsch-0.1.54.jar:/maven/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/maven/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/htrace/htrace-core4/4.0.1-incubating/htrace-core4-4.0.1-incubating.jar:/maven/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/maven/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar -sourcepath /build/source/hadoop-common-proje
 ct/hadoop-common/src/main/java -apidir /build/source/hadoop-common-project/hadoop-common/target/site/jdiff/xml -apiname Apache Hadoop Common 2.8.3 -->
+<package name="org.apache.hadoop">
+  <!-- start class org.apache.hadoop.HadoopIllegalArgumentException -->
+  <class name="HadoopIllegalArgumentException" extends="java.lang.IllegalArgumentException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="HadoopIllegalArgumentException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs an exception with the specified detail message.
+ @param message detailed message.]]>
+      </doc>
+    </constructor>
+    <doc>
+    <![CDATA[Indicates that a method has been passed an illegal or invalid argument. This
+ exception is thrown instead of IllegalArgumentException to differentiate
+ exceptions thrown by the Hadoop implementation from those thrown by the JDK.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.HadoopIllegalArgumentException -->
+</package>
+<package name="org.apache.hadoop.conf">
+  <!-- start interface org.apache.hadoop.conf.Configurable -->
+  <interface name="Configurable"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Set the configuration to be used by this object.]]>
+      </doc>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the configuration used by this object.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Something that may be configured with a {@link Configuration}.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.conf.Configurable -->
+  <!-- start class org.apache.hadoop.conf.Configuration -->
+  <class name="Configuration" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Iterable"/>
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[A new configuration.]]>
+      </doc>
+    </constructor>
+    <constructor name="Configuration" type="boolean"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[A new configuration where the behavior of reading from the default 
+ resources can be turned off.
+ 
+ If the parameter {@code loadDefaults} is false, the new instance
+ will not load resources from the default files. 
+ @param loadDefaults specifies whether to load from the default files]]>
+      </doc>
+    </constructor>
+    <constructor name="Configuration" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[A new configuration with the same settings cloned from another.
+ 
+ @param other the configuration from which to clone settings.]]>
+      </doc>
+    </constructor>
+    <method name="addDeprecations"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="deltas" type="org.apache.hadoop.conf.Configuration.DeprecationDelta[]"/>
+      <doc>
+      <![CDATA[Adds a set of deprecated keys to the global deprecations.
+
+ This method is lockless.  It works by means of creating a new
+ DeprecationContext based on the old one, and then atomically swapping in
+ the new context.  If someone else updated the context in between us reading
+ the old context and swapping in the new one, we try again until we win the
+ race.
+
+ @param deltas   The deprecations to add.]]>
+      </doc>
+    </method>
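+<!-- A minimal usage sketch for the bulk deprecation API described above.
+     The key names are illustrative, not real Hadoop keys, and the public
+     DeprecationDelta(String, String) constructor is assumed:
+
+       Configuration.addDeprecations(new Configuration.DeprecationDelta[] {
+           new Configuration.DeprecationDelta("old.key.a", "new.key.a"),
+           new Configuration.DeprecationDelta("old.key.b", "new.key.b")
+       });
+-->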
+    <method name="addDeprecation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="use {@link #addDeprecation(String key, String newKey,
+      String customMessage)} instead">
+      <param name="key" type="java.lang.String"/>
+      <param name="newKeys" type="java.lang.String[]"/>
+      <param name="customMessage" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Adds the deprecated key to the global deprecation map.
+ It does not override any existing entries in the deprecation map.
+ This is to be used only by developers to add deprecations of
+ keys; attempting to call this method after resources have been loaded
+ once leads to an <tt>UnsupportedOperationException</tt>
+ 
+ If a key is deprecated in favor of multiple keys, they are all treated as 
+ aliases of each other, and setting any one of them resets all the others 
+ to the new value.
+
+ If you have multiple deprecation entries to add, it is more efficient to
+ use #addDeprecations(DeprecationDelta[] deltas) instead.
+ 
+ @param key
+ @param newKeys
+ @param customMessage
+ @deprecated use {@link #addDeprecation(String key, String newKey,
+      String customMessage)} instead]]>
+      </doc>
+    </method>
+    <method name="addDeprecation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="newKey" type="java.lang.String"/>
+      <param name="customMessage" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Adds the deprecated key to the global deprecation map.
+ It does not override any existing entries in the deprecation map.
+ This is to be used only by developers to add deprecations of
+ keys; attempting to call this method after resources have been loaded
+ once leads to an <tt>UnsupportedOperationException</tt>
+ 
+ If you have multiple deprecation entries to add, it is more efficient to
+ use #addDeprecations(DeprecationDelta[] deltas) instead.
+
+ @param key
+ @param newKey
+ @param customMessage]]>
+      </doc>
+    </method>
+    <method name="addDeprecation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="use {@link #addDeprecation(String key, String newKey)} instead">
+      <param name="key" type="java.lang.String"/>
+      <param name="newKeys" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Adds the deprecated key to the global deprecation map when no custom
+ message is provided.
+ It does not override any existing entries in the deprecation map.
+ This is to be used only by developers to add deprecations of
+ keys; attempting to call this method after resources have been loaded
+ once leads to an <tt>UnsupportedOperationException</tt>
+ 
+ If a key is deprecated in favor of multiple keys, they are all treated as 
+ aliases of each other, and setting any one of them resets all the others 
+ to the new value.
+ 
+ If you have multiple deprecation entries to add, it is more efficient to
+ use #addDeprecations(DeprecationDelta[] deltas) instead.
+
+ @param key Key that is to be deprecated
+ @param newKeys list of keys that take up the values of deprecated key
+ @deprecated use {@link #addDeprecation(String key, String newKey)} instead]]>
+      </doc>
+    </method>
+    <method name="addDeprecation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="newKey" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Adds the deprecated key to the global deprecation map when no custom
+ message is provided.
+ It does not override any existing entries in the deprecation map.
+ This is to be used only by developers to add deprecations of
+ keys; attempting to call this method after resources have been loaded
+ once leads to an <tt>UnsupportedOperationException</tt>
+ 
+ If you have multiple deprecation entries to add, it is more efficient to
+ use #addDeprecations(DeprecationDelta[] deltas) instead.
+
+ @param key Key that is to be deprecated
+ @param newKey key that takes up the value of deprecated key]]>
+      </doc>
+    </method>
+    <method name="isDeprecated" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <doc>
+      <![CDATA[checks whether the given <code>key</code> is deprecated.
+ 
+ @param key the parameter which is to be checked for deprecation
+ @return <code>true</code> if the key is deprecated and 
+         <code>false</code> otherwise.]]>
+      </doc>
+    </method>
+    <method name="setDeprecatedProperties"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Sets all deprecated properties that are not currently set but have a
+ corresponding new property that is set. Useful for iterating the
+ properties when all deprecated properties for currently set properties
+ need to be present.]]>
+      </doc>
+    </method>
+    <method name="reloadExistingConfigurations"
+      abstract="false" native="false" synchronized="true"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Reload existing configuration instances.]]>
+      </doc>
+    </method>
+    <method name="addDefaultResource"
+      abstract="false" native="false" synchronized="true"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add a default resource. Resources are loaded in the order of the resources 
+ added.
+ @param name file name. File should be present in the classpath.]]>
+      </doc>
+    </method>
+    <method name="setRestrictSystemPropertiesDefault"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="boolean"/>
+    </method>
+    <method name="setRestrictSystemProperties"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="boolean"/>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add a configuration resource. 
+ 
+ The properties of this resource will override properties of previously 
+ added resources, unless they were marked <a href="#Final">final</a>. 
+ 
+ @param name resource to be added, the classpath is examined for a file 
+             with that name.]]>
+      </doc>
+    </method>
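+<!-- A sketch of the resource layering described above; the file names are
+     illustrative:
+
+       Configuration conf = new Configuration();
+       conf.addResource("my-app-default.xml"); // loaded first
+       conf.addResource("my-app-site.xml");    // overrides non-final keys
+-->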
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="restrictedParser" type="boolean"/>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.net.URL"/>
+      <doc>
+      <![CDATA[Add a configuration resource. 
+ 
+ The properties of this resource will override properties of previously 
+ added resources, unless they were marked <a href="#Final">final</a>. 
+ 
+ @param url url of the resource to be added, the local filesystem is 
+            examined directly to find the resource, without referring to 
+            the classpath.]]>
+      </doc>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.net.URL"/>
+      <param name="restrictedParser" type="boolean"/>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Add a configuration resource. 
+ 
+ The properties of this resource will override properties of previously 
+ added resources, unless they were marked <a href="#Final">final</a>. 
+ 
+ @param file file-path of resource to be added, the local filesystem is
+             examined directly to find the resource, without referring to 
+             the classpath.]]>
+      </doc>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <param name="restrictedParser" type="boolean"/>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <doc>
+      <![CDATA[Add a configuration resource. 
+ 
+ The properties of this resource will override properties of previously 
+ added resources, unless they were marked <a href="#Final">final</a>. 
+ 
+ WARNING: The contents of the InputStream will be cached by this method,
+ so use it sparingly because it increases memory consumption.
+ 
+ @param in InputStream to deserialize the object from. It will be read
+ the next time a get or set is called, and the stream will be closed
+ after it has been read.]]>
+      </doc>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <param name="restrictedParser" type="boolean"/>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add a configuration resource. 
+ 
+ The properties of this resource will override properties of previously 
+ added resources, unless they were marked <a href="#Final">final</a>. 
+ 
+ @param in InputStream to deserialize the object from.
+ @param name the name of the resource, since InputStream.toString is not
+ very descriptive sometimes.]]>
+      </doc>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="restrictedParser" type="boolean"/>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param conf Configuration object from which to load properties]]>
+      </doc>
+    </method>
+    <method name="reloadConfiguration"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Reload configuration from previously added resources.
+
+ This method will clear all the configuration read from the added 
+ resources, and final parameters. This will make the resources to 
+ be read again before accessing the values. Values that are added
+ via set methods will overlay values read from the resources.]]>
+      </doc>
+    </method>
+    <method name="get" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property, <code>null</code> if
+ no such property exists. If the key is deprecated, it returns the value of
+ the first key which replaces the deprecated key and is not null.
+ 
+ Values are processed for <a href="#VariableExpansion">variable expansion</a> 
+ before being returned. 
+ 
+ @param name the property name, will be trimmed before get value.
+ @return the value of the <code>name</code> or its replacing property, 
+         or null if no such property exists.]]>
+      </doc>
+    </method>
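+<!-- A sketch of the variable expansion mentioned above, with illustrative
+     keys:
+
+       Configuration conf = new Configuration();
+       conf.set("base.dir", "/data");
+       conf.set("log.dir", "${base.dir}/logs");
+       conf.get("log.dir"); // returns "/data/logs"
+-->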
+    <method name="setAllowNullValueProperties"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="boolean"/>
+      <doc>
+      <![CDATA[Set Configuration to allow keys without values during setup.  Intended
+ for use during testing.
+
+ @param val If true, will allow Configuration to store keys without values]]>
+      </doc>
+    </method>
+    <method name="setRestrictSystemProps"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="boolean"/>
+    </method>
+    <method name="onlyKeyExists" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Return existence of the <code>name</code> property, but only for
+ names which have no valid value, usually non-existent or commented
+ out in XML.
+
+ @param name the property name
+ @return true if the property <code>name</code> exists without value]]>
+      </doc>
+    </method>
+    <method name="getTrimmed" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a trimmed <code>String</code>, 
+ <code>null</code> if no such property exists. 
+ If the key is deprecated, it returns the value of
+ the first key which replaces the deprecated key and is not null
+ 
+ Values are processed for <a href="#VariableExpansion">variable expansion</a> 
+ before being returned. 
+ 
+ @param name the property name.
+ @return the value of the <code>name</code> or its replacing property, 
+         or null if no such property exists.]]>
+      </doc>
+    </method>
+    <method name="getTrimmed" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a trimmed <code>String</code>, 
+ <code>defaultValue</code> if no such property exists. 
+ See @{Configuration#getTrimmed} for more details.
+ 
+ @param name          the property name.
+ @param defaultValue  the property default value.
+ @return              the value of the <code>name</code> or defaultValue
+                      if it is not set.]]>
+      </doc>
+    </method>
+    <method name="getRaw" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property, without doing
+ <a href="#VariableExpansion">variable expansion</a>. If the key is 
+ deprecated, it returns the value of the first key which replaces 
+ the deprecated key and is not null.
+ 
+ @param name the property name.
+ @return the value of the <code>name</code> property or 
+         its replacing property and null if no such property exists.]]>
+      </doc>
+    </method>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the <code>value</code> of the <code>name</code> property. If 
+ <code>name</code> is deprecated or there is a deprecated name associated to it,
+ it sets the value to both names. Name will be trimmed before put into
+ configuration.
+ 
+ @param name property name.
+ @param value property value.]]>
+      </doc>
+    </method>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="java.lang.String"/>
+      <param name="source" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the <code>value</code> of the <code>name</code> property. If 
+ <code>name</code> is deprecated, it also sets the <code>value</code> to
+ the keys that replace the deprecated key. Name will be trimmed before put
+ into configuration.
+
+ @param name property name.
+ @param value property value.
+ @param source the place that this configuration value came from 
+ (For debugging).
+ @throws IllegalArgumentException when the value or name is null.]]>
+      </doc>
+    </method>
+    <method name="unset"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Unset a previously set property.]]>
+      </doc>
+    </method>
+    <method name="setIfUnset"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Sets a property if it is currently unset.
+ @param name the property name
+ @param value the new value]]>
+      </doc>
+    </method>
+    <method name="get" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property. If the key is deprecated,
+ it returns the value of the first key which replaces the deprecated key
+ and is not null.
+ If no such property exists,
+ then <code>defaultValue</code> is returned.
+ 
+ @param name property name, will be trimmed before get value.
+ @param defaultValue default value.
+ @return property value, or <code>defaultValue</code> if the property 
+         doesn't exist.]]>
+      </doc>
+    </method>
+    <method name="getInt" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="int"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as an <code>int</code>.
+   
+ If no such property exists, the provided default value is returned,
+ or if the specified value is not a valid <code>int</code>,
+ then an error is thrown.
+ 
+ @param name property name.
+ @param defaultValue default value.
+ @throws NumberFormatException when the value is invalid
+ @return property value as an <code>int</code>, 
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
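+    <!-- Sketch of the int accessors (the key and values are assumptions):
+           conf.setInt("my.app.retries", 3);
+           int retries = conf.getInt("my.app.retries", 1); // returns 3
+           // a malformed stored value would raise NumberFormatException
+    -->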
+    <method name="getInts" return="int[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a set of comma-delimited
+ <code>int</code> values.
+ 
+ If no such property exists, an empty array is returned.
+ 
+ @param name property name
+ @return property value interpreted as an array of comma-delimited
+         <code>int</code> values]]>
+      </doc>
+    </method>
+    <method name="setInt"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="int"/>
+      <doc>
+      <![CDATA[Set the value of the <code>name</code> property to an <code>int</code>.
+ 
+ @param name property name.
+ @param value <code>int</code> value of the property.]]>
+      </doc>
+    </method>
+    <method name="getLong" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="long"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>long</code>.  
+ If no such property exists, the provided default value is returned,
+ or if the specified value is not a valid <code>long</code>,
+ then an error is thrown.
+ 
+ @param name property name.
+ @param defaultValue default value.
+ @throws NumberFormatException when the value is invalid
+ @return property value as a <code>long</code>, 
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
+    <method name="getLongBytes" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="long"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>long</code> or
+ human readable format. If no such property exists, the provided default
+ value is returned, or if the specified value is not a valid
+ <code>long</code> or human readable format, then an error is thrown. You
+ can use the following suffixes (case insensitive): k (kilo), m (mega),
+ g (giga), t (tera), p (peta), e (exa).
+
+ @param name property name.
+ @param defaultValue default value.
+ @throws NumberFormatException when the value is invalid
+ @return property value as a <code>long</code>,
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
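+    <!-- Sketch: human readable size suffixes with getLongBytes (the key and
+         value are illustrative):
+           conf.set("my.app.buffer.size", "128m");
+           long bytes = conf.getLongBytes("my.app.buffer.size", 4096);
+           // bytes == 128L * 1024 * 1024
+    -->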
+    <method name="setLong"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="long"/>
+      <doc>
+      <![CDATA[Set the value of the <code>name</code> property to a <code>long</code>.
+ 
+ @param name property name.
+ @param value <code>long</code> value of the property.]]>
+      </doc>
+    </method>
+    <method name="getFloat" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="float"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>float</code>.  
+ If no such property exists, the provided default value is returned,
+ or if the specified value is not a valid <code>float</code>,
+ then an error is thrown.
+
+ @param name property name.
+ @param defaultValue default value.
+ @throws NumberFormatException when the value is invalid
+ @return property value as a <code>float</code>, 
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
+    <method name="setFloat"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="float"/>
+      <doc>
+      <![CDATA[Set the value of the <code>name</code> property to a <code>float</code>.
+ 
+ @param name property name.
+ @param value property value.]]>
+      </doc>
+    </method>
+    <method name="getDouble" return="double"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="double"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>double</code>.  
+ If no such property exists, the provided default value is returned,
+ or if the specified value is not a valid <code>double</code>,
+ then an error is thrown.
+
+ @param name property name.
+ @param defaultValue default value.
+ @throws NumberFormatException when the value is invalid
+ @return property value as a <code>double</code>, 
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
+    <method name="setDouble"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="double"/>
+      <doc>
+      <![CDATA[Set the value of the <code>name</code> property to a <code>double</code>.
+ 
+ @param name property name.
+ @param value property value.]]>
+      </doc>
+    </method>
+    <method name="getBoolean" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="boolean"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>boolean</code>.  
+ If no such property is specified, or if the specified value is not a valid
+ <code>boolean</code>, then <code>defaultValue</code> is returned.
+ 
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>boolean</code>, 
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
+    <method name="setBoolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="boolean"/>
+      <doc>
+      <![CDATA[Set the value of the <code>name</code> property to a <code>boolean</code>.
+ 
+ @param name property name.
+ @param value <code>boolean</code> value of the property.]]>
+      </doc>
+    </method>
+    <method name="setBooleanIfUnset"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="boolean"/>
+      <doc>
+      <![CDATA[Set the given property, if it is currently unset.
+ @param name property name
+ @param value new value]]>
+      </doc>
+    </method>
+    <method name="setEnum"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="T"/>
+      <doc>
+      <![CDATA[Set the value of the <code>name</code> property to the given type. This
+ is equivalent to <code>set(&lt;name&gt;, value.toString())</code>.
+ @param name property name
+ @param value new value]]>
+      </doc>
+    </method>
+    <method name="getEnum" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="T"/>
+      <doc>
+      <![CDATA[Return value matching this enumerated type.
+ Note that the returned value is trimmed by this method.
+ @param name Property name
+ @param defaultValue Value returned if no mapping exists
+ @throws IllegalArgumentException If mapping is illegal for the type
+ provided]]>
+      </doc>
+    </method>
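+    <!-- Sketch of the enum accessors, assuming a caller-defined enum type:
+           enum Mode { FAST, SAFE }
+           conf.setEnum("my.app.mode", Mode.FAST);   // stores "FAST"
+           Mode m = conf.getEnum("my.app.mode", Mode.SAFE);
+    -->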
+    <method name="setTimeDuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="long"/>
+      <param name="unit" type="java.util.concurrent.TimeUnit"/>
+      <doc>
+      <![CDATA[Set the value of <code>name</code> to the given time duration. This
+ is equivalent to <code>set(&lt;name&gt;, value + &lt;time suffix&gt;)</code>.
+ @param name Property name
+ @param value Time duration
+ @param unit Unit of time]]>
+      </doc>
+    </method>
+    <method name="getTimeDuration" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="long"/>
+      <param name="unit" type="java.util.concurrent.TimeUnit"/>
+      <doc>
+      <![CDATA[Return time duration in the given time unit. Valid units are encoded in
+ properties as suffixes: nanoseconds (ns), microseconds (us), milliseconds
+ (ms), seconds (s), minutes (m), hours (h), and days (d).
+ @param name Property name
+ @param defaultValue Value returned if no mapping exists.
+ @param unit Unit to convert the stored property, if it exists.
+ @throws NumberFormatException If the property stripped of its unit is not
+         a number]]>
+      </doc>
+    </method>
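+    <!-- Sketch: time durations with unit suffixes (the key is an assumption;
+         TimeUnit is java.util.concurrent.TimeUnit):
+           conf.set("my.app.timeout", "30s");
+           long ms = conf.getTimeDuration("my.app.timeout", 1000,
+               TimeUnit.MILLISECONDS);               // ms == 30000
+    -->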
+    <method name="getTimeDurations" return="long[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="unit" type="java.util.concurrent.TimeUnit"/>
+    </method>
+    <method name="getPattern" return="java.util.regex.Pattern"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.util.regex.Pattern"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>Pattern</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>Pattern</code>, then <code>defaultValue</code> is returned.
+ Note that the returned value is NOT trimmed by this method.
+
+ @param name property name
+ @param defaultValue default value
+ @return property value as a compiled Pattern, or defaultValue]]>
+      </doc>
+    </method>
+    <method name="setPattern"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="pattern" type="java.util.regex.Pattern"/>
+      <doc>
+      <![CDATA[Set the given property to a <code>Pattern</code>.
+ If the pattern passed is null, the empty pattern is set, which results in
+ further calls to getPattern(...) returning the default value.
+
+ @param name property name
+ @param pattern new value]]>
+      </doc>
+    </method>
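+    <!-- Sketch for the Pattern accessors (the key and patterns are
+         illustrative assumptions):
+           conf.setPattern("my.app.include", Pattern.compile("part-.*"));
+           Pattern p = conf.getPattern("my.app.include",
+               Pattern.compile(".*"));
+    -->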
+    <method name="getPropertySources" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Gets information about why a property was set.  Typically this is the 
+ path to the resource objects (file, URL, etc.) the property came from, but
+ it can also indicate that it was set programmatically, or because of the
+ command line.
+
+ @param name - The property name to get the source of.
+ @return null - If the property or its source wasn't found. Otherwise, 
+ returns a list of the sources of the resource.  The older sources are
+ the first ones in the list.  So for example if a configuration is set from
+ the command line, and then written out to a file that is read back in the
+ first entry would indicate that it was set from the command line, while
+ the second one would indicate the file that the new configuration was read
+ in from.]]>
+      </doc>
+    </method>
+    <method name="getRange" return="org.apache.hadoop.conf.Configuration.IntegerRanges"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Parse the given attribute as a set of integer ranges
+ @param name the attribute name
+ @param defaultValue the default value if it is not set
+ @return a new set of ranges from the configured value]]>
+      </doc>
+    </method>
+    <method name="getStringCollection" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the comma delimited values of the <code>name</code> property as 
+ a collection of <code>String</code>s.  
+ If no such property is specified, then an empty collection is returned.
+ <p>
+ This is an optimized version of {@link #getStrings(String)}
+ 
+ @param name property name.
+ @return property value as a collection of <code>String</code>s.]]>
+      </doc>
+    </method>
+    <method name="getStrings" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the comma delimited values of the <code>name</code> property as 
+ an array of <code>String</code>s.  
+ If no such property is specified then <code>null</code> is returned.
+ 
+ @param name property name.
+ @return property value as an array of <code>String</code>s, 
+         or <code>null</code>.]]>
+      </doc>
+    </method>
+    <method name="getStrings" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Get the comma delimited values of the <code>name</code> property as 
+ an array of <code>String</code>s.  
+ If no such property is specified, then the default value is returned.
+ 
+ @param name property name.
+ @param defaultValue The default value
+ @return property value as an array of <code>String</code>s, 
+         or default value.]]>
+      </doc>
+    </method>
+    <method name="getTrimmedStringCollection" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the comma delimited values of the <code>name</code> property as 
+ a collection of <code>String</code>s, trimmed of the leading and trailing whitespace.  
+ If no such property is specified, then an empty <code>Collection</code> is returned.
+
+ @param name property name.
+ @return property value as a collection of <code>String</code>s, or empty <code>Collection</code>]]>
+      </doc>
+    </method>
+    <method name="getTrimmedStrings" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the comma delimited values of the <code>name</code> property as 
+ an array of <code>String</code>s, trimmed of the leading and trailing whitespace.
+ If no such property is specified then an empty array is returned.
+ 
+ @param name property name.
+ @return property value as an array of trimmed <code>String</code>s, 
+         or empty array.]]>
+      </doc>
+    </method>
+    <method name="getTrimmedStrings" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Get the comma delimited values of the <code>name</code> property as 
+ an array of <code>String</code>s, trimmed of the leading and trailing whitespace.
+ If no such property is specified, then the default value is returned.
+ 
+ @param name property name.
+ @param defaultValue The default value
+ @return property value as an array of trimmed <code>String</code>s, 
+         or default value.]]>
+      </doc>
+    </method>
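+    <!-- Sketch: parsing a comma delimited list via getTrimmedStrings (the key
+         and values are assumptions):
+           conf.set("my.app.hosts", " host1 , host2 ,host3 ");
+           String[] hosts = conf.getTrimmedStrings("my.app.hosts");
+           // hosts == {"host1", "host2", "host3"}
+    -->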
+    <method name="setStrings"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="values" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Set the array of string values for the <code>name</code> property
+ as comma delimited values.
+ 
+ @param name property name.
+ @param values The values]]>
+      </doc>
+    </method>
+    <method name="getPassword" return="char[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the value for a known password configuration element.
+ In order to enable the elimination of clear text passwords in config,
+ this method attempts to resolve the property name as an alias through
+ the CredentialProvider API and conditionally falls back to the config.
+ @param name property name
+ @return password]]>
+      </doc>
+    </method>
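+    <!-- Sketch of credential resolution via getPassword; the alias below is
+         hypothetical, and whether a CredentialProvider is consulted depends
+         on the hadoop.security.credential.provider.path setting:
+           char[] secret = conf.getPassword("my.app.db.password");
+           // falls back to a clear text config value if no provider has the alias
+    -->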
+    <method name="getPasswordFromCredentialProviders" return="char[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Try and resolve the provided element name as a credential provider
+ alias.
+ @param name alias of the provisioned credential
+ @return password or null if not found
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getPasswordFromConfig" return="char[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Fall back to clear text passwords in the configuration.
+ @param name property name
+ @return clear text password, or null]]>
+      </doc>
+    </method>
+    <method name="getSocketAddr" return="java.net.InetSocketAddress"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="hostProperty" type="java.lang.String"/>
+      <param name="addressProperty" type="java.lang.String"/>
+      <param name="defaultAddressValue" type="java.lang.String"/>
+      <param name="defaultPort" type="int"/>
+      <doc>
+      <![CDATA[Get the socket address for <code>hostProperty</code> as an
+ <code>InetSocketAddress</code>. If <code>hostProperty</code> is
+ <code>null</code>, <code>addressProperty</code> will be used. This
+ is useful for cases where we want to differentiate between host
+ bind address and address clients should use to establish connection.
+
+ @param hostProperty bind host property name.
+ @param addressProperty address property name.
+ @param defaultAddressValue the default value
+ @param defaultPort the default port
+ @return InetSocketAddress]]>
+      </doc>
+    </method>
+    <method name="getSocketAddr" return="java.net.InetSocketAddress"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultAddress" type="java.lang.String"/>
+      <param name="defaultPort" type="int"/>
+      <doc>
+      <![CDATA[Get the socket address for the <code>name</code> property as an
+ <code>InetSocketAddress</code>.
+ @param name property name.
+ @param defaultAddress the default value
+ @param defaultPort the default port
+ @return InetSocketAddress]]>
+      </doc>
+    </method>
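+    <!-- Sketch: parsing a host:port value (the key and default address are
+         assumptions):
+           InetSocketAddress addr =
+               conf.getSocketAddr("my.app.rpc.address", "0.0.0.0:8020", 8020);
+    -->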
+    <method name="setSocketAddr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetSocketAddress"/>
+      <doc>
+      <![CDATA[Set the socket address for the <code>name</code> property as
+ a <code>host:port</code>.]]>
+      </doc>
+    </method>
+    <method name="updateConnectAddr" return="java.net.InetSocketAddress"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="hostProperty" type="java.lang.String"/>
+      <param name="addressProperty" type="java.lang.String"/>
+      <param name="defaultAddressValue" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetSocketAddress"/>
+      <doc>
+      <![CDATA[Set the socket address a client can use to connect for the
+ <code>name</code> property as a <code>host:port</code>.  The wildcard
+ address is replaced with the local host's address. If the host and address
+ properties are configured, the host component of the address will be combined
+ with the port component of the addr to generate the address. This allows
+ optional control over which host name is used in multi-home bind-host
+ cases where a host can have multiple names.
+ @param hostProperty the bind-host configuration name
+ @param addressProperty the service address configuration name
+ @param defaultAddressValue the service default address configuration value
+ @param addr InetSocketAddress of the service listener
+ @return InetSocketAddress for clients to connect]]>
+      </doc>
+    </method>
+    <method name="updateConnectAddr" return="java.net.InetSocketAddress"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetSocketAddress"/>
+      <doc>
+      <![CDATA[Set the socket address a client can use to connect for the
+ <code>name</code> property as a <code>host:port</code>.  The wildcard
+ address is replaced with the local host's address.
+ @param name property name.
+ @param addr InetSocketAddress of a listener to store in the given property
+ @return InetSocketAddress for clients to connect]]>
+      </doc>
+    </method>
+    <method name="getClassByName" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+      <doc>
+      <![CDATA[Load a class by name.
+ 
+ @param name the class name.
+ @return the class object.
+ @throws ClassNotFoundException if the class is not found.]]>
+      </doc>
+    </method>
+    <method name="getClassByNameOrNull" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Load a class by name, returning null rather than throwing an exception
+ if it couldn't be loaded. This is to avoid the overhead of creating
+ an exception.
+ 
+ @param name the class name
+ @return the class object, or null if it could not be found.]]>
+      </doc>
+    </method>
+    <method name="getClasses" return="java.lang.Class[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.Class[]"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property
+ as an array of <code>Class</code>.
+ The value of the property specifies a list of comma separated class names.  
+ If no such property is specified, then <code>defaultValue</code> is 
+ returned.
+ 
+ @param name the property name.
+ @param defaultValue default value.
+ @return property value as a <code>Class[]</code>, 
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
+    <method name="getClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>Class</code>.  
+ If no such property is specified, then <code>defaultValue</code> is 
+ returned.
+ 
+ @param name the class name.
+ @param defaultValue default value.
+ @return property value as a <code>Class</code>, 
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
+    <method name="getClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.Class"/>
+      <param name="xface" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>Class</code>
+ implementing the interface specified by <code>xface</code>.
+   
+ If no such property is specified, then <code>defaultValue</code> is 
+ returned.
+ 
+ An exception is thrown if the returned class does not implement the named
+ interface. 
+ 
+ @param name the class name.
+ @param defaultValue default value.
+ @param xface the interface implemented by the named class.
+ @return property value as a <code>Class</code>, 
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
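+    <!-- Sketch: loading a pluggable implementation by class name; the key,
+         the Codec interface, and DefaultCodec are illustrative assumptions,
+         while ReflectionUtils is org.apache.hadoop.util.ReflectionUtils:
+           Class<? extends Codec> cls =
+               conf.getClass("my.app.codec.impl", DefaultCodec.class, Codec.class);
+           Codec codec = ReflectionUtils.newInstance(cls, conf);
+    -->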
+    <method name="getInstances" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="xface" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>List</code>
+ of objects implementing the interface specified by <code>xface</code>.
+ 
+ An exception is thrown if any of the classes does not exist, or if it does
+ not implement the named interface.
+ 
+ @param name the property name.
+ @param xface the interface implemented by the classes named by
+        <code>name</code>.
+ @return a <code>List</code> of objects implementing <code>xface</code>.]]>
+      </doc>
+    </method>
+    <method name="setClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="theClass" type="java.lang.Class"/>
+      <param name="xface" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the value of the <code>name</code> property to the name of
+ <code>theClass</code>, which must implement the given interface <code>xface</code>.
+ 
+ An exception is thrown if <code>theClass</code> does not implement the 
+ interface <code>xface</code>. 
+ 
+ @param name property name.
+ @param theClass property value.
+ @param xface the interface implemented by the named class.]]>
+      </doc>
+    </method>
+    <method name="getLocalPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dirsProp" type="java.lang.String"/>
+      <param name="path" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get a local file under a directory named by <i>dirsProp</i> with
+ the given <i>path</i>.  If <i>dirsProp</i> contains multiple directories,
+ then one is chosen based on <i>path</i>'s hash code.  If the selected
+ directory does not exist, an attempt is made to create it.
+ 
+ @param dirsProp directory in which to locate the file.
+ @param path file-path.
+ @return local file under the directory with the given path.]]>
+      </doc>
+    </method>
+    <method name="getFile" return="java.io.File"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dirsProp" type="java.lang.String"/>
+      <param name="path" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get a local file name under a directory named in <i>dirsProp</i> with
+ the given <i>path</i>.  If <i>dirsProp</i> contains multiple directories,
+ then one is chosen based on <i>path</i>'s hash code.  If the selected
+ directory does not exist, an attempt is made to create it.
+ 
+ @param dirsProp directory in which to locate the file.
+ @param path file-path.
+ @return local file under the directory with the given path.]]>
+      </doc>
+    </method>
+    <method name="getResource" return="java.net.URL"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the {@link URL} for the named resource.
+ 
+ @param name resource name.
+ @return the url for the named resource.]]>
+      </doc>
+    </method>
+    <method name="getConfResourceAsInputStream" return="java.io.InputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get an input stream attached to the configuration resource with the
+ given <code>name</code>.
+ 
+ @param name configuration resource name.
+ @return an input stream attached to the resource.]]>
+      </doc>
+    </method>
+    <method name="getConfResourceAsReader" return="java.io.Reader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get a {@link Reader} attached to the configuration resource with the
+ given <code>name</code>.
+ 
+ @param name configuration resource name.
+ @return a reader attached to the resource.]]>
+      </doc>
+    </method>
+    <method name="getFinalParameters" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the set of parameters marked final.
+
+ @return final parameter set.]]>
+      </doc>
+    </method>
+    <method name="getProps" return="java.util.Properties"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="size" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the number of keys in the configuration.
+
+ @return number of keys in the configuration.]]>
+      </doc>
+    </method>
+    <method name="clear"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Clears all keys from the configuration.]]>
+      </doc>
+    </method>
+    <method name="iterator" return="java.util.Iterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get an {@link Iterator} to go through the list of <code>String</code> 
+ key-value pairs in the configuration.
+ 
+ @return an iterator over the entries.]]>
+      </doc>
+    </method>
+    <method name="getPropsWithPrefix" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="confPrefix" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Constructs a mapping that includes all configuration properties
+ that start with the specified configuration prefix. Property names in the
+ mapping are trimmed to remove the configuration prefix.
+
+ @param confPrefix configuration prefix
+ @return mapping of configuration properties with prefix stripped]]>
+      </doc>
+    </method>
+    <method name="writeXml"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.OutputStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Write out the non-default properties in this configuration to the given
+ {@link OutputStream} using UTF-8 encoding.
+ 
+ @param out the output stream to write to.]]>
+      </doc>
+    </method>
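+    <!-- Sketch: persisting the non-default properties as XML (the output path
+         is an assumption):
+           try (OutputStream out = new FileOutputStream("/tmp/my-conf.xml")) {
+             conf.writeXml(out);
+           }
+    -->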
+    <method name="writeXml"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.Writer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="writeXml"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="propertyName" type="java.lang.String"/>
+      <param name="out" type="java.io.Writer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+      <doc>
+      <![CDATA[Write out the non-default properties in this configuration to the
+ given {@link Writer}.
+
+ <ul>
+ <li>
+ When property name is not empty and the property exists in the
+ configuration, this method writes the property and its attributes
+ to the {@link Writer}.
+ </li>
+
+ <li>
+ When property name is null or empty, this method writes all the
+ configuration properties and their attributes to the {@link Writer}.
+ </li>
+
+ <li>
+ When property name is not empty but the property doesn't exist in
+ the configuration, this method throws an {@link IllegalArgumentException}.
+ </li>
+ </ul>
+ @param out the writer to write to.]]>
+      </doc>
+    </method>
+    <method name="dumpConfiguration"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="config" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="propertyName" type="java.lang.String"/>
+      <param name="out" type="java.io.Writer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Writes properties and their attributes (final and resource)
+  to the given {@link Writer}.
+
+  <ul>
+  <li>
+  When propertyName is not empty, and the property exists
+  in the configuration, the format of the output would be,
+  <pre>
+  {
+    "property": {
+      "key" : "key1",
+      "value" : "value1",
+      "isFinal" : "key1.isFinal",
+      "resource" : "key1.resource"
+    }
+  }
+  </pre>
+  </li>
+
+  <li>
+  When propertyName is null or empty, it behaves the same as
+  {@link #dumpConfiguration(Configuration, Writer)}, and the
+  output would be,
+  <pre>
+  { "properties" :
+      [ { key : "key1",
+          value : "value1",
+          isFinal : "key1.isFinal",
+          resource : "key1.resource" },
+        { key : "key2",
+          value : "value2",
+          isFinal : "key2.isFinal",
+          resource : "key2.resource" }
+       ]
+   }
+  </pre>
+  </li>
+
+  <li>
+  When propertyName is not empty, and the property is not
+  found in the configuration, this method will throw an
+  {@link IllegalArgumentException}.
+  </li>
+  </ul>
+ @param config the configuration
+ @param propertyName property name
+ @param out the Writer to write to
+ @throws IOException
+ @throws IllegalArgumentException when property name is not
+   empty and the property is not found in configuration]]>
+      </doc>
+    </method>
+    <method name="dumpConfiguration"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="config" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="out" type="java.io.Writer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Writes out all properties and their attributes (final and resource) to
+  the given {@link Writer}, the format of the output would be,
+
+  <pre>
+  { "properties" :
+      [ { key : "key1",
+          value : "value1",
+          isFinal : "key1.isFinal",
+          resource : "key1.resource" },
+        { key : "key2",
+          value : "value2",
+          isFinal : "key2.isFinal",
+          resource : "key2.resource" }
+       ]
+   }
+  </pre>
+
+  It does not output the properties of the configuration object which
+  is loaded from an input stream.
+  <p>
+
+ @param config the configuration
+ @param out the Writer to write to
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getClassLoader" return="java.lang.ClassLoader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@link ClassLoader} for this job.
+
+ @return the correct class loader.]]>
+      </doc>
+    </method>
+    <method name="setClassLoader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="classLoader" type="java.lang.ClassLoader"/>
+      <doc>
+      <![CDATA[Set the class loader that will be used to load the various objects.
+ 
+ @param classLoader the new class loader.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setQuietMode"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="quietmode" type="boolean"/>
+      <doc>
+      <![CDATA[Set the quietness-mode. 
+ 
+ In the quiet-mode, error and informational messages might not be logged.
+ 
+ @param quietmode <code>true</code> to set quiet-mode on, <code>false</code>
+              to turn it off.]]>
+      </doc>
+    </method>
+    <method name="main"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+      <doc>
+      <![CDATA[For debugging.  List non-default properties to the terminal and exit.]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getValByRegex" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="regex" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the keys matching the given regex.
+ @param regex the regular expression to match key names against
+ @return Map<String,String> with matching keys]]>
+      </doc>
+    </method>
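+    <!-- Sketch: selecting keys by regex (the key prefix is an assumption):
+           Map<String, String> mine = conf.getValByRegex("^my\\.app\\..*");
+           // maps each matching key to its value
+    -->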
+    <method name="dumpDeprecatedKeys"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hasWarnedDeprecation" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns whether a warning has already been logged for the given
+ deprecated name. If the name is not deprecated, this always returns false.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Provides access to configuration parameters.
+
+ <h4 id="Resources">Resources</h4>
+
+ <p>Configurations are specified by resources. A resource contains a set of
+ name/value pairs as XML data. Each resource is named by either a 
+ <code>String</code> or by a {@link Path}. If named by a <code>String</code>, 
+ then the classpath is examined for a file with that name.  If named by a 
+ <code>Path</code>, then the local filesystem is examined directly, without 
+ referring to the classpath.
+
+ <p>Unless explicitly turned off, Hadoop by default specifies two 
+ resources, loaded in-order from the classpath: <ol>
+ <li><tt>
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a></tt>: Read-only defaults for hadoop.</li>
+ <li><tt>core-site.xml</tt>: Site-specific configuration for a given hadoop
+ installation.</li>
+ </ol>
+ Applications may add additional resources, which are loaded
+ subsequent to these resources in the order they are added.
+ 
+ <h4 id="FinalParams">Final Parameters</h4>
+
+ <p>Configuration parameters may be declared <i>final</i>. 
+ Once a resource declares a value final, no subsequently-loaded 
+ resource can alter that value.  
+ For example, one might define a final parameter with:
+ <tt><pre>
+  &lt;property&gt;
+    &lt;name&gt;dfs.hosts.include&lt;/name&gt;
+    &lt;value&gt;/etc/hadoop/conf/hosts.include&lt;/value&gt;
+    <b>&lt;final&gt;true&lt;/final&gt;</b>
+  &lt;/property&gt;</pre></tt>
+
+ Administrators typically define parameters as final in 
+ <tt>core-site.xml</tt> for values that user applications may not alter.
+
+ <h4 id="VariableExpansion">Variable Expansion</h4>
+
+ <p>Value strings are first processed for <i>variable expansion</i>. The
+ available properties are:<ol>
+ <li>Other properties defined in this Configuration; and, if a name is
+ undefined here,</li>
+ <li>Properties in {@link System#getProperties()}.</li>
+ </ol>
+
+ <p>For example, if a configuration resource contains the following property
+ definitions: 
+ <tt><pre>
+  &lt;property&gt;
+    &lt;name&gt;basedir&lt;/name&gt;
+    &lt;value&gt;/user/${<i>user.name</i>}&lt;/value&gt;
+  &lt;/property&gt;
+  
+  &lt;property&gt;
+    &lt;name&gt;tempdir&lt;/name&gt;
+    &lt;value&gt;${<i>basedir</i>}/tmp&lt;/value&gt;
+  &lt;/property&gt;
+
+  &lt;property&gt;
+    &lt;name&gt;otherdir&lt;/name&gt;
+    &lt;value&gt;${<i>env.BASE_DIR</i>}/other&lt;/value&gt;
+  &lt;/property&gt;</pre></tt>
+
+ When <tt>conf.get("tempdir")</tt> is called, then <tt>${<i>basedir</i>}</tt>
+ will be resolved to another property in this Configuration, while
+ <tt>${<i>user.name</i>}</tt> would then ordinarily be resolved to the value
+ of the System property with that name.
+ <p>When <tt>conf.get("otherdir")</tt> is called, then <tt>${<i>env.BASE_DIR</i>}</tt>
+ will be resolved to the value of the <tt>${<i>BASE_DIR</i>}</tt> environment variable.
+ It supports <tt>${<i>env.NAME:-default</i>}</tt> and <tt>${<i>env.NAME-default</i>}</tt> notations.
+ The former is resolved to "default" if <tt>${<i>NAME</i>}</tt> environment variable is undefined
+ or its value is empty.
+ The latter behaves the same way only if <tt>${<i>NAME</i>}</tt> is undefined.
+ <p>By default, warnings will be given for any deprecated configuration
+ parameters; these are suppressible by configuring
+ <tt>log4j.logger.org.apache.hadoop.conf.Configuration.deprecation</tt> in
+ the log4j.properties file.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.conf.Configuration -->
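+  <!-- A brief sketch of the variable expansion described above, reusing the
+       basedir/tempdir example from the class documentation:
+         Configuration conf = new Configuration();
+         conf.set("basedir", "/user/${user.name}");
+         conf.set("tempdir", "${basedir}/tmp");
+         String t = conf.get("tempdir"); // e.g. "/user/alice/tmp"
+  -->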
+  <!-- start class org.apache.hadoop.conf.Configured -->
+  <class name="Configured" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="Configured"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a Configured.]]>
+      </doc>
+    </constructor>
+    <constructor name="Configured" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a Configured.]]>
+      </doc>
+    </constructor>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Base class for things that may be configured with a {@link Configuration}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.conf.Configured -->
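+  <!-- Sketch: a typical Configured subclass (the class name, key, and run
+       method are illustrative):
+         public class MyTool extends Configured {
+           public int run() {
+             // assumes a Configuration was supplied via the constructor or setConf()
+             return getConf().getInt("my.app.retries", 1);
+           }
+         }
+  -->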
+  <!-- start class org.apache.hadoop.conf.ReconfigurationTaskStatus -->
+  <class name="ReconfigurationTaskStatus" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ReconfigurationTaskStatus" type="long, long, java.util.Map"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="hasTask" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return true if a reconfiguration task has finished, or if an
+ active reconfiguration task is currently running.]]>
+      </doc>
+    </method>
+    <method name="stopped" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return true if the latest reconfiguration task has finished and there is
+ no other active task running.]]>
+      </doc>
+    </method>
+    <method name="getStartTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getEndTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getStatus" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.conf.ReconfigurationTaskStatus -->
+  <doc>
+  <![CDATA[Configuration of system parameters.]]>
+  </doc>
+</package>
+<package name="org.apache.hadoop.crypto">
+</package>
+<package name="org.apache.hadoop.crypto.key">
+  <!-- start class org.apache.hadoop.crypto.key.KeyProvider -->
+  <class name="KeyProvider" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="KeyProvider" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor.
+ 
+ @param conf configuration for the provider]]>
+      </doc>
+    </constructor>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the provider configuration.
+ 
+ @return the provider configuration]]>
+      </doc>
+    </method>
+    <method name="options" return="org.apache.hadoop.crypto.key.KeyProvider.Options"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[A helper function to create an options object.
+ @param conf the configuration to use
+ @return a new options object]]>
+      </doc>
+    </method>
+    <method name="isTransient" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Indicates whether this provider represents a store
+ that is intended for transient use, such as the UserProvider.
+ These providers are generally used to provide access to
+ keying material rather than for long term storage.
+ @return true if transient, false otherwise]]>
+      </doc>
+    </method>
+    <method name="getKeyVersion" return="org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="versionName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the key material for a specific version of the key. This method is used
+ when decrypting data.
+ @param versionName the name of a specific version of the key
+ @return the key material
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getKeys" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the key names for all keys.
+ @return the list of key names
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getKeysMetadata" return="org.apache.hadoop.crypto.key.KeyProvider.Metadata[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="names" type="java.lang.String[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get key metadata in bulk.
+ @param names the names of the keys to get
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getKeyVersions" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the key material for all versions of a specific key name.
+ @return the list of key material
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getCurrentKey" return="org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the current version of the key, which should be used for encrypting new
+ data.
+ @param name the base name of the key
+ @return the current version of the key, or null if the
+    key doesn't exist
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getMetadata" return="org.apache.hadoop.crypto.key.KeyProvider.Metadata"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get metadata about the key.
+ @param name the basename of the key
+ @return the key's metadata or null if the key doesn't exist
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="createKey" return="org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="material" type="byte[]"/>
+      <param name="options" type="org.apache.hadoop.crypto.key.KeyProvider.Options"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a new key. The given key must not already exist.
+ @param name the base name of the key
+ @param material the key material for the first version of the key.
+ @param options the options for the new key.
+ @return the version name of the first version of the key.
+ @throws IOException]]>
+      </doc>
+    </method>
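+    <!-- Sketch: creating a key through a concrete provider; the provider URI
+         is hypothetical, and the material-generating two-argument createKey
+         variant declared below is used:
+           KeyProvider kp = KeyProviderFactory.get(
+               new URI("jceks://file/tmp/test.jceks"), conf);
+           KeyProvider.Options opts = KeyProvider.options(conf);
+           KeyProvider.KeyVersion kv = kp.createKey("key1", opts);
+    -->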
+    <method name="generateKey" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="size" type="int"/>
+      <param name="algorithm" type="java.lang.String"/>
+      <exception name="NoSuchAlgorithmException" type="java.security.NoSuchAlgorithmException"/>
+      <doc>
+      <![CDATA[Generates key material.
+
+ @param size length of the key.
+ @param algorithm algorithm to use for generating the key.
+ @return the generated key.
+ @throws NoSuchAlgorithmException]]>
+      </doc>
+    </method>
+    <method name="createKey" return="org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="options" type="org.apache.hadoop.crypto.key.KeyProvider.Options"/>
+      <exception name="NoSuchAlgorithmException" type="java.security.NoSuchAlgorithmException"/>
+      <exception name="IOException" type="java.io

<TRUNCATED>
