Posted to common-commits@hadoop.apache.org by bo...@apache.org on 2018/05/29 18:10:39 UTC

[01/50] [abbrv] hadoop git commit: HADOOP-15457. Add Security-Related HTTP Response Header in WEBUIs. (kanwaljeets via rkanter) [Forced Update!]

Repository: hadoop
Updated Branches:
  refs/heads/YARN-7402 db183f2ea -> c5bf22dc1 (forced update)


HADOOP-15457. Add Security-Related HTTP Response Header in WEBUIs. (kanwaljeets via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa23d49f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa23d49f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa23d49f

Branch: refs/heads/YARN-7402
Commit: aa23d49fc8b9c2537529dbdc13512000e2ab295a
Parents: bc6d9d4
Author: Robert Kanter <rk...@apache.org>
Authored: Wed May 23 10:23:17 2018 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Wed May 23 10:24:09 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/http/HttpServer2.java     | 79 +++++++++++++++-----
 .../org/apache/hadoop/http/TestHttpServer.java  | 61 +++++++++++++++
 2 files changed, 121 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa23d49f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 47ca841..c273c78 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -34,6 +34,8 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
@@ -172,10 +174,16 @@ public final class HttpServer2 implements FilterContainer {
   private final SignerSecretProvider secretProvider;
   private XFrameOption xFrameOption;
   private boolean xFrameOptionIsEnabled;
-  private static final String X_FRAME_VALUE = "xFrameOption";
-  private static final String X_FRAME_ENABLED = "X_FRAME_ENABLED";
-
-
+  public static final String HTTP_HEADER_PREFIX = "hadoop.http.header.";
+  private static final String HTTP_HEADER_REGEX =
+          "hadoop\\.http\\.header\\.([a-zA-Z\\-_]+)";
+  static final String X_XSS_PROTECTION  =
+          "X-XSS-Protection:1; mode=block";
+  static final String X_CONTENT_TYPE_OPTIONS =
+          "X-Content-Type-Options:nosniff";
+  private static final String X_FRAME_OPTIONS = "X-FRAME-OPTIONS";
+  private static final Pattern PATTERN_HTTP_HEADER_REGEX =
+          Pattern.compile(HTTP_HEADER_REGEX);
   /**
    * Class to construct instances of HTTP server with specific options.
    */
@@ -574,10 +582,7 @@ public final class HttpServer2 implements FilterContainer {
     addDefaultApps(contexts, appDir, conf);
     webServer.setHandler(handlers);
 
-    Map<String, String> xFrameParams = new HashMap<>();
-    xFrameParams.put(X_FRAME_ENABLED,
-        String.valueOf(this.xFrameOptionIsEnabled));
-    xFrameParams.put(X_FRAME_VALUE,  this.xFrameOption.toString());
+    Map<String, String> xFrameParams = setHeaders(conf);
     addGlobalFilter("safety", QuotingInputFilter.class.getName(), xFrameParams);
     final FilterInitializer[] initializers = getFilterInitializers(conf);
     if (initializers != null) {
@@ -1475,9 +1480,11 @@ public final class HttpServer2 implements FilterContainer {
   public static class QuotingInputFilter implements Filter {
 
     private FilterConfig config;
+    private Map<String, String> headerMap;
 
     public static class RequestQuoter extends HttpServletRequestWrapper {
       private final HttpServletRequest rawRequest;
+
       public RequestQuoter(HttpServletRequest rawRequest) {
         super(rawRequest);
         this.rawRequest = rawRequest;
@@ -1566,6 +1573,7 @@ public final class HttpServer2 implements FilterContainer {
     @Override
     public void init(FilterConfig config) throws ServletException {
       this.config = config;
+      initHttpHeaderMap();
     }
 
     @Override
@@ -1593,11 +1601,7 @@ public final class HttpServer2 implements FilterContainer {
       } else if (mime.startsWith("application/xml")) {
         httpResponse.setContentType("text/xml; charset=utf-8");
       }
-
-      if(Boolean.valueOf(this.config.getInitParameter(X_FRAME_ENABLED))) {
-        httpResponse.addHeader("X-FRAME-OPTIONS",
-            this.config.getInitParameter(X_FRAME_VALUE));
-      }
+      headerMap.forEach((k, v) -> httpResponse.addHeader(k, v));
       chain.doFilter(quoted, httpResponse);
     }
 
@@ -1613,14 +1617,25 @@ public final class HttpServer2 implements FilterContainer {
       return (mime == null) ? null : mime;
     }
 
+    private void initHttpHeaderMap() {
+      Enumeration<String> params = this.config.getInitParameterNames();
+      headerMap = new HashMap<>();
+      while (params.hasMoreElements()) {
+        String key = params.nextElement();
+        Matcher m = PATTERN_HTTP_HEADER_REGEX.matcher(key);
+        if (m.matches()) {
+          String headerKey = m.group(1);
+          headerMap.put(headerKey, config.getInitParameter(key));
+        }
+      }
+    }
   }
-
-  /**
-   * The X-FRAME-OPTIONS header in HTTP response to mitigate clickjacking
-   * attack.
-   */
+    /**
+     * The X-FRAME-OPTIONS header in HTTP response to mitigate clickjacking
+     * attack.
+     */
   public enum XFrameOption {
-    DENY("DENY") , SAMEORIGIN ("SAMEORIGIN"), ALLOWFROM ("ALLOW-FROM");
+    DENY("DENY"), SAMEORIGIN("SAMEORIGIN"), ALLOWFROM("ALLOW-FROM");
 
     XFrameOption(String name) {
       this.name = name;
@@ -1651,4 +1666,30 @@ public final class HttpServer2 implements FilterContainer {
       throw new IllegalArgumentException("Unexpected value in xFrameOption.");
     }
   }
+
+
+  private Map<String, String> setHeaders(Configuration conf) {
+    Map<String, String> xFrameParams = new HashMap<>();
+    Map<String, String> headerConfigMap =
+            conf.getValByRegex(HTTP_HEADER_REGEX);
+
+    xFrameParams.putAll(getDefaultHeaders());
+    if(this.xFrameOptionIsEnabled) {
+      xFrameParams.put(HTTP_HEADER_PREFIX+X_FRAME_OPTIONS,
+              this.xFrameOption.toString());
+    }
+    xFrameParams.putAll(headerConfigMap);
+    return xFrameParams;
+  }
+
+  private Map<String, String> getDefaultHeaders() {
+    Map<String, String> headers = new HashMap<>();
+    String[] splitVal = X_CONTENT_TYPE_OPTIONS.split(":");
+    headers.put(HTTP_HEADER_PREFIX + splitVal[0],
+            splitVal[1]);
+    splitVal = X_XSS_PROTECTION.split(":");
+    headers.put(HTTP_HEADER_PREFIX + splitVal[0],
+            splitVal[1]);
+    return headers;
+  }
 }
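
In practice, any configuration key of the form hadoop.http.header.<Header-Name>
now becomes an HTTP response header on the daemon web UIs, with the suffix
matched against the pattern hadoop\.http\.header\.([a-zA-Z\-_]+). A minimal
sketch of driving this from code; the Strict-Transport-Security header is an
illustrative addition, not part of this patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.http.HttpServer2;

    public class HeaderConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Override a shipped default; the captured group after the prefix
        // becomes the header name.
        conf.set(HttpServer2.HTTP_HEADER_PREFIX + "X-XSS-Protection",
            "1; mode=block");
        // Add an extra header (illustrative value).
        conf.set(HttpServer2.HTTP_HEADER_PREFIX + "Strict-Transport-Security",
            "max-age=63072000; includeSubDomains");
        // Any HttpServer2 built from this conf emits both headers through
        // the global "safety" QuotingInputFilter.
      }
    }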

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa23d49f/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
index 6c1512e..26b1137 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
@@ -701,4 +701,65 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     ServerConnector listener = (ServerConnector)listeners.get(0);
     assertEquals(backlogSize, listener.getAcceptQueueSize());
   }
+
+  @Test
+  public void testHttpResponseDefaultHeaders() throws Exception {
+    Configuration conf = new Configuration();
+    HttpServer2  httpServer = createTestServer(conf);
+    try {
+      HttpURLConnection conn = getHttpURLConnection(httpServer);
+      assertEquals(HttpServer2.X_XSS_PROTECTION.split(":")[1],
+              conn.getHeaderField(
+              HttpServer2.X_XSS_PROTECTION.split(":")[0]));
+      assertEquals(HttpServer2.X_CONTENT_TYPE_OPTIONS.split(":")[1],
+              conn.getHeaderField(
+              HttpServer2.X_CONTENT_TYPE_OPTIONS.split(":")[0]));
+    } finally {
+      httpServer.stop();
+    }
+  }
+
+  @Test
+  public void testHttpResponseOverrideDefaultHeaders() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(HttpServer2.HTTP_HEADER_PREFIX+
+            HttpServer2.X_XSS_PROTECTION.split(":")[0], "customXssValue");
+    HttpServer2  httpServer = createTestServer(conf);
+    try {
+      HttpURLConnection conn = getHttpURLConnection(httpServer);
+      assertEquals("customXssValue",
+              conn.getHeaderField(
+              HttpServer2.X_XSS_PROTECTION.split(":")[0])
+      );
+      assertEquals(HttpServer2.X_CONTENT_TYPE_OPTIONS.split(":")[1],
+              conn.getHeaderField(
+              HttpServer2.X_CONTENT_TYPE_OPTIONS.split(":")[0])
+      );
+    } finally {
+      httpServer.stop();
+    }
+  }
+
+  @Test
+  public void testHttpResponseCustomHeaders() throws Exception {
+    Configuration conf = new Configuration();
+    String key = "customKey";
+    String value = "customValue";
+    conf.set(HttpServer2.HTTP_HEADER_PREFIX+key, value);
+    HttpServer2  httpServer = createTestServer(conf);
+    try {
+      HttpURLConnection conn = getHttpURLConnection(httpServer);
+      assertEquals(HttpServer2.X_XSS_PROTECTION.split(":")[1],
+              conn.getHeaderField(
+              HttpServer2.X_XSS_PROTECTION.split(":")[0]));
+      assertEquals(HttpServer2.X_CONTENT_TYPE_OPTIONS.split(":")[1],
+              conn.getHeaderField(
+              HttpServer2.X_CONTENT_TYPE_OPTIONS.split(":")[0]));
+      assertEquals(value, conn.getHeaderField(
+              key));
+    } finally {
+      httpServer.stop();
+    }
+  }
+
 }
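
The same defaults can be checked by hand against any running daemon UI; a
hedged sketch, with the URL a placeholder for a real web UI address:

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class HeaderCheck {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:9870/index.html"); // placeholder
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.connect();
        // Defaults added by this patch:
        System.out.println(conn.getHeaderField("X-XSS-Protection"));       // 1; mode=block
        System.out.println(conn.getHeaderField("X-Content-Type-Options")); // nosniff
        conn.disconnect();
      }
    }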




[24/50] [abbrv] hadoop git commit: HADOOP-15473. Configure serialFilter in KeyProvider to avoid UnrecoverableKeyException caused by JDK-8189997. Contributed by Gabor Bota.

Posted by bo...@apache.org.
HADOOP-15473. Configure serialFilter in KeyProvider to avoid UnrecoverableKeyException caused by JDK-8189997. Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/02322de3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/02322de3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/02322de3

Branch: refs/heads/YARN-7402
Commit: 02322de3f95ba78a22c057037ef61aa3ab1d3824
Parents: 8d5509c
Author: Xiao Chen <xi...@apache.org>
Authored: Fri May 25 09:08:15 2018 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Fri May 25 09:10:51 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/crypto/key/KeyProvider.java   | 18 +++++++++++++++
 .../fs/CommonConfigurationKeysPublic.java       |  7 ++++++
 .../src/main/resources/core-default.xml         | 23 ++++++++++++++++++++
 3 files changed, 48 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/02322de3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
index 5d670e5..050540b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
@@ -42,6 +42,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 
 import javax.crypto.KeyGenerator;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_JCEKS_KEY_SERIALFILTER;
+
 /**
  * A provider of secret key material for Hadoop applications. Provides an
  * abstraction to separate key storage from users of encryption. It
@@ -61,6 +63,14 @@ public abstract class KeyProvider {
       CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_DEFAULT_BITLENGTH_KEY;
   public static final int DEFAULT_BITLENGTH = CommonConfigurationKeysPublic.
       HADOOP_SECURITY_KEY_DEFAULT_BITLENGTH_DEFAULT;
+  public static final String JCEKS_KEY_SERIALFILTER_DEFAULT =
+      "java.lang.Enum;"
+          + "java.security.KeyRep;"
+          + "java.security.KeyRep$Type;"
+          + "javax.crypto.spec.SecretKeySpec;"
+          + "org.apache.hadoop.crypto.key.JavaKeyStoreProvider$KeyMetadata;"
+          + "!*";
+  public static final String JCEKS_KEY_SERIAL_FILTER = "jceks.key.serialFilter";
 
   private final Configuration conf;
 
@@ -394,6 +404,14 @@ public abstract class KeyProvider {
    */
   public KeyProvider(Configuration conf) {
     this.conf = new Configuration(conf);
+    // Added for HADOOP-15473. Configuring the serialFilter property fixes
+    // java.security.UnrecoverableKeyException in JDK 8u171.
+    if(System.getProperty(JCEKS_KEY_SERIAL_FILTER) == null) {
+      String serialFilter =
+          conf.get(HADOOP_SECURITY_CRYPTO_JCEKS_KEY_SERIALFILTER,
+              JCEKS_KEY_SERIALFILTER_DEFAULT);
+      System.setProperty(JCEKS_KEY_SERIAL_FILTER, serialFilter);
+    }
   }
 
   /**
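
The resulting precedence: an explicit -Djceks.key.serialFilter system property
always wins; otherwise the Hadoop property (or, failing that, the built-in
default) is copied into it when the first KeyProvider is constructed. A
minimal sketch of pinning the filter through configuration, reusing the
default value from this patch:

    import org.apache.hadoop.conf.Configuration;

    public class SerialFilterExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Consulted only when jceks.key.serialFilter is not already set
        // as a system property.
        conf.set("hadoop.security.crypto.jceks.key.serialfilter",
            "java.lang.Enum;"
                + "java.security.KeyRep;"
                + "java.security.KeyRep$Type;"
                + "javax.crypto.spec.SecretKeySpec;"
                + "org.apache.hadoop.crypto.key.JavaKeyStoreProvider$KeyMetadata;"
                + "!*");
        // Constructing any KeyProvider with this conf propagates the value
        // into the jceks.key.serialFilter system property.
      }
    }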

http://git-wip-us.apache.org/repos/asf/hadoop/blob/02322de3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 8837cfb..9e0ba20 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -662,6 +662,13 @@ public class CommonConfigurationKeysPublic {
    * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
    * core-default.xml</a>
    */
+  public static final String HADOOP_SECURITY_CRYPTO_JCEKS_KEY_SERIALFILTER =
+      "hadoop.security.crypto.jceks.key.serialfilter";
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY = 
     "hadoop.security.crypto.buffer.size";
  /** Default value for HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/02322de3/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index fad2985..9564587 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2487,6 +2487,29 @@
 </property>
 
 <property>
+  <name>hadoop.security.crypto.jceks.key.serialfilter</name>
+  <description>
+    Enhanced KeyStore Mechanisms in JDK 8u171 introduced jceks.key.serialFilter.
+    If jceks.key.serialFilter is configured, the JCEKS KeyStore uses it during
+    the deserialization of the encrypted Key object stored inside a
+    SecretKeyEntry.
+    If jceks.key.serialFilter is not configured, KeyProviderFactory fails
+    with an UnrecoverableKeyException when recovering a key from a keystore
+    file on JDK 8u171 or newer. The filter pattern uses the same format as
+    jdk.serialFilter.
+
+    The value of this property is applied as follows:
+    1. The jceks.key.serialFilter system property, if set, takes precedence
+    over the value of this property.
+    2. In the absence of the jceks.key.serialFilter system property, the
+    value of this property is set as the value of jceks.key.serialFilter.
+    3. If neither this property nor the jceks.key.serialFilter system
+    property is set, org.apache.hadoop.crypto.key.KeyProvider supplies a
+    default value for jceks.key.serialFilter.
+  </description>
+</property>
+
+<property>
   <name>hadoop.security.crypto.buffer.size</name>
   <value>8192</value>
   <description>




[06/50] [abbrv] hadoop git commit: YARN-8344. Missing nm.stop() in TestNodeManagerResync to fix testKillContainersOnResync. Contributed by Giovanni Matteo Fumarola.

Posted by bo...@apache.org.
YARN-8344. Missing nm.stop() in TestNodeManagerResync to fix testKillContainersOnResync. Contributed by Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e99e5bf1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e99e5bf1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e99e5bf1

Branch: refs/heads/YARN-7402
Commit: e99e5bf104e9664bc1b43a2639d87355d47a77e2
Parents: cddbbe5
Author: Inigo Goiri <in...@apache.org>
Authored: Wed May 23 14:15:26 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Wed May 23 14:15:26 2018 -0700

----------------------------------------------------------------------
 .../nodemanager/TestNodeManagerResync.java      | 87 +++++++++++---------
 1 file changed, 48 insertions(+), 39 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e99e5bf1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
index 97e9922..cf33775 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
@@ -150,7 +150,6 @@ public class TestNodeManagerResync {
     testContainerPreservationOnResyncImpl(nm, true);
   }
 
-  @SuppressWarnings("unchecked")
   protected void testContainerPreservationOnResyncImpl(TestNodeManager1 nm,
       boolean isWorkPreservingRestartEnabled)
       throws IOException, YarnException, InterruptedException {
@@ -186,32 +185,35 @@ public class TestNodeManagerResync {
     }
   }
 
-  @SuppressWarnings("unchecked")
+  @SuppressWarnings("resource")
   @Test(timeout=10000)
   public void testNMshutdownWhenResyncThrowException() throws IOException,
       InterruptedException, YarnException {
     NodeManager nm = new TestNodeManager3();
     YarnConfiguration conf = createNMConfig();
-    nm.init(conf);
-    nm.start();
-    Assert.assertEquals(1, ((TestNodeManager3) nm).getNMRegistrationCount());
-    nm.getNMDispatcher().getEventHandler()
-        .handle(new NodeManagerEvent(NodeManagerEventType.RESYNC));
-
-    synchronized (isNMShutdownCalled) {
-      while (isNMShutdownCalled.get() == false) {
-        try {
-          isNMShutdownCalled.wait();
-        } catch (InterruptedException e) {
+    try {
+      nm.init(conf);
+      nm.start();
+      Assert.assertEquals(1, ((TestNodeManager3) nm).getNMRegistrationCount());
+      nm.getNMDispatcher().getEventHandler()
+          .handle(new NodeManagerEvent(NodeManagerEventType.RESYNC));
+
+      synchronized (isNMShutdownCalled) {
+        while (!isNMShutdownCalled.get()) {
+          try {
+            isNMShutdownCalled.wait();
+          } catch (InterruptedException e) {
+          }
         }
       }
-    }
 
-    Assert.assertTrue("NM shutdown not called.",isNMShutdownCalled.get());
-    nm.stop();
+      Assert.assertTrue("NM shutdown not called.", isNMShutdownCalled.get());
+    } finally {
+      nm.stop();
+    }
   }
 
-  @SuppressWarnings("unchecked")
+  @SuppressWarnings("resource")
   @Test(timeout=60000)
   public void testContainerResourceIncreaseIsSynchronizedWithRMResync()
       throws IOException, InterruptedException, YarnException {
@@ -219,28 +221,32 @@ public class TestNodeManagerResync {
     YarnConfiguration conf = createNMConfig();
     conf.setBoolean(
         YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);
-    nm.init(conf);
-    nm.start();
-    // Start a container and make sure it is in RUNNING state
-    ((TestNodeManager4)nm).startContainer();
-    // Simulate a container resource increase in a separate thread
-    ((TestNodeManager4)nm).updateContainerResource();
-    // Simulate RM restart by sending a RESYNC event
-    LOG.info("Sending out RESYNC event");
-    nm.getNMDispatcher().getEventHandler().handle(
-        new NodeManagerEvent(NodeManagerEventType.RESYNC));
     try {
-      syncBarrier.await();
-    } catch (BrokenBarrierException e) {
-      e.printStackTrace();
+      nm.init(conf);
+      nm.start();
+      // Start a container and make sure it is in RUNNING state
+      ((TestNodeManager4) nm).startContainer();
+      // Simulate a container resource increase in a separate thread
+      ((TestNodeManager4) nm).updateContainerResource();
+      // Simulate RM restart by sending a RESYNC event
+      LOG.info("Sending out RESYNC event");
+      nm.getNMDispatcher().getEventHandler()
+          .handle(new NodeManagerEvent(NodeManagerEventType.RESYNC));
+      try {
+        syncBarrier.await();
+      } catch (BrokenBarrierException e) {
+        e.printStackTrace();
+      }
+      Assert.assertFalse(assertionFailedInThread.get());
+    } finally {
+      nm.stop();
     }
-    Assert.assertFalse(assertionFailedInThread.get());
-    nm.stop();
   }
 
   // This is to test when NM gets the resync response from last heart beat, it
   // should be able to send the already-sent-via-last-heart-beat container
   // statuses again when it re-register with RM.
+  @SuppressWarnings("resource")
   @Test
   public void testNMSentContainerStatusOnResync() throws Exception {
     final ContainerStatus testCompleteContainer =
@@ -323,15 +329,18 @@ public class TestNodeManagerResync {
       }
     };
     YarnConfiguration conf = createNMConfig();
-    nm.init(conf);
-    nm.start();
-
     try {
-      syncBarrier.await();
-    } catch (BrokenBarrierException e) {
+      nm.init(conf);
+      nm.start();
+
+      try {
+        syncBarrier.await();
+      } catch (BrokenBarrierException e) {
+      }
+      Assert.assertFalse(assertionFailedInThread.get());
+    } finally {
+      nm.stop();
     }
-    Assert.assertFalse(assertionFailedInThread.get());
-    nm.stop();
   }
 
   // This can be used as a common base class for testing NM resync behavior.
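
Each test now follows the same resource-safe shape, so a failed assertion can
no longer leak a running NodeManager (and its ports) into later tests. The
recurring pattern, sketched with names from this test class:

    NodeManager nm = new TestNodeManager3();
    YarnConfiguration conf = createNMConfig();
    try {
      nm.init(conf);
      nm.start();
      // ... drive the resync scenario and assert ...
    } finally {
      nm.stop(); // runs even when an assertion above throws
    }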




[43/50] [abbrv] hadoop git commit: YARN-8339. Service AM should localize static/archive resource types to container working directory instead of 'resources'. (Suma Shivaprasad via wangda)

Posted by bo...@apache.org.
YARN-8339. Service AM should localize static/archive resource types to container working directory instead of 'resources'. (Suma Shivaprasad via wangda)

Change-Id: I9f8e8f621650347f6c2f9e3420edee9eb2f356a4


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3061bfcd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3061bfcd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3061bfcd

Branch: refs/heads/YARN-7402
Commit: 3061bfcde53210d2032df3814243498b27a997b7
Parents: 3c75f8e
Author: Wangda Tan <wa...@apache.org>
Authored: Tue May 29 09:23:11 2018 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Tue May 29 09:23:11 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/yarn/service/provider/ProviderUtils.java | 3 +--
 .../apache/hadoop/yarn/service/provider/TestProviderUtils.java | 6 +++---
 2 files changed, 4 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3061bfcd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
index 1ad5fd8..ac90992 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
@@ -298,8 +298,7 @@ public class ProviderUtils implements YarnServiceConstants {
         destFile = new Path(staticFile.getDestFile());
       }
 
-      String symlink = APP_RESOURCES_DIR + "/" + destFile.getName();
-      addLocalResource(launcher, symlink, localResource, destFile);
+      addLocalResource(launcher, destFile.getName(), localResource, destFile);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3061bfcd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java
index 6e8bc43..5d794d2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java
@@ -154,11 +154,11 @@ public class TestProviderUtils {
 
     ProviderUtils.handleStaticFilesForLocalization(launcher, sfs,
         compLaunchCtx);
-    Mockito.verify(launcher).addLocalResource(Mockito.eq("resources/destFile1"),
+    Mockito.verify(launcher).addLocalResource(Mockito.eq("destFile1"),
         any(LocalResource.class));
     Mockito.verify(launcher).addLocalResource(
-        Mockito.eq("resources/destFile_2"), any(LocalResource.class));
+        Mockito.eq("destFile_2"), any(LocalResource.class));
     Mockito.verify(launcher).addLocalResource(
-        Mockito.eq("resources/sourceFile4"), any(LocalResource.class));
+        Mockito.eq("sourceFile4"), any(LocalResource.class));
   }
 }
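
The effect shows up in the localized layout of the container working
directory; illustratively, with a file name from the test above:

    before: $CONTAINER_WORK_DIR/resources/destFile1 -> <localized file>
    after:  $CONTAINER_WORK_DIR/destFile1           -> <localized file>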




[32/50] [abbrv] hadoop git commit: HADOOP-15477. Make unjar in RunJar overrideable

Posted by bo...@apache.org.
HADOOP-15477. Make unjar in RunJar overrideable

Signed-off-by: Akira Ajisaka <aa...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d14e26b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d14e26b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d14e26b3

Branch: refs/heads/YARN-7402
Commit: d14e26b31fe46fb47a8e99a212c70016fd15a4d9
Parents: 0cf6e87
Author: Johan Gustavsson <jo...@treasure-data.com>
Authored: Mon May 28 17:29:59 2018 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Mon May 28 17:29:59 2018 +0900

----------------------------------------------------------------------
 .../java/org/apache/hadoop/util/RunJar.java     | 17 ++++++---
 .../java/org/apache/hadoop/util/TestRunJar.java | 37 ++++++++++++++++++--
 .../org/apache/hadoop/streaming/StreamJob.java  |  4 ++-
 3 files changed, 51 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d14e26b3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
index 9dd770c..f1b643c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
@@ -76,7 +76,11 @@ public class RunJar {
    */
   public static final String HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES =
       "HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES";
-
+  /**
+   * Environment key for disabling unjar in client code.
+   */
+  public static final String HADOOP_CLIENT_SKIP_UNJAR =
+      "HADOOP_CLIENT_SKIP_UNJAR";
   /**
    * Buffer size for copy the content of compressed file to new file.
    */
@@ -93,7 +97,7 @@ public class RunJar {
    * @throws IOException if an I/O error has occurred or toDir
    * cannot be created and does not already exist
    */
-  public static void unJar(File jarFile, File toDir) throws IOException {
+  public void unJar(File jarFile, File toDir) throws IOException {
     unJar(jarFile, toDir, MATCH_ANY);
   }
 
@@ -292,8 +296,9 @@ public class RunJar {
           }
         }, SHUTDOWN_HOOK_PRIORITY);
 
-
-    unJar(file, workDir);
+    if (!skipUnjar()) {
+      unJar(file, workDir);
+    }
 
     ClassLoader loader = createClassLoader(file, workDir);
 
@@ -364,6 +369,10 @@ public class RunJar {
     return Boolean.parseBoolean(System.getenv(HADOOP_USE_CLIENT_CLASSLOADER));
   }
 
+  boolean skipUnjar() {
+    return Boolean.parseBoolean(System.getenv(HADOOP_CLIENT_SKIP_UNJAR));
+  }
+
   String getHadoopClasspath() {
     return System.getenv(HADOOP_CLASSPATH);
   }
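
Since unJar(File, File) is now an instance method, client tooling can also
subclass RunJar instead of setting the environment variable. A hypothetical
subclass, for illustration only:

    import java.io.File;
    import java.io.IOException;
    import org.apache.hadoop.util.RunJar;

    public class NoUnjarRunJar extends RunJar {
      @Override
      public void unJar(File jarFile, File toDir) throws IOException {
        // Intentionally a no-op: classes are served straight from the jar
        // by the classloader, mirroring HADOOP_CLIENT_SKIP_UNJAR=true.
      }
    }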

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d14e26b3/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
index 19485d6..ea07b97 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
@@ -17,10 +17,14 @@
  */
 package org.apache.hadoop.util;
 
+import static org.apache.hadoop.util.RunJar.MATCH_ANY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
 import java.io.File;
@@ -99,7 +103,7 @@ public class TestRunJar {
 
     // Unjar everything
     RunJar.unJar(new File(TEST_ROOT_DIR, TEST_JAR_NAME),
-                 unjarDir);
+                 unjarDir, MATCH_ANY);
     assertTrue("foobar unpacked",
                new File(unjarDir, TestRunJar.FOOBAR_TXT).exists());
     assertTrue("foobaz unpacked",
@@ -177,7 +181,7 @@ public class TestRunJar {
 
     // Unjar everything
     RunJar.unJar(new File(TEST_ROOT_DIR, TEST_JAR_NAME),
-            unjarDir);
+            unjarDir, MATCH_ANY);
 
     String failureMessage = "Last modify time was lost during unJar";
     assertEquals(failureMessage, MOCKED_NOW, new File(unjarDir, TestRunJar.FOOBAR_TXT).lastModified());
@@ -221,5 +225,34 @@ public class TestRunJar {
     // run RunJar
     runJar.run(args);
     // it should not throw an exception
+    verify(runJar, times(1)).unJar(any(File.class), any(File.class));
+  }
+
+  @Test
+  public void testClientClassLoaderSkipUnjar() throws Throwable {
+    RunJar runJar = spy(new RunJar());
+    // enable the client classloader
+    when(runJar.useClientClassLoader()).thenReturn(true);
+    // set the system classes and blacklist the test main class and the test
+    // third class so they can be loaded by the application classloader
+    String mainCls = ClassLoaderCheckMain.class.getName();
+    String thirdCls = ClassLoaderCheckThird.class.getName();
+    String systemClasses = "-" + mainCls + "," +
+        "-" + thirdCls + "," +
+        ApplicationClassLoader.SYSTEM_CLASSES_DEFAULT;
+    when(runJar.getSystemClasses()).thenReturn(systemClasses);
+
+    // create the test jar
+    File testJar = JarFinder.makeClassLoaderTestJar(this.getClass(),
+        TEST_ROOT_DIR, TEST_JAR_2_NAME, BUFF_SIZE, mainCls, thirdCls);
+    // form the args
+    String[] args = new String[3];
+    args[0] = testJar.getAbsolutePath();
+    args[1] = mainCls;
+    when(runJar.skipUnjar()).thenReturn(true);
+    // run RunJar
+    runJar.run(args);
+    // it should not throw an exception
+    verify(runJar, times(0)).unJar(any(File.class), any(File.class));
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d14e26b3/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java
index 9b09729..1fe8710 100644
--- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java
+++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java
@@ -72,6 +72,8 @@ import org.apache.hadoop.util.RunJar;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 
+import static org.apache.hadoop.util.RunJar.MATCH_ANY;
+
 /** All the client-side work happens here.
  * (Jar packaging, MapRed job submission and monitoring)
  */
@@ -1006,7 +1008,7 @@ public class StreamJob implements Tool {
     if (jar_ != null && isLocalHadoop()) {
       // getAbs became required when shell and subvm have different working dirs...
       File wd = new File(".").getAbsoluteFile();
-      RunJar.unJar(new File(jar_), wd);
+      RunJar.unJar(new File(jar_), wd, MATCH_ANY);
     }
 
     // if jobConf_ changes must recreate a JobClient




[13/50] [abbrv] hadoop git commit: YARN-8319. More YARN pages need to honor yarn.resourcemanager.display.per-user-apps. Contributed by Sunil G.

Posted by bo...@apache.org.
YARN-8319. More YARN pages need to honor yarn.resourcemanager.display.per-user-apps. Contributed by Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c05b5d42
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c05b5d42
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c05b5d42

Branch: refs/heads/YARN-7402
Commit: c05b5d424b000bab766f57e88a07f2b4e9a56647
Parents: 4cc0c9b
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Thu May 24 14:19:46 2018 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Thu May 24 14:19:46 2018 +0530

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     | 11 +++-
 .../yarn/conf/TestYarnConfigurationFields.java  |  2 +
 .../src/main/resources/yarn-default.xml         |  2 +-
 .../nodemanager/webapp/NMWebServices.java       | 63 +++++++++++++++++-
 .../webapp/TestNMWebServicesApps.java           | 68 +++++++++++++++++---
 .../server/resourcemanager/ClientRMService.java | 10 +--
 .../resourcemanager/webapp/RMWebServices.java   |  8 +--
 .../reader/TimelineReaderWebServices.java       | 33 ++++++++++
 8 files changed, 175 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c05b5d42/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 6d08831..004a59f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -121,6 +121,10 @@ public class YarnConfiguration extends Configuration {
         new DeprecationDelta(RM_ZK_RETRY_INTERVAL_MS,
             CommonConfigurationKeys.ZK_RETRY_INTERVAL_MS),
     });
+    Configuration.addDeprecations(new DeprecationDelta[] {
+        new DeprecationDelta("yarn.resourcemanager.display.per-user-apps",
+            FILTER_ENTITY_LIST_BY_USER)
+    });
   }
 
   //Configurations
@@ -3569,11 +3573,16 @@ public class YarnConfiguration extends Configuration {
   public static final String NM_SCRIPT_BASED_NODE_LABELS_PROVIDER_SCRIPT_OPTS =
       NM_SCRIPT_BASED_NODE_LABELS_PROVIDER_PREFIX + "opts";
 
-  /*
+  /**
    * Support to view apps for given user in secure cluster.
+   * @deprecated This field is deprecated in favor of
+   * {@link #FILTER_ENTITY_LIST_BY_USER}.
    */
+  @Deprecated
   public static final String DISPLAY_APPS_FOR_LOGGED_IN_USER =
       RM_PREFIX + "display.per-user-apps";
+
+  public static final String FILTER_ENTITY_LIST_BY_USER =
+      "yarn.webapp.filter-entity-list-by-user";
   public static final boolean DEFAULT_DISPLAY_APPS_FOR_LOGGED_IN_USER =
       false;
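
Operators can switch to the new key immediately; the DeprecationDelta above
keeps existing configs that use the old key working. A minimal sketch:

    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class FilterByUserExample {
      public static void main(String[] args) {
        YarnConfiguration conf = new YarnConfiguration();
        // New key; the deprecated yarn.resourcemanager.display.per-user-apps
        // is transparently mapped onto it.
        conf.setBoolean(YarnConfiguration.FILTER_ENTITY_LIST_BY_USER, true);
        System.out.println(conf.getBoolean(
            YarnConfiguration.FILTER_ENTITY_LIST_BY_USER, false));
      }
    }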
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c05b5d42/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index f4d1ac0..b9ba543 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -182,6 +182,8 @@ public class TestYarnConfigurationFields extends TestConfigurationFieldsBase {
     // Ignore deprecated properties
     configurationPrefixToSkipCompare
         .add(YarnConfiguration.YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS);
+    configurationPrefixToSkipCompare
+        .add(YarnConfiguration.DISPLAY_APPS_FOR_LOGGED_IN_USER);
 
     // Allocate for usage
     xmlPropsToSkipCompare = new HashSet<String>();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c05b5d42/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index da44ccb..c82474c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3529,7 +3529,7 @@
   </property>
  
   <property>
-    <name>yarn.resourcemanager.display.per-user-apps</name>
+    <name>yarn.webapp.filter-entity-list-by-user</name>
     <value>false</value>
       <description>
         Flag to enable display of applications per user as an admin

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c05b5d42/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
index 9157374..b675d5a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
@@ -22,6 +22,7 @@ import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.nio.charset.Charset;
+import java.security.Principal;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
@@ -54,6 +55,8 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.http.JettyUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -99,6 +102,7 @@ public class NMWebServices {
       .getRecordFactory(null);
   private final String redirectWSUrl;
   private final LogAggregationFileControllerFactory factory;
+  private boolean filterAppsByUser = false;
 
   private @javax.ws.rs.core.Context 
     HttpServletRequest request;
@@ -119,6 +123,15 @@ public class NMWebServices {
         YarnConfiguration.YARN_LOG_SERVER_WEBSERVICE_URL);
     this.factory = new LogAggregationFileControllerFactory(
         this.nmContext.getConf());
+    this.filterAppsByUser = this.nmContext.getConf().getBoolean(
+        YarnConfiguration.FILTER_ENTITY_LIST_BY_USER,
+        YarnConfiguration.DEFAULT_DISPLAY_APPS_FOR_LOGGED_IN_USER);
+  }
+
+  public NMWebServices(final Context nm, final ResourceView view,
+      final WebApp webapp, HttpServletResponse response) {
+    this(nm, view, webapp);
+    this.response = response;
   }
 
   private void init() {
@@ -146,7 +159,8 @@ public class NMWebServices {
   @Path("/apps")
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
-  public AppsInfo getNodeApps(@QueryParam("state") String stateQuery,
+  public AppsInfo getNodeApps(@javax.ws.rs.core.Context HttpServletRequest hsr,
+      @QueryParam("state") String stateQuery,
       @QueryParam("user") String userQuery) {
     init();
     AppsInfo allApps = new AppsInfo();
@@ -169,6 +183,14 @@ public class NMWebServices {
           continue;
         }
       }
+
+      // Allow only application-owner/admin for any type of access on the
+      // application.
+      if (filterAppsByUser
+          && !hasAccess(appInfo.getUser(), entry.getKey(), hsr)) {
+        continue;
+      }
+
       allApps.add(appInfo);
     }
     return allApps;
@@ -205,6 +227,16 @@ public class NMWebServices {
       }
       ContainerInfo info = new ContainerInfo(this.nmContext, entry.getValue(),
           uriInfo.getBaseUri().toString(), webapp.name(), hsr.getRemoteUser());
+
+      ApplicationId appId = entry.getKey().getApplicationAttemptId()
+          .getApplicationId();
+      // Allow only application-owner/admin for any type of access on the
+      // application.
+      if (filterAppsByUser
+          && !hasAccess(entry.getValue().getUser(), appId, hsr)) {
+        continue;
+      }
+
       allContainers.add(info);
     }
     return allContainers;
@@ -553,4 +585,33 @@ public class NMWebServices {
     res.header("Location", redirectPath.toString());
     return res.build();
   }
+
+  protected Boolean hasAccess(String user, ApplicationId appId,
+      HttpServletRequest hsr) {
+    // Check for the authorization.
+    UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
+
+    if (callerUGI != null && !(this.nmContext.getApplicationACLsManager()
+        .checkAccess(callerUGI, ApplicationAccessType.VIEW_APP, user, appId))) {
+      return false;
+    }
+    return true;
+  }
+
+  private UserGroupInformation getCallerUserGroupInformation(
+      HttpServletRequest hsr, boolean usePrincipal) {
+
+    String remoteUser = hsr.getRemoteUser();
+    if (usePrincipal) {
+      Principal princ = hsr.getUserPrincipal();
+      remoteUser = princ == null ? null : princ.getName();
+    }
+
+    UserGroupInformation callerUGI = null;
+    if (remoteUser != null) {
+      callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
+    }
+
+    return callerUGI;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c05b5d42/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java
index 6316282..3533d16 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java
@@ -22,12 +22,17 @@ import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.assertResponseS
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 import java.io.File;
 import java.io.IOException;
 import java.io.StringReader;
+import java.security.Principal;
 import java.util.HashMap;
 
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
 import javax.ws.rs.core.MediaType;
 import javax.xml.parsers.DocumentBuilder;
 import javax.xml.parsers.DocumentBuilderFactory;
@@ -44,11 +49,13 @@ import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
 import org.apache.hadoop.yarn.server.nodemanager.NodeHealthCheckerService;
 import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
+import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext;
 import org.apache.hadoop.yarn.server.nodemanager.ResourceView;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.webapp.WebServer.NMWebApp;
+import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppsInfo;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
@@ -192,25 +199,28 @@ public class TestNMWebServicesApps extends JerseyTestBase {
 
   private HashMap<String, String> addAppContainers(Application app) 
       throws IOException {
+    return addAppContainers(app, nmContext);
+  }
+
+  private HashMap<String, String> addAppContainers(Application app,
+      Context context) throws IOException {
     Dispatcher dispatcher = new AsyncDispatcher();
-    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
-        app.getAppId(), 1);
+    ApplicationAttemptId appAttemptId = BuilderUtils
+        .newApplicationAttemptId(app.getAppId(), 1);
     Container container1 = new MockContainer(appAttemptId, dispatcher, conf,
         app.getUser(), app.getAppId(), 1);
     Container container2 = new MockContainer(appAttemptId, dispatcher, conf,
         app.getUser(), app.getAppId(), 2);
-    nmContext.getContainers()
-        .put(container1.getContainerId(), container1);
-    nmContext.getContainers()
-        .put(container2.getContainerId(), container2);
+    context.getContainers().put(container1.getContainerId(), container1);
+    context.getContainers().put(container2.getContainerId(), container2);
 
     app.getContainers().put(container1.getContainerId(), container1);
     app.getContainers().put(container2.getContainerId(), container2);
     HashMap<String, String> hash = new HashMap<String, String>();
-    hash.put(container1.getContainerId().toString(), container1
-        .getContainerId().toString());
-    hash.put(container2.getContainerId().toString(), container2
-        .getContainerId().toString());
+    hash.put(container1.getContainerId().toString(),
+        container1.getContainerId().toString());
+    hash.put(container2.getContainerId().toString(),
+        container2.getContainerId().toString());
     return hash;
   }
 
@@ -721,4 +731,42 @@ public class TestNMWebServicesApps extends JerseyTestBase {
         user);
   }
 
+  @Test
+  public void testNodeAppsUserFiltering() throws JSONException, Exception {
+    Configuration yarnConf = new Configuration();
+    yarnConf.setBoolean(YarnConfiguration.FILTER_ENTITY_LIST_BY_USER, true);
+    yarnConf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
+    yarnConf.setStrings(YarnConfiguration.YARN_ADMIN_ACL, "admin");
+    ApplicationACLsManager aclManager = new ApplicationACLsManager(yarnConf);
+
+    NMContext context = new NodeManager.NMContext(null, null, dirsHandler,
+        aclManager, null, false, yarnConf);
+    Application app = new MockApp(1);
+    context.getApplications().put(app.getAppId(), app);
+    addAppContainers(app, context);
+    Application app2 = new MockApp("foo", 1234, 2);
+    context.getApplications().put(app2.getAppId(), app2);
+    addAppContainers(app2, context);
+
+    // User "foo" could only see its own apps/containers.
+    NMWebServices webSvc = new NMWebServices(context, null, nmWebApp,
+        mock(HttpServletResponse.class));
+    HttpServletRequest mockHsr = mockHttpServletRequestByUserName("foo");
+    AppsInfo appsInfo = webSvc.getNodeApps(mockHsr, null, null);
+    assertEquals(1, appsInfo.getApps().size());
+
+    // Admin could see all apps and containers.
+    HttpServletRequest mockHsrAdmin = mockHttpServletRequestByUserName("admin");
+    AppsInfo appsInfo2 = webSvc.getNodeApps(mockHsrAdmin, null, null);
+    assertEquals(2, appsInfo2.getApps().size());
+  }
+
+  private HttpServletRequest mockHttpServletRequestByUserName(String username) {
+    HttpServletRequest mockHsr = mock(HttpServletRequest.class);
+    when(mockHsr.getRemoteUser()).thenReturn(username);
+    Principal principal = mock(Principal.class);
+    when(principal.getName()).thenReturn(username);
+    when(mockHsr.getUserPrincipal()).thenReturn(principal);
+    return mockHsr;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c05b5d42/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index feaa5cb..e92a3c8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -216,7 +216,7 @@ public class ClientRMService extends AbstractService implements
   private ReservationSystem reservationSystem;
   private ReservationInputValidator rValidator;
 
-  private boolean displayPerUserApps = false;
+  private boolean filterAppsByUser = false;
 
   private static final EnumSet<RMAppState> ACTIVE_APP_STATES = EnumSet.of(
       RMAppState.ACCEPTED, RMAppState.RUNNING);
@@ -283,8 +283,8 @@ public class ClientRMService extends AbstractService implements
       refreshServiceAcls(conf, RMPolicyProvider.getInstance());
     }
 
-    this.displayPerUserApps  = conf.getBoolean(
-        YarnConfiguration.DISPLAY_APPS_FOR_LOGGED_IN_USER,
+    this.filterAppsByUser  = conf.getBoolean(
+        YarnConfiguration.FILTER_ENTITY_LIST_BY_USER,
         YarnConfiguration.DEFAULT_DISPLAY_APPS_FOR_LOGGED_IN_USER);
 
     this.server.start();
@@ -922,7 +922,7 @@ public class ClientRMService extends AbstractService implements
 
       // Given RM is configured to display apps per user, skip apps to which
       // this caller doesn't have access to view.
-      if (displayPerUserApps && !allowAccess) {
+      if (filterAppsByUser && !allowAccess) {
         continue;
       }
 
@@ -1840,6 +1840,6 @@ public class ClientRMService extends AbstractService implements
 
   @VisibleForTesting
   public void setDisplayPerUserApps(boolean displayPerUserApps) {
-    this.displayPerUserApps = displayPerUserApps;
+    this.filterAppsByUser = displayPerUserApps;
   }
 }

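The rename from displayPerUserApps to filterAppsByUser makes the semantics explicit: the flag does not change how apps are rendered, it removes inaccessible apps from the response entirely. A minimal sketch of flipping the flag in code, using only the constant visible in the diff above (whether a deployment sets this in yarn-site.xml or programmatically is an operational choice):

    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class EnableEntityFilter {
      public static void main(String[] args) {
        YarnConfiguration conf = new YarnConfiguration();
        // When true, list endpoints skip apps the caller cannot view.
        conf.setBoolean(YarnConfiguration.FILTER_ENTITY_LIST_BY_USER, true);
        System.out.println(conf.getBoolean(
            YarnConfiguration.FILTER_ENTITY_LIST_BY_USER, false));
      }
    }
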
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c05b5d42/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index 69c9562..864653c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -228,7 +228,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
 
   @VisibleForTesting
   boolean isCentralizedNodeLabelConfiguration = true;
-  private boolean displayPerUserApps = false;
+  private boolean filterAppsByUser = false;
 
   public final static String DELEGATION_TOKEN_HEADER =
       "Hadoop-YARN-RM-Delegation-Token";
@@ -241,8 +241,8 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
     this.conf = conf;
     isCentralizedNodeLabelConfiguration =
         YarnConfiguration.isCentralizedNodeLabelConfiguration(conf);
-    this.displayPerUserApps  = conf.getBoolean(
-        YarnConfiguration.DISPLAY_APPS_FOR_LOGGED_IN_USER,
+    this.filterAppsByUser  = conf.getBoolean(
+        YarnConfiguration.FILTER_ENTITY_LIST_BY_USER,
         YarnConfiguration.DEFAULT_DISPLAY_APPS_FOR_LOGGED_IN_USER);
   }
 
@@ -654,7 +654,7 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
       boolean allowAccess = hasAccess(rmapp, hsr);
      // When the RM is configured to filter the app list by user, skip
      // apps that this caller is not allowed to view.
-      if (displayPerUserApps && !allowAccess) {
+      if (filterAppsByUser && !allowAccess) {
         continue;
       }
 

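Both the RPC path (ClientRMService) and the REST path (RMWebServices) now share the same shape: iterate, check access, skip. A generic sketch of that pattern, with illustrative names rather than the RM's actual types:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.BiPredicate;

    public final class AccessFilter {
      // Returns only the items the caller may view when filtering is
      // enabled; returns the input unchanged when the flag is off.
      public static <U, T> List<T> visibleTo(U caller, List<T> items,
          boolean filterByUser, BiPredicate<U, T> hasAccess) {
        if (!filterByUser) {
          return items;
        }
        List<T> visible = new ArrayList<>();
        for (T item : items) {
          if (hasAccess.test(caller, item)) {
            visible.add(item);
          }
        }
        return visible;
      }
    }
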
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c05b5d42/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
index dfe04f9..a671f33 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
@@ -23,6 +23,7 @@ import java.text.ParseException;
 import java.text.SimpleDateFormat;
 import java.util.Collections;
 import java.util.Date;
+import java.util.LinkedHashSet;
 import java.util.Locale;
 import java.util.Set;
 import java.util.TimeZone;
@@ -42,12 +43,15 @@ import javax.ws.rs.core.Response;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.JettyUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineAbout;
+import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
@@ -1450,6 +1454,19 @@ public class TimelineReaderWebServices {
     long endTime = Time.monotonicNow();
     if (entities == null) {
       entities = Collections.emptySet();
+    } else if (isDisplayEntityPerUserFilterEnabled(
+        timelineReaderManager.getConfig())) {
+      Set<TimelineEntity> userEntities = new LinkedHashSet<>();
+      userEntities.addAll(entities);
+      for (TimelineEntity entity : userEntities) {
+        if (entity.getInfo() != null) {
+          String userId =
+              (String) entity.getInfo().get(FlowActivityEntity.USER_INFO_KEY);
+          if (!validateAuthUserWithEntityUser(callerUGI, userId)) {
+            entities.remove(entity);
+          }
+        }
+      }
     }
     LOG.info("Processed URL " + url +
         " (Took " + (endTime - startTime) + " ms.)");
@@ -3403,4 +3420,20 @@ public class TimelineReaderWebServices {
         "Processed URL " + url + " (Took " + (endTime - startTime) + " ms.)");
     return entities;
   }
+
+  private boolean isDisplayEntityPerUserFilterEnabled(Configuration config) {
+    return config
+        .getBoolean(YarnConfiguration.FILTER_ENTITY_LIST_BY_USER, false);
+  }
+
+  private boolean validateAuthUserWithEntityUser(UserGroupInformation ugi,
+      String entityUser) {
+    String authUser = TimelineReaderWebServicesUtils.getUserName(ugi);
+    String requestedUser = TimelineReaderWebServicesUtils.parseStr(entityUser);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(
+          "Authenticated User: " + authUser + " Requested User:" + entityUser);
+    }
+    return authUser.equals(requestedUser);
+  }
 }

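The timeline filter copies the result set into a LinkedHashSet before removing from the original, which sidesteps a ConcurrentModificationException while iterating. Since Java 8 the same effect is available in one pass with Set#removeIf; a sketch under that assumption, with a stand-in entity type (the real code keeps entities whose info map is null, and this sketch preserves that by only removing entities with a known, mismatched owner):

    import java.util.LinkedHashSet;
    import java.util.Set;

    public final class EntityFilterSketch {
      // Illustrative stand-in for TimelineEntity: only the owner
      // field that the filter consults.
      static final class Entity {
        final String owner;
        Entity(String owner) { this.owner = owner; }
      }

      // Removes entities with a known owner that is not the caller;
      // entities with an unknown owner are kept, as in the diff.
      static void keepOwnedBy(Set<Entity> entities, String authUser) {
        entities.removeIf(e -> e.owner != null && !authUser.equals(e.owner));
      }

      public static void main(String[] args) {
        Set<Entity> entities = new LinkedHashSet<>();
        entities.add(new Entity("alice"));
        entities.add(new Entity("bob"));
        entities.add(new Entity(null));
        keepOwnedBy(entities, "alice");
        System.out.println(entities.size()); // 2: alice's and the unknown one
      }
    }
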

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[15/50] [abbrv] hadoop git commit: HDDS-45. Removal of old OzoneRestClient. Contributed by Lokesh Jain.

Posted by bo...@apache.org.
HDDS-45. Removal of old OzoneRestClient. Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/774daa8d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/774daa8d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/774daa8d

Branch: refs/heads/YARN-7402
Commit: 774daa8d532f9eeee1fe8e342a8da2cfa65a8629
Parents: c05b5d4
Author: Mukul Kumar Singh <ms...@apache.org>
Authored: Thu May 24 15:53:42 2018 +0530
Committer: Mukul Kumar Singh <ms...@apache.org>
Committed: Thu May 24 15:53:42 2018 +0530

----------------------------------------------------------------------
 .../apache/hadoop/hdds/scm/XceiverClient.java   |  22 +-
 .../hadoop/ozone/web/client/OzoneBucket.java    | 646 ---------------
 .../hadoop/ozone/web/client/OzoneKey.java       |  44 -
 .../ozone/web/client/OzoneRestClient.java       | 804 -------------------
 .../hadoop/ozone/web/client/OzoneVolume.java    | 584 --------------
 .../hadoop/ozone/web/client/package-info.java   |  34 -
 .../hadoop/ozone/MiniOzoneClusterImpl.java      |   3 +-
 .../apache/hadoop/ozone/RatisTestHelper.java    |  14 +-
 .../ozone/web/TestOzoneRestWithMiniCluster.java | 207 ++---
 .../hadoop/ozone/web/client/TestBuckets.java    | 193 +++--
 .../ozone/web/client/TestBucketsRatis.java      |  15 +-
 .../hadoop/ozone/web/client/TestKeys.java       | 286 ++++---
 .../hadoop/ozone/web/client/TestKeysRatis.java  |  29 +-
 .../hadoop/ozone/web/client/TestVolume.java     | 285 +++----
 .../ozone/web/client/TestVolumeRatis.java       |  29 +-
 15 files changed, 548 insertions(+), 2647 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/774daa8d/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
index 6d33cd4..42e02f9 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java
@@ -54,6 +54,7 @@ public class XceiverClient extends XceiverClientSpi {
   private Bootstrap b;
   private EventLoopGroup group;
   private final Semaphore semaphore;
+  private boolean closed = false;
 
   /**
    * Constructs a client that can communicate with the Container framework on
@@ -74,6 +75,10 @@ public class XceiverClient extends XceiverClientSpi {
 
   @Override
   public void connect() throws Exception {
+    if (closed) {
+      throw new IOException("This client is closed.");
+    }
+
     if (channel != null && channel.isActive()) {
       throw new IOException("This client is already connected to a host.");
     }
@@ -97,6 +102,18 @@ public class XceiverClient extends XceiverClientSpi {
     channel = b.connect(leader.getHostName(), port).sync().channel();
   }
 
+  public void reconnect() throws IOException {
+    try {
+      connect();
+      if (channel == null || !channel.isActive()) {
+        throw new IOException("This channel is not connected.");
+      }
+    } catch (Exception e) {
+      LOG.error("Error while connecting: ", e);
+      throw new IOException(e);
+    }
+  }
+
   /**
   * Returns true if the xceiver client is connected to a server.
    *
@@ -109,6 +126,7 @@ public class XceiverClient extends XceiverClientSpi {
 
   @Override
   public void close() {
+    closed = true;
     if (group != null) {
       group.shutdownGracefully().awaitUninterruptibly();
     }
@@ -124,7 +142,7 @@ public class XceiverClient extends XceiverClientSpi {
       ContainerProtos.ContainerCommandRequestProto request) throws IOException {
     try {
       if ((channel == null) || (!channel.isActive())) {
-        throw new IOException("This channel is not connected.");
+        reconnect();
       }
       XceiverClientHandler handler =
           channel.pipeline().get(XceiverClientHandler.class);
@@ -160,7 +178,7 @@ public class XceiverClient extends XceiverClientSpi {
       sendCommandAsync(ContainerProtos.ContainerCommandRequestProto request)
       throws IOException, ExecutionException, InterruptedException {
     if ((channel == null) || (!channel.isActive())) {
-      throw new IOException("This channel is not connected.");
+      reconnect();
     }
     XceiverClientHandler handler =
         channel.pipeline().get(XceiverClientHandler.class);

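The change replaces fail-fast sends with lazy reconnection, while the new closed flag ensures a deliberately closed client never silently reconnects. A minimal sketch of that state machine, with the Netty bootstrap replaced by a boolean for brevity (names are illustrative):

    import java.io.IOException;

    public class LazyChannel {
      private boolean closed = false;
      private boolean connected = false;

      // Fails fast if the client was closed; mirrors the guard added
      // to XceiverClient#connect().
      public synchronized void connect() throws IOException {
        if (closed) {
          throw new IOException("This client is closed.");
        }
        connected = true; // stand-in for the real channel setup
      }

      // Reconnects on demand instead of throwing, as sendCommand and
      // sendCommandAsync now do.
      public synchronized void send(String request) throws IOException {
        if (!connected) {
          connect();
        }
        System.out.println("sent: " + request);
      }

      public synchronized void close() {
        closed = true;
        connected = false;
      }
    }
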
http://git-wip-us.apache.org/repos/asf/hadoop/blob/774daa8d/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java
deleted file mode 100644
index 3183d03..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java
+++ /dev/null
@@ -1,646 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.client;
-
-import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.io.IOUtils;
-
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.OzoneClientException;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.client.rest.headers.Header;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.web.response.BucketInfo;
-import org.apache.hadoop.ozone.web.response.KeyInfo;
-import org.apache.hadoop.ozone.web.response.ListKeys;
-
-import static org.apache.hadoop.hdds.server.ServerUtils.releaseConnection;
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpResponse;
-import org.apache.http.client.methods.HttpDelete;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.client.methods.HttpPut;
-import org.apache.http.client.utils.URIBuilder;
-import org.apache.http.entity.ContentType;
-import org.apache.http.entity.FileEntity;
-import org.apache.http.entity.InputStreamEntity;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClientBuilder;
-import org.apache.http.util.EntityUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Strings;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.nio.file.Path;
-import java.util.LinkedList;
-import java.util.List;
-
-import static java.net.HttpURLConnection.HTTP_CREATED;
-import static java.net.HttpURLConnection.HTTP_OK;
-import static org.apache.hadoop.ozone.web.utils.OzoneUtils.ENCODING;
-import static org.apache.hadoop.ozone.web.utils.OzoneUtils.ENCODING_NAME;
-
-/**
- * A Bucket class that represents an Ozone Bucket.
- */
-public class OzoneBucket {
-  static final Logger LOG = LoggerFactory.getLogger(OzoneBucket.class);
-
-  private BucketInfo bucketInfo;
-  private OzoneVolume volume;
-
-  /**
-   * Constructor for bucket.
-   *
-   * @param info   - BucketInfo
-   * @param volume - OzoneVolume Object that contains this bucket
-   */
-  public OzoneBucket(BucketInfo info, OzoneVolume volume) {
-    this.bucketInfo = info;
-    this.volume = volume;
-  }
-
-  /**
-   * Gets bucket Info.
-   *
-   * @return BucketInfo
-   */
-  public BucketInfo getBucketInfo() {
-    return bucketInfo;
-  }
-
-  /**
-   * Sets Bucket Info.
-   *
-   * @param bucketInfo BucketInfo
-   */
-  public void setBucketInfo(BucketInfo bucketInfo) {
-    this.bucketInfo = bucketInfo;
-  }
-
-  /**
-   * Returns the parent volume class.
-   *
-   * @return - OzoneVolume
-   */
-  OzoneVolume getVolume() {
-    return volume;
-  }
-
-  /**
-   * Returns bucket name.
-   *
-   * @return Bucket Name
-   */
-  public String getBucketName() {
-    return bucketInfo.getBucketName();
-  }
-
-  /**
-   * Returns the Acls on the bucket.
-   *
-   * @return - Acls
-   */
-  public List<OzoneAcl> getAcls() {
-    return bucketInfo.getAcls();
-  }
-
-  /**
-   * Return versioning info on the bucket - Enabled or disabled.
-   *
-   * @return - Version Enum
-   */
-  public OzoneConsts.Versioning getVersioning() {
-    return bucketInfo.getVersioning();
-  }
-
-  /**
-   * Gets the Storage class for the bucket.
-   *
-   * @return Storage Class Enum
-   */
-  public StorageType getStorageType() {
-    return bucketInfo.getStorageType();
-  }
-
-  /**
-   * Gets the creation time of the bucket.
-   *
-   * @return String
-   */
-  public String getCreatedOn() {
-    return bucketInfo.getCreatedOn();
-  }
-
-  /**
-   * Puts an Object in Ozone bucket.
-   *
-   * @param keyName - Name of the key
-   * @param data    - Data that you want to put
-   * @throws OzoneException
-   */
-  public void putKey(String keyName, String data) throws OzoneException {
-    if ((keyName == null) || keyName.isEmpty()) {
-      throw new OzoneClientException("Invalid key Name.");
-    }
-
-    if (data == null) {
-      throw new OzoneClientException("Invalid data.");
-    }
-
-    HttpPut putRequest = null;
-    InputStream is = null;
-    try (CloseableHttpClient httpClient = HddsClientUtils.newHttpClient()) {
-      URIBuilder builder = new URIBuilder(volume.getClient().getEndPointURI());
-      builder.setPath("/" + getVolume().getVolumeName() + "/" + getBucketName()
-          + "/" + keyName).build();
-
-      putRequest = getVolume().getClient().getHttpPut(builder.toString());
-
-      is = new ByteArrayInputStream(data.getBytes(ENCODING));
-      putRequest.setEntity(new InputStreamEntity(is, data.length()));
-      is.mark(data.length());
-      try {
-        putRequest.setHeader(Header.CONTENT_MD5, DigestUtils.md5Hex(is));
-      } finally {
-        is.reset();
-      }
-      executePutKey(putRequest, httpClient);
-    } catch (IOException | URISyntaxException ex) {
-      throw new OzoneClientException(ex.getMessage(), ex);
-    } finally {
-      IOUtils.closeStream(is);
-      releaseConnection(putRequest);
-    }
-  }
-
-  /**
-   * Puts an Object in Ozone Bucket.
-   *
-   * @param dataFile - File from which you want the data to be put. The key
-   *                 name will be the same as the file name, devoid of any path.
-   * @throws OzoneException
-   */
-  public void putKey(File dataFile) throws OzoneException {
-    if (dataFile == null) {
-      throw new OzoneClientException("Invalid file object.");
-    }
-    String keyName = dataFile.getName();
-    putKey(keyName, dataFile);
-  }
-
-  /**
-   * Puts a Key in Ozone Bucket.
-   *
-   * @param keyName - Name of the Key
-   * @param file    - File that gets read and put into Ozone.
-   * @throws OzoneException
-   */
-  public void putKey(String keyName, File file)
-      throws OzoneException {
-
-    if ((keyName == null) || keyName.isEmpty()) {
-      throw new OzoneClientException("Invalid key Name");
-    }
-
-    if (file == null) {
-      throw new OzoneClientException("Invalid data stream");
-    }
-
-    HttpPut putRequest = null;
-    FileInputStream fis = null;
-    try (CloseableHttpClient httpClient = HddsClientUtils.newHttpClient()) {
-      URIBuilder builder = new URIBuilder(volume.getClient().getEndPointURI());
-      builder.setPath("/" + getVolume().getVolumeName() + "/" + getBucketName()
-          + "/" + keyName).build();
-
-      putRequest = getVolume().getClient().getHttpPut(builder.toString());
-
-      FileEntity fileEntity = new FileEntity(file, ContentType
-          .APPLICATION_OCTET_STREAM);
-      putRequest.setEntity(fileEntity);
-
-      fis = new FileInputStream(file);
-      putRequest.setHeader(Header.CONTENT_MD5, DigestUtils.md5Hex(fis));
-      executePutKey(putRequest, httpClient);
-
-    } catch (IOException | URISyntaxException ex) {
-      final OzoneClientException orce = new OzoneClientException(
-          "Failed to putKey: keyName=" + keyName + ", file=" + file);
-      orce.initCause(ex);
-      LOG.trace("", orce);
-      throw orce;
-    } finally {
-      IOUtils.closeStream(fis);
-      releaseConnection(putRequest);
-    }
-  }
-
-  /**
-   * executePutKey executes the Put request against the Ozone Server.
-   *
-   * @param putRequest - Http Put Request
-   * @param httpClient - httpClient
-   * @throws OzoneException
-   * @throws IOException
-   */
-  public static void executePutKey(HttpPut putRequest,
-      CloseableHttpClient httpClient) throws OzoneException, IOException {
-    HttpEntity entity = null;
-    try {
-      HttpResponse response = httpClient.execute(putRequest);
-      int errorCode = response.getStatusLine().getStatusCode();
-      entity = response.getEntity();
-
-      if ((errorCode == HTTP_OK) || (errorCode == HTTP_CREATED)) {
-        return;
-      }
-
-      if (entity == null) {
-        throw new OzoneClientException("Unexpected null in http payload");
-      }
-
-      throw OzoneException.parse(EntityUtils.toString(entity));
-    } finally {
-      if (entity != null) {
-        EntityUtils.consumeQuietly(entity);
-      }
-    }
-  }
-
-  /**
-   * Gets a key from the Ozone server and writes it to the file pointed to by
-   * the downloadTo Path.
-   *
-   * @param keyName    - Key Name in Ozone.
-   * @param downloadTo File Name to download the Key's Data to
-   */
-  public void getKey(String keyName, Path downloadTo) throws OzoneException {
-
-    if ((keyName == null) || keyName.isEmpty()) {
-      throw new OzoneClientException("Invalid key Name");
-    }
-
-    if (downloadTo == null) {
-      throw new OzoneClientException("Invalid download path");
-    }
-
-    FileOutputStream outPutFile = null;
-    HttpGet getRequest = null;
-    try (CloseableHttpClient httpClient = HddsClientUtils.newHttpClient()) {
-      outPutFile = new FileOutputStream(downloadTo.toFile());
-
-      URIBuilder builder = new URIBuilder(volume.getClient().getEndPointURI());
-      builder.setPath("/" + getVolume().getVolumeName() + "/" + getBucketName()
-          + "/" + keyName).build();
-
-      getRequest = getVolume().getClient().getHttpGet(builder.toString());
-      executeGetKey(getRequest, httpClient, outPutFile);
-      outPutFile.flush();
-    } catch (IOException | URISyntaxException ex) {
-      throw new OzoneClientException(ex.getMessage(), ex);
-    } finally {
-      IOUtils.closeStream(outPutFile);
-      releaseConnection(getRequest);
-    }
-  }
-
-  /**
-   * Returns the data part of the key as a string.
-   *
-   * @param keyName - KeyName to get
-   * @return String - Data
-   * @throws OzoneException
-   */
-  public String getKey(String keyName) throws OzoneException {
-
-    if ((keyName == null) || keyName.isEmpty()) {
-      throw new OzoneClientException("Invalid key Name");
-    }
-
-    HttpGet getRequest = null;
-    ByteArrayOutputStream outPutStream = null;
-    try (CloseableHttpClient httpClient = HddsClientUtils.newHttpClient()) {
-      outPutStream = new ByteArrayOutputStream();
-
-      URIBuilder builder = new URIBuilder(volume.getClient().getEndPointURI());
-
-      builder.setPath("/" + getVolume().getVolumeName() + "/" + getBucketName()
-          + "/" + keyName).build();
-
-      getRequest = getVolume().getClient().getHttpGet(builder.toString());
-      executeGetKey(getRequest, httpClient, outPutStream);
-      return outPutStream.toString(ENCODING_NAME);
-    } catch (IOException | URISyntaxException ex) {
-      throw new OzoneClientException(ex.getMessage(), ex);
-    } finally {
-      IOUtils.closeStream(outPutStream);
-      releaseConnection(getRequest);
-    }
-
-  }
-
-  /**
-   * Executes get key and returns the data.
-   *
-   * @param getRequest - http Get Request
-   * @param httpClient - Client
-   * @param stream     - Stream to write data to.
-   * @throws IOException
-   * @throws OzoneException
-   */
-  public static void executeGetKey(HttpGet getRequest,
-      CloseableHttpClient httpClient, OutputStream stream)
-      throws IOException, OzoneException {
-
-    HttpEntity entity = null;
-    try {
-
-      HttpResponse response = httpClient.execute(getRequest);
-      int errorCode = response.getStatusLine().getStatusCode();
-      entity = response.getEntity();
-
-      if (errorCode == HTTP_OK) {
-        entity.writeTo(stream);
-        return;
-      }
-
-      if (entity == null) {
-        throw new OzoneClientException("Unexpected null in http payload");
-      }
-
-      throw OzoneException.parse(EntityUtils.toString(entity));
-    } finally {
-      if (entity != null) {
-        EntityUtils.consumeQuietly(entity);
-      }
-    }
-  }
-
-  /**
-   * Deletes a key in this bucket.
-   *
-   * @param keyName - Name of the Key
-   * @throws OzoneException
-   */
-  public void deleteKey(String keyName) throws OzoneException {
-
-    if ((keyName == null) || keyName.isEmpty()) {
-      throw new OzoneClientException("Invalid key Name");
-    }
-
-    HttpDelete deleteRequest = null;
-    try (CloseableHttpClient httpClient = HddsClientUtils.newHttpClient()) {
-      URIBuilder builder = new URIBuilder(volume.getClient().getEndPointURI());
-      builder.setPath("/" + getVolume().getVolumeName() + "/" + getBucketName()
-          + "/" + keyName).build();
-
-      deleteRequest = getVolume()
-          .getClient().getHttpDelete(builder.toString());
-      executeDeleteKey(deleteRequest, httpClient);
-    } catch (IOException | URISyntaxException ex) {
-      throw new OzoneClientException(ex.getMessage(), ex);
-    } finally {
-      releaseConnection(deleteRequest);
-    }
-  }
-
-  /**
-   * Executes deleteKey.
-   *
-   * @param deleteRequest - http Delete Request
-   * @param httpClient    - Client
-   * @throws IOException
-   * @throws OzoneException
-   */
-  private void executeDeleteKey(HttpDelete deleteRequest,
-      CloseableHttpClient httpClient)
-      throws IOException, OzoneException {
-
-    HttpEntity entity = null;
-    try {
-
-      HttpResponse response = httpClient.execute(deleteRequest);
-      int errorCode = response.getStatusLine().getStatusCode();
-      entity = response.getEntity();
-
-      if (errorCode == HTTP_OK) {
-        return;
-      }
-
-      if (entity == null) {
-        throw new OzoneClientException("Unexpected null in http payload");
-      }
-
-      throw OzoneException.parse(EntityUtils.toString(entity));
-    } finally {
-      if (entity != null) {
-        EntityUtils.consumeQuietly(entity);
-      }
-    }
-  }
-
-  /**
-   * List all keys in a bucket.
-   *
-   * @param resultLength The max length of listing result.
-   * @param previousKey The key from which listing should start;
-   *                    this key is excluded from the result.
-   * @param prefix The prefix that returned keys must start with.
-   * @return List of OzoneKeys
-   * @throws OzoneException
-   */
-  public List<OzoneKey> listKeys(String resultLength, String previousKey,
-      String prefix) throws OzoneException {
-    HttpGet getRequest = null;
-    try (CloseableHttpClient httpClient = HddsClientUtils.newHttpClient()) {
-      OzoneRestClient client = getVolume().getClient();
-      URIBuilder builder = new URIBuilder(volume.getClient().getEndPointURI());
-      builder.setPath("/" + getVolume().getVolumeName() + "/" + getBucketName())
-          .build();
-
-      if (!Strings.isNullOrEmpty(resultLength)) {
-        builder.addParameter(Header.OZONE_LIST_QUERY_MAXKEYS, resultLength);
-      }
-
-      if (!Strings.isNullOrEmpty(previousKey)) {
-        builder.addParameter(Header.OZONE_LIST_QUERY_PREVKEY, previousKey);
-      }
-
-      if (!Strings.isNullOrEmpty(prefix)) {
-        builder.addParameter(Header.OZONE_LIST_QUERY_PREFIX, prefix);
-      }
-
-      final String uri = builder.toString();
-      getRequest = client.getHttpGet(uri);
-      LOG.trace("listKeys URI={}", uri);
-      return executeListKeys(getRequest, httpClient);
-
-    } catch (IOException | URISyntaxException e) {
-      throw new OzoneClientException(e.getMessage(), e);
-    } finally {
-      releaseConnection(getRequest);
-    }
-  }
-
-  /**
-   * List keys in a bucket with the provided prefix, with paging results.
-   *
-   * @param prefix The prefix of the object keys
-   * @param maxResult max size per response
-   * @param prevKey the previous key for paging
-   */
-  public List<OzoneKey> listKeys(String prefix, int maxResult, String prevKey)
-      throws OzoneException {
-    HttpGet getRequest = null;
-    try {
-      final URI uri =  new URIBuilder(volume.getClient().getEndPointURI())
-          .setPath(OzoneConsts.KSM_KEY_PREFIX + getVolume().getVolumeName() +
-              OzoneConsts.KSM_KEY_PREFIX + getBucketName())
-          .setParameter(Header.OZONE_LIST_QUERY_PREFIX, prefix)
-          .setParameter(Header.OZONE_LIST_QUERY_MAXKEYS,
-              String.valueOf(maxResult))
-          .setParameter(Header.OZONE_LIST_QUERY_PREVKEY, prevKey)
-          .build();
-      final OzoneRestClient client = getVolume().getClient();
-      getRequest = client.getHttpGet(uri.toString());
-      return executeListKeys(getRequest, HttpClientBuilder.create().build());
-    } catch (IOException | URISyntaxException e) {
-      throw new OzoneClientException(e.getMessage());
-    } finally {
-      releaseConnection(getRequest);
-    }
-  }
-
-  /**
-   * Execute list Key.
-   *
-   * @param getRequest - HttpGet
-   * @param httpClient - HttpClient
-   * @return List<OzoneKey>
-   * @throws IOException
-   * @throws OzoneException
-   */
-  public static List<OzoneKey> executeListKeys(HttpGet getRequest,
-      CloseableHttpClient httpClient) throws IOException, OzoneException {
-    HttpEntity entity = null;
-    List<OzoneKey> ozoneKeyList = new LinkedList<OzoneKey>();
-    try {
-      HttpResponse response = httpClient.execute(getRequest);
-      int errorCode = response.getStatusLine().getStatusCode();
-
-      entity = response.getEntity();
-
-      if (entity == null) {
-        throw new OzoneClientException("Unexpected null in http payload");
-      }
-      if (errorCode == HTTP_OK) {
-        String temp = EntityUtils.toString(entity);
-        ListKeys keyList = ListKeys.parse(temp);
-
-        for (KeyInfo info : keyList.getKeyList()) {
-          ozoneKeyList.add(new OzoneKey(info));
-        }
-        return ozoneKeyList;
-
-      } else {
-        throw OzoneException.parse(EntityUtils.toString(entity));
-      }
-    } finally {
-      if (entity != null) {
-        EntityUtils.consumeQuietly(entity);
-      }
-    }
-  }
-
-  /**
-   * Get info of the specified key.
-   */
-  public OzoneKey getKeyInfo(String keyName) throws OzoneException {
-    if ((keyName == null) || keyName.isEmpty()) {
-      throw new OzoneClientException(
-          "Unable to get key info, key name is null or empty");
-    }
-
-    HttpGet getRequest = null;
-    try (CloseableHttpClient httpClient = HddsClientUtils.newHttpClient()) {
-      OzoneRestClient client = getVolume().getClient();
-      URIBuilder builder = new URIBuilder(volume.getClient().getEndPointURI());
-      builder
-          .setPath("/" + getVolume().getVolumeName() + "/" + getBucketName()
-              + "/" + keyName)
-          .setParameter(Header.OZONE_INFO_QUERY_TAG,
-              Header.OZONE_INFO_QUERY_KEY)
-          .build();
-
-      getRequest = client.getHttpGet(builder.toString());
-      return executeGetKeyInfo(getRequest, httpClient);
-    } catch (IOException | URISyntaxException e) {
-      throw new OzoneClientException(e.getMessage(), e);
-    } finally {
-      releaseConnection(getRequest);
-    }
-  }
-
-  /**
-   * Execute get Key info.
-   *
-   * @param getRequest - HttpGet
-   * @param httpClient - HttpClient
-   * @return OzoneKey
-   * @throws IOException
-   * @throws OzoneException
-   */
-  private OzoneKey executeGetKeyInfo(HttpGet getRequest,
-      CloseableHttpClient httpClient) throws IOException, OzoneException {
-    HttpEntity entity = null;
-    try {
-      HttpResponse response = httpClient.execute(getRequest);
-      int errorCode = response.getStatusLine().getStatusCode();
-      entity = response.getEntity();
-      if (entity == null) {
-        throw new OzoneClientException("Unexpected null in http payload");
-      }
-
-      if (errorCode == HTTP_OK) {
-        OzoneKey key = new OzoneKey(
-            KeyInfo.parse(EntityUtils.toString(entity)));
-        return key;
-      }
-      throw OzoneException.parse(EntityUtils.toString(entity));
-    } finally {
-      if (entity != null) {
-        EntityUtils.consumeQuietly(entity);
-      }
-    }
-  }
-}

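One detail of the removed client worth noting: putKey computed the Content-MD5 header by reading the upload stream and then rewinding it with mark/reset, so the same bytes could still be sent as the request body. A self-contained sketch of that technique using commons-codec (the helper class here is illustrative, not part of the removed API):

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;
    import org.apache.commons.codec.digest.DigestUtils;

    public final class ContentMd5 {
      // Reads the stream to compute the hex MD5, then resets it so the
      // same bytes can still be uploaded; requires mark/reset support.
      public static String md5AndRewind(InputStream in, int length)
          throws IOException {
        if (!in.markSupported()) {
          throw new IOException("Stream must support mark/reset");
        }
        in.mark(length);
        try {
          return DigestUtils.md5Hex(in);
        } finally {
          in.reset();
        }
      }

      public static void main(String[] args) throws IOException {
        byte[] data = "hello".getBytes(StandardCharsets.UTF_8);
        System.out.println(
            md5AndRewind(new ByteArrayInputStream(data), data.length));
      }
    }
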
http://git-wip-us.apache.org/repos/asf/hadoop/blob/774daa8d/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneKey.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneKey.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneKey.java
deleted file mode 100644
index 5a3a0c4..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneKey.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.client;
-
-import org.apache.hadoop.ozone.web.response.KeyInfo;
-
-/**
- * Client side representation of an ozone Key.
- */
-public class OzoneKey {
-  private KeyInfo keyInfo;
-
-  /**
-   * Constructor for Ozone Key.
-   * @param keyInfo - Key Info
-   */
-  public OzoneKey(KeyInfo keyInfo) {
-    this.keyInfo = keyInfo;
-  }
-
-  /**
-   * Returns Key Info.
-   * @return Object Info
-   */
-  public KeyInfo getObjectInfo() {
-    return keyInfo;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/774daa8d/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java
deleted file mode 100644
index 8373f67..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java
+++ /dev/null
@@ -1,804 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.client;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Strings;
-
-import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ozone.client.OzoneClientException;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.client.rest.headers.Header;
-import org.apache.hadoop.ozone.web.response.ListVolumes;
-import org.apache.hadoop.ozone.web.response.VolumeInfo;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.util.Time;
-
-import static org.apache.hadoop.hdds.server.ServerUtils.releaseConnection;
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpResponse;
-import org.apache.http.client.methods.HttpDelete;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.client.methods.HttpPut;
-import org.apache.http.client.methods.HttpRequestBase;
-import org.apache.http.client.utils.URIBuilder;
-import org.apache.http.entity.ContentType;
-import org.apache.http.entity.FileEntity;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.util.EntityUtils;
-
-import javax.ws.rs.core.HttpHeaders;
-import java.io.Closeable;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.nio.file.Path;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Locale;
-
-import static java.net.HttpURLConnection.HTTP_CREATED;
-import static java.net.HttpURLConnection.HTTP_OK;
-
-/**
- * Ozone client that connects to an Ozone server. Please note that this class
- * is not thread-safe.
- */
-public class OzoneRestClient implements Closeable {
-  private URI endPointURI;
-  private String userAuth;
-
-  /**
-   * Constructor for OzoneRestClient.
-   */
-  public OzoneRestClient() {
-  }
-
-  /**
-   * Constructor for OzoneRestClient.
-   */
-  public OzoneRestClient(String ozoneURI)
-      throws OzoneException, URISyntaxException {
-    setEndPoint(ozoneURI);
-  }
-
-  /**
-   * Constructor for OzoneRestClient.
-   */
-  public OzoneRestClient(String ozoneURI, String userAuth)
-      throws OzoneException, URISyntaxException {
-    setEndPoint(ozoneURI);
-    setUserAuth(userAuth);
-  }
-
-  /**
-   * Returns the end Point.
-   *
-   * @return String
-   */
-  public URI getEndPointURI() {
-    return endPointURI;
-  }
-
-  /**
-   * Sets the End Point info using a URI.
-   *
-   * @param endPointURI - URI
-   * @throws OzoneException
-   */
-  public void setEndPointURI(URI endPointURI) throws OzoneException {
-    if ((endPointURI == null) || (endPointURI.toString().isEmpty())) {
-      throw new OzoneClientException("Invalid ozone URI");
-    }
-    this.endPointURI = endPointURI;
-  }
-
-  /**
-   * Set endPoint.
-   *
-   * @param clusterFQDN - cluster FQDN.
-   */
-  public void setEndPoint(String clusterFQDN) throws
-      OzoneException, URISyntaxException {
-    setEndPointURI(new URI(clusterFQDN));
-  }
-
-  /**
-   * Get user Auth String.
-   *
-   * @return - User Auth String
-   */
-  public String getUserAuth() {
-    return this.userAuth;
-  }
-
-  /**
-   * Set User Auth.
-   *
-   * @param userAuth - User Auth String
-   */
-  public void setUserAuth(String userAuth) {
-    this.userAuth = userAuth;
-  }
-
-  /**
-   * create volume.
-   *
-   * @param volumeName - volume name, 3-63 characters, lowercase letters.
-   * @param onBehalfOf - The user on whose behalf we are making the call
-   * @param quota      - Quotas are specified in a specific format:
-   *                   integer(MB|GB|TB), for example 100TB.
-   * @throws OzoneClientException
-   */
-  public OzoneVolume createVolume(String volumeName, String onBehalfOf,
-                                  String quota) throws OzoneException {
-    HttpPost httpPost = null;
-    try (CloseableHttpClient httpClient = newHttpClient()) {
-      OzoneUtils.verifyResourceName(volumeName);
-
-      URIBuilder builder = new URIBuilder(endPointURI);
-      builder.setPath("/" + volumeName);
-      if (quota != null) {
-        builder.setParameter(Header.OZONE_QUOTA_QUERY_TAG, quota);
-      }
-
-      httpPost = getHttpPost(onBehalfOf, builder.build().toString());
-      executeCreateVolume(httpPost, httpClient);
-      return getVolume(volumeName);
-    } catch (IOException | URISyntaxException | IllegalArgumentException ex) {
-      throw new OzoneClientException(ex.getMessage(), ex);
-    } finally {
-      releaseConnection(httpPost);
-    }
-  }
-
-  /**
-   * Returns information about an existing Volume. If the Volume does not
-   * exist, or if the user does not have access rights, an OzoneException is
-   * thrown.
-   *
-   * @param volumeName - volume name, 3-63 characters, lowercase letters.
-   * @return OzoneVolume Ozone Client Volume Class.
-   * @throws OzoneException
-   */
-  public OzoneVolume getVolume(String volumeName) throws OzoneException {
-    HttpGet httpGet = null;
-    try (CloseableHttpClient httpClient = newHttpClient()) {
-      OzoneUtils.verifyResourceName(volumeName);
-      URIBuilder builder = new URIBuilder(endPointURI);
-      builder.setPath("/" + volumeName)
-          .setParameter(Header.OZONE_INFO_QUERY_TAG,
-              Header.OZONE_INFO_QUERY_VOLUME)
-          .build();
-
-      httpGet = getHttpGet(builder.toString());
-      return executeInfoVolume(httpGet, httpClient);
-    } catch (IOException | URISyntaxException | IllegalArgumentException ex) {
-      throw new OzoneClientException(ex.getMessage(), ex);
-    } finally {
-      releaseConnection(httpGet);
-    }
-  }
-
-  /**
-   * List all the volumes owned by the user, or owned by the user specified
-   * in the onBehalfOf string.
-   *
-   * @param onBehalfOf
-   *  User name of the user if it is not the caller. For example,
-   *  an admin wants to list some other user's volumes.
-   * @param prefix
-   *   Return only volumes that match this prefix.
-   * @param maxKeys
-   *   Maximum number of results to return; if the result set
-   *   is smaller than the requested size, the list is
-   *   complete.
-   * @param previousVolume
-   *   The previous volume name.
-   * @return List of Volumes
-   * @throws OzoneException
-   */
-  public List<OzoneVolume> listVolumes(String onBehalfOf, String prefix,
-      int maxKeys, String previousVolume) throws OzoneException {
-    HttpGet httpGet = null;
-    try (CloseableHttpClient httpClient = newHttpClient()) {
-      URIBuilder builder = new URIBuilder(endPointURI);
-      if (!Strings.isNullOrEmpty(prefix)) {
-        builder.addParameter(Header.OZONE_LIST_QUERY_PREFIX, prefix);
-      }
-
-      if (maxKeys > 0) {
-        builder.addParameter(Header.OZONE_LIST_QUERY_MAXKEYS, Integer
-            .toString(maxKeys));
-      }
-
-      if (!Strings.isNullOrEmpty(previousVolume)) {
-        builder.addParameter(Header.OZONE_LIST_QUERY_PREVKEY,
-            previousVolume);
-      }
-
-      builder.setPath("/").build();
-
-      httpGet = getHttpGet(builder.toString());
-      if (onBehalfOf != null) {
-        httpGet.addHeader(Header.OZONE_USER, onBehalfOf);
-      }
-      return executeListVolume(httpGet, httpClient);
-    } catch (IOException | URISyntaxException ex) {
-      throw new OzoneClientException(ex.getMessage(), ex);
-    } finally {
-      releaseConnection(httpGet);
-    }
-  }
-
-  /**
-   * List all the volumes owned by the user, or owned by the user specified
-   * in the onBehalfOf string.
-   *
-   * @param onBehalfOf - User name of the user if it is not the caller. For
-   *                   example, an admin wants to list some other user's
-   *                   volumes.
-   * @param prefix     - Return only volumes that match this prefix.
-   * @param maxKeys    - Maximum number of results to return; if the result set
-   *                   is smaller than the requested size, the list is
-   *                   complete.
-   * @param prevKey    - The last key that client got, server will continue
-   *                   returning results from that point.
-   * @return List of Volumes
-   * @throws OzoneException
-   */
-  public List<OzoneVolume> listVolumes(String onBehalfOf, String prefix,
-      int maxKeys, OzoneVolume prevKey) throws OzoneException {
-    String volumeName = null;
-
-    if (prevKey != null) {
-      volumeName = prevKey.getVolumeName();
-    }
-
-    return listVolumes(onBehalfOf, prefix, maxKeys, volumeName);
-  }
-
-  /**
-   * List volumes of the current user, or, if onBehalfOf is not null, list
-   * volumes owned by that user. You need admin privileges to read other
-   * users' volume lists.
-   *
-   * @param onBehalfOf - Name of the user you want to get volume list
-   * @return - Volume list.
-   * @throws OzoneException
-   */
-  public List<OzoneVolume> listVolumes(String onBehalfOf)
-      throws OzoneException {
-    return listVolumes(onBehalfOf, null,
-        Integer.parseInt(Header.OZONE_DEFAULT_LIST_SIZE), StringUtils.EMPTY);
-  }
-
-  /**
-   * List all volumes in a cluster. This can be invoked only by an Admin.
-   *
-   * @param prefix  - Returns only volumes that match this prefix.
-   * @param maxKeys - Maximum number of keys to return
-   * @param prevKey - Last Ozone Volume from the last Iteration.
-   * @return List of Volumes
-   * @throws OzoneException
-   */
-  public List<OzoneVolume> listAllVolumes(String prefix, int maxKeys,
-      OzoneVolume prevKey) throws OzoneException {
-    HttpGet httpGet = null;
-    try (CloseableHttpClient httpClient = newHttpClient()) {
-      URIBuilder builder = new URIBuilder(endPointURI);
-      if (prefix != null) {
-        builder.addParameter(Header.OZONE_LIST_QUERY_PREFIX, prefix);
-      }
-
-      if (maxKeys > 0) {
-        builder.addParameter(Header.OZONE_LIST_QUERY_MAXKEYS, Integer
-            .toString(maxKeys));
-      }
-
-      if (prevKey != null) {
-        builder.addParameter(Header.OZONE_LIST_QUERY_PREVKEY,
-            prevKey.getOwnerName()+ "/" + prevKey.getVolumeName());
-      }
-
-      builder.addParameter(Header.OZONE_LIST_QUERY_ROOTSCAN, "true");
-      builder.setPath("/").build();
-      httpGet = getHttpGet(builder.toString());
-      return executeListVolume(httpGet, httpClient);
-
-    } catch (IOException | URISyntaxException ex) {
-      throw new OzoneClientException(ex.getMessage(), ex);
-    } finally {
-      releaseConnection(httpGet);
-    }
-  }
-
-  /**
-   * Delete a given volume.
-   *
-   * @param volumeName - volume to be deleted.
-   * @throws OzoneException - Ozone Exception
-   */
-  public void deleteVolume(String volumeName) throws OzoneException {
-    HttpDelete httpDelete = null;
-    try (CloseableHttpClient httpClient = newHttpClient()) {
-      OzoneUtils.verifyResourceName(volumeName);
-      URIBuilder builder = new URIBuilder(endPointURI);
-      builder.setPath("/" + volumeName).build();
-
-      httpDelete = getHttpDelete(builder.toString());
-      executeDeleteVolume(httpDelete, httpClient);
-    } catch (IOException | URISyntaxException | IllegalArgumentException ex) {
-      throw new OzoneClientException(ex.getMessage(), ex);
-    } finally {
-      releaseConnection(httpDelete);
-    }
-  }
-
-  /**
-   * Sets the Volume Owner.
-   *
-   * @param volumeName - Volume Name
-   * @param newOwner   - New Owner Name
-   * @throws OzoneException
-   */
-  public void setVolumeOwner(String volumeName, String newOwner)
-      throws OzoneException {
-    HttpPut putRequest = null;
-    if (newOwner == null || newOwner.isEmpty()) {
-      throw new OzoneClientException("Invalid new owner name");
-    }
-    try (CloseableHttpClient httpClient = newHttpClient()) {
-      OzoneUtils.verifyResourceName(volumeName);
-      URIBuilder builder = new URIBuilder(endPointURI);
-      builder.setPath("/" + volumeName).build();
-
-      putRequest = getHttpPut(builder.toString());
-      putRequest.addHeader(Header.OZONE_USER, newOwner);
-      executePutVolume(putRequest, httpClient);
-
-    } catch (URISyntaxException | IllegalArgumentException | IOException ex) {
-      throw new OzoneClientException(ex.getMessage(), ex);
-    } finally {
-      releaseConnection(putRequest);
-    }
-  }
-
-  /**
-   * Sets the Volume Quota. Quotas are specified in a specific format:
-   * <integer>(MB|GB|TB), for example 100TB.
-   * <p>
-   * To remove a quota, specify Header.OZONE_QUOTA_REMOVE.
-   *
-   * @param volumeName - volume name
-   * @param quota      - Quota String or Header.OZONE_QUOTA_REMOVE
-   * @throws OzoneException
-   */
-  public void setVolumeQuota(String volumeName, String quota)
-      throws OzoneException {
-    if (quota == null || quota.isEmpty()) {
-      throw new OzoneClientException("Invalid quota");
-    }
-    HttpPut putRequest = null;
-    try (CloseableHttpClient httpClient = newHttpClient()) {
-      OzoneUtils.verifyResourceName(volumeName);
-      URIBuilder builder = new URIBuilder(endPointURI);
-      builder.setPath("/" + volumeName)
-          .setParameter(Header.OZONE_QUOTA_QUERY_TAG, quota)
-          .build();
-
-      putRequest = getHttpPut(builder.toString());
-      executePutVolume(putRequest, httpClient);
-
-    } catch (URISyntaxException | IllegalArgumentException | IOException ex) {
-      throw new OzoneClientException(ex.getMessage(), ex);
-    } finally {
-      releaseConnection(putRequest);
-    }
-  }
-
-  /**
-   * Sends the create Volume request to the server.
-   *
-   * @param httppost   - http post class
-   * @param httpClient - httpClient
-   * @throws IOException    -
-   * @throws OzoneException
-   */
-  private void executeCreateVolume(HttpPost httppost,
-      final CloseableHttpClient httpClient)
-      throws IOException, OzoneException {
-    HttpEntity entity = null;
-    try {
-      HttpResponse response = httpClient.execute(httppost);
-      int errorCode = response.getStatusLine().getStatusCode();
-      entity = response.getEntity();
-
-      if ((errorCode == HTTP_OK) || (errorCode == HTTP_CREATED)) {
-        return;
-      }
-
-      if (entity != null) {
-        throw OzoneException.parse(EntityUtils.toString(entity));
-      } else {
-        throw new OzoneClientException("Unexpected null in http payload");
-      }
-    } finally {
-      if (entity != null) {
-        EntityUtils.consume(entity);
-      }
-    }
-  }
-
-  /**
-   * Sends the info Volume request to the server.
-   *
-   * @param httpGet - httpGet
-   * @return OzoneVolume
-   * @throws IOException    -
-   * @throws OzoneException
-   */
-  private OzoneVolume executeInfoVolume(HttpGet httpGet,
-      final CloseableHttpClient httpClient)
-      throws IOException, OzoneException {
-    HttpEntity entity = null;
-    try {
-      HttpResponse response = httpClient.execute(httpGet);
-      int errorCode = response.getStatusLine().getStatusCode();
-
-      entity = response.getEntity();
-      if (entity == null) {
-        throw new OzoneClientException("Unexpected null in http payload");
-      }
-
-      if (errorCode == HTTP_OK) {
-        OzoneVolume volume = new OzoneVolume(this);
-        volume.setVolumeInfo(EntityUtils.toString(entity));
-        return volume;
-      } else {
-        throw OzoneException.parse(EntityUtils.toString(entity));
-      }
-    } finally {
-      if (entity != null) {
-        EntityUtils.consumeQuietly(entity);
-      }
-    }
-  }
-
-  /**
-   * Sends update volume requests to the server.
-   *
-   * @param putRequest http request
-   * @throws IOException
-   * @throws OzoneException
-   */
-  private void executePutVolume(HttpPut putRequest,
-      final CloseableHttpClient httpClient)
-      throws IOException, OzoneException {
-    HttpEntity entity = null;
-    try {
-      HttpResponse response = httpClient.execute(putRequest);
-      int errorCode = response.getStatusLine().getStatusCode();
-      entity = response.getEntity();
-      if (errorCode != HTTP_OK) {
-        throw OzoneException.parse(EntityUtils.toString(entity));
-      }
-    } finally {
-      if (entity != null) {
-        EntityUtils.consume(entity);
-      }
-    }
-  }
-
-  /**
-   * List Volumes.
-   *
-   * @param httpGet - httpGet
-   * @return List of OzoneVolume
-   * @throws IOException    -
-   * @throws OzoneException
-   */
-  private List<OzoneVolume> executeListVolume(HttpGet httpGet,
-      final CloseableHttpClient httpClient)
-      throws IOException, OzoneException {
-    HttpEntity entity = null;
-    List<OzoneVolume> volList = new LinkedList<>();
-    try {
-      HttpResponse response = httpClient.execute(httpGet);
-      int errorCode = response.getStatusLine().getStatusCode();
-      entity = response.getEntity();
-
-      if (entity == null) {
-        throw new OzoneClientException("Unexpected null in http payload");
-      }
-
-      String temp = EntityUtils.toString(entity);
-      if (errorCode == HTTP_OK) {
-        ListVolumes listVolumes =
-            ListVolumes.parse(temp);
-
-        for (VolumeInfo info : listVolumes.getVolumes()) {
-          volList.add(new OzoneVolume(info, this));
-        }
-        return volList;
-
-      } else {
-        throw OzoneException.parse(EntityUtils.toString(entity));
-      }
-    } finally {
-      if (entity != null) {
-        EntityUtils.consumeQuietly(entity);
-      }
-    }
-  }
-
-  /**
-   * Delete Volume.
-   *
-   * @param httpDelete - Http Delete Request
-   * @throws IOException
-   * @throws OzoneException
-   */
-  private void executeDeleteVolume(HttpDelete httpDelete,
-      final CloseableHttpClient httpClient)
-      throws IOException, OzoneException {
-    HttpEntity entity = null;
-    try {
-      HttpResponse response = httpClient.execute(httpDelete);
-      int errorCode = response.getStatusLine().getStatusCode();
-      entity = response.getEntity();
-
-      if (errorCode != HTTP_OK) {
-        throw OzoneException.parse(EntityUtils.toString(entity));
-      }
-    } finally {
-      if (entity != null) {
-        EntityUtils.consumeQuietly(entity);
-      }
-    }
-  }
-
-  /**
-   * Puts a Key in Ozone Bucket.
-   *
-   * @param volumeName - Name of the Volume
-   * @param bucketName - Name of the Bucket
-   * @param keyName - Name of the Key
-   * @param file    - Stream that gets read to be put into Ozone.
-   * @throws OzoneException
-   */
-  public void putKey(String volumeName, String bucketName, String keyName,
-      File file) throws OzoneException {
-    OzoneUtils.verifyResourceName(volumeName);
-    OzoneUtils.verifyResourceName(bucketName);
-
-    if (StringUtils.isEmpty(keyName)) {
-      throw new OzoneClientException("Invalid key Name");
-    }
-
-    if (file == null) {
-      throw new OzoneClientException("Invalid data stream");
-    }
-
-    HttpPut putRequest = null;
-    FileInputStream fis = null;
-    try (CloseableHttpClient httpClient = HddsClientUtils.newHttpClient()) {
-      URIBuilder builder = new URIBuilder(getEndPointURI());
-      builder.setPath("/" + volumeName + "/" + bucketName + "/" + keyName)
-          .build();
-
-      putRequest = getHttpPut(builder.toString());
-
-      FileEntity fileEntity = new FileEntity(file, ContentType
-          .APPLICATION_OCTET_STREAM);
-      putRequest.setEntity(fileEntity);
-
-      fis = new FileInputStream(file);
-      putRequest.setHeader(Header.CONTENT_MD5, DigestUtils.md5Hex(fis));
-      OzoneBucket.executePutKey(putRequest, httpClient);
-    } catch (IOException | URISyntaxException ex) {
-      throw new OzoneClientException(ex.getMessage(), ex);
-    } finally {
-      IOUtils.closeStream(fis);
-      releaseConnection(putRequest);
-    }
-  }
-
-  /**
-   * Gets a key from the Ozone server and writes to the file pointed by the
-   * downloadTo Path.
-   *
-   * @param volumeName - Volume Name in Ozone.
-   * @param bucketName - Bucket Name in Ozone.
-   * @param keyName - Key Name in Ozone.
-   * @param downloadTo File Name to download the Key's Data to
-   */
-  public void getKey(String volumeName, String bucketName, String keyName,
-      Path downloadTo) throws OzoneException {
-    OzoneUtils.verifyResourceName(volumeName);
-    OzoneUtils.verifyResourceName(bucketName);
-
-    if (StringUtils.isEmpty(keyName)) {
-      throw new OzoneClientException("Invalid key Name");
-    }
-
-    if (downloadTo == null) {
-      throw new OzoneClientException("Invalid download path");
-    }
-
-    FileOutputStream outPutFile = null;
-    HttpGet getRequest = null;
-    try (CloseableHttpClient httpClient = HddsClientUtils.newHttpClient()) {
-      outPutFile = new FileOutputStream(downloadTo.toFile());
-
-      URIBuilder builder = new URIBuilder(getEndPointURI());
-      builder.setPath("/" + volumeName + "/" + bucketName + "/" + keyName)
-          .build();
-
-      getRequest = getHttpGet(builder.toString());
-      OzoneBucket.executeGetKey(getRequest, httpClient, outPutFile);
-      outPutFile.flush();
-    } catch (IOException | URISyntaxException ex) {
-      throw new OzoneClientException(ex.getMessage(), ex);
-    } finally {
-      IOUtils.closeStream(outPutFile);
-      releaseConnection(getRequest);
-    }
-  }
-
-  /**
-   * List all keys in the given bucket.
-   *
-   * @param volumeName - Volume name
-   * @param bucketName - Bucket name
-   * @param resultLength The max length of listing result.
-   * @param previousKey The key from which listing should start;
-   *                    this key is excluded from the result.
-   * @param prefix The prefix that returned keys must start with.
-   *
-   * @return List of OzoneKeys
-   */
-  public List<OzoneKey> listKeys(String volumeName, String bucketName,
-      String resultLength, String previousKey, String prefix)
-      throws OzoneException {
-    OzoneUtils.verifyResourceName(volumeName);
-    OzoneUtils.verifyResourceName(bucketName);
-
-    HttpGet getRequest = null;
-    try (CloseableHttpClient httpClient = HddsClientUtils.newHttpClient()) {
-      URIBuilder builder = new URIBuilder(getEndPointURI());
-      builder.setPath("/" + volumeName + "/" + bucketName).build();
-
-      if (!Strings.isNullOrEmpty(resultLength)) {
-        builder.addParameter(Header.OZONE_LIST_QUERY_MAXKEYS, resultLength);
-      }
-
-      if (!Strings.isNullOrEmpty(previousKey)) {
-        builder.addParameter(Header.OZONE_LIST_QUERY_PREVKEY, previousKey);
-      }
-
-      if (!Strings.isNullOrEmpty(prefix)) {
-        builder.addParameter(Header.OZONE_LIST_QUERY_PREFIX, prefix);
-      }
-
-      getRequest = getHttpGet(builder.toString());
-      return OzoneBucket.executeListKeys(getRequest, httpClient);
-    } catch (IOException | URISyntaxException e) {
-      throw new OzoneClientException(e.getMessage(), e);
-    } finally {
-      releaseConnection(getRequest);
-    }
-  }
-
-  /**
-   * Returns a standard HttpPost Object to use for ozone post requests.
-   *
-   * @param onBehalfOf - If the call is made on behalf of another user, that user
-   * @param uriString  - UriString
-   * @return HttpPost
-   */
-  public HttpPost getHttpPost(String onBehalfOf, String uriString) {
-    HttpPost httpPost = new HttpPost(uriString);
-    addOzoneHeaders(httpPost);
-    if (onBehalfOf != null) {
-      httpPost.addHeader(Header.OZONE_USER, onBehalfOf);
-    }
-    return httpPost;
-  }
-
-  /**
-   * Returns a standard HttpGet Object to use for ozone Get requests.
-   *
-   * @param uriString - The full Uri String
-   * @return HttpGet
-   */
-  public HttpGet getHttpGet(String uriString) {
-    HttpGet httpGet = new HttpGet(uriString);
-    addOzoneHeaders(httpGet);
-    return httpGet;
-  }
-
-  /**
-   * Returns httpDelete.
-   *
-   * @param uriString - uri
-   * @return HttpDelete
-   */
-  public HttpDelete getHttpDelete(String uriString) {
-    HttpDelete httpDel = new HttpDelete(uriString);
-    addOzoneHeaders(httpDel);
-    return httpDel;
-  }
-
-  /**
-   * returns an HttpPut Object.
-   *
-   * @param uriString - Uri
-   * @return HttpPut
-   */
-  public HttpPut getHttpPut(String uriString) {
-    HttpPut httpPut = new HttpPut(uriString);
-    addOzoneHeaders(httpPut);
-    return httpPut;
-  }
-
-  /**
-   * Add Ozone Headers.
-   *
-   * @param httpRequest - Http Request
-   */
-  private void addOzoneHeaders(HttpRequestBase httpRequest) {
-    SimpleDateFormat format =
-        new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
-
-    httpRequest.addHeader(Header.OZONE_VERSION_HEADER,
-        Header.OZONE_V1_VERSION_HEADER);
-    httpRequest.addHeader(HttpHeaders.DATE,
-        format.format(new Date(Time.monotonicNow())));
-    if (getUserAuth() != null) {
-      httpRequest.addHeader(HttpHeaders.AUTHORIZATION,
-          Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
-              getUserAuth());
-    }
-  }
-
-  /**
-   * Closes this client and releases any system resources associated with
-   * it. If the client is already closed then invoking this method has no
-   * effect.
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  @Override
-  public void close() throws IOException {
-    // TODO : Currently we create a new HTTP client. We should switch
-    // This to a Pool and cleanup the pool here.
-  }
-
-  @VisibleForTesting
-  public CloseableHttpClient newHttpClient() {
-    return HddsClientUtils.newHttpClient();
-  }
-}
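
Every request method in the removed OzoneRestClient above follows the same
Apache HttpClient pattern: build the resource URI, issue the request, treat
HTTP 200 (or 201 for creates) as success, parse any other response body into
an OzoneException, and always drain the entity so the pooled connection can be
reused. A minimal standalone sketch of that pattern, using plain HttpClient
4.x with a hypothetical endpoint and key path and none of the Ozone-specific
headers:

import java.io.File;
import java.io.IOException;

import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.FileEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;

public final class PutKeySketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical REST endpoint of the form /{volume}/{bucket}/{key}.
    String uri = "http://localhost:9880/vol1/bucket1/key1";
    try (CloseableHttpClient httpClient = HttpClients.createDefault()) {
      HttpPut put = new HttpPut(uri);
      put.setEntity(new FileEntity(new File("data.bin"),
          ContentType.APPLICATION_OCTET_STREAM));
      HttpResponse response = httpClient.execute(put);
      HttpEntity entity = response.getEntity();
      try {
        int code = response.getStatusLine().getStatusCode();
        if (code != 200 && code != 201) {
          // The removed client parsed this body via OzoneException.parse().
          String body = entity != null
              ? EntityUtils.toString(entity) : "<no body>";
          throw new IOException("PUT failed: " + code + " " + body);
        }
      } finally {
        // Drain quietly so the connection is reusable, as the removed
        // execute* helpers did in their finally blocks.
        EntityUtils.consumeQuietly(entity);
      }
    }
  }
}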

http://git-wip-us.apache.org/repos/asf/hadoop/blob/774daa8d/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneVolume.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneVolume.java
deleted file mode 100644
index 9d3831c..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneVolume.java
+++ /dev/null
@@ -1,584 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.client;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Strings;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.ozone.client.OzoneClientException;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.client.rest.headers.Header;
-import org.apache.hadoop.ozone.web.request.OzoneQuota;
-import org.apache.hadoop.ozone.web.response.BucketInfo;
-import org.apache.hadoop.ozone.web.response.ListBuckets;
-import org.apache.hadoop.ozone.web.response.VolumeInfo;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-
-import static org.apache.hadoop.hdds.server.ServerUtils.releaseConnection;
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpResponse;
-import org.apache.http.client.methods.HttpDelete;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.client.methods.HttpPut;
-import org.apache.http.client.utils.URIBuilder;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.util.EntityUtils;
-
-import java.io.IOException;
-import java.net.URISyntaxException;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
-import static java.net.HttpURLConnection.HTTP_CREATED;
-import static java.net.HttpURLConnection.HTTP_OK;
-
-/**
- * Ozone Volume Class.
- */
-public class OzoneVolume {
-  private VolumeInfo volumeInfo;
-  private Map<String, String> headerMap;
-  private final OzoneRestClient client;
-
-  /**
-   * Constructor for OzoneVolume.
-   */
-  public OzoneVolume(OzoneRestClient client) {
-    this.client = client;
-    this.headerMap = new HashMap<>();
-  }
-
-  /**
-   * Constructor for OzoneVolume.
-   *
-   * @param volInfo - volume Info.
-   * @param client  Client
-   */
-  public OzoneVolume(VolumeInfo volInfo, OzoneRestClient client) {
-    this.volumeInfo = volInfo;
-    this.client = client;
-  }
-
-  /**
-   * Returns a Json String of this class.
-   * @return String
-   * @throws IOException
-   */
-  public String getJsonString() throws IOException {
-    return volumeInfo.toJsonString();
-  }
-
-  /**
-   * sets the Volume Info.
-   *
-   * @param volInfoString - Volume Info String
-   */
-  public void setVolumeInfo(String volInfoString) throws IOException {
-    this.volumeInfo = VolumeInfo.parse(volInfoString);
-  }
-
-  /**
-   * @return the volume info.
-   */
-  public VolumeInfo getVolumeInfo() {
-    return this.volumeInfo;
-  }
-
-  /**
-   * Returns volume Name.
-   *
-   * @return Volume Name.
-   */
-  public String getVolumeName() {
-    return this.volumeInfo.getVolumeName();
-  }
-
-  /**
-   * Get created by.
-   *
-   * @return String
-   */
-  public String getCreatedby() {
-    return this.volumeInfo.getCreatedBy();
-  }
-
-  /**
-   * returns the Owner name.
-   *
-   * @return String
-   */
-  public String getOwnerName() {
-    return this.volumeInfo.getOwner().getName();
-  }
-
-  /**
-   * Returns Quota Info.
-   *
-   * @return Quota
-   */
-  public OzoneQuota getQuota() {
-    return volumeInfo.getQuota();
-  }
-
-  /**
-   * Returns creation time of Volume.
-   *
-   * @return String
-   */
-  public String getCreatedOn() {
-    return volumeInfo.getCreatedOn();
-  }
-
-  /**
-   * Returns a Http header from the Last Volume related call.
-   *
-   * @param headerName - Name of the header
-   * @return - Header Value
-   */
-  public String getHeader(String headerName) {
-    return headerMap.get(headerName);
-  }
-
-  /**
-   * Gets the Client; this is used by the Bucket and Key classes.
-   *
-   * @return - Ozone Client
-   */
-  OzoneRestClient getClient() {
-    return client;
-  }
-
-  /**
-   * Create Bucket - Creates a bucket under a given volume.
-   *
-   * @param bucketName - Bucket Name
-   * @param acls - Acls - User Acls
-   * @param storageType - Storage Class
-   * @param versioning - enable versioning support on a bucket.
-   *
-   *
-   * @return - an Ozone Bucket Object
-   */
-  public OzoneBucket createBucket(String bucketName, String[] acls,
-                                  StorageType storageType,
-                                  OzoneConsts.Versioning versioning)
-      throws OzoneException {
-
-    HttpPost httpPost = null;
-    try (CloseableHttpClient httpClient = newHttpClient()) {
-      OzoneUtils.verifyResourceName(bucketName);
-      URIBuilder builder = new URIBuilder(getClient().getEndPointURI());
-      builder.setPath("/" + getVolumeName() + "/" + bucketName).build();
-
-      httpPost = client.getHttpPost(null, builder.toString());
-      if (acls != null) {
-        for (String acl : acls) {
-          httpPost
-              .addHeader(Header.OZONE_ACLS, Header.OZONE_ACL_ADD + " " + acl);
-        }
-      }
-
-      httpPost.addHeader(Header.OZONE_STORAGE_TYPE, storageType.toString());
-      httpPost.addHeader(Header.OZONE_BUCKET_VERSIONING, versioning.toString());
-      executeCreateBucket(httpPost, httpClient);
-      return getBucket(bucketName);
-    } catch (IOException | URISyntaxException | IllegalArgumentException ex) {
-      throw new OzoneClientException(ex.getMessage(), ex);
-    } finally {
-      releaseConnection(httpPost);
-    }
-  }
-
-  /**
-   * Create Bucket.
-   *
-   * @param bucketName - bucket name
-   * @param acls - acls
-   * @param storageType - storage class
-   *
-   * @throws OzoneException
-   */
-  public OzoneBucket createBucket(String bucketName, String[] acls,
-                                  StorageType storageType)
-      throws OzoneException {
-    return createBucket(bucketName, acls, storageType,
-        OzoneConsts.Versioning.DISABLED);
-  }
-
-  /**
-   * Create Bucket.
-   *
-   * @param bucketName - bucket name
-   * @param acls - acls
-   *
-   * @throws OzoneException
-   */
-  public OzoneBucket createBucket(String bucketName, String[] acls)
-      throws OzoneException {
-    return createBucket(bucketName, acls, StorageType.DEFAULT,
-        OzoneConsts.Versioning.DISABLED);
-  }
-
-
-  /**
-   * Create Bucket.
-   *
-   * @param bucketName - bucket name
-   *
-   * @throws OzoneException
-   */
-  public OzoneBucket createBucket(String bucketName) throws OzoneException {
-    return createBucket(bucketName, null,  StorageType.DEFAULT,
-        OzoneConsts.Versioning.DISABLED);
-  }
-
-
-  /**
-   * execute a Create Bucket Request against Ozone server.
-   *
-   * @param httppost - httpPost
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  private void executeCreateBucket(HttpPost httppost,
-      CloseableHttpClient httpClient)
-      throws IOException, OzoneException {
-    HttpEntity entity = null;
-    try {
-      HttpResponse response = httpClient.execute(httppost);
-      int errorCode = response.getStatusLine().getStatusCode();
-      entity = response.getEntity();
-      if ((errorCode == HTTP_OK) || (errorCode == HTTP_CREATED)) {
-        return;
-      }
-
-      if (entity != null) {
-        throw OzoneException.parse(EntityUtils.toString(entity));
-      } else {
-        throw new OzoneClientException("Unexpected null in http payload");
-      }
-    } finally {
-      if (entity != null) {
-        EntityUtils.consumeQuietly(entity);
-      }
-    }
-  }
-
-  /**
-   * Adds Acls to an existing bucket.
-   *
-   * @param bucketName - Name of the bucket
-   * @param acls - Acls
-   *
-   * @throws OzoneException
-   */
-  public void addAcls(String bucketName, String[] acls) throws OzoneException {
-    HttpPut putRequest = null;
-    try (CloseableHttpClient httpClient = newHttpClient()) {
-      OzoneUtils.verifyResourceName(bucketName);
-      URIBuilder builder = new URIBuilder(getClient().getEndPointURI());
-      builder.setPath("/" + getVolumeName() + "/" + bucketName).build();
-      putRequest = client.getHttpPut(builder.toString());
-
-      for (String acl : acls) {
-        putRequest
-            .addHeader(Header.OZONE_ACLS, Header.OZONE_ACL_ADD + " " + acl);
-      }
-      executePutBucket(putRequest, httpClient);
-    } catch (URISyntaxException | IOException ex) {
-      throw new OzoneClientException(ex.getMessage(), ex);
-    } finally {
-      releaseConnection(putRequest);
-    }
-  }
-
-  /**
-   * Removes ACLs from a bucket.
-   *
-   * @param bucketName - Bucket Name
-   * @param acls - Acls to be removed
-   *
-   * @throws OzoneException
-   */
-  public void removeAcls(String bucketName, String[] acls)
-      throws OzoneException {
-    HttpPut putRequest = null;
-    try (CloseableHttpClient httpClient = newHttpClient()) {
-      OzoneUtils.verifyResourceName(bucketName);
-      URIBuilder builder = new URIBuilder(getClient().getEndPointURI());
-      builder.setPath("/" + getVolumeName() + "/" + bucketName).build();
-      putRequest = client.getHttpPut(builder.toString());
-
-      for (String acl : acls) {
-        putRequest
-            .addHeader(Header.OZONE_ACLS, Header.OZONE_ACL_REMOVE + " " + acl);
-      }
-      executePutBucket(putRequest, httpClient);
-    } catch (URISyntaxException | IOException ex) {
-      throw new OzoneClientException(ex.getMessage(), ex);
-    } finally {
-      releaseConnection(putRequest);
-    }
-  }
-
-  /**
-   * Returns information about an existing bucket.
-   *
-   * @param bucketName - BucketName
-   *
-   * @return OzoneBucket
-   */
-  public OzoneBucket getBucket(String bucketName) throws OzoneException {
-    HttpGet getRequest = null;
-    try (CloseableHttpClient httpClient = newHttpClient()) {
-      OzoneUtils.verifyResourceName(bucketName);
-      URIBuilder builder = new URIBuilder(getClient().getEndPointURI());
-      builder.setPath("/" + getVolumeName() + "/" + bucketName)
-        .setParameter(Header.OZONE_INFO_QUERY_TAG,
-            Header.OZONE_INFO_QUERY_BUCKET).build();
-      getRequest = client.getHttpGet(builder.toString());
-      return executeInfoBucket(getRequest, httpClient);
-
-    } catch (IOException | URISyntaxException | IllegalArgumentException ex) {
-      throw new OzoneClientException(ex.getMessage(), ex);
-    } finally {
-      releaseConnection(getRequest);
-    }
-  }
-
-
-  /**
-   * Execute the info bucket call.
-   *
-   * @param getRequest - httpGet Request
-   * @param httpClient - Http Client
-   *
-   * @return OzoneBucket
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  private OzoneBucket executeInfoBucket(HttpGet getRequest,
-      CloseableHttpClient httpClient)
-      throws IOException, OzoneException {
-    HttpEntity entity = null;
-    try {
-      HttpResponse response = httpClient.execute(getRequest);
-      int errorCode = response.getStatusLine().getStatusCode();
-      entity = response.getEntity();
-      if (entity == null) {
-        throw new OzoneClientException("Unexpected null in http payload");
-      }
-      if ((errorCode == HTTP_OK) || (errorCode == HTTP_CREATED)) {
-        OzoneBucket bucket =
-            new OzoneBucket(BucketInfo.parse(EntityUtils.toString(entity)),
-                this);
-        return bucket;
-      }
-      throw OzoneException.parse(EntityUtils.toString(entity));
-    } finally {
-      if (entity != null) {
-        EntityUtils.consumeQuietly(entity);
-      }
-    }
-  }
-
-  /**
-   * Execute the put bucket call.
-   *
-   * @param putRequest - http put request
-   * @param httpClient - Http Client
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  private void executePutBucket(HttpPut putRequest,
-      CloseableHttpClient httpClient)
-      throws IOException, OzoneException {
-    HttpEntity entity = null;
-    try {
-      HttpResponse response = httpClient.execute(putRequest);
-      int errorCode = response.getStatusLine().getStatusCode();
-      entity = response.getEntity();
-
-      if (errorCode == HTTP_OK) {
-        return;
-      }
-
-      if (entity != null) {
-        throw OzoneException.parse(EntityUtils.toString(entity));
-      }
-
-      throw new OzoneClientException("Unexpected null in http result");
-    } finally {
-      if (entity != null) {
-        EntityUtils.consumeQuietly(entity);
-      }
-    }
-  }
-
-  /**
-   * Gets a list of buckets on this volume.
-   *
-   * @return - List of buckets
-   *
-   * @throws OzoneException
-   */
-  public List<OzoneBucket> listBuckets(String resultLength,
-      String previousBucket, String prefix) throws OzoneException {
-    HttpGet getRequest = null;
-    try (CloseableHttpClient httpClient = newHttpClient()) {
-      URIBuilder builder = new URIBuilder(getClient().getEndPointURI());
-      builder.setPath("/" + getVolumeName()).build();
-      if (!Strings.isNullOrEmpty(resultLength)) {
-        builder.addParameter(Header.OZONE_LIST_QUERY_MAXKEYS, resultLength);
-      }
-      if (!Strings.isNullOrEmpty(previousBucket)) {
-        builder.addParameter(Header.OZONE_LIST_QUERY_PREVKEY, previousBucket);
-      }
-      if (!Strings.isNullOrEmpty(prefix)) {
-        builder.addParameter(Header.OZONE_LIST_QUERY_PREFIX, prefix);
-      }
-
-      getRequest = client.getHttpGet(builder.toString());
-      return executeListBuckets(getRequest, httpClient);
-
-    } catch (IOException | URISyntaxException e) {
-      throw new OzoneClientException(e.getMessage(), e);
-    } finally {
-      releaseConnection(getRequest);
-    }
-  }
-
-  /**
-   * executes the List Bucket Call.
-   *
-   * @param getRequest - http Request
-   * @param httpClient - http Client
-   *
-   * @return List of OzoneBuckets
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  private List<OzoneBucket> executeListBuckets(HttpGet getRequest,
-      CloseableHttpClient httpClient)
-      throws IOException, OzoneException {
-    HttpEntity entity = null;
-    List<OzoneBucket> ozoneBucketList = new LinkedList<OzoneBucket>();
-    try {
-      HttpResponse response = httpClient.execute(getRequest);
-      int errorCode = response.getStatusLine().getStatusCode();
-
-      entity = response.getEntity();
-
-      if (entity == null) {
-        throw new OzoneClientException("Unexpected null in http payload");
-      }
-      if (errorCode == HTTP_OK) {
-        ListBuckets bucketList =
-            ListBuckets.parse(EntityUtils.toString(entity));
-
-        for (BucketInfo info : bucketList.getBuckets()) {
-          ozoneBucketList.add(new OzoneBucket(info, this));
-        }
-        return ozoneBucketList;
-
-      } else {
-        throw OzoneException.parse(EntityUtils.toString(entity));
-      }
-    } finally {
-      if (entity != null) {
-        EntityUtils.consumeQuietly(entity);
-      }
-    }
-  }
-
-  /**
-   * Delete an empty bucket.
-   *
-   * @param bucketName - Name of the bucket to delete
-   *
-   * @throws OzoneException
-   */
-  public void deleteBucket(String bucketName) throws OzoneException {
-    HttpDelete delRequest = null;
-    try (CloseableHttpClient httpClient = newHttpClient()) {
-      OzoneUtils.verifyResourceName(bucketName);
-      URIBuilder builder = new URIBuilder(getClient().getEndPointURI());
-      builder.setPath("/" + getVolumeName() + "/" + bucketName).build();
-
-      delRequest = client.getHttpDelete(builder.toString());
-      executeDeleteBucket(delRequest, httpClient);
-
-    } catch (IOException | URISyntaxException | IllegalArgumentException ex) {
-      throw new OzoneClientException(ex.getMessage(), ex);
-    } finally {
-      releaseConnection(delRequest);
-    }
-  }
-
-  /**
-   * Executes delete bucket call.
-   *
-   * @param delRequest - Delete Request
-   * @param httpClient - Http Client
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  private void executeDeleteBucket(HttpDelete delRequest,
-      CloseableHttpClient httpClient)
-      throws IOException, OzoneException {
-    HttpEntity entity = null;
-    try {
-      HttpResponse response = httpClient.execute(delRequest);
-      int errorCode = response.getStatusLine().getStatusCode();
-      entity = response.getEntity();
-
-      if (errorCode == HTTP_OK) {
-        return;
-      }
-
-      if (entity == null) {
-        throw new OzoneClientException("Unexpected null in http payload.");
-      }
-
-      throw OzoneException.parse(EntityUtils.toString(entity));
-
-    } finally {
-      if (entity != null) {
-        EntityUtils.consumeQuietly(entity);
-      }
-    }
-  }
-
-  @VisibleForTesting
-  public CloseableHttpClient newHttpClient() {
-    return HddsClientUtils.newHttpClient();
-  }
-}
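
In the removed OzoneVolume above, bucket properties never travel in a request
body: createBucket, addAcls and removeAcls all encode ACLs, storage type and
versioning as HTTP headers on a POST or PUT to /{volume}/{bucket}. A small
sketch of that header-driven call, again with plain HttpClient 4.x; the header
names and values below are illustrative stand-ins for the Header.* constants,
not the actual wire strings:

import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;

public final class CreateBucketSketch {
  public static void main(String[] args) throws Exception {
    try (CloseableHttpClient httpClient = HttpClients.createDefault()) {
      HttpPost post = new HttpPost("http://localhost:9880/vol1/bucket1");
      // Stand-ins for Header.OZONE_ACLS, OZONE_STORAGE_TYPE and
      // OZONE_BUCKET_VERSIONING; one ACL header per ACL entry, as above.
      post.addHeader("x-ozone-acls", "ADD user:bilbo:rw");
      post.addHeader("x-ozone-storage-type", "DISK");
      post.addHeader("x-ozone-bucket-versioning", "DISABLED");
      try (CloseableHttpResponse response = httpClient.execute(post)) {
        int code = response.getStatusLine().getStatusCode();
        if (code != 200 && code != 201) {
          throw new RuntimeException("createBucket failed: HTTP " + code);
        }
      }
    }
  }
}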

http://git-wip-us.apache.org/repos/asf/hadoop/blob/774daa8d/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/package-info.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/package-info.java
deleted file mode 100644
index 046568b..0000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/package-info.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-
-/**
- * Ozone client library is a java client for the Ozone
- * Object Store.
- */
-package org.apache.hadoop.ozone.web.client;
-
-/**
- This library is a simple Ozone REST Library.
-
- This library is a very *minimal* client written for tests and
- command line utils that work against Ozone. It does not have
- things like thread-pools and support for extended security models yet.
-
- OzoneClients return OzoneVolumes and OzoneVolumes return OzoneBuckets.
- **/
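
The package description above captures the shape of the removed API: the
client hands out volumes, and volumes hand out buckets. An illustrative
fragment of that chain as it existed before this patch (it no longer compiles
once these classes are gone, and the getVolume accessor is assumed here; it
does not appear in these diffs):

// Sketch of the removed REST client API, for orientation only.
OzoneRestClient client = new OzoneRestClient("http://localhost:9880");
OzoneVolume volume = client.getVolume("vol1");        // assumed accessor
OzoneBucket bucket = volume.createBucket("bucket1");  // StorageType.DEFAULT, versioning DISABLED
volume.listBuckets(null, null, null)                  // null filters: no max-keys/prev-key/prefix
    .forEach(b -> System.out.println(b));
volume.deleteBucket("bucket1");
client.close();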

http://git-wip-us.apache.org/repos/asf/hadoop/blob/774daa8d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 9936815..ad8b016 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
 import org.apache.hadoop.ozone.ksm.KeySpaceManager;
 import org.apache.hadoop.hdds.scm.server.SCMStorage;
 import org.apache.hadoop.ozone.ksm.KSMStorage;
-import org.apache.hadoop.ozone.web.client.OzoneRestClient;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.protocolPB
     .StorageContainerLocationProtocolClientSideTranslatorPB;
@@ -167,7 +166,7 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
   }
 
   /**
-   * Creates an {@link OzoneRestClient} connected to this cluster's REST
+   * Creates an {@link OzoneClient} connected to this cluster's REST
    * service. Callers take ownership of the client and must close it when done.
    *
    * @return OzoneRestClient connected to this cluster's REST service

http://git-wip-us.apache.org/repos/asf/hadoop/blob/774daa8d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
index 9aefe9a..1a35c50 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
@@ -20,8 +20,9 @@ package org.apache.hadoop.ozone;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
+import org.apache.hadoop.ozone.client.rpc.RpcClient;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.ozone.web.client.OzoneRestClient;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.ratis.rpc.RpcType;
 import org.apache.ratis.rpc.SupportedRpcType;
@@ -65,9 +66,9 @@ public interface RatisTestHelper {
       return cluster;
     }
 
-    public OzoneRestClient newOzoneRestClient()
-        throws OzoneException, URISyntaxException {
-      return RatisTestHelper.newOzoneRestClient(getDatanodeOzoneRestPort());
+    public ClientProtocol newOzoneClient()
+        throws OzoneException, URISyntaxException, IOException {
+      return new RpcClient(conf);
     }
 
     @Override
@@ -102,9 +103,4 @@ public interface RatisTestHelper {
         .setNumDatanodes(numDatanodes).build();
     return cluster;
   }
-
-  static OzoneRestClient newOzoneRestClient(int port)
-      throws OzoneException, URISyntaxException {
-    return new OzoneRestClient("http://localhost:" + port);
-  }
 }
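
The replacement pattern in this test helper is the RPC path: construct an
RpcClient from the cluster configuration and program against the
ClientProtocol interface. A minimal sketch of that pattern; createVolume() and
close() are assumed ClientProtocol methods here, only the constructor call
appears in this diff:

import java.io.IOException;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.apache.hadoop.ozone.client.rpc.RpcClient;

public final class RpcClientSketch {
  public static void main(String[] args) throws IOException {
    OzoneConfiguration conf = new OzoneConfiguration();
    ClientProtocol client = new RpcClient(conf);  // as in newOzoneClient() above
    try {
      client.createVolume("vol1");  // assumed method, for illustration
    } finally {
      client.close();               // assumed; releases the RPC proxy
    }
  }
}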




[41/50] [abbrv] hadoop git commit: YARN-8338. TimelineService V1.5 doesn't come up after HADOOP-15406. Contributed by Vinod Kumar Vavilapalli

Posted by bo...@apache.org.
YARN-8338. TimelineService V1.5 doesn't come up after HADOOP-15406. Contributed by Vinod Kumar Vavilapalli


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31ab960f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31ab960f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31ab960f

Branch: refs/heads/YARN-7402
Commit: 31ab960f4f931df273481927b897388895d803ba
Parents: 438ef49
Author: Jason Lowe <jl...@apache.org>
Authored: Tue May 29 11:00:30 2018 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Tue May 29 11:00:30 2018 -0500

----------------------------------------------------------------------
 hadoop-project/pom.xml                                          | 5 +++++
 .../hadoop-yarn-server-applicationhistoryservice/pom.xml        | 5 +++++
 2 files changed, 10 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ab960f/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 73c3f5b..59a9bd2 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1144,6 +1144,11 @@
         <version>1.8.5</version>
       </dependency>
       <dependency>
+        <groupId>org.objenesis</groupId>
+        <artifactId>objenesis</artifactId>
+        <version>1.0</version>
+      </dependency>
+      <dependency>
         <groupId>org.mock-server</groupId>
         <artifactId>mockserver-netty</artifactId>
         <version>3.9.2</version>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ab960f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
index f310518..0527095 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
@@ -155,6 +155,11 @@
       <artifactId>leveldbjni-all</artifactId>
     </dependency>
 
+    <dependency>
+      <groupId>org.objenesis</groupId>
+      <artifactId>objenesis</artifactId>
+    </dependency>
+
     <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
     <dependency>
       <groupId>org.apache.hadoop</groupId>




[45/50] [abbrv] hadoop git commit: HDDS-125. Cleanup HDDS CheckStyle issues. Contributed by Anu Engineer.

Posted by bo...@apache.org.
HDDS-125. Cleanup HDDS CheckStyle issues.
Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9502b47b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9502b47b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9502b47b

Branch: refs/heads/YARN-7402
Commit: 9502b47bd2a3cf32edae635293169883c2914475
Parents: 17aa40f
Author: Anu Engineer <ae...@apache.org>
Authored: Tue May 29 09:54:06 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Tue May 29 09:54:06 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hdds/scm/block/BlockManagerImpl.java |  1 -
 .../hdds/scm/block/DeletedBlockLogImpl.java     |  2 +-
 .../hdds/scm/container/ContainerMapping.java    |  6 +-
 .../scm/container/ContainerStateManager.java    | 24 +++----
 .../hadoop/hdds/scm/container/Mapping.java      |  9 ++-
 .../hdds/scm/node/SCMNodeStorageStatMXBean.java |  4 +-
 .../hdds/scm/node/SCMNodeStorageStatMap.java    | 19 +++---
 .../hdds/scm/node/StorageReportResult.java      |  8 +--
 .../hdds/scm/node/states/Node2ContainerMap.java |  2 +-
 .../hdds/scm/pipelines/PipelineSelector.java    |  5 +-
 .../scm/server/StorageContainerManager.java     |  3 +-
 .../TestStorageContainerManagerHttpServer.java  |  1 -
 .../hadoop/hdds/scm/block/package-info.java     | 23 +++++++
 .../scm/container/TestContainerMapping.java     | 12 ++--
 .../hdds/scm/container/closer/package-info.java | 22 +++++++
 .../hadoop/hdds/scm/container/package-info.java | 22 +++++++
 .../hdds/scm/container/states/package-info.java | 22 +++++++
 .../hadoop/hdds/scm/node/TestNodeManager.java   | 66 ++++++++++----------
 .../scm/node/TestSCMNodeStorageStatMap.java     | 32 +++++-----
 .../hadoop/hdds/scm/node/package-info.java      | 22 +++++++
 .../ozone/container/common/TestEndPoint.java    |  2 -
 .../ozone/container/common/package-info.java    | 22 +++++++
 .../ozone/container/placement/package-info.java | 22 +++++++
 .../replication/TestContainerSupervisor.java    |  7 ++-
 24 files changed, 263 insertions(+), 95 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index 5a98e85..d17d6c0 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -41,7 +41,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
-import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
index cabcb46..cedc506 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
@@ -190,7 +190,7 @@ public class DeletedBlockLogImpl implements DeletedBlockLog {
     try {
       for(Long txID : txIDs) {
         try {
-          byte [] deleteBlockBytes =
+          byte[] deleteBlockBytes =
               deletedStore.get(Longs.toByteArray(txID));
           if (deleteBlockBytes == null) {
             LOG.warn("Delete txID {} not found", txID);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
index e569874..2d88621 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
@@ -152,7 +152,8 @@ public class ContainerMapping implements Mapping {
     ContainerInfo containerInfo;
     lock.lock();
     try {
-      byte[] containerBytes = containerStore.get(Longs.toByteArray(containerID));
+      byte[] containerBytes = containerStore.get(
+          Longs.toByteArray(containerID));
       if (containerBytes == null) {
         throw new SCMException(
             "Specified key does not exist. key : " + containerID,
@@ -229,7 +230,8 @@ public class ContainerMapping implements Mapping {
           containerStateManager.allocateContainer(
               pipelineSelector, type, replicationFactor, owner);
 
-      byte[] containerIDBytes = Longs.toByteArray(containerInfo.getContainerID());
+      byte[] containerIDBytes = Longs.toByteArray(
+          containerInfo.getContainerID());
       containerStore.put(containerIDBytes, containerInfo.getProtobuf()
               .toByteArray());
     } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
index f11a50c..4895b78 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
@@ -230,18 +230,18 @@ public class ContainerStateManager implements Closeable {
    *
    * Container State Flow:
    *
-   * [ALLOCATED]------->[CREATING]--------->[OPEN]---------->[CLOSING]------->[CLOSED]
-   *            (CREATE)     |    (CREATED)       (FINALIZE)          (CLOSE)    |
-   *                         |                                                   |
-   *                         |                                                   |
-   *                         |(TIMEOUT)                                  (DELETE)|
-   *                         |                                                   |
-   *                         +------------------> [DELETING] <-------------------+
-   *                                                   |
-   *                                                   |
-   *                                          (CLEANUP)|
-   *                                                   |
-   *                                               [DELETED]
+   * [ALLOCATED]---->[CREATING]------>[OPEN]-------->[CLOSING]------->[CLOSED]
+   *            (CREATE)     |    (CREATED)       (FINALIZE)     (CLOSE)    |
+   *                         |                                              |
+   *                         |                                              |
+   *                         |(TIMEOUT)                             (DELETE)|
+   *                         |                                              |
+   *                         +-------------> [DELETING] <-------------------+
+   *                                            |
+   *                                            |
+   *                                   (CLEANUP)|
+   *                                            |
+   *                                        [DELETED]
    */
   private void initializeStateMachine() {
     stateMachine.addTransition(LifeCycleState.ALLOCATED,

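The reflowed comment above is the container lifecycle in full: CREATE moves an
ALLOCATED container to CREATING, CREATED opens it, FINALIZE and CLOSE walk it
to CLOSED, DELETE (or a CREATING-side TIMEOUT) routes it to DELETING, and
CLEANUP retires it to DELETED. A self-contained sketch of that transition
table as a plain EnumMap, independent of Hadoop's StateMachine class:

import java.util.Collections;
import java.util.EnumMap;
import java.util.Map;

public final class ContainerLifecycleSketch {
  enum State { ALLOCATED, CREATING, OPEN, CLOSING, CLOSED, DELETING, DELETED }
  enum Event { CREATE, CREATED, FINALIZE, CLOSE, DELETE, TIMEOUT, CLEANUP }

  private static final Map<State, Map<Event, State>> TRANSITIONS =
      new EnumMap<>(State.class);

  private static void addTransition(State from, Event on, State to) {
    TRANSITIONS.computeIfAbsent(from, s -> new EnumMap<>(Event.class))
        .put(on, to);
  }

  static {
    // Exactly the edges drawn in the diagram above.
    addTransition(State.ALLOCATED, Event.CREATE,   State.CREATING);
    addTransition(State.CREATING,  Event.CREATED,  State.OPEN);
    addTransition(State.CREATING,  Event.TIMEOUT,  State.DELETING);
    addTransition(State.OPEN,      Event.FINALIZE, State.CLOSING);
    addTransition(State.CLOSING,   Event.CLOSE,    State.CLOSED);
    addTransition(State.CLOSED,    Event.DELETE,   State.DELETING);
    addTransition(State.DELETING,  Event.CLEANUP,  State.DELETED);
  }

  static State next(State from, Event on) {
    State to = TRANSITIONS
        .getOrDefault(from, Collections.<Event, State>emptyMap()).get(on);
    if (to == null) {
      throw new IllegalStateException("No transition from " + from + " on " + on);
    }
    return to;
  }

  public static void main(String[] args) {
    State s = State.ALLOCATED;
    for (Event e : new Event[] {Event.CREATE, Event.CREATED, Event.FINALIZE,
        Event.CLOSE, Event.DELETE, Event.CLEANUP}) {
      s = next(s, e);
      System.out.println(e + " -> " + s);  // ends in DELETED
    }
  }
}
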
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
index 61dee2b..f560174 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
@@ -45,7 +45,8 @@ public interface Mapping extends Closeable {
    * The max size of the searching range cannot exceed the
    * value of count.
    *
-   * @param startContainerID start containerID, >=0, start searching at the head if 0.
+   * @param startContainerID start containerID, >=0,
+   * start searching at the head if 0.
    * @param count count must be >= 0
    *              Usually the count will be replace with a very big
    *              value instead of being unlimited in case the db is very big.
@@ -53,7 +54,8 @@ public interface Mapping extends Closeable {
    * @return a list of container.
    * @throws IOException
    */
-  List<ContainerInfo> listContainer(long startContainerID, int count) throws IOException;
+  List<ContainerInfo> listContainer(long startContainerID, int count)
+      throws IOException;
 
   /**
    * Allocates a new container for a given keyName and replication factor.
@@ -64,7 +66,8 @@ public interface Mapping extends Closeable {
    * @throws IOException
    */
   ContainerInfo allocateContainer(HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor replicationFactor, String owner) throws IOException;
+      HddsProtos.ReplicationFactor replicationFactor, String owner)
+      throws IOException;
 
   /**
    * Deletes a container from SCM.

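The listContainer() contract above is a classic ID-based paging API: callers
pass a start ID and a bounded count rather than reading the whole container
table at once. A sketch of draining it page by page; advancing to lastId + 1
for the next call is an assumption about the paging contract, since the diff
only documents startContainerID and count:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdds.scm.container.Mapping;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;

final class ListAllContainersSketch {
  static void listAll(Mapping mapping, int batch) throws IOException {
    long start = 0;  // 0 means "start searching at the head", per the javadoc
    while (true) {
      List<ContainerInfo> page = mapping.listContainer(start, batch);
      if (page == null || page.isEmpty()) {
        break;  // table drained
      }
      page.forEach(c -> System.out.println(c.getContainerID()));
      start = page.get(page.size() - 1).getContainerID() + 1;
    }
  }
}
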
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
index d81ff0f..32ecbad 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
@@ -31,7 +31,7 @@ import java.util.UUID;
 @InterfaceAudience.Private
 public interface SCMNodeStorageStatMXBean {
   /**
-   * Get the capacity of the dataNode
+   * Get the capacity of the dataNode.
    * @param datanodeID Datanode Id
    * @return long
    */
@@ -52,7 +52,7 @@ public interface SCMNodeStorageStatMXBean {
   long getUsedSpace(UUID datanodeId);
 
   /**
-   * Returns the total capacity of all dataNodes
+   * Returns the total capacity of all dataNodes.
    * @return long
    */
   long getTotalCapacity();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
index f8ad2af..fa423bb 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
@@ -56,7 +56,7 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
   // NodeStorageInfo MXBean
   private ObjectName scmNodeStorageInfoBean;
   /**
-   * constructs the scmNodeStorageReportMap object
+   * constructs the scmNodeStorageReportMap object.
    */
   public SCMNodeStorageStatMap(OzoneConfiguration conf) {
     // scmNodeStorageReportMap = new ConcurrentHashMap<>();
@@ -73,6 +73,9 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
             HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD_DEFAULT);
   }
 
+  /**
+   * Enum that Describes what we should do at various thresholds.
+   */
   public enum UtilizationThreshold {
     NORMAL, WARN, CRITICAL;
   }
@@ -107,8 +110,8 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
    * @param datanodeID -- Datanode UUID
    * @param report - set if StorageReports.
    */
-  public void insertNewDatanode(UUID datanodeID, Set<StorageLocationReport> report)
-      throws SCMException {
+  public void insertNewDatanode(UUID datanodeID,
+      Set<StorageLocationReport> report) throws SCMException {
     Preconditions.checkNotNull(report);
     Preconditions.checkState(report.size() != 0);
     Preconditions.checkNotNull(datanodeID);
@@ -142,8 +145,8 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
    * @throws SCMException - if we don't know about this datanode, for new DN
    *                      use insertNewDatanode.
    */
-  public void updateDatanodeMap(UUID datanodeID, Set<StorageLocationReport> report)
-      throws SCMException {
+  public void updateDatanodeMap(UUID datanodeID,
+      Set<StorageLocationReport> report) throws SCMException {
     Preconditions.checkNotNull(datanodeID);
     Preconditions.checkNotNull(report);
     Preconditions.checkState(report.size() != 0);
@@ -301,7 +304,7 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
   }
 
   /**
-   * removes the dataNode from scmNodeStorageReportMap
+   * removes the dataNode from scmNodeStorageReportMap.
    * @param datanodeID
    * @throws SCMException in case the dataNode is not found in the map.
    */
@@ -339,11 +342,11 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
   }
 
   /**
-   * get the scmUsed ratio
+   * get the scmUsed ratio.
    */
   public  double getScmUsedratio(long scmUsed, long capacity) {
     double scmUsedRatio =
-        truncateDecimals (scmUsed / (double) capacity);
+        truncateDecimals(scmUsed / (double) capacity);
     return scmUsedRatio;
   }
   /**

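One plausible reading of the UtilizationThreshold enum documented above is a
pair of ratio cuts over scmUsed/capacity: NORMAL below the warning ratio, WARN
between warning and critical, CRITICAL above. A small sketch of that
classification; the 0.75/0.90 cutoffs are illustrative, and the real values
come from the HDDS storage utilization threshold config keys referenced in the
constructor:

public final class UtilizationSketch {
  enum UtilizationThreshold { NORMAL, WARN, CRITICAL }

  // Classify a datanode report by its used ratio; mirrors the
  // scmUsed / (double) capacity computation in getScmUsedratio() above.
  static UtilizationThreshold classify(long scmUsed, long capacity,
      double warnRatio, double criticalRatio) {
    double ratio = scmUsed / (double) capacity;
    if (ratio >= criticalRatio) {
      return UtilizationThreshold.CRITICAL;
    }
    return ratio >= warnRatio ? UtilizationThreshold.WARN
                              : UtilizationThreshold.NORMAL;
  }

  public static void main(String[] args) {
    // 85 GB used of 100 GB with illustrative cutoffs -> WARN.
    System.out.println(classify(85L << 30, 100L << 30, 0.75, 0.90));
  }
}
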
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java
index 3436e77..0b63ceb 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java
@@ -69,14 +69,14 @@ public class StorageReportResult {
     }
 
     public ReportResultBuilder setFullVolumeSet(
-        Set<StorageLocationReport> fullVolumes) {
-      this.fullVolumes = fullVolumes;
+        Set<StorageLocationReport> fullVolumesSet) {
+      this.fullVolumes = fullVolumesSet;
       return this;
     }
 
     public ReportResultBuilder setFailedVolumeSet(
-        Set<StorageLocationReport> failedVolumes) {
-      this.failedVolumes = failedVolumes;
+        Set<StorageLocationReport> failedVolumesSet) {
+      this.failedVolumes = failedVolumesSet;
       return this;
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
index f850e7a..1960604 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
@@ -92,7 +92,7 @@ public class Node2ContainerMap {
   }
 
   /**
-   * Removes datanode Entry from the map
+   * Removes datanode Entry from the map.
    * @param datanodeID - Datanode ID.
    */
   public void removeDatanode(UUID datanodeID) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
index d29bb84..2e56043 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
@@ -170,8 +170,9 @@ public class PipelineSelector {
       throws IOException {
     PipelineManager manager = getPipelineManager(replicationType);
     Preconditions.checkNotNull(manager, "Found invalid pipeline manager");
-    LOG.debug("Getting replication pipeline forReplicationType {} : ReplicationFactor {}",
-        replicationType.toString(), replicationFactor.toString());
+    LOG.debug("Getting replication pipeline forReplicationType {} :" +
+            " ReplicationFactor {}", replicationType.toString(),
+        replicationFactor.toString());
     return manager.
         getPipeline(replicationFactor, replicationType);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 0fd6843..78f13cb 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -45,7 +45,6 @@ import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
 import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.util.MBeans;
@@ -87,7 +86,7 @@ import static org.apache.hadoop.util.ExitUtil.terminate;
  * create a container, which then can be used to store data.
  */
 @InterfaceAudience.LimitedPrivate({"HDFS", "CBLOCK", "OZONE", "HBASE"})
-public class StorageContainerManager extends ServiceRuntimeInfoImpl
+public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     implements SCMMXBean {
 
   private static final Logger LOG = LoggerFactory

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
index 0dbb7c1..d9e1425 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
@@ -39,7 +39,6 @@ import org.junit.runners.Parameterized.Parameters;
 import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.net.MalformedURLException;
 import java.net.URL;
 import java.net.URLConnection;
 import java.util.Arrays;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java
new file mode 100644
index 0000000..a67df69
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/**
+ * Make checkstyle happy.
+ * */
+package org.apache.hadoop.hdds.scm.block;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
index a27068bb..f318316 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
@@ -216,8 +216,10 @@ public class TestContainerMapping {
 
     mapping.processContainerReports(crBuilder.build());
 
-    ContainerInfo updatedContainer = mapping.getContainer(info.getContainerID());
-    Assert.assertEquals(100000000L, updatedContainer.getNumberOfKeys());
+    ContainerInfo updatedContainer =
+        mapping.getContainer(info.getContainerID());
+    Assert.assertEquals(100000000L,
+        updatedContainer.getNumberOfKeys());
     Assert.assertEquals(2000000000L, updatedContainer.getUsedBytes());
   }
 
@@ -251,8 +253,10 @@ public class TestContainerMapping {
 
     mapping.processContainerReports(crBuilder.build());
 
-    ContainerInfo updatedContainer = mapping.getContainer(info.getContainerID());
-    Assert.assertEquals(500000000L, updatedContainer.getNumberOfKeys());
+    ContainerInfo updatedContainer =
+        mapping.getContainer(info.getContainerID());
+    Assert.assertEquals(500000000L,
+        updatedContainer.getNumberOfKeys());
     Assert.assertEquals(5368705120L, updatedContainer.getUsedBytes());
     NavigableSet<ContainerID> pendingCloseContainers = mapping.getStateManager()
         .getMatchingContainerIDs(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java
new file mode 100644
index 0000000..2f35719
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Make CheckStyle happy.
+ */
+package org.apache.hadoop.hdds.scm.container.closer;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java
new file mode 100644
index 0000000..f93aea6
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Make CheckStyle Happy.
+ */
+package org.apache.hadoop.hdds.scm.container;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
new file mode 100644
index 0000000..795dfc1
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Make CheckStyle Happy.
+ */
+package org.apache.hadoop.hdds.scm.container.states;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
index 36e796f..de87e50 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
@@ -510,42 +510,42 @@ public class TestNodeManager {
    * @throws InterruptedException
    * @throws TimeoutException
    */
+  /**
+   * These values are very important. Here is what it means so you don't
+   * have to look it up while reading this code.
+   *
+   *  OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL - This is the frequency of the
+   *  HB processing thread that is running in the SCM. This thread must run
+   *  for the SCM to process the Heartbeats.
+   *
+   *  OZONE_SCM_HEARTBEAT_INTERVAL - This is the frequency at which
+   *  datanodes will send heartbeats to SCM. Please note: this is the only
+   *  config value for node manager that is specified in seconds; we don't
+   *  want SCM heartbeat resolution to be finer than seconds.
+   *  In this test it is not used, but we are forced to set it because we
+   *  have validation code that checks that the Stale Node interval and the
+   *  Dead Node interval are larger than the value of
+   *  OZONE_SCM_HEARTBEAT_INTERVAL.
+   *
+   *  OZONE_SCM_STALENODE_INTERVAL - This is the time that must elapse
+   *  from the last heartbeat for us to mark a node as stale. In this test
+   *  we set it to 3 seconds. That is, if a node has not sent a heartbeat
+   *  to SCM in the last 3 seconds, we will mark it as stale.
+   *
+   *  OZONE_SCM_DEADNODE_INTERVAL - This is the time that must elapse
+   *  from the last heartbeat for a node to be marked dead. We have an
+   *  additional constraint that this must be at least 2 times the
+   *  Stale Node interval.
+   *
+   *  With these settings we explore the state of the cluster under
+   *  various timeouts. Each section is commented so that you can keep
+   *  track of the state of the cluster nodes.
+   *
+   */
+
   @Test
   public void testScmClusterIsInExpectedState1() throws IOException,
       InterruptedException, TimeoutException {
-    /**
-     * These values are very important. Here is what it means so you don't
-     * have to look it up while reading this code.
-     *
-     *  OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL - This the frequency of the
-     *  HB processing thread that is running in the SCM. This thread must run
-     *  for the SCM  to process the Heartbeats.
-     *
-     *  OZONE_SCM_HEARTBEAT_INTERVAL - This is the frequency at which
-     *  datanodes will send heartbeats to SCM. Please note: This is the only
-     *  config value for node manager that is specified in seconds. We don't
-     *  want SCM heartbeat resolution to be more than in seconds.
-     *  In this test it is not used, but we are forced to set it because we
-     *  have validation code that checks Stale Node interval and Dead Node
-     *  interval is larger than the value of
-     *  OZONE_SCM_HEARTBEAT_INTERVAL.
-     *
-     *  OZONE_SCM_STALENODE_INTERVAL - This is the time that must elapse
-     *  from the last heartbeat for us to mark a node as stale. In this test
-     *  we set that to 3. That is if a node has not heartbeat SCM for last 3
-     *  seconds we will mark it as stale.
-     *
-     *  OZONE_SCM_DEADNODE_INTERVAL - This is the time that must elapse
-     *  from the last heartbeat for a node to be marked dead. We have an
-     *  additional constraint that this must be at least 2 times bigger than
-     *  Stale node Interval.
-     *
-     *  With these we are trying to explore the state of this cluster with
-     *  various timeouts. Each section is commented so that you can keep
-     *  track of the state of the cluster nodes.
-     *
-     */
-
     OzoneConfiguration conf = getConf();
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
         MILLISECONDS);
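
As a companion to the relocated comment above, here is a minimal sketch of a
configuration that satisfies all of these constraints, assuming static imports
of the OZONE_SCM_* keys and java.util.concurrent.TimeUnit units as in the test
itself; the interval values are illustrative, not the ones this test sets:

    OzoneConfiguration conf = new OzoneConfiguration();
    // SCM-side HB processing thread; must run for heartbeats to be processed.
    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, MILLISECONDS);
    // Datanode heartbeat frequency; seconds is the finest supported resolution.
    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
    // Stale after 3 seconds without a heartbeat; must exceed the HB interval.
    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
    // Dead interval must be at least 2 times the stale interval.
    conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);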

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
index 571de77..b824412 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
@@ -42,11 +42,14 @@ import java.util.HashSet;
 import java.io.IOException;
 import java.util.concurrent.ConcurrentHashMap;
 
+/**
+ * Test Node Storage Map.
+ */
 public class TestSCMNodeStorageStatMap {
   private final static int DATANODE_COUNT = 100;
-  final long capacity = 10L * OzoneConsts.GB;
-  final long used = 2L * OzoneConsts.GB;
-  final long remaining = capacity - used;
+  private final long capacity = 10L * OzoneConsts.GB;
+  private final long used = 2L * OzoneConsts.GB;
+  private final long remaining = capacity - used;
   private static OzoneConfiguration conf = new OzoneConfiguration();
   private final Map<UUID, Set<StorageLocationReport>> testData =
       new ConcurrentHashMap<>();
@@ -59,9 +62,10 @@ public class TestSCMNodeStorageStatMap {
       UUID dnId = UUID.randomUUID();
       Set<StorageLocationReport> reportSet = new HashSet<>();
       String path = GenericTestUtils.getTempPath(
-          TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + Integer
-              .toString(dnIndex));
-      StorageLocationReport.Builder builder = StorageLocationReport.newBuilder();
+          TestSCMNodeStorageStatMap.class.getSimpleName() + "-" +
+              Integer.toString(dnIndex));
+      StorageLocationReport.Builder builder =
+          StorageLocationReport.newBuilder();
       builder.setStorageType(StorageType.DISK).setId(dnId.toString())
           .setStorageLocation(path).setScmUsed(used).setRemaining(remaining)
           .setCapacity(capacity).setFailed(false);
@@ -139,12 +143,12 @@ public class TestSCMNodeStorageStatMap {
     String path =
         GenericTestUtils.getRandomizedTempPath().concat("/" + storageId);
     StorageLocationReport report = reportSet.iterator().next();
-    long capacity = report.getCapacity();
-    long used = report.getScmUsed();
-    long remaining = report.getRemaining();
+    long reportCapacity = report.getCapacity();
+    long reportScmUsed = report.getScmUsed();
+    long reportRemaining = report.getRemaining();
     List<SCMStorageReport> reports = TestUtils
-        .createStorageReport(capacity, used, remaining, path, null, storageId,
-            1);
+        .createStorageReport(reportCapacity, reportScmUsed, reportRemaining,
+            path, null, storageId, 1);
     StorageReportResult result =
         map.processNodeReport(key, TestUtils.createNodeReport(reports));
     Assert.assertEquals(result.getStatus(),
@@ -158,7 +162,7 @@ public class TestSCMNodeStorageStatMap {
         SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL);
 
     reportList.add(TestUtils
-        .createStorageReport(capacity, capacity, 0, path, null,
+        .createStorageReport(reportCapacity, reportCapacity, 0, path, null,
             UUID.randomUUID().toString(), 1).get(0));
     result = map.processNodeReport(key, TestUtils.createNodeReport(reportList));
     Assert.assertEquals(result.getStatus(),
@@ -166,8 +170,8 @@ public class TestSCMNodeStorageStatMap {
     // Mark a disk failed 
     SCMStorageReport srb2 = SCMStorageReport.newBuilder()
         .setStorageUuid(UUID.randomUUID().toString())
-        .setStorageLocation(srb.getStorageLocation()).setScmUsed(capacity)
-        .setCapacity(capacity).setRemaining(0).setFailed(true).build();
+        .setStorageLocation(srb.getStorageLocation()).setScmUsed(reportCapacity)
+        .setCapacity(reportCapacity).setRemaining(0).setFailed(true).build();
     reportList.add(srb2);
     nrb.addAllStorageReport(reportList);
     result = map.processNodeReport(key, nrb.addStorageReport(srb).build());
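
Beyond line length, the renames in the hunks above remove locals that shadowed
the (now private) fields of the same name. A minimal illustration of that
hazard, with hypothetical names:

    class Example {
      private final long capacity = 10L * OzoneConsts.GB;   // the field
      void process(StorageLocationReport report) {
        long capacity = report.getCapacity();   // shadows the field: every
        // later use of "capacity" in this method silently means the local
      }
    }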

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java
new file mode 100644
index 0000000..dfd8397
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Make CheckStyle Happy.
+ */
+package org.apache.hadoop.hdds.scm.node;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index e82dc98..1d92cdc 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -33,8 +33,6 @@ import org.apache.hadoop.hdds.protocol.proto
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
-import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMStorageReport;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java
new file mode 100644
index 0000000..da2ae84
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Make CheckStyle Happy.
+ */
+package org.apache.hadoop.ozone.container.common;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java
new file mode 100644
index 0000000..ddd751c
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Make CheckStyle Happy.
+ */
+package org.apache.hadoop.ozone.container.placement;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java
index 01f70b1..e197886 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java
@@ -202,8 +202,8 @@ public class TestContainerSupervisor {
       ppool.handleContainerReport(reportsProto);
     }
 
-    clist = datanodeStateManager.getContainerReport(wayOverReplicatedContainerID,
-        ppool.getPool().getPoolName(), 7);
+    clist = datanodeStateManager.getContainerReport(
+        wayOverReplicatedContainerID, ppool.getPool().getPoolName(), 7);
 
     for (ContainerReportsRequestProto reportsProto : clist) {
       ppool.handleContainerReport(reportsProto);
@@ -264,7 +264,8 @@ public class TestContainerSupervisor {
               "PoolNew", 1);
       containerSupervisor.handleContainerReport(clist.get(0));
       GenericTestUtils.waitFor(() ->
-          inProgressLog.getOutput().contains(Long.toString(newContainerID)) && inProgressLog
+          inProgressLog.getOutput()
+              .contains(Long.toString(newContainerID)) && inProgressLog
               .getOutput().contains(id.getUuidString()),
           200, 10 * 1000);
     } finally {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[19/50] [abbrv] hadoop git commit: YARN-8316. Improved diagnostic message for ATS unavailability for YARN Service. Contributed by Billie Rinaldi

Posted by bo...@apache.org.
YARN-8316.  Improved diagnostic message for ATS unavailability for YARN Service.
            Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ff5a402
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ff5a402
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ff5a402

Branch: refs/heads/YARN-7402
Commit: 7ff5a40218241ad2380595175a493794129a7402
Parents: 2d19e7d
Author: Eric Yang <ey...@apache.org>
Authored: Thu May 24 16:26:02 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Thu May 24 16:26:02 2018 -0400

----------------------------------------------------------------------
 .../org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java   | 2 +-
 .../org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java   | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ff5a402/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
index 072e606..1ceb462 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
@@ -400,7 +400,7 @@ public class YarnClientImpl extends YarnClient {
             + e.getMessage());
         return null;
       }
-      throw e;
+      throw new IOException(e);
     } catch (NoClassDefFoundError e) {
       NoClassDefFoundError wrappedError = new NoClassDefFoundError(
           e.getMessage() + ". It appears that the timeline client "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ff5a402/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index b84b49c..70ff47b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -1159,7 +1159,7 @@ public class TestYarnClient extends ParameterizedSchedulerTestBase {
       TimelineClient createTimelineClient() throws IOException, YarnException {
         timelineClient = mock(TimelineClient.class);
         when(timelineClient.getDelegationToken(any(String.class)))
-          .thenThrow(new IOException("Best effort test exception"));
+          .thenThrow(new RuntimeException("Best effort test exception"));
         return timelineClient;
       }
     });
@@ -1175,7 +1175,7 @@ public class TestYarnClient extends ParameterizedSchedulerTestBase {
       client.serviceInit(conf);
       client.getTimelineDelegationToken();
       Assert.fail("Get delegation token should have thrown an exception");
-    } catch (Exception e) {
+    } catch (IOException e) {
       // Success
     }
   }
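
The YarnClientImpl hunk converts an unchecked failure from the timeline client
into a checked IOException, which is what the updated test asserts. A sketch of
the pattern; the surrounding variable names here are hypothetical:

    try {
      timelineDelegationToken = timelineClient.getDelegationToken(renewer);
    } catch (RuntimeException e) {
      // Wrap rather than rethrow: the original cause and message survive in
      // the stack trace, but callers that declare IOException now see it.
      throw new IOException(e);
    }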


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[11/50] [abbrv] hadoop git commit: HDFS-13598. Reduce unnecessary byte-to-string transform operation in INodesInPath#toString. Contributed by Gabor Bota.

Posted by bo...@apache.org.
HDFS-13598. Reduce unnecessary byte-to-string transform operation in INodesInPath#toString. Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a87add4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a87add4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a87add4

Branch: refs/heads/YARN-7402
Commit: 7a87add4ea4c317aa9377d1fc8e43fb5e7418a46
Parents: d996479
Author: Yiqun Lin <yq...@apache.org>
Authored: Thu May 24 10:57:35 2018 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Thu May 24 10:57:35 2018 +0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a87add4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index 8235bf0..50ead61 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -484,7 +484,7 @@ public class INodesInPath {
     }
 
     final StringBuilder b = new StringBuilder(getClass().getSimpleName())
-        .append(": path = ").append(DFSUtil.byteArray2PathString(path))
+        .append(": path = ").append(getPath())
         .append("\n  inodes = ");
     if (inodes == null) {
       b.append("null");


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[37/50] [abbrv] hadoop git commit: HDFS-13627. TestErasureCodingExerciseAPIs fails on Windows. Contributed by Anbang Hu.

Posted by bo...@apache.org.
HDFS-13627. TestErasureCodingExerciseAPIs fails on Windows. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/91d7c74e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/91d7c74e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/91d7c74e

Branch: refs/heads/YARN-7402
Commit: 91d7c74e6aa4850922f68bab490b585443e4fccb
Parents: 7c34366
Author: Inigo Goiri <in...@apache.org>
Authored: Mon May 28 10:26:47 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Mon May 28 10:26:47 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java   | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/91d7c74e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java
index 4335527..c63ba34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java
@@ -40,6 +40,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.DataOutputStream;
+import java.io.File;
 import java.io.IOException;
 import java.nio.file.Paths;
 import java.security.NoSuchAlgorithmException;
@@ -91,8 +92,10 @@ public class TestErasureCodingExerciseAPIs {
     // Set up java key store
     String testRootDir = Paths.get(new FileSystemTestHelper().getTestRootDir())
         .toString();
+    Path targetFile = new Path(new File(testRootDir).getAbsolutePath(),
+        "test.jks");
     String keyProviderURI = JavaKeyStoreProvider.SCHEME_NAME + "://file"
-        + new Path(testRootDir, "test.jks").toUri();
+        + targetFile.toUri();
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
         keyProviderURI);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,
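
The gist of the fix is to resolve the keystore location to an absolute file
path before embedding it in the provider URI. A condensed sketch of the
corrected construction, assuming the imports the patch adds (java.io.File,
org.apache.hadoop.fs.Path); the test root value is an illustrative placeholder:

    String testRootDir = "target/test/data";   // illustrative placeholder
    Path targetFile = new Path(new File(testRootDir).getAbsolutePath(),
        "test.jks");
    String keyProviderURI = JavaKeyStoreProvider.SCHEME_NAME + "://file"
        + targetFile.toUri();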


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[44/50] [abbrv] hadoop git commit: YARN-8369. Javadoc build failed due to 'bad use of >'. (Takanobu Asanuma via wangda)

Posted by bo...@apache.org.
YARN-8369. Javadoc build failed due to 'bad use of >'. (Takanobu Asanuma via wangda)

Change-Id: I79a42154e8f86ab1c3cc939b3745024b8eebe5f4


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17aa40f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17aa40f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17aa40f6

Branch: refs/heads/YARN-7402
Commit: 17aa40f669f197d43387d67dc00040d14cd00948
Parents: 3061bfc
Author: Wangda Tan <wa...@apache.org>
Authored: Tue May 29 09:27:36 2018 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Tue May 29 09:27:36 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/yarn/util/resource/ResourceCalculator.java | 4 ++--
 .../monitor/capacity/CapacitySchedulerPreemptionUtils.java   | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/17aa40f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
index 51078cd..27394f7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
@@ -260,10 +260,10 @@ public abstract class ResourceCalculator {
 
   /**
    * Check if resource has any major resource types (which are all NodeManagers
-   * included) has a >0 value.
+   * included) has a {@literal >} 0 value.
    *
    * @param resource resource
-   * @return returns true if any resource is >0
+   * @return returns true if any resource is {@literal >} 0
    */
   public abstract boolean isAnyMajorResourceAboveZero(Resource resource);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17aa40f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
index 5396d61..690eb02 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
@@ -136,12 +136,12 @@ public class CapacitySchedulerPreemptionUtils {
    * @param conservativeDRF
    *          should we do conservativeDRF preemption or not.
    *          When true:
-   *            stop preempt container when any major resource type <= 0 for to-
-   *            preempt.
+   *            stop preempt container when any major resource type
+   *            {@literal <=} 0 for to-preempt.
    *            This is default preemption behavior of intra-queue preemption
    *          When false:
-   *            stop preempt container when: all major resource type <= 0 for
-   *            to-preempt.
+   *            stop preempt container when: all major resource type
+   *            {@literal <=} 0 for to-preempt.
    *            This is default preemption behavior of inter-queue preemption
    * @return should we preempt rmContainer. If we should, deduct from
    *         <code>resourceToObtainByPartition</code>
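
For anyone hitting the same build failure: the {@literal} tag renders its
argument verbatim instead of treating < and > as HTML, which is what the
javadoc tool rejects. A minimal sketch; the method is hypothetical:

    /**
     * Returns true if the measured usage is {@literal >=} the threshold.
     */
    boolean isAboveThreshold(double usage, double threshold);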


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[04/50] [abbrv] hadoop git commit: YARN-8336. Fix potential connection leak in SchedConfCLI and YarnWebServiceUtils. Contributed by Giovanni Matteo Fumarola.

Posted by bo...@apache.org.
YARN-8336. Fix potential connection leak in SchedConfCLI and YarnWebServiceUtils. Contributed by Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e30938af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e30938af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e30938af

Branch: refs/heads/YARN-7402
Commit: e30938af1270e079587e7bc06b755f9e93e660a5
Parents: c13dea8
Author: Inigo Goiri <in...@apache.org>
Authored: Wed May 23 11:55:31 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Wed May 23 11:55:31 2018 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/client/cli/SchedConfCLI.java    | 42 ++++++++++++--------
 .../yarn/webapp/util/YarnWebServiceUtils.java   | 17 +++++---
 2 files changed, 38 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e30938af/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
index 11bfdd7..a5f3b80 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java
@@ -132,25 +132,35 @@ public class SchedConfCLI extends Configured implements Tool {
     }
 
     Client webServiceClient = Client.create();
-    WebResource webResource = webServiceClient.resource(WebAppUtils.
-        getRMWebAppURLWithScheme(getConf()));
-    ClientResponse response = webResource.path("ws").path("v1").path("cluster")
-        .path("scheduler-conf").accept(MediaType.APPLICATION_JSON)
-        .entity(YarnWebServiceUtils.toJson(updateInfo,
-            SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
-        .put(ClientResponse.class);
-    if (response != null) {
-      if (response.getStatus() == Status.OK.getStatusCode()) {
-        System.out.println("Configuration changed successfully.");
-        return 0;
+    WebResource webResource = webServiceClient
+        .resource(WebAppUtils.getRMWebAppURLWithScheme(getConf()));
+    ClientResponse response = null;
+
+    try {
+      response =
+          webResource.path("ws").path("v1").path("cluster")
+              .path("scheduler-conf").accept(MediaType.APPLICATION_JSON)
+              .entity(YarnWebServiceUtils.toJson(updateInfo,
+                  SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
+              .put(ClientResponse.class);
+      if (response != null) {
+        if (response.getStatus() == Status.OK.getStatusCode()) {
+          System.out.println("Configuration changed successfully.");
+          return 0;
+        } else {
+          System.err.println("Configuration change unsuccessful: "
+              + response.getEntity(String.class));
+        }
       } else {
-        System.err.println("Configuration change unsuccessful: "
-            + response.getEntity(String.class));
+        System.err.println("Configuration change unsuccessful: null response");
       }
-    } else {
-      System.err.println("Configuration change unsuccessful: null response");
+      return -1;
+    } finally {
+      if (response != null) {
+        response.close();
+      }
+      webServiceClient.destroy();
     }
-    return -1;
   }
 
   @VisibleForTesting

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e30938af/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java
index 1cf1e97..e7bca2c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java
@@ -58,11 +58,18 @@ public final class YarnWebServiceUtils {
 
     WebResource webResource = webServiceClient.resource(webAppAddress);
 
-    ClientResponse response = webResource.path("ws").path("v1")
-        .path("cluster").path("nodes")
-        .path(nodeId).accept(MediaType.APPLICATION_JSON)
-        .get(ClientResponse.class);
-    return response.getEntity(JSONObject.class);
+    ClientResponse response = null;
+    try {
+      response = webResource.path("ws").path("v1").path("cluster")
+          .path("nodes").path(nodeId).accept(MediaType.APPLICATION_JSON)
+          .get(ClientResponse.class);
+      return response.getEntity(JSONObject.class);
+    } finally {
+      if (response != null) {
+        response.close();
+      }
+      webServiceClient.destroy();
+    }
   }
 
   @SuppressWarnings("rawtypes")
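
The same close-and-destroy discipline applies to any Jersey 1.x call that
returns a ClientResponse. A self-contained sketch of the pattern used in both
files above; the host URL is illustrative:

    JSONObject getNode(String nodeId) {
      Client client = Client.create();
      ClientResponse response = null;
      try {
        response = client.resource("http://rm-host:8088")
            .path("ws").path("v1").path("cluster").path("nodes").path(nodeId)
            .accept(MediaType.APPLICATION_JSON)
            .get(ClientResponse.class);
        return response.getEntity(JSONObject.class);
      } finally {
        if (response != null) {
          response.close();   // releases the pooled HTTP connection
        }
        client.destroy();     // shuts down the client's connection manager
      }
    }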


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[31/50] [abbrv] hadoop git commit: HDDS-78. Add per volume level storage stats in SCM. Contributed by Shashikant Banerjee.

Posted by bo...@apache.org.
HDDS-78. Add per volume level storage stats in SCM.
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0cf6e87f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0cf6e87f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0cf6e87f

Branch: refs/heads/YARN-7402
Commit: 0cf6e87f9212af10eae39cdcb1fe60e6d8191772
Parents: f24c842
Author: Anu Engineer <ae...@apache.org>
Authored: Sat May 26 11:06:22 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Sat May 26 11:11:14 2018 -0700

----------------------------------------------------------------------
 .../placement/metrics/SCMNodeStat.java          |  21 --
 .../hdds/scm/node/SCMNodeStorageStatMXBean.java |   8 +
 .../hdds/scm/node/SCMNodeStorageStatMap.java    | 230 +++++++++++++------
 .../hdds/scm/node/StorageReportResult.java      |  87 +++++++
 .../scm/node/TestSCMNodeStorageStatMap.java     | 141 +++++++++---
 5 files changed, 356 insertions(+), 131 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cf6e87f/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
index 4fe72fc..3c871d3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
@@ -136,25 +136,4 @@ public class SCMNodeStat implements NodeStat {
   public int hashCode() {
     return Long.hashCode(capacity.get() ^ scmUsed.get() ^ remaining.get());
   }
-
-
-  /**
-   * Truncate to 4 digits since uncontrolled precision is some times
-   * counter intuitive to what users expect.
-   * @param value - double.
-   * @return double.
-   */
-  private double truncateDecimals(double value) {
-    final int multiplier = 10000;
-    return (double) ((long) (value * multiplier)) / multiplier;
-  }
-
-  /**
-   * get the scmUsed ratio
-   */
-  public  double getScmUsedratio() {
-    double scmUsedRatio =
-        truncateDecimals(getScmUsed().get() / (double) getCapacity().get());
-    return scmUsedRatio;
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cf6e87f/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
index f17a970..d81ff0f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
@@ -19,7 +19,9 @@
 package org.apache.hadoop.hdds.scm.node;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
 
+import java.util.Set;
 import java.util.UUID;
 
 /**
@@ -66,4 +68,10 @@ public interface SCMNodeStorageStatMXBean {
    * @return long
    */
   long getTotalFreeSpace();
+
+  /**
+   * Returns the set of disks for a given Datanode.
+   * @return set of storage volumes
+   */
+  Set<StorageLocationReport> getStorageVolumes(UUID datanodeId);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cf6e87f/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
index 25cb357..f8ad2af 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
@@ -22,18 +22,18 @@ package org.apache.hadoop.hdds.scm.node;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
+import org.apache.hadoop.hdds.protocol.proto.
+    StorageContainerDatanodeProtocolProtos.SCMStorageReport;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.management.ObjectName;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
+import java.io.IOException;
+import java.util.*;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.stream.Collectors;
 
@@ -52,16 +52,15 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
   private final double warningUtilizationThreshold;
   private final double criticalUtilizationThreshold;
 
-  private final Map<UUID, SCMNodeStat> scmNodeStorageStatMap;
+  private final Map<UUID, Set<StorageLocationReport>> scmNodeStorageReportMap;
   // NodeStorageInfo MXBean
   private ObjectName scmNodeStorageInfoBean;
-  // Aggregated node stats
-  private SCMNodeStat clusterStat;
   /**
-   * constructs the scmNodeStorageStatMap object
+   * constructs the scmNodeStorageReportMap object
    */
   public SCMNodeStorageStatMap(OzoneConfiguration conf) {
-    scmNodeStorageStatMap = new ConcurrentHashMap<>();
+    scmNodeStorageReportMap = new ConcurrentHashMap<>();
     warningUtilizationThreshold = conf.getDouble(
         OzoneConfigKeys.
             HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD,
@@ -72,7 +71,6 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
             HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD,
         OzoneConfigKeys.
             HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD_DEFAULT);
-    clusterStat = new SCMNodeStat();
   }
 
   public enum UtilizationThreshold {
@@ -81,20 +79,22 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
 
   /**
    * Returns true if this a datanode that is already tracked by
-   * scmNodeStorageStatMap.
+   * scmNodeStorageReportMap.
    *
    * @param datanodeID - UUID of the Datanode.
    * @return True if this is tracked, false if this map does not know about it.
    */
   public boolean isKnownDatanode(UUID datanodeID) {
     Preconditions.checkNotNull(datanodeID);
-    return scmNodeStorageStatMap.containsKey(datanodeID);
+    return scmNodeStorageReportMap.containsKey(datanodeID);
   }
 
   public List<UUID> getDatanodeList(
       UtilizationThreshold threshold) {
-    return scmNodeStorageStatMap.entrySet().stream()
-        .filter(entry -> (isThresholdReached(threshold, entry.getValue())))
+    return scmNodeStorageReportMap.entrySet().stream().filter(
+        entry -> (isThresholdReached(threshold,
+            getScmUsedratio(getUsedSpace(entry.getKey()),
+                getCapacity(entry.getKey())))))
         .map(Map.Entry::getKey)
         .collect(Collectors.toList());
   }
@@ -105,19 +105,19 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
    * Insert a new datanode into Node2Container Map.
    *
    * @param datanodeID -- Datanode UUID
-   * @param stat - scmNode stat for the Datanode.
+   * @param report - set of StorageReports.
    */
-  public void insertNewDatanode(UUID datanodeID, SCMNodeStat stat)
+  public void insertNewDatanode(UUID datanodeID, Set<StorageLocationReport> report)
       throws SCMException {
-    Preconditions.checkNotNull(stat);
+    Preconditions.checkNotNull(report);
+    Preconditions.checkState(report.size() != 0);
     Preconditions.checkNotNull(datanodeID);
-    synchronized (scmNodeStorageStatMap) {
+    synchronized (scmNodeStorageReportMap) {
       if (isKnownDatanode(datanodeID)) {
         throw new SCMException("Node already exists in the map",
             DUPLICATE_DATANODE);
       }
-      scmNodeStorageStatMap.put(datanodeID, stat);
-      clusterStat.add(stat);
+      scmNodeStorageReportMap.putIfAbsent(datanodeID, report);
     }
   }
 
@@ -138,72 +138,103 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
    * Updates the Container list of an existing DN.
    *
    * @param datanodeID - UUID of DN.
-   * @param stat - scmNode stat for the Datanode.
+   * @param report - set of Storage Reports for the Datanode.
    * @throws SCMException - if we don't know about this datanode, for new DN
    *                      use insertNewDatanode.
    */
-  public void updateDatanodeMap(UUID datanodeID, SCMNodeStat stat)
+  public void updateDatanodeMap(UUID datanodeID, Set<StorageLocationReport> report)
       throws SCMException {
     Preconditions.checkNotNull(datanodeID);
-    Preconditions.checkNotNull(stat);
-    synchronized (scmNodeStorageStatMap) {
-      if (!scmNodeStorageStatMap.containsKey(datanodeID)) {
+    Preconditions.checkNotNull(report);
+    Preconditions.checkState(report.size() != 0);
+    synchronized (scmNodeStorageReportMap) {
+      if (!scmNodeStorageReportMap.containsKey(datanodeID)) {
         throw new SCMException("No such datanode", NO_SUCH_DATANODE);
       }
-      SCMNodeStat removed = scmNodeStorageStatMap.get(datanodeID);
-      clusterStat.subtract(removed);
-      scmNodeStorageStatMap.put(datanodeID, stat);
-      clusterStat.add(stat);
+      scmNodeStorageReportMap.put(datanodeID, report);
     }
   }
 
-  public NodeReportStatus processNodeReport(UUID datanodeID,
+  public StorageReportResult processNodeReport(UUID datanodeID,
       StorageContainerDatanodeProtocolProtos.SCMNodeReport nodeReport)
-      throws SCMException {
+      throws IOException {
     Preconditions.checkNotNull(datanodeID);
     Preconditions.checkNotNull(nodeReport);
+
     long totalCapacity = 0;
     long totalRemaining = 0;
     long totalScmUsed = 0;
-    List<StorageContainerDatanodeProtocolProtos.SCMStorageReport>
+    Set<StorageLocationReport> storageReportSet = new HashSet<>();
+    Set<StorageLocationReport> fullVolumeSet = new HashSet<>();
+    Set<StorageLocationReport> failedVolumeSet = new HashSet<>();
+    List<SCMStorageReport>
         storageReports = nodeReport.getStorageReportList();
-    for (StorageContainerDatanodeProtocolProtos.SCMStorageReport report : storageReports) {
+    for (SCMStorageReport report : storageReports) {
+      StorageLocationReport storageReport =
+          StorageLocationReport.getFromProtobuf(report);
+      storageReportSet.add(storageReport);
+      if (report.hasFailed() && report.getFailed()) {
+        failedVolumeSet.add(storageReport);
+      } else if (isThresholdReached(UtilizationThreshold.CRITICAL,
+          getScmUsedratio(report.getScmUsed(), report.getCapacity()))) {
+        fullVolumeSet.add(storageReport);
+      }
       totalCapacity += report.getCapacity();
       totalRemaining += report.getRemaining();
       totalScmUsed += report.getScmUsed();
     }
-    SCMNodeStat stat = scmNodeStorageStatMap.get(datanodeID);
-    if (stat == null) {
-      stat = new SCMNodeStat();
-      stat.set(totalCapacity, totalScmUsed, totalRemaining);
-      insertNewDatanode(datanodeID, stat);
+
+    if (!isKnownDatanode(datanodeID)) {
+      insertNewDatanode(datanodeID, storageReportSet);
     } else {
-      stat.set(totalCapacity, totalScmUsed, totalRemaining);
-      updateDatanodeMap(datanodeID, stat);
+      updateDatanodeMap(datanodeID, storageReportSet);
     }
-    if (isThresholdReached(UtilizationThreshold.CRITICAL, stat)) {
+    if (isThresholdReached(UtilizationThreshold.CRITICAL,
+        getScmUsedratio(totalScmUsed, totalCapacity))) {
       LOG.warn("Datanode {} is out of storage space. Capacity: {}, Used: {}",
-          datanodeID, stat.getCapacity().get(), stat.getScmUsed().get());
-      return NodeReportStatus.DATANODE_OUT_OF_SPACE;
-    } else {
-      if (isThresholdReached(UtilizationThreshold.WARN, stat)) {
-       LOG.warn("Datanode {} is low on storage space. Capacity: {}, Used: {}",
-           datanodeID, stat.getCapacity().get(), stat.getScmUsed().get());
-      }
-      return NodeReportStatus.ALL_IS_WELL;
+          datanodeID, totalCapacity, totalScmUsed);
+      return StorageReportResult.ReportResultBuilder.newBuilder()
+          .setStatus(ReportStatus.DATANODE_OUT_OF_SPACE)
+          .setFullVolumeSet(fullVolumeSet).setFailedVolumeSet(failedVolumeSet)
+          .build();
+    }
+    if (isThresholdReached(UtilizationThreshold.WARN,
+        getScmUsedratio(totalScmUsed, totalCapacity))) {
+      LOG.warn("Datanode {} is low on storage space. Capacity: {}, Used: {}",
+          datanodeID, totalCapacity, totalScmUsed);
     }
+
+    if (failedVolumeSet.isEmpty() && !fullVolumeSet.isEmpty()) {
+      return StorageReportResult.ReportResultBuilder.newBuilder()
+          .setStatus(ReportStatus.STORAGE_OUT_OF_SPACE)
+          .setFullVolumeSet(fullVolumeSet).build();
+    }
+
+    if (!failedVolumeSet.isEmpty() && fullVolumeSet.isEmpty()) {
+      return StorageReportResult.ReportResultBuilder.newBuilder()
+          .setStatus(ReportStatus.FAILED_STORAGE)
+          .setFailedVolumeSet(failedVolumeSet).build();
+    }
+    if (!failedVolumeSet.isEmpty() && !fullVolumeSet.isEmpty()) {
+      return StorageReportResult.ReportResultBuilder.newBuilder()
+          .setStatus(ReportStatus.FAILED_AND_OUT_OF_SPACE_STORAGE)
+          .setFailedVolumeSet(failedVolumeSet).setFullVolumeSet(fullVolumeSet)
+          .build();
+    }
+    return StorageReportResult.ReportResultBuilder.newBuilder()
+        .setStatus(ReportStatus.ALL_IS_WELL).build();
   }
 
   private boolean isThresholdReached(UtilizationThreshold threshold,
-      SCMNodeStat stat) {
+      double scmUsedratio) {
     switch (threshold) {
     case NORMAL:
-      return stat.getScmUsedratio() < warningUtilizationThreshold;
+      return scmUsedratio < warningUtilizationThreshold;
     case WARN:
-      return stat.getScmUsedratio() >= warningUtilizationThreshold &&
-          stat.getScmUsedratio() < criticalUtilizationThreshold;
+      return scmUsedratio >= warningUtilizationThreshold
+          && scmUsedratio < criticalUtilizationThreshold;
     case CRITICAL:
-      return stat.getScmUsedratio() >= criticalUtilizationThreshold;
+      return scmUsedratio >= criticalUtilizationThreshold;
     default:
       throw new RuntimeException("Unknown UtilizationThreshold value");
     }
@@ -211,67 +242,120 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
 
   @Override
   public long getCapacity(UUID dnId) {
-    return scmNodeStorageStatMap.get(dnId).getCapacity().get();
+    long capacity = 0;
+    Set<StorageLocationReport> reportSet = scmNodeStorageReportMap.get(dnId);
+    for (StorageLocationReport report : reportSet) {
+      capacity += report.getCapacity();
+    }
+    return capacity;
   }
 
   @Override
   public long getRemainingSpace(UUID dnId) {
-    return scmNodeStorageStatMap.get(dnId).getRemaining().get();
+    long remaining = 0;
+    Set<StorageLocationReport> reportSet = scmNodeStorageReportMap.get(dnId);
+    for (StorageLocationReport report : reportSet) {
+      remaining += report.getRemaining();
+    }
+    return remaining;
   }
 
   @Override
   public long getUsedSpace(UUID dnId) {
-    return scmNodeStorageStatMap.get(dnId).getScmUsed().get();
+    long scmUsed = 0;
+    Set<StorageLocationReport> reportSet = scmNodeStorageReportMap.get(dnId);
+    for (StorageLocationReport report : reportSet) {
+      scmUsed += report.getScmUsed();
+    }
+    return scmUsed;
   }
 
   @Override
   public long getTotalCapacity() {
-    return clusterStat.getCapacity().get();
+    long capacity = 0;
+    Set<UUID> dnIdSet = scmNodeStorageReportMap.keySet();
+    for (UUID id : dnIdSet) {
+      capacity += getCapacity(id);
+    }
+    return capacity;
   }
 
   @Override
   public long getTotalSpaceUsed() {
-    return clusterStat.getScmUsed().get();
+    long scmUsed = 0;
+    Set<UUID> dnIdSet = scmNodeStorageReportMap.keySet();
+    for (UUID id : dnIdSet) {
+      scmUsed += getUsedSpace(id);
+    }
+    return scmUsed;
   }
 
   @Override
   public long getTotalFreeSpace() {
-    return clusterStat.getRemaining().get();
+    long remaining = 0;
+    Set<UUID> dnIdSet = scmNodeStorageReportMap.keySet();
+    for (UUID id : dnIdSet) {
+      remaining += getRemainingSpace(id);
+    }
+    return remaining;
   }
 
   /**
-   * removes the dataNode from scmNodeStorageStatMap
+   * Removes the datanode from scmNodeStorageReportMap.
    * @param datanodeID
    * @throws SCMException in case the dataNode is not found in the map.
    */
   public void removeDatanode(UUID datanodeID) throws SCMException {
     Preconditions.checkNotNull(datanodeID);
-    synchronized (scmNodeStorageStatMap) {
-      if (!scmNodeStorageStatMap.containsKey(datanodeID)) {
+    synchronized (scmNodeStorageReportMap) {
+      if (!scmNodeStorageReportMap.containsKey(datanodeID)) {
         throw new SCMException("No such datanode", NO_SUCH_DATANODE);
       }
-      SCMNodeStat stat = scmNodeStorageStatMap.remove(datanodeID);
-      clusterStat.subtract(stat);
+      scmNodeStorageReportMap.remove(datanodeID);
     }
   }
 
   /**
-   * Gets the SCMNodeStat for the datanode
+   * Returns the set of storage volumes for a Datanode.
    * @param  datanodeID
-   * @return SCMNodeStat
+   * @return set of storage volumes.
    */
 
-  SCMNodeStat getNodeStat(UUID datanodeID) {
-    return scmNodeStorageStatMap.get(datanodeID);
+  @Override
+  public Set<StorageLocationReport> getStorageVolumes(UUID datanodeID) {
+    return scmNodeStorageReportMap.get(datanodeID);
   }
 
+
+  /**
+   * Truncate to 4 digits since uncontrolled precision is sometimes
+   * counterintuitive to what users expect.
+   * @param value - double.
+   * @return double.
+   */
+  private double truncateDecimals(double value) {
+    final int multiplier = 10000;
+    return (double) ((long) (value * multiplier)) / multiplier;
+  }
+
+  /**
+   * Gets the ratio of scmUsed to total capacity.
+   */
+  public double getScmUsedratio(long scmUsed, long capacity) {
+    return truncateDecimals(scmUsed / (double) capacity);
+  }
+
   /**
    * Results possible from processing a Node report by
    * Node2ContainerMapper.
    */
-  public enum NodeReportStatus {
+  public enum ReportStatus {
     ALL_IS_WELL,
-    DATANODE_OUT_OF_SPACE
+    DATANODE_OUT_OF_SPACE,
+    STORAGE_OUT_OF_SPACE,
+    FAILED_STORAGE,
+    FAILED_AND_OUT_OF_SPACE_STORAGE
   }
 
-}
+}
\ No newline at end of file
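
For context on the math above: the per-node getters simply sum over each
datanode's report set, and getScmUsedratio truncates rather than rounds. A
minimal, self-contained sketch of that truncation (the class name and the
sample values are illustrative, not part of the patch):

    import java.util.Locale;

    /** Illustrative only: the 4-decimal truncation behind getScmUsedratio. */
    public final class UsedRatioSketch {

      private static double truncateDecimals(double value) {
        final int multiplier = 10000;            // keep four decimal digits
        return (double) ((long) (value * multiplier)) / multiplier;
      }

      public static void main(String[] args) {
        long used = 2L * 1024 * 1024 * 1024;     // 2 GB
        long capacity = 3L * 1024 * 1024 * 1024; // 3 GB
        // 0.666666... truncates (rather than rounds) to 0.6666
        double ratio = truncateDecimals(used / (double) capacity);
        System.out.println(String.format(Locale.ROOT, "%.4f", ratio));
      }
    }

The aggregation loops themselves are equivalent to a stream sum; a hedged
Java 8 sketch, assuming a Set<StorageLocationReport> named reportSet:

    long capacity = reportSet.stream()
        .mapToLong(StorageLocationReport::getCapacity)
        .sum();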

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cf6e87f/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java
new file mode 100644
index 0000000..3436e77
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java
@@ -0,0 +1,87 @@
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.scm.node;
+
+import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
+
+import java.util.Set;
+
+/**
+ * A storage report gets processed by the SCMNodeStorageStatMap and the
+ * outcome is returned as this report result class.
+ */
+public class StorageReportResult {
+  private SCMNodeStorageStatMap.ReportStatus status;
+  private Set<StorageLocationReport> fullVolumes;
+  private Set<StorageLocationReport> failedVolumes;
+
+  StorageReportResult(SCMNodeStorageStatMap.ReportStatus status,
+      Set<StorageLocationReport> fullVolumes,
+      Set<StorageLocationReport> failedVolumes) {
+    this.status = status;
+    this.fullVolumes = fullVolumes;
+    this.failedVolumes = failedVolumes;
+  }
+
+  public SCMNodeStorageStatMap.ReportStatus getStatus() {
+    return status;
+  }
+
+  public Set<StorageLocationReport> getFullVolumes() {
+    return fullVolumes;
+  }
+
+  public Set<StorageLocationReport> getFailedVolumes() {
+    return failedVolumes;
+  }
+
+  static class ReportResultBuilder {
+    private SCMNodeStorageStatMap.ReportStatus status;
+    private Set<StorageLocationReport> fullVolumes;
+    private Set<StorageLocationReport> failedVolumes;
+
+    static ReportResultBuilder newBuilder() {
+      return new ReportResultBuilder();
+    }
+
+    public ReportResultBuilder setStatus(
+        SCMNodeStorageStatMap.ReportStatus newstatus) {
+      this.status = newstatus;
+      return this;
+    }
+
+    public ReportResultBuilder setFullVolumeSet(
+        Set<StorageLocationReport> fullVolumes) {
+      this.fullVolumes = fullVolumes;
+      return this;
+    }
+
+    public ReportResultBuilder setFailedVolumeSet(
+        Set<StorageLocationReport> failedVolumes) {
+      this.failedVolumes = failedVolumes;
+      return this;
+    }
+
+    StorageReportResult build() {
+      return new StorageReportResult(status, fullVolumes, failedVolumes);
+    }
+  }
+}
\ No newline at end of file
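
A hedged, caller-side sketch of how a result built with this builder might be
consumed; the ReportResultUsageSketch class and its comments are hypothetical,
only the StorageReportResult API above comes from the patch:

    package org.apache.hadoop.hdds.scm.node;

    import java.util.Set;

    import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;

    /** Hypothetical consumer of StorageReportResult, for illustration only. */
    final class ReportResultUsageSketch {

      static void react(StorageReportResult result) {
        switch (result.getStatus()) {
        case ALL_IS_WELL:
          break;                                 // nothing to do
        case FAILED_STORAGE:
        case FAILED_AND_OUT_OF_SPACE_STORAGE:
          Set<StorageLocationReport> failed = result.getFailedVolumes();
          // e.g. re-replicate containers away from the failed volumes
          break;
        default:                                 // *_OUT_OF_SPACE statuses
          Set<StorageLocationReport> full = result.getFullVolumes();
          // e.g. stop placing new containers on the full volumes
          break;
        }
      }

      private ReportResultUsageSketch() {
      }
    }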

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cf6e87f/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
index 2fa786b..571de77 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
@@ -17,38 +17,56 @@
  */
 package org.apache.hadoop.hdds.scm.node;
 
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.hdds.protocol.proto.
+    StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+import org.apache.hadoop.hdds.protocol.proto.
+    StorageContainerDatanodeProtocolProtos.SCMStorageReport;
 import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMStorageReport;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.*;
+import org.junit.Rule;
 import org.junit.rules.ExpectedException;
 
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;
+import java.util.Set;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.io.IOException;
 import java.util.concurrent.ConcurrentHashMap;
 
 public class TestSCMNodeStorageStatMap {
-  private final static int DATANODE_COUNT = 300;
+  private final static int DATANODE_COUNT = 100;
   final long capacity = 10L * OzoneConsts.GB;
   final long used = 2L * OzoneConsts.GB;
   final long remaining = capacity - used;
   private static OzoneConfiguration conf = new OzoneConfiguration();
-  private final Map<UUID, SCMNodeStat> testData = new ConcurrentHashMap<>();
+  private final Map<UUID, Set<StorageLocationReport>> testData =
+      new ConcurrentHashMap<>();
 
   @Rule
   public ExpectedException thrown = ExpectedException.none();
 
   private void generateData() {
-    SCMNodeStat stat = new SCMNodeStat();
-    stat.set(capacity, used, remaining);
     for (int dnIndex = 1; dnIndex <= DATANODE_COUNT; dnIndex++) {
-      testData.put(UUID.randomUUID(), stat);
+      UUID dnId = UUID.randomUUID();
+      Set<StorageLocationReport> reportSet = new HashSet<>();
+      String path = GenericTestUtils.getTempPath(
+          TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + Integer
+              .toString(dnIndex));
+      StorageLocationReport.Builder builder = StorageLocationReport.newBuilder();
+      builder.setStorageType(StorageType.DISK).setId(dnId.toString())
+          .setStorageLocation(path).setScmUsed(used).setRemaining(remaining)
+          .setCapacity(capacity).setFailed(false);
+      reportSet.add(builder.build());
+      testData.put(dnId, reportSet);
     }
   }
 
@@ -70,8 +88,8 @@ public class TestSCMNodeStorageStatMap {
     SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
     UUID knownNode = getFirstKey();
     UUID unknownNode = UUID.randomUUID();
-    SCMNodeStat stat = testData.get(knownNode);
-    map.insertNewDatanode(knownNode, stat);
+    Set<StorageLocationReport> report = testData.get(knownNode);
+    map.insertNewDatanode(knownNode, report);
     Assert.assertTrue("Not able to detect a known node",
         map.isKnownDatanode(knownNode));
     Assert.assertFalse("Unknown node detected",
@@ -82,54 +100,89 @@ public class TestSCMNodeStorageStatMap {
   public void testInsertNewDatanode() throws SCMException {
     SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
     UUID knownNode = getFirstKey();
-    SCMNodeStat stat = testData.get(knownNode);
-    map.insertNewDatanode(knownNode, stat);
-    Assert.assertEquals(map.getNodeStat(knownNode).getScmUsed(),
-        testData.get(knownNode).getScmUsed());
+    Set<StorageLocationReport> report = testData.get(knownNode);
+    map.insertNewDatanode(knownNode, report);
+    Assert.assertEquals(map.getStorageVolumes(knownNode),
+        testData.get(knownNode));
     thrown.expect(SCMException.class);
     thrown.expectMessage("already exists");
-    map.insertNewDatanode(knownNode, stat);
+    map.insertNewDatanode(knownNode, report);
   }
 
   @Test
   public void testUpdateUnknownDatanode() throws SCMException {
     SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
     UUID unknownNode = UUID.randomUUID();
-    SCMNodeStat stat = new SCMNodeStat();
-
+    String path = GenericTestUtils.getTempPath(
+        TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + unknownNode
+            .toString());
+    Set<StorageLocationReport> reportSet = new HashSet<>();
+    StorageLocationReport.Builder builder = StorageLocationReport.newBuilder();
+    builder.setStorageType(StorageType.DISK).setId(unknownNode.toString())
+        .setStorageLocation(path).setScmUsed(used).setRemaining(remaining)
+        .setCapacity(capacity).setFailed(false);
+    reportSet.add(builder.build());
     thrown.expect(SCMException.class);
     thrown.expectMessage("No such datanode");
-    map.updateDatanodeMap(unknownNode, stat);
+    map.updateDatanodeMap(unknownNode, reportSet);
   }
 
   @Test
-  public void testProcessNodeReportCheckOneNode() throws SCMException {
+  public void testProcessNodeReportCheckOneNode() throws IOException {
     UUID key = getFirstKey();
-    SCMNodeStat value = testData.get(key);
+    List<SCMStorageReport> reportList = new ArrayList<>();
+    Set<StorageLocationReport> reportSet = testData.get(key);
     SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
-    map.insertNewDatanode(key, value);
+    map.insertNewDatanode(key, reportSet);
     Assert.assertTrue(map.isKnownDatanode(key));
     String storageId = UUID.randomUUID().toString();
     String path =
         GenericTestUtils.getRandomizedTempPath().concat("/" + storageId);
-    long capacity = value.getCapacity().get();
-    long used = value.getScmUsed().get();
-    long remaining = value.getRemaining().get();
+    StorageLocationReport report = reportSet.iterator().next();
+    long capacity = report.getCapacity();
+    long used = report.getScmUsed();
+    long remaining = report.getRemaining();
     List<SCMStorageReport> reports = TestUtils
         .createStorageReport(capacity, used, remaining, path, null, storageId,
             1);
-    SCMNodeStorageStatMap.NodeReportStatus status =
+    StorageReportResult result =
         map.processNodeReport(key, TestUtils.createNodeReport(reports));
-    Assert.assertEquals(status,
-        SCMNodeStorageStatMap.NodeReportStatus.ALL_IS_WELL);
+    Assert.assertEquals(result.getStatus(),
+        SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL);
+    StorageContainerDatanodeProtocolProtos.SCMNodeReport.Builder nrb =
+        SCMNodeReport.newBuilder();
+    SCMStorageReport srb = reportSet.iterator().next().getProtoBufMessage();
+    reportList.add(srb);
+    result = map.processNodeReport(key, TestUtils.createNodeReport(reportList));
+    Assert.assertEquals(result.getStatus(),
+        SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL);
+
+    reportList.add(TestUtils
+        .createStorageReport(capacity, capacity, 0, path, null,
+            UUID.randomUUID().toString(), 1).get(0));
+    result = map.processNodeReport(key, TestUtils.createNodeReport(reportList));
+    Assert.assertEquals(result.getStatus(),
+        SCMNodeStorageStatMap.ReportStatus.STORAGE_OUT_OF_SPACE);
+    // Mark a disk as failed.
+    SCMStorageReport srb2 = SCMStorageReport.newBuilder()
+        .setStorageUuid(UUID.randomUUID().toString())
+        .setStorageLocation(srb.getStorageLocation()).setScmUsed(capacity)
+        .setCapacity(capacity).setRemaining(0).setFailed(true).build();
+    reportList.add(srb2);
+    nrb.addAllStorageReport(reportList);
+    result = map.processNodeReport(key, nrb.addStorageReport(srb).build());
+    Assert.assertEquals(result.getStatus(),
+        SCMNodeStorageStatMap.ReportStatus.FAILED_AND_OUT_OF_SPACE_STORAGE);
+
   }
 
   @Test
-  public void testProcessNodeReportAndSCMStats() throws SCMException {
+  public void testProcessMultipleNodeReports() throws SCMException {
     SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
     int counter = 1;
     // Insert all testData into the SCMNodeStorageStatMap Map.
-    for (Map.Entry<UUID, SCMNodeStat> keyEntry : testData.entrySet()) {
+    for (Map.Entry<UUID, Set<StorageLocationReport>> keyEntry : testData
+        .entrySet()) {
       map.insertNewDatanode(keyEntry.getKey(), keyEntry.getValue());
     }
     Assert.assertEquals(DATANODE_COUNT * capacity, map.getTotalCapacity());
@@ -137,9 +190,21 @@ public class TestSCMNodeStorageStatMap {
     Assert.assertEquals(DATANODE_COUNT * used, map.getTotalSpaceUsed());
 
     // update 1/4th of the datanodes to be full
-    for (Map.Entry<UUID, SCMNodeStat> keyEntry : testData.entrySet()) {
-      SCMNodeStat stat = new SCMNodeStat(capacity, capacity, 0);
-      map.updateDatanodeMap(keyEntry.getKey(), stat);
+    for (Map.Entry<UUID, Set<StorageLocationReport>> keyEntry : testData
+        .entrySet()) {
+      Set<StorageLocationReport> reportSet = new HashSet<>();
+      String path = GenericTestUtils.getTempPath(
+          TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + keyEntry
+              .getKey().toString());
+      StorageLocationReport.Builder builder =
+          StorageLocationReport.newBuilder();
+      builder.setStorageType(StorageType.DISK)
+          .setId(keyEntry.getKey().toString()).setStorageLocation(path)
+          .setScmUsed(capacity).setRemaining(0).setCapacity(capacity)
+          .setFailed(false);
+      reportSet.add(builder.build());
+
+      map.updateDatanodeMap(keyEntry.getKey(), reportSet);
       counter++;
       if (counter > DATANODE_COUNT / 4) {
         break;
@@ -163,7 +228,8 @@ public class TestSCMNodeStorageStatMap {
         map.getTotalSpaceUsed(), 0);
     counter = 1;
     // Remove 1/4 of the DataNodes from the Map
-    for (Map.Entry<UUID, SCMNodeStat> keyEntry : testData.entrySet()) {
+    for (Map.Entry<UUID, Set<StorageLocationReport>> keyEntry : testData
+        .entrySet()) {
       map.removeDatanode(keyEntry.getKey());
       counter++;
       if (counter > DATANODE_COUNT / 4) {
@@ -181,12 +247,13 @@ public class TestSCMNodeStorageStatMap {
         map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.NORMAL)
             .size(), 0);
 
-    Assert.assertEquals(0.75 * DATANODE_COUNT * capacity, map.getTotalCapacity(), 0);
+    Assert
+        .assertEquals(0.75 * DATANODE_COUNT * capacity, map.getTotalCapacity(),
+            0);
     Assert.assertEquals(0.75 * DATANODE_COUNT * remaining,
         map.getTotalFreeSpace(), 0);
-    Assert.assertEquals(
-        0.75 * DATANODE_COUNT * used ,
-        map.getTotalSpaceUsed(), 0);
+    Assert
+        .assertEquals(0.75 * DATANODE_COUNT * used, map.getTotalSpaceUsed(), 0);
 
   }
 }




[20/50] [abbrv] hadoop git commit: YARN-8357. Fixed NPE when YARN service is saved and not deployed. Contributed by Chandni Singh

Posted by bo...@apache.org.
YARN-8357.  Fixed NPE when YARN service is saved and not deployed.
            Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9852eb5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9852eb5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9852eb5

Branch: refs/heads/YARN-7402
Commit: d9852eb5897a25323ab0302c2c0decb61d310e5e
Parents: 7ff5a40
Author: Eric Yang <ey...@apache.org>
Authored: Thu May 24 16:32:13 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Thu May 24 16:32:13 2018 -0400

----------------------------------------------------------------------
 .../java/org/apache/hadoop/yarn/service/client/ServiceClient.java   | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9852eb5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 93a74e3..0ab3322 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -1198,6 +1198,7 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
     ServiceApiUtil.validateNameFormat(serviceName, getConfig());
     Service appSpec = new Service();
     appSpec.setName(serviceName);
+    appSpec.setState(ServiceState.STOPPED);
     ApplicationId currentAppId = getAppId(serviceName);
     if (currentAppId == null) {
       LOG.info("Service {} does not have an application ID", serviceName);




[26/50] [abbrv] hadoop git commit: HDDS-96. Add an option in ozone script to generate a site file with minimally required ozone configs. Contributed by Dinesh Chitlangia.

Posted by bo...@apache.org.
HDDS-96. Add an option in ozone script to generate a site file with minimally required ozone configs.
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8733012a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8733012a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8733012a

Branch: refs/heads/YARN-7402
Commit: 8733012ae35f2762d704f94975a762885d116795
Parents: 1e0d4b1
Author: Anu Engineer <ae...@apache.org>
Authored: Fri May 25 13:06:14 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Fri May 25 13:06:14 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hdds/conf/OzoneConfiguration.java    |   6 +-
 hadoop-ozone/common/src/main/bin/ozone          |   4 +
 ...TestGenerateOzoneRequiredConfigurations.java | 100 +++++++++++
 .../GenerateOzoneRequiredConfigurations.java    | 174 +++++++++++++++++++
 .../hadoop/ozone/genconf/package-info.java      |  24 +++
 5 files changed, 305 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8733012a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
index f07718c..36d953c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
@@ -137,7 +137,7 @@ public class OzoneConfiguration extends Configuration {
 
     @Override
     public String toString() {
-      return this.getName() + " " + this.getValue() + this.getTag();
+      return this.getName() + " " + this.getValue() + " " + this.getTag();
     }
 
     @Override
@@ -152,11 +152,11 @@ public class OzoneConfiguration extends Configuration {
     }
   }
 
-  public static void activate(){
+  public static void activate() {
     // adds the default resources
     Configuration.addDefaultResource("hdfs-default.xml");
     Configuration.addDefaultResource("hdfs-site.xml");
     Configuration.addDefaultResource("ozone-default.xml");
     Configuration.addDefaultResource("ozone-site.xml");
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8733012a/hadoop-ozone/common/src/main/bin/ozone
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/ozone b/hadoop-ozone/common/src/main/bin/ozone
index 00261c7..6843bdd 100755
--- a/hadoop-ozone/common/src/main/bin/ozone
+++ b/hadoop-ozone/common/src/main/bin/ozone
@@ -47,6 +47,7 @@ function hadoop_usage
   hadoop_add_subcommand "scm" daemon "run the Storage Container Manager service"
   hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container Manager "
   hadoop_add_subcommand "version" client "print the version"
+  hadoop_add_subcommand "genconf" client "generate minimally required ozone configs and output to ozone-site.xml in specified path"
 
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false
 }
@@ -118,6 +119,9 @@ function ozonecmd_case
     version)
       HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo
     ;;
+    genconf)
+      HADOOP_CLASSNAME=org.apache.hadoop.ozone.genconf.GenerateOzoneRequiredConfigurations
+    ;;
     *)
       HADOOP_CLASSNAME="${subcmd}"
       if ! hadoop_validate_classname "${HADOOP_CLASSNAME}"; then

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8733012a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
new file mode 100644
index 0000000..82582a6
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.genconf;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+
+
+/**
+ * Tests GenerateOzoneRequiredConfigurations.
+ */
+public class TestGenerateOzoneRequiredConfigurations {
+  private static MiniOzoneCluster cluster;
+  private static OzoneConfiguration conf;
+
+  /**
+   * Create a MiniOzoneCluster for testing.
+   * <p>
+   * Ozone is made active by setting OZONE_ENABLED = true and
+   * OZONE_HANDLER_TYPE_KEY = "distributed"
+   *
+   * @throws IOException
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new OzoneConfiguration();
+    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
+    cluster.waitForClusterToBeReady();
+  }
+
+  /**
+   * Shutdown the MiniOzoneCluster.
+   */
+  @AfterClass
+  public static void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Tests a valid path and generates ozone-site.xml.
+   * @throws Exception
+   */
+  @Test
+  public void generateConfigurationsSuccess() throws Exception {
+    String[] args = new String[]{"-output", "."};
+    GenerateOzoneRequiredConfigurations.main(args);
+
+    Assert.assertEquals("Path is valid",
+        true, GenerateOzoneRequiredConfigurations.isValidPath(args[1]));
+
+    Assert.assertEquals("Permission is valid",
+        true, GenerateOzoneRequiredConfigurations.canWrite(args[1]));
+
+    Assert.assertEquals("Config file generated",
+        0, GenerateOzoneRequiredConfigurations.generateConfigurations(args[1]));
+  }
+
+  /**
+   * Tests that ozone-site.xml is not generated when the path is not writable.
+   * @throws Exception
+   */
+  @Test
+  public void generateConfigurationsFailure() throws Exception {
+    String[] args = new String[]{"-output", "/"};
+    GenerateOzoneRequiredConfigurations.main(args);
+
+    Assert.assertEquals("Path is valid",
+        true, GenerateOzoneRequiredConfigurations.isValidPath(args[1]));
+
+    Assert.assertEquals("Invalid permission",
+        false, GenerateOzoneRequiredConfigurations.canWrite(args[1]));
+
+    Assert.assertEquals("Config file not generated",
+        1, GenerateOzoneRequiredConfigurations.generateConfigurations(args[1]));
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8733012a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java
new file mode 100644
index 0000000..6296c9d
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java
@@ -0,0 +1,174 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.genconf;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import javax.xml.bind.Marshaller;
+import java.io.File;
+import java.net.URL;
+import java.nio.file.InvalidPathException;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * GenerateOzoneRequiredConfigurations - A tool to generate ozone-site.xml<br>
+ * This tool generates an ozone-site.xml with minimally required configs.
+ * This tool can be invoked as follows:<br>
+ * <ul>
+ * <li>ozone genconf -output &lt;Path to output file&gt;</li>
+ * <li>ozone genconf -help</li>
+ * </ul>
+ */
+public final class GenerateOzoneRequiredConfigurations {
+
+  private static final String OUTPUT = "-output";
+  private static final String HELP = "-help";
+  private static final String USAGE = "Usage: \nozone genconf "
+      + OUTPUT + " <Path to output file> \n"
+      + "ozone genconf "
+      + HELP;
+  private static final int SUCCESS = 0;
+  private static final int FAILURE = 1;
+
+  private GenerateOzoneRequiredConfigurations() {
+
+  }
+  /**
+   * Entry point for the genconf tool.
+   *
+   * @param args command line arguments
+   */
+  public static void main(String[] args) {
+
+    try {
+      if (args.length == 0) {
+        System.out.println(USAGE);
+        System.exit(1);
+      }
+
+      switch (args[0]) {
+      case OUTPUT:
+        if (args.length > 1) {
+          generateConfigurations(args[1]);
+        } else {
+          System.out.println("Path to output file is mandatory");
+          System.out.println(USAGE);
+          System.exit(1);
+        }
+        break;
+
+      case HELP:
+        System.out.println(USAGE);
+        System.exit(0);
+        break;
+
+      default:
+        System.out.println(USAGE);
+        System.exit(1);
+      }
+
+    } catch (Exception e) {
+      e.printStackTrace();
+    }
+  }
+
+  /**
+   * Check if the path is valid.
+   *
+   * @param path the path to validate
+   * @return true if the path is valid, false otherwise
+   */
+  public static boolean isValidPath(String path) {
+    try {
+      Paths.get(path);
+    } catch (InvalidPathException | NullPointerException ex) {
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * Check if user has permission to write in the specified path.
+   *
+   * @param path the path to check
+   * @return true if the user has permission to write, false otherwise
+   */
+  public static boolean canWrite(String path) {
+    File file = new File(path);
+    return file.canWrite();
+  }
+
+  /**
+   * Generate ozone-site.xml at the specified path.
+   *
+   * @param path the output directory for ozone-site.xml
+   * @return SUCCESS(0) if the file was generated, else FAILURE(1)
+   * @throws JAXBException
+   */
+  public static int generateConfigurations(String path) throws JAXBException {
+
+    if (!isValidPath(path)) {
+      System.out.println("Invalid path or insufficient permission");
+      return FAILURE;
+    }
+
+    if (!canWrite(path)) {
+      System.out.println("Invalid path or insufficient permission");
+      return FAILURE;
+    }
+
+    OzoneConfiguration oc = new OzoneConfiguration();
+
+    ClassLoader cL = Thread.currentThread().getContextClassLoader();
+    if (cL == null) {
+      cL = OzoneConfiguration.class.getClassLoader();
+    }
+    URL url = cL.getResource("ozone-default.xml");
+
+    List<OzoneConfiguration.Property> allProperties =
+        oc.readPropertyFromXml(url);
+
+    List<OzoneConfiguration.Property> requiredProperties = new ArrayList<>();
+
+    for (OzoneConfiguration.Property p : allProperties) {
+      if (p.getTag() != null && p.getTag().contains("REQUIRED")) {
+        requiredProperties.add(p);
+      }
+    }
+
+    OzoneConfiguration.XMLConfiguration requiredConfig =
+        new OzoneConfiguration.XMLConfiguration();
+    requiredConfig.setProperties(requiredProperties);
+
+    JAXBContext context =
+        JAXBContext.newInstance(OzoneConfiguration.XMLConfiguration.class);
+    Marshaller m = context.createMarshaller();
+    m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
+    m.marshal(requiredConfig, new File(path, "ozone-site.xml"));
+
+    System.out.println("ozone-site.xml has been generated at " + path);
+
+    return SUCCESS;
+  }
+}
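
The filter in generateConfigurations keys off a property tag. A hypothetical
ozone-default.xml entry that would be picked up looks like this (the property
name and description are illustrative, not taken from the patch):

    <property>
      <name>ozone.metadata.dirs</name>
      <value/>
      <tag>OZONE, REQUIRED</tag>
      <description>Directory where SCM and OM store metadata.</description>
    </property>

And the expected invocation, assuming the target directory exists and is
writable by the current user:

    $ ozone genconf -output /etc/hadoop/conf
    ozone-site.xml has been generated at /etc/hadoop/conf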

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8733012a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/package-info.java
new file mode 100644
index 0000000..4817d39
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/package-info.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.genconf;
+
+/**
+ * Command line tool to generate required Ozone configs to an ozone-site.xml.
+ */
+




[35/50] [abbrv] hadoop git commit: HADOOP-15449. Increase default timeout of ZK session to avoid frequent NameNode failover

Posted by bo...@apache.org.
HADOOP-15449. Increase default timeout of ZK session to avoid frequent NameNode failover

Signed-off-by: Akira Ajisaka <aa...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61df174e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61df174e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61df174e

Branch: refs/heads/YARN-7402
Commit: 61df174e8b3d582183306cabfa2347c8b96322ff
Parents: 04757e5
Author: Karthik Palanisamy <ka...@gmail.com>
Authored: Mon May 28 19:41:07 2018 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Mon May 28 19:41:07 2018 +0900

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/ha/ZKFailoverController.java   | 2 +-
 .../hadoop-common/src/main/resources/core-default.xml              | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61df174e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
index a8c19ab..9295288 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
@@ -63,7 +63,7 @@ public abstract class ZKFailoverController {
   
   public static final String ZK_QUORUM_KEY = "ha.zookeeper.quorum";
   private static final String ZK_SESSION_TIMEOUT_KEY = "ha.zookeeper.session-timeout.ms";
-  private static final int ZK_SESSION_TIMEOUT_DEFAULT = 5*1000;
+  private static final int ZK_SESSION_TIMEOUT_DEFAULT = 10*1000;
   private static final String ZK_PARENT_ZNODE_KEY = "ha.zookeeper.parent-znode";
   public static final String ZK_ACL_KEY = "ha.zookeeper.acl";
   private static final String ZK_ACL_DEFAULT = "world:anyone:rwcda";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61df174e/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 9564587..75acf48 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2168,7 +2168,7 @@
 
 <property>
   <name>ha.zookeeper.session-timeout.ms</name>
-  <value>5000</value>
+  <value>10000</value>
   <description>
     The session timeout to use when the ZKFC connects to ZooKeeper.
     Setting this value to a lower value implies that server crashes
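
Deployments that prefer the previous, more aggressive failover detection can
restore the old value in their own core-site.xml; a minimal override sketch:

    <property>
      <name>ha.zookeeper.session-timeout.ms</name>
      <value>5000</value>
    </property>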




[03/50] [abbrv] hadoop git commit: HDFS-13587. TestQuorumJournalManager fails on Windows. Contributed by Anbang Hu.

Posted by bo...@apache.org.
HDFS-13587. TestQuorumJournalManager fails on Windows. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c13dea87
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c13dea87
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c13dea87

Branch: refs/heads/YARN-7402
Commit: c13dea87d9de7a9872fc8b0c939b41b1666a61e5
Parents: 51ce02b
Author: Inigo Goiri <in...@apache.org>
Authored: Wed May 23 11:36:03 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Wed May 23 11:36:03 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java     | 5 +++++
 .../hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java   | 3 ++-
 2 files changed, 7 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c13dea87/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
index 2314e22..f936d75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager;
 import org.apache.hadoop.hdfs.qjournal.server.JournalNode;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
 
 import com.google.common.base.Joiner;
@@ -50,6 +51,10 @@ public class MiniJournalCluster {
     private int numJournalNodes = 3;
     private boolean format = true;
     private final Configuration conf;
+
+    static {
+      DefaultMetricsSystem.setMiniClusterMode(true);
+    }
     
     public Builder(Configuration conf) {
       this.conf = conf;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c13dea87/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
index 34a0348..69856ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
@@ -93,7 +93,8 @@ public class TestQuorumJournalManager {
     conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
     
     cluster = new MiniJournalCluster.Builder(conf)
-      .build();
+        .baseDir(GenericTestUtils.getRandomizedTestDir().getAbsolutePath())
+        .build();
     cluster.waitActive();
     
     qjm = createSpyingQJM();
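
Shown standalone, the setup pattern the patch moves to (a sketch assuming the
new baseDir builder option): every run gets its own directory, so stale file
handles from a previous run on Windows cannot collide with the new cluster:

    MiniJournalCluster cluster = new MiniJournalCluster.Builder(conf)
        .baseDir(GenericTestUtils.getRandomizedTestDir().getAbsolutePath())
        .build();
    cluster.waitActive();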




[10/50] [abbrv] hadoop git commit: YARN-4599. Set OOM control for memory cgroups. (Miklos Szegedi via Haibo Chen)

Posted by bo...@apache.org.
YARN-4599. Set OOM control for memory cgroups. (Miklos Szegedi via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9964799
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9964799
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9964799

Branch: refs/heads/YARN-7402
Commit: d9964799544eefcf424fcc178d987525f5356cdf
Parents: f09dc73
Author: Haibo Chen <ha...@apache.org>
Authored: Wed May 23 11:29:55 2018 -0700
Committer: Haibo Chen <ha...@apache.org>
Committed: Wed May 23 16:35:37 2018 -0700

----------------------------------------------------------------------
 .gitignore                                      |   1 +
 .../hadoop/yarn/conf/YarnConfiguration.java     |  26 +-
 .../src/main/resources/yarn-default.xml         |  67 ++-
 .../src/CMakeLists.txt                          |  19 +
 .../CGroupElasticMemoryController.java          | 476 +++++++++++++++++++
 .../linux/resources/CGroupsHandler.java         |   6 +
 .../linux/resources/CGroupsHandlerImpl.java     |   6 +-
 .../CGroupsMemoryResourceHandlerImpl.java       |  15 -
 .../linux/resources/DefaultOOMHandler.java      | 254 ++++++++++
 .../monitor/ContainersMonitorImpl.java          |  50 ++
 .../executor/ContainerSignalContext.java        |  41 ++
 .../native/oom-listener/impl/oom_listener.c     | 171 +++++++
 .../native/oom-listener/impl/oom_listener.h     | 102 ++++
 .../oom-listener/impl/oom_listener_main.c       | 104 ++++
 .../oom-listener/test/oom_listener_test_main.cc | 292 ++++++++++++
 .../resources/DummyRunnableWithContext.java     |  31 ++
 .../TestCGroupElasticMemoryController.java      | 319 +++++++++++++
 .../TestCGroupsMemoryResourceHandlerImpl.java   |   6 +-
 .../linux/resources/TestDefaultOOMHandler.java  | 307 ++++++++++++
 .../monitor/TestContainersMonitor.java          |   1 +
 .../TestContainersMonitorResourceChange.java    |   3 +-
 .../site/markdown/NodeManagerCGroupsMemory.md   | 133 ++++++
 22 files changed, 2391 insertions(+), 39 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index 934c009..428950b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -17,6 +17,7 @@
 target
 build
 dependency-reduced-pom.xml
+make-build-debug
 
 # Filesystem contract test options and credentials
 auth-keys.xml

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 8e56cb8..6d08831 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1440,6 +1440,25 @@ public class YarnConfiguration extends Configuration {
     NM_PREFIX + "vmem-pmem-ratio";
   public static final float DEFAULT_NM_VMEM_PMEM_RATIO = 2.1f;
 
+  /** Specifies whether to do memory check on overall usage. */
+  public static final String NM_ELASTIC_MEMORY_CONTROL_ENABLED = NM_PREFIX
+      + "elastic-memory-control.enabled";
+  public static final boolean DEFAULT_NM_ELASTIC_MEMORY_CONTROL_ENABLED = false;
+
+  /** Specifies the OOM handler code. */
+  public static final String NM_ELASTIC_MEMORY_CONTROL_OOM_HANDLER = NM_PREFIX
+      + "elastic-memory-control.oom-handler";
+
+  /** The path to the OOM listener.*/
+  public static final String NM_ELASTIC_MEMORY_CONTROL_OOM_LISTENER_PATH =
+      NM_PREFIX + "elastic-memory-control.oom-listener.path";
+
+  /** Maximum time in seconds to resolve an OOM situation. */
+  public static final String NM_ELASTIC_MEMORY_CONTROL_OOM_TIMEOUT_SEC =
+      NM_PREFIX + "elastic-memory-control.timeout-sec";
+  public static final Integer
+      DEFAULT_NM_ELASTIC_MEMORY_CONTROL_OOM_TIMEOUT_SEC = 5;
+
   /** Number of Virtual CPU Cores which can be allocated for containers.*/
   public static final String NM_VCORES = NM_PREFIX + "resource.cpu-vcores";
   public static final int DEFAULT_NM_VCORES = 8;
@@ -2006,13 +2025,6 @@ public class YarnConfiguration extends Configuration {
   /** The path to the Linux container executor.*/
   public static final String NM_LINUX_CONTAINER_EXECUTOR_PATH =
     NM_PREFIX + "linux-container-executor.path";
-  
-  /** 
-   * The UNIX group that the linux-container-executor should run as.
-   * This is intended to be set as part of container-executor.cfg. 
-   */
-  public static final String NM_LINUX_CONTAINER_GROUP =
-    NM_PREFIX + "linux-container-executor.group";
 
   /**
    * True if linux-container-executor should limit itself to one user

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 156ca24..da44ccb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -772,7 +772,7 @@
   <property>
     <description>Maximum size in bytes for configurations that can be provided
       by application to RM for delegation token renewal.
-      By experiment, it's roughly 128 bytes per key-value pair.
+      By experiment, its roughly 128 bytes per key-value pair.
       The default value 12800 allows roughly 100 configs, may be less.
     </description>
     <name>yarn.resourcemanager.delegation-token.max-conf-size-bytes</name>
@@ -1860,14 +1860,6 @@
   </property>
 
   <property>
-    <description>
-    The UNIX group that the linux-container-executor should run as.
-    </description>
-    <name>yarn.nodemanager.linux-container-executor.group</name>
-    <value></value>
-  </property>
-
-  <property>
     <description>T-file compression types used to compress aggregated logs.</description>
     <name>yarn.nodemanager.log-aggregation.compression-type</name>
     <value>none</value>
@@ -2158,7 +2150,7 @@
     <description>
     In the server side it indicates whether timeline service is enabled or not.
     And in the client side, users can enable it to indicate whether client wants
-    to use timeline service. If it's enabled in the client side along with
+    to use timeline service. If its enabled in the client side along with
     security, then yarn client tries to fetch the delegation tokens for the
     timeline server.
     </description>
@@ -3404,7 +3396,7 @@
     <description>
       Defines the limit of the diagnostics message of an application
       attempt, in kilo characters (character count * 1024).
-      When using ZooKeeper to store application state behavior, it's
+      When using ZooKeeper to store application state behavior, its
       important to limit the size of the diagnostic messages to
       prevent YARN from overwhelming ZooKeeper. In cases where
       yarn.resourcemanager.state-store.max-completed-applications is set to
@@ -3819,4 +3811,57 @@
     <value>/usr/bin/numactl</value>
   </property>
 
+  <property>
+    <description>
+      Enable elastic memory control. This is a Linux only feature.
+      When enabled, the node manager adds a listener to receive an
+      event if the containers together exceed a limit.
+      The limit is specified by yarn.nodemanager.resource.memory-mb.
+      If this is not set, the limit is set based on the capabilities.
+      See yarn.nodemanager.resource.detect-hardware-capabilities
+      for details.
+      The limit applies to the physical or virtual (rss+swap) memory
+      depending on whether yarn.nodemanager.pmem-check-enabled or
+      yarn.nodemanager.vmem-check-enabled is set.
+    </description>
+    <name>yarn.nodemanager.elastic-memory-control.enabled</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>
+      The name of a JVM class. The class must implement the Runnable
+      interface. It is called
+      if yarn.nodemanager.elastic-memory-control.enabled
+      is set and the system reaches its memory limit.
+      When called the handler must preempt a container,
+      since all containers are frozen by cgroups.
+      Once preempted some memory is released, so that the
+      kernel can resume all containers. Because of this the
+      handler has to act quickly.
+    </description>
+    <name>yarn.nodemanager.elastic-memory-control.oom-handler</name>
+    <value>org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.DefaultOOMHandler</value>
+  </property>
+
+  <property>
+    <description>
+      The path to the oom-listener tool. Elastic memory control is only
+      supported on Linux. It relies on kernel events. The tool forwards
+      these kernel events to the standard input, so that the node manager
+      can preempt containers in an out-of-memory scenario.
+      You rarely need to update this setting.
+    </description>
+    <name>yarn.nodemanager.elastic-memory-control.oom-listener.path</name>
+    <value></value>
+  </property>
+
+  <property>
+    <description>
+      Maximum time to wait for an OOM situation to get resolved before
+      bringing down the node.
+    </description>
+    <name>yarn.nodemanager.elastic-memory-control.timeout-sec</name>
+    <value>5</value>
+  </property>
 </configuration>
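
A hedged sketch of enabling the feature programmatically with the new
YarnConfiguration keys (equivalent to setting the yarn-site.xml properties
above):

    YarnConfiguration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_ENABLED, true);
    conf.setInt(YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_TIMEOUT_SEC, 5);

And a sketch of what a custom OOM handler could look like; the class and its
strategy are illustrative, and the exact construction contract (for example a
constructor taking the NM context) is not shown in this excerpt. Only the
Runnable requirement comes from the description above:

    /** Illustrative only: a pluggable OOM handler. */
    public class LargestContainerOOMHandler implements Runnable {
      @Override
      public void run() {
        // All containers are frozen by cgroups at this point, so act fast:
        // pick a victim container, kill it to release memory, and let the
        // kernel resume the remaining containers.
      }
    }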

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
index 79faeec..a614f80 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
@@ -30,6 +30,7 @@ string(REGEX MATCH . HCD_ONE "${HADOOP_CONF_DIR}")
 string(COMPARE EQUAL ${HCD_ONE} / HADOOP_CONF_DIR_IS_ABS)
 
 set (CMAKE_C_STANDARD 99)
+set (CMAKE_CXX_STANDARD 11)
 
 include(CheckIncludeFiles)
 check_include_files("sys/types.h;sys/sysctl.h" HAVE_SYS_SYSCTL_H)
@@ -113,6 +114,7 @@ include_directories(
     ${GTEST_SRC_DIR}/include
     main/native/container-executor
     main/native/container-executor/impl
+    main/native/oom-listener/impl
 )
 # add gtest as system library to suppress gcc warnings
 include_directories(SYSTEM ${GTEST_SRC_DIR}/include)
@@ -171,3 +173,20 @@ add_executable(cetest
         main/native/container-executor/test/utils/test_docker_util.cc)
 target_link_libraries(cetest gtest container)
 output_directory(cetest test)
+
+# CGroup OOM listener
+add_executable(oom-listener
+        main/native/oom-listener/impl/oom_listener.c
+        main/native/oom-listener/impl/oom_listener.h
+        main/native/oom-listener/impl/oom_listener_main.c
+)
+output_directory(oom-listener target/usr/local/bin)
+
+# CGroup OOM listener test with GTest
+add_executable(test-oom-listener
+        main/native/oom-listener/impl/oom_listener.c
+        main/native/oom-listener/impl/oom_listener.h
+        main/native/oom-listener/test/oom_listener_test_main.cc
+)
+target_link_libraries(test-oom-listener gtest)
+output_directory(test-oom-listener test)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java
new file mode 100644
index 0000000..752c3a6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java
@@ -0,0 +1,476 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.util.Clock;
+import org.apache.hadoop.yarn.util.MonotonicClock;
+
+import java.io.File;
+import java.io.InputStream;
+import java.lang.reflect.Constructor;
+import java.nio.charset.Charset;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_ELASTIC_MEMORY_CONTROL_OOM_TIMEOUT_SEC;
+import static org.apache.hadoop.yarn.conf.YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_ENABLED;
+import static org.apache.hadoop.yarn.conf.YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_TIMEOUT_SEC;
+import static org.apache.hadoop.yarn.conf.YarnConfiguration.NM_PMEM_CHECK_ENABLED;
+import static org.apache.hadoop.yarn.conf.YarnConfiguration.NM_VMEM_CHECK_ENABLED;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_PARAM_MEMORY_HARD_LIMIT_BYTES;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_PARAM_MEMORY_OOM_CONTROL;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_PARAM_MEMORY_SWAP_HARD_LIMIT_BYTES;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_NO_LIMIT;
+
+/**
+ * This thread controls memory usage using cgroups. It listens to out of memory
+ * events of all the containers together, and, if we go over the limit, picks
+ * a container to kill. The algorithm that picks the container is a plugin.
+ */
+public class CGroupElasticMemoryController extends Thread {
+  protected static final Log LOG = LogFactory
+      .getLog(CGroupElasticMemoryController.class);
+  private final Clock clock = new MonotonicClock();
+  private String yarnCGroupPath;
+  private String oomListenerPath;
+  private Runnable oomHandler;
+  private CGroupsHandler cgroups;
+  private boolean controlPhysicalMemory;
+  private boolean controlVirtualMemory;
+  private long limit;
+  private Process process = null;
+  private boolean stopped = false;
+  private int timeoutMS;
+
+  /**
+   * Default constructor.
+   * @param conf Yarn configuration to use
+   * @param context Node manager context to pass to the out of memory handler
+   * @param cgroups Cgroups handler configured
+   * @param controlPhysicalMemory Whether to listen to physical memory OOM
+   * @param controlVirtualMemory Whether to listen to virtual memory OOM
+   * @param limit memory limit in bytes
+   * @param oomHandlerOverride optional OOM handler
+   * @exception YarnException Could not instantiate class
+   */
+  @VisibleForTesting
+  CGroupElasticMemoryController(Configuration conf,
+                                       Context context,
+                                       CGroupsHandler cgroups,
+                                       boolean controlPhysicalMemory,
+                                       boolean controlVirtualMemory,
+                                       long limit,
+                                       Runnable oomHandlerOverride)
+      throws YarnException {
+    super("CGroupElasticMemoryController");
+    boolean controlVirtual = controlVirtualMemory && !controlPhysicalMemory;
+    Runnable oomHandlerTemp =
+        getDefaultOOMHandler(conf, context, oomHandlerOverride, controlVirtual);
+    if (controlPhysicalMemory && controlVirtualMemory) {
+      LOG.warn(
+          NM_ELASTIC_MEMORY_CONTROL_ENABLED + " is on. " +
+          "We cannot control both virtual and physical " +
+          "memory at the same time. Enforcing virtual memory. " +
+          "If swapping is enabled set " +
+          "only " + NM_PMEM_CHECK_ENABLED + " to true otherwise set " +
+          "only " + NM_VMEM_CHECK_ENABLED + " to true.");
+    }
+    if (!controlPhysicalMemory && !controlVirtualMemory) {
+      throw new YarnException(
+          NM_ELASTIC_MEMORY_CONTROL_ENABLED + " is on. " +
+              "We need either virtual or physical memory check requested. " +
+              "If swapping is enabled set " +
+              "only " + NM_PMEM_CHECK_ENABLED + " to true otherwise set " +
+              "only " + NM_VMEM_CHECK_ENABLED + " to true.");
+    }
+    // At this point we are safe: no more exceptions can be thrown
+    this.timeoutMS =
+        1000 * conf.getInt(NM_ELASTIC_MEMORY_CONTROL_OOM_TIMEOUT_SEC,
+        DEFAULT_NM_ELASTIC_MEMORY_CONTROL_OOM_TIMEOUT_SEC);
+    this.oomListenerPath = getOOMListenerExecutablePath(conf);
+    this.oomHandler = oomHandlerTemp;
+    this.cgroups = cgroups;
+    this.controlPhysicalMemory = !controlVirtual;
+    this.controlVirtualMemory = controlVirtual;
+    this.yarnCGroupPath = this.cgroups
+        .getPathForCGroup(CGroupsHandler.CGroupController.MEMORY, "");
+    this.limit = limit;
+  }
+
+  /**
+   * Get the configured OOM handler.
+   * @param conf configuration
+   * @param context context to pass to constructor
+   * @param oomHandlerLocal Default override
+   * @param controlVirtual Control physical or virtual memory
+   * @return The configured or overridden OOM handler.
+   * @throws YarnException in case the constructor failed
+   */
+  private Runnable getDefaultOOMHandler(
+      Configuration conf, Context context, Runnable oomHandlerLocal,
+      boolean controlVirtual)
+      throws YarnException {
+    Class oomHandlerClass =
+        conf.getClass(
+            YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_HANDLER,
+            DefaultOOMHandler.class);
+    if (oomHandlerLocal == null) {
+      try {
+        Constructor constr = oomHandlerClass.getConstructor(
+            Context.class, boolean.class);
+        oomHandlerLocal = (Runnable)constr.newInstance(
+            context, controlVirtual);
+      } catch (Exception ex) {
+        throw new YarnException(ex);
+      }
+    }
+    return oomHandlerLocal;
+  }
+
+  /**
+   * Default constructor.
+   * @param conf Yarn configuration to use
+   * @param context Node manager context to pass to the out of memory handler
+   * @param cgroups Cgroups handler configured
+   * @param controlPhysicalMemory Whether to listen to physical memory OOM
+   * @param controlVirtualMemory Whether to listen to virtual memory OOM
+   * @param limit memory limit in bytes
+   * @exception YarnException Could not instantiate class
+   */
+  public CGroupElasticMemoryController(Configuration conf,
+                                       Context context,
+                                       CGroupsHandler cgroups,
+                                       boolean controlPhysicalMemory,
+                                       boolean controlVirtualMemory,
+                                       long limit)
+      throws YarnException {
+    this(conf,
+        context,
+        cgroups,
+        controlPhysicalMemory,
+        controlVirtualMemory,
+        limit,
+        null);
+  }
+
+  /**
+   * Exception thrown if the OOM situation is not resolved.
+   */
+  static private class OOMNotResolvedException extends YarnRuntimeException {
+    OOMNotResolvedException(String message, Exception parent) {
+      super(message, parent);
+    }
+  }
+
+  /**
+   * Stop listening to the cgroup.
+   */
+  public synchronized void stopListening() {
+    stopped = true;
+    if (process != null) {
+      process.destroyForcibly();
+    } else {
+      LOG.warn("Trying to stop listening, when listening is not running");
+    }
+  }
+
+  /**
+   * Checks if the CGroupElasticMemoryController is available on this system.
+   * This assumes that the Linux container executor is already initialized.
+   * We need to have CGroups enabled.
+   *
+   * @return True if CGroupElasticMemoryController is available.
+   * False otherwise.
+   */
+  public static boolean isAvailable() {
+    try {
+      if (!Shell.LINUX) {
+        LOG.info("CGroupElasticMemoryController currently is supported only "
+            + "on Linux.");
+        return false;
+      }
+      if (ResourceHandlerModule.getCGroupsHandler() == null ||
+          ResourceHandlerModule.getMemoryResourceHandler() == null) {
+        LOG.info("CGroupElasticMemoryController requires enabling " +
+            "memory CGroups with" +
+            YarnConfiguration.NM_MEMORY_RESOURCE_ENABLED);
+        return false;
+      }
+    } catch (SecurityException se) {
+      LOG.info("Failed to get Operating System name. " + se);
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * Main OOM listening thread. It uses an external process to listen to
+   * Linux events. The external process does not need to run as root, so
+   * it is not related to container-executor. We do not use JNI for security
+   * reasons.
+   */
+  @Override
+  public void run() {
+    ExecutorService executor = null;
+    try {
+      // Disable OOM killer and set a limit.
+      // This has to be set first, so that we get notified about valid events.
+      // We will be notified about events even if they happened before
+      // oom-listener started.
+      setCGroupParameters();
+
+      // Start a listener process
+      ProcessBuilder oomListener = new ProcessBuilder();
+      oomListener.command(oomListenerPath, yarnCGroupPath);
+      synchronized (this) {
+        if (!stopped) {
+          process = oomListener.start();
+        } else {
+          resetCGroupParameters();
+          LOG.info("Listener stopped before starting");
+          return;
+        }
+      }
+      LOG.info(String.format("Listening on %s with %s",
+          yarnCGroupPath,
+          oomListenerPath));
+
+      // We need one thread for the error stream and another
+      // as a watchdog for the OOM killer
+      executor = Executors.newFixedThreadPool(2);
+
+      // Listen to any errors in the background. We do not expect this to
+      // be large in size, so it will fit into a string.
+      Future<String> errorListener = executor.submit(
+          () -> IOUtils.toString(process.getErrorStream(),
+              Charset.defaultCharset()));
+
+      // We get Linux event increments (8 bytes) forwarded from the event
+      // stream. The events cannot be split, so it is safe to read them as
+      // a whole. There is no race condition with the cgroup running out of
+      // memory: if oom is 1 at startup,
+      // oom_listener will send an initial notification
+      InputStream events = process.getInputStream();
+      byte[] event = new byte[8];
+      int read;
+      // This loop can be exited by terminating the process
+      // with stopListening()
+      while ((read = events.read(event)) == event.length) {
+        // An OOM event has occurred
+        resolveOOM(executor);
+      }
+
+      if (read != -1) {
+        LOG.warn(String.format("Characters returned from event hander: %d",
+            read));
+      }
+
+      // If the input stream is closed, wait for the process to exit.
+      int exitCode = process.waitFor();
+      String error = errorListener.get();
+      process = null;
+      LOG.info(String.format("OOM listener exited %d %s", exitCode, error));
+    } catch (OOMNotResolvedException ex) {
+      // We could mark the node unhealthy, but that would shut down the node
+      // anyway. Let's just bring down the node manager, since all containers
+      // are frozen.
+      throw new YarnRuntimeException("Could not resolve OOM", ex);
+    } catch (Exception ex) {
+      synchronized (this) {
+        if (!stopped) {
+          LOG.warn("OOM Listener exiting.", ex);
+        }
+      }
+    } finally {
+      // Make sure we do not leak the child process,
+      // especially if process.waitFor() did not finish.
+      if (process != null && process.isAlive()) {
+        process.destroyForcibly();
+      }
+      if (executor != null) {
+        try {
+          executor.awaitTermination(6, TimeUnit.SECONDS);
+        } catch (InterruptedException e) {
+          LOG.warn("Exiting without processing all OOM events.");
+        }
+        executor.shutdown();
+      }
+      resetCGroupParameters();
+    }
+  }
+
+  /**
+   * Resolve an OOM event.
+   * Watch for handler timeouts.
+   * @param executor Executor to create watchdog with.
+   * @throws InterruptedException interrupted
+   * @throws java.util.concurrent.ExecutionException cannot launch watchdog
+   */
+  private void resolveOOM(ExecutorService executor)
+      throws InterruptedException, java.util.concurrent.ExecutionException {
+    // Just log, when we are still in OOM after a couple of seconds
+    final long start = clock.getTime();
+    Future<Boolean> watchdog =
+        executor.submit(() -> watchAndLogOOMState(start));
+    // Kill something to resolve the issue
+    try {
+      oomHandler.run();
+    } catch (RuntimeException ex) {
+      watchdog.cancel(true);
+      throw new OOMNotResolvedException("OOM handler failed", ex);
+    }
+    if (!watchdog.get()) {
+      // If we are still in OOM,
+      // the watchdog will trigger stop
+      // listening to exit this loop
+      throw new OOMNotResolvedException("OOM handler timed out", null);
+    }
+  }
+
+  /**
+   * Watch while we are under OOM and log. Send an update log every second.
+   * @param start time in milliseconds when the OOM was detected
+   * @return whether the OOM was resolved successfully
+   */
+  private boolean watchAndLogOOMState(long start) {
+    long lastLog = start;
+    try {
+      long end = start;
+      // Report failure, if we are still in OOM after the configured
+      // timeout (5 seconds by default)
+      while(end - start < timeoutMS) {
+        end = clock.getTime();
+        String underOOM = cgroups.getCGroupParam(
+            CGroupsHandler.CGroupController.MEMORY,
+            "",
+            CGROUP_PARAM_MEMORY_OOM_CONTROL);
+        if (underOOM.contains(CGroupsHandler.UNDER_OOM)) {
+          if (end - lastLog > 1000) {
+            LOG.warn(String.format(
+                "OOM not resolved in %d ms", end - start));
+            lastLog = end;
+          }
+        } else {
+          LOG.info(String.format(
+              "Resolved OOM in %d ms", end - start));
+          return true;
+        }
+        // We do not want to saturate the CPU
+        // leaving the resources to the actual OOM killer
+        // but we want to be fast, too.
+        Thread.sleep(10);
+      }
+    } catch (InterruptedException ex) {
+      LOG.debug("Watchdog interrupted");
+    } catch (Exception e) {
+      LOG.warn("Exception running logging thread", e);
+    }
+    LOG.warn(String.format("OOM was not resolved in %d ms",
+        clock.getTime() - start));
+    stopListening();
+    return false;
+  }
+
+  /**
+   * Update root memory cgroup. This contains all containers.
+   * The physical limit has to be set first, then the virtual limit.
+   */
+  private void setCGroupParameters() throws ResourceHandlerException {
+    // Disable the OOM killer
+    cgroups.updateCGroupParam(CGroupsHandler.CGroupController.MEMORY, "",
+        CGROUP_PARAM_MEMORY_OOM_CONTROL, "1");
+    if (controlPhysicalMemory && !controlVirtualMemory) {
+      try {
+        // Ignore virtual memory limits, since we do not know what it is set to
+        cgroups.updateCGroupParam(CGroupsHandler.CGroupController.MEMORY, "",
+            CGROUP_PARAM_MEMORY_SWAP_HARD_LIMIT_BYTES, CGROUP_NO_LIMIT);
+      } catch (ResourceHandlerException ex) {
+        LOG.debug("Swap monitoring is turned off in the kernel");
+      }
+      // Set physical memory limits
+      cgroups.updateCGroupParam(CGroupsHandler.CGroupController.MEMORY, "",
+          CGROUP_PARAM_MEMORY_HARD_LIMIT_BYTES, Long.toString(limit));
+    } else if (controlVirtualMemory && !controlPhysicalMemory) {
+      // Ignore virtual memory limits, since we do not know what it is set to
+      cgroups.updateCGroupParam(CGroupsHandler.CGroupController.MEMORY, "",
+          CGROUP_PARAM_MEMORY_SWAP_HARD_LIMIT_BYTES, CGROUP_NO_LIMIT);
+      // Set physical limits to no more than virtual limits
+      cgroups.updateCGroupParam(CGroupsHandler.CGroupController.MEMORY, "",
+          CGROUP_PARAM_MEMORY_HARD_LIMIT_BYTES, Long.toString(limit));
+      // Set virtual memory limits
+      // Important: it has to be set after physical limit is set
+      cgroups.updateCGroupParam(CGroupsHandler.CGroupController.MEMORY, "",
+          CGROUP_PARAM_MEMORY_SWAP_HARD_LIMIT_BYTES, Long.toString(limit));
+    } else {
+      throw new ResourceHandlerException(
+          String.format("Unsupported scenario physical:%b virtual:%b",
+              controlPhysicalMemory, controlVirtualMemory));
+    }
+  }
+
+  /**
+   * Reset root memory cgroup to OS defaults. This controls all containers.
+   */
+  private void resetCGroupParameters() {
+    try {
+      try {
+        // Disable memory limits
+        cgroups.updateCGroupParam(
+            CGroupsHandler.CGroupController.MEMORY, "",
+            CGROUP_PARAM_MEMORY_SWAP_HARD_LIMIT_BYTES, CGROUP_NO_LIMIT);
+      } catch (ResourceHandlerException ex) {
+        LOG.debug("Swap monitoring is turned off in the kernel");
+      }
+      cgroups.updateCGroupParam(
+          CGroupsHandler.CGroupController.MEMORY, "",
+          CGROUP_PARAM_MEMORY_HARD_LIMIT_BYTES, CGROUP_NO_LIMIT);
+      // Enable the OOM killer
+      cgroups.updateCGroupParam(
+          CGroupsHandler.CGroupController.MEMORY, "",
+          CGROUP_PARAM_MEMORY_OOM_CONTROL, "0");
+    } catch (ResourceHandlerException ex) {
+      LOG.warn("Error in cleanup", ex);
+    }
+  }
+
+  private static String getOOMListenerExecutablePath(Configuration conf) {
+    String yarnHomeEnvVar =
+        System.getenv(ApplicationConstants.Environment.HADOOP_YARN_HOME.key());
+    if (yarnHomeEnvVar == null) {
+      yarnHomeEnvVar = ".";
+    }
+    File hadoopBin = new File(yarnHomeEnvVar, "bin");
+    String defaultPath =
+        new File(hadoopBin, "oom-listener").getAbsolutePath();
+    final String path = conf.get(
+        YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_LISTENER_PATH,
+        defaultPath);
+    LOG.debug(String.format("oom-listener path: %s %s", path, defaultPath));
+    return path;
+  }
+}
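
The watchdog logic in watchAndLogOOMState() above boils down to polling an under-OOM predicate until it clears or the timeout elapses, logging at most once per second. Below is a self-contained sketch of that loop, with the cgroup read stubbed out as a BooleanSupplier; the class and method names are illustrative, not part of the patch.

import java.util.function.BooleanSupplier;

public class OomWatchdogSketch {
  // Returns true if the OOM condition cleared within timeoutMs.
  static boolean watch(BooleanSupplier underOom, long timeoutMs)
      throws InterruptedException {
    final long start = System.nanoTime() / 1_000_000L;
    long lastLog = start;
    long now = start;
    while (now - start < timeoutMs) {
      now = System.nanoTime() / 1_000_000L;
      if (!underOom.getAsBoolean()) {
        System.out.printf("Resolved OOM in %d ms%n", now - start);
        return true;
      }
      if (now - lastLog > 1000) {
        System.out.printf("OOM not resolved in %d ms%n", now - start);
        lastLog = now;
      }
      Thread.sleep(10); // yield the CPU to the actual OOM handling
    }
    return false;
  }

  public static void main(String[] args) throws InterruptedException {
    // Simulate an OOM condition that resolves after roughly 50 ms.
    final long clearAt = System.nanoTime() / 1_000_000L + 50;
    boolean ok = watch(() -> System.nanoTime() / 1_000_000L < clearAt, 5000);
    System.out.println("resolved: " + ok);
  }
}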

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java
index e279504..9dc16c3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java
@@ -76,8 +76,14 @@ public interface CGroupsHandler {
   String CGROUP_PARAM_BLKIO_WEIGHT = "weight";
 
   String CGROUP_PARAM_MEMORY_HARD_LIMIT_BYTES = "limit_in_bytes";
+  String CGROUP_PARAM_MEMORY_SWAP_HARD_LIMIT_BYTES = "memsw.limit_in_bytes";
   String CGROUP_PARAM_MEMORY_SOFT_LIMIT_BYTES = "soft_limit_in_bytes";
+  String CGROUP_PARAM_MEMORY_OOM_CONTROL = "oom_control";
   String CGROUP_PARAM_MEMORY_SWAPPINESS = "swappiness";
+  String CGROUP_PARAM_MEMORY_USAGE_BYTES = "usage_in_bytes";
+  String CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES = "memsw.usage_in_bytes";
+  String CGROUP_NO_LIMIT = "-1";
+  String UNDER_OOM = "under_oom 1";
 
 
   String CGROUP_CPU_PERIOD_US = "cfs_period_us";
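
The UNDER_OOM constant added above is matched against the raw contents of memory.oom_control. A small sketch of that check follows; the sample file contents are typical of the cgroups v1 memory controller, though the exact fields can vary by kernel.

public class UnderOomCheckSketch {
  public static void main(String[] args) {
    // Typical memory.oom_control contents while the OOM killer is disabled
    // and the cgroup is frozen in an OOM state.
    String oomControl = "oom_kill_disable 1\nunder_oom 1\n";
    boolean underOom = oomControl.contains("under_oom 1");
    System.out.println("under OOM: " + underOom); // true
  }
}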

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
index 008f3d7..6ed94e1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
@@ -594,7 +594,11 @@ class CGroupsHandlerImpl implements CGroupsHandler {
   @Override
   public String getCGroupParam(CGroupController controller, String cGroupId,
       String param) throws ResourceHandlerException {
-    String cGroupParamPath = getPathForCGroupParam(controller, cGroupId, param);
+    String cGroupParamPath =
+        param.equals(CGROUP_FILE_TASKS) ?
+            getPathForCGroup(controller, cGroupId)
+                + Path.SEPARATOR + param :
+        getPathForCGroupParam(controller, cGroupId, param);
 
     try {
       byte[] contents = Files.readAllBytes(Paths.get(cGroupParamPath));
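
The branch above exists because most memory parameters live at <cgroup>/<controller>.<param> (for example memory.oom_control), while the tasks file carries no controller prefix. A stand-alone sketch of that path construction, with an illustrative cgroup mount point:

public class CGroupParamPathSketch {
  static String paramPath(String cgroupDir, String controller, String param) {
    return "tasks".equals(param)
        ? cgroupDir + "/" + param                     // .../container_1/tasks
        : cgroupDir + "/" + controller + "." + param; // .../memory.oom_control
  }

  public static void main(String[] args) {
    String dir = "/sys/fs/cgroup/memory/hadoop-yarn/container_1";
    System.out.println(paramPath(dir, "memory", "tasks"));
    System.out.println(paramPath(dir, "memory", "oom_control"));
  }
}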

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
index 2d1585e..a57adb1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
@@ -65,21 +65,6 @@ public class CGroupsMemoryResourceHandlerImpl implements MemoryResourceHandler {
   @Override
   public List<PrivilegedOperation> bootstrap(Configuration conf)
       throws ResourceHandlerException {
-    boolean pmemEnabled =
-        conf.getBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED,
-            YarnConfiguration.DEFAULT_NM_PMEM_CHECK_ENABLED);
-    boolean vmemEnabled =
-        conf.getBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED,
-            YarnConfiguration.DEFAULT_NM_VMEM_CHECK_ENABLED);
-    if (pmemEnabled || vmemEnabled) {
-      String msg = "The default YARN physical and/or virtual memory health"
-          + " checkers as well as the CGroups memory controller are enabled. "
-          + "If you wish to use the Cgroups memory controller, please turn off"
-          + " the default physical/virtual memory checkers by setting "
-          + YarnConfiguration.NM_PMEM_CHECK_ENABLED + " and "
-          + YarnConfiguration.NM_VMEM_CHECK_ENABLED + " to false.";
-      throw new ResourceHandlerException(msg);
-    }
     this.cGroupsHandler.initializeCGroupController(MEMORY);
     enforce = conf.getBoolean(
         YarnConfiguration.NM_MEMORY_RESOURCE_ENFORCED,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java
new file mode 100644
index 0000000..c690225
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java
@@ -0,0 +1,254 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Comparator;
+
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_FILE_TASKS;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_PARAM_MEMORY_OOM_CONTROL;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_PARAM_MEMORY_USAGE_BYTES;
+
+/**
+ * A very basic OOM handler implementation.
+ * See the javadoc on the run() method for details.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class DefaultOOMHandler implements Runnable {
+  protected static final Log LOG = LogFactory
+      .getLog(DefaultOOMHandler.class);
+  private Context context;
+  private boolean virtual;
+  private CGroupsHandler cgroups;
+
+  /**
+   * Create an OOM handler.
+   * This has to be public to be able to construct through reflection.
+   * @param context node manager context to work with
+   * @param testVirtual Test virtual memory or physical
+   */
+  public DefaultOOMHandler(Context context, boolean testVirtual) {
+    this.context = context;
+    this.virtual = testVirtual;
+    this.cgroups = ResourceHandlerModule.getCGroupsHandler();
+  }
+
+  @VisibleForTesting
+  void setCGroupsHandler(CGroupsHandler handler) {
+    cgroups = handler;
+  }
+
+  /**
+   * Kill the container, if it has exceeded its request.
+   *
+   * @param container Container to check
+   * @param fileName  CGroup filename (physical or swap/virtual)
+   * @return true, if the container was preempted
+   */
+  private boolean killContainerIfOOM(Container container, String fileName) {
+    String value = null;
+    try {
+      value = cgroups.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
+          container.getContainerId().toString(),
+          fileName);
+      long usage = Long.parseLong(value);
+      long request = container.getResource().getMemorySize() * 1024 * 1024;
+
+      // Check if the container has exceeded its limits.
+      if (usage > request) {
+        // Kill the container
+        // We could call the regular cleanup but that sends a
+        // SIGTERM first, which cannot be handled by frozen processes.
+        // Walk through the cgroup
+        // tasks file and kill all processes in it.
+        sigKill(container);
+        String message = String.format(
+            "Container %s was killed by elastic cgroups OOM handler using %d " +
+                "when requested only %d",
+            container.getContainerId(), usage, request);
+        LOG.warn(message);
+        return true;
+      }
+    } catch (ResourceHandlerException ex) {
+      LOG.warn(String.format("Could not access memory resource for %s",
+          container.getContainerId()), ex);
+    } catch (NumberFormatException ex) {
+      LOG.warn(String.format("Could not parse %s in %s",
+          value, container.getContainerId()));
+    }
+    return false;
+  }
+
+  /**
+   * SIGKILL the specified container. We do this not using the standard
+   * container logic. The reason is that the processes are frozen by
+   * the cgroups OOM handler, so they cannot respond to SIGTERM.
+   * On the other hand we have to be as fast as possible.
+   * We walk through the list of active processes in the container.
+   * This is needed because frozen parents cannot signal their children.
+   * We kill each process and then try again until the whole cgroup
+   * is cleaned up. This logic avoids leaking processes in a cgroup.
+   * Currently the killing only succeeds for PGIDs.
+   *
+   * @param container Container to clean up
+   */
+  private void sigKill(Container container) {
+    boolean finished = false;
+    try {
+      while (!finished) {
+        String[] pids =
+            cgroups.getCGroupParam(
+                CGroupsHandler.CGroupController.MEMORY,
+                container.getContainerId().toString(),
+                CGROUP_FILE_TASKS)
+                .split("\n");
+        finished = true;
+        for (String pid : pids) {
+          // Note: this kills only PGIDs currently
+          if (pid != null && !pid.isEmpty()) {
+            LOG.debug(String.format(
+                "Terminating container %s Sending SIGKILL to -%s",
+                container.getContainerId().toString(),
+                pid));
+            finished = false;
+            try {
+              context.getContainerExecutor().signalContainer(
+                  new ContainerSignalContext.Builder().setContainer(container)
+                      .setUser(container.getUser())
+                      .setPid(pid).setSignal(ContainerExecutor.Signal.KILL)
+                      .build());
+            } catch (IOException ex) {
+              LOG.warn(String.format("Cannot kill container %s pid -%s.",
+                  container.getContainerId(), pid), ex);
+            }
+          }
+        }
+        try {
+          Thread.sleep(10);
+        } catch (InterruptedException e) {
+          LOG.debug("Interrupted while waiting for processes to disappear");
+        }
+      }
+    } catch (ResourceHandlerException ex) {
+      LOG.warn(String.format(
+          "Cannot list more tasks in container %s to kill.",
+          container.getContainerId()));
+    }
+  }
+
+  /**
+   * It is called when the node is under an OOM condition. All processes in
+   * all sub-cgroups are suspended. We need to act fast, so that we do not
+   * affect the overall system utilization.
+   * In general we try to find a newly run container that exceeded its limits.
+   * The justification is cost, since probably this is the one that has
+   * accumulated the least amount of uncommitted data so far.
+   * We continue the process until the OOM is resolved.
+   */
+  @Override
+  public void run() {
+    try {
+      // Reverse order by start time
+      Comparator<Container> comparator = (Container o1, Container o2) -> {
+        long order = o1.getContainerStartTime() - o2.getContainerStartTime();
+        return order > 0 ? -1 : order < 0 ? 1 : 0;
+      };
+
+      // We kill containers until the kernel reports the OOM situation resolved
+      // Note: If the kernel has a delay this may kill more than necessary
+      while (true) {
+        String status = cgroups.getCGroupParam(
+            CGroupsHandler.CGroupController.MEMORY,
+            "",
+            CGROUP_PARAM_MEMORY_OOM_CONTROL);
+        if (!status.contains(CGroupsHandler.UNDER_OOM)) {
+          break;
+        }
+
+        // The first pass kills a recent container
+        // that uses more than its request
+        ArrayList<Container> containers = new ArrayList<>();
+        containers.addAll(context.getContainers().values());
+        // Note: Sorting may take a long time with 10K+ containers
+        // but it is acceptable now with low number of containers per node
+        containers.sort(comparator);
+
+        // Kill the latest container that exceeded its request
+        boolean found = false;
+        for (Container container : containers) {
+          if (!virtual) {
+            if (killContainerIfOOM(container,
+                CGROUP_PARAM_MEMORY_USAGE_BYTES)) {
+              found = true;
+              break;
+            }
+          } else {
+            if (killContainerIfOOM(container,
+                CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES)) {
+              found = true;
+              break;
+            }
+          }
+        }
+        if (found) {
+          continue;
+        }
+
+        // We have not found any containers that ran out of their limit,
+        // so we will kill the latest one. This can happen if all containers
+        // use close to their request and one of them requests a big block,
+        // triggering the OOM freeze.
+        // Currently there is no other way to identify the outstanding one.
+        if (containers.size() > 0) {
+          Container container = containers.get(0);
+          sigKill(container);
+          String message = String.format(
+              "Newest container %s killed by elastic cgroups OOM handler using",
+              container.getContainerId());
+          LOG.warn(message);
+          continue;
+        }
+
+        // This can happen if SIGKILL did not clean up non-PGID processes,
+        // containers launched by other users,
+        // or a process that was put into the root YARN cgroup.
+        throw new YarnRuntimeException(
+            "Could not find any containers but CGroups " +
+                "reserved for containers ran out of memory. " +
+                "I am giving up");
+      }
+    } catch (ResourceHandlerException ex) {
+      LOG.warn("Could not fecth OOM status. " +
+          "This is expected at shutdown. Exiting.", ex);
+    }
+  }
+}
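
The comparator above orders containers newest-first, so the handler prefers killing the most recently started over-limit container. The same ordering written with Comparator.comparingLong on a stubbed container type (illustrative names only):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class KillOrderSketch {
  static final class FakeContainer {
    final String id;
    final long startTime;
    FakeContainer(String id, long startTime) {
      this.id = id;
      this.startTime = startTime;
    }
  }

  public static void main(String[] args) {
    List<FakeContainer> containers = new ArrayList<>();
    containers.add(new FakeContainer("c1", 100L));
    containers.add(new FakeContainer("c2", 300L));
    containers.add(new FakeContainer("c3", 200L));
    // Reverse order by start time: latest started container first.
    containers.sort(
        Comparator.comparingLong((FakeContainer c) -> c.startTime).reversed());
    containers.forEach(c -> System.out.println(c.id)); // c2, c3, c1
  }
}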

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
index 35015c2..bd68dfe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
@@ -20,6 +20,9 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupElasticMemoryController;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule;
 import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -64,6 +67,7 @@ public class ContainersMonitorImpl extends AbstractService implements
 
   private long monitoringInterval;
   private MonitoringThread monitoringThread;
+  private CGroupElasticMemoryController oomListenerThread;
   private boolean containerMetricsEnabled;
   private long containerMetricsPeriodMs;
   private long containerMetricsUnregisterDelayMs;
@@ -85,6 +89,8 @@ public class ContainersMonitorImpl extends AbstractService implements
 
   private boolean pmemCheckEnabled;
   private boolean vmemCheckEnabled;
+  private boolean elasticMemoryEnforcement;
+  private boolean strictMemoryEnforcement;
   private boolean containersMonitorEnabled;
 
   private long maxVCoresAllottedForContainers;
@@ -173,8 +179,37 @@ public class ContainersMonitorImpl extends AbstractService implements
     vmemCheckEnabled = this.conf.getBoolean(
         YarnConfiguration.NM_VMEM_CHECK_ENABLED,
         YarnConfiguration.DEFAULT_NM_VMEM_CHECK_ENABLED);
+    elasticMemoryEnforcement = this.conf.getBoolean(
+        YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_ENABLED,
+        YarnConfiguration.DEFAULT_NM_ELASTIC_MEMORY_CONTROL_ENABLED);
+    strictMemoryEnforcement = conf.getBoolean(
+        YarnConfiguration.NM_MEMORY_RESOURCE_ENFORCED,
+        YarnConfiguration.DEFAULT_NM_MEMORY_RESOURCE_ENFORCED);
     LOG.info("Physical memory check enabled: " + pmemCheckEnabled);
     LOG.info("Virtual memory check enabled: " + vmemCheckEnabled);
+    LOG.info("Elastic memory control enabled: " + elasticMemoryEnforcement);
+    LOG.info("Strict memory control enabled: " + strictMemoryEnforcement);
+
+    if (elasticMemoryEnforcement) {
+      if (!CGroupElasticMemoryController.isAvailable()) {
+        // Test for availability outside the constructor
+        // to be able to write non-Linux unit tests for
+        // CGroupElasticMemoryController
+        throw new YarnException(
+            "CGroup Elastic Memory controller enabled but " +
+            "it is not available. Exiting.");
+      } else {
+        this.oomListenerThread = new CGroupElasticMemoryController(
+            conf,
+            context,
+            ResourceHandlerModule.getCGroupsHandler(),
+            pmemCheckEnabled,
+            vmemCheckEnabled,
+            pmemCheckEnabled ?
+                maxPmemAllottedForContainers : maxVmemAllottedForContainers
+        );
+      }
+    }
 
     containersMonitorEnabled =
         isContainerMonitorEnabled() && monitoringInterval > 0;
@@ -246,6 +281,9 @@ public class ContainersMonitorImpl extends AbstractService implements
     if (containersMonitorEnabled) {
       this.monitoringThread.start();
     }
+    if (oomListenerThread != null) {
+      oomListenerThread.start();
+    }
     super.serviceStart();
   }
 
@@ -259,6 +297,14 @@ public class ContainersMonitorImpl extends AbstractService implements
       } catch (InterruptedException e) {
         LOG.info("ContainersMonitorImpl monitoring thread interrupted");
       }
+      if (this.oomListenerThread != null) {
+        this.oomListenerThread.stopListening();
+        try {
+          this.oomListenerThread.join();
+        } finally {
+          this.oomListenerThread = null;
+        }
+      }
     }
     super.serviceStop();
   }
@@ -651,6 +697,10 @@ public class ContainersMonitorImpl extends AbstractService implements
                             ProcessTreeInfo ptInfo,
                             long currentVmemUsage,
                             long currentPmemUsage) {
+      if (elasticMemoryEnforcement || strictMemoryEnforcement) {
+        // We enforce the overall memory usage instead of individual containers
+        return;
+      }
       boolean isMemoryOverLimit = false;
       long vmemLimit = ptInfo.getVmemLimit();
       long pmemLimit = ptInfo.getPmemLimit();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerSignalContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerSignalContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerSignalContext.java
index 56b571b..5b911b8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerSignalContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/executor/ContainerSignalContext.java
@@ -20,6 +20,7 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.executor;
 
+import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal;
@@ -93,4 +94,44 @@ public final class ContainerSignalContext {
   public Signal getSignal() {
     return this.signal;
   }
+
+  /**
+   * Return true if we are trying to signal the same process.
+   * @param obj object to compare to
+   * @return whether we are trying to signal the same process id
+   */
+  @Override
+  public boolean equals(Object obj) {
+    if (obj instanceof ContainerSignalContext) {
+      ContainerSignalContext other = (ContainerSignalContext)obj;
+      boolean ret =
+          (other.getPid() == null && getPid() == null) ||
+              (other.getPid() != null && getPid() != null &&
+                  other.getPid().equals(getPid()));
+      // Parenthesize each null-safe comparison: && binds tighter than ||,
+      // so "ret && a || b" would discard the accumulated result.
+      ret = ret &&
+          ((other.getSignal() == null && getSignal() == null) ||
+          (other.getSignal() != null && getSignal() != null &&
+              other.getSignal().equals(getSignal())));
+      ret = ret &&
+          ((other.getContainer() == null && getContainer() == null) ||
+          (other.getContainer() != null && getContainer() != null &&
+              other.getContainer().equals(getContainer())));
+      ret = ret &&
+          ((other.getUser() == null && getUser() == null) ||
+          (other.getUser() != null && getUser() != null &&
+              other.getUser().equals(getUser())));
+      return ret;
+    }
+    return super.equals(obj);
+  }
+
+  @Override
+  public int hashCode() {
+    return new HashCodeBuilder().
+        append(getPid()).
+        append(getSignal()).
+        append(getContainer()).
+        append(getUser()).
+        toHashCode();
+  }
 }
\ No newline at end of file
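
The null-safe field comparisons above can also be expressed with java.util.Objects, which sidesteps the &&/|| precedence pitfall entirely. A sketch on a reduced two-field stand-in class:

import java.util.Objects;

public class SignalContextEqualsSketch {
  static final class Ctx {
    final String pid;
    final String user;
    Ctx(String pid, String user) {
      this.pid = pid;
      this.user = user;
    }
    @Override
    public boolean equals(Object obj) {
      if (!(obj instanceof Ctx)) {
        return false;
      }
      Ctx other = (Ctx) obj;
      // Objects.equals handles the null == null and null != x cases.
      return Objects.equals(pid, other.pid)
          && Objects.equals(user, other.user);
    }
    @Override
    public int hashCode() {
      return Objects.hash(pid, user);
    }
  }

  public static void main(String[] args) {
    System.out.println(new Ctx("42", "yarn").equals(new Ctx("42", "yarn")));
  }
}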

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/impl/oom_listener.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/impl/oom_listener.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/impl/oom_listener.c
new file mode 100644
index 0000000..0086b26
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/impl/oom_listener.c
@@ -0,0 +1,171 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if __linux
+
+#include <sys/param.h>
+#include <poll.h>
+#include "oom_listener.h"
+
+/*
+ * Print an error.
+*/
+static inline void print_error(const char *file, const char *message,
+                        ...) {
+  fprintf(stderr, "%s ", file);
+  va_list arguments;
+  va_start(arguments, message);
+  vfprintf(stderr, message, arguments);
+  va_end(arguments);
+}
+
+/*
+ * Listen to OOM events in a memory cgroup. See declaration for details.
+ */
+int oom_listener(_oom_listener_descriptors *descriptors, const char *cgroup, int fd) {
+  const char *pattern =
+          cgroup[MAX(strlen(cgroup), 1) - 1] == '/'
+          ? "%s%s" :"%s/%s";
+
+  /* Create an event handle, if we do not have one already*/
+  if (descriptors->event_fd == -1 &&
+      (descriptors->event_fd = eventfd(0, 0)) == -1) {
+    print_error(descriptors->command, "eventfd() failed. errno:%d %s\n",
+                errno, strerror(errno));
+    return EXIT_FAILURE;
+  }
+
+  /*
+   * open the file to listen to (memory.oom_control)
+   * and write the event handle and the file handle
+   * to cgroup.event_control
+   */
+  if (snprintf(descriptors->event_control_path,
+               sizeof(descriptors->event_control_path),
+               pattern,
+               cgroup,
+               "cgroup.event_control") < 0) {
+    print_error(descriptors->command, "path too long %s\n", cgroup);
+    return EXIT_FAILURE;
+  }
+
+  if ((descriptors->event_control_fd = open(
+      descriptors->event_control_path,
+      O_WRONLY|O_CREAT, 0600)) == -1) {
+    print_error(descriptors->command, "Could not open %s. errno:%d %s\n",
+                descriptors->event_control_path,
+                errno, strerror(errno));
+    return EXIT_FAILURE;
+  }
+
+  if (snprintf(descriptors->oom_control_path,
+               sizeof(descriptors->oom_control_path),
+               pattern,
+               cgroup,
+               "memory.oom_control") < 0) {
+    print_error(descriptors->command, "path too long %s\n", cgroup);
+    return EXIT_FAILURE;
+  }
+
+  if ((descriptors->oom_control_fd = open(
+      descriptors->oom_control_path,
+      O_RDONLY)) == -1) {
+    print_error(descriptors->command, "Could not open %s. errno:%d %s\n",
+                descriptors->oom_control_path,
+                errno, strerror(errno));
+    return EXIT_FAILURE;
+  }
+
+  if ((descriptors->oom_command_len = (size_t) snprintf(
+      descriptors->oom_command,
+      sizeof(descriptors->oom_command),
+      "%d %d",
+      descriptors->event_fd,
+      descriptors->oom_control_fd)) < 0) {
+    print_error(descriptors->command, "Could print %d %d\n",
+                descriptors->event_control_fd,
+                descriptors->oom_control_fd);
+    return EXIT_FAILURE;
+  }
+
+  if (write(descriptors->event_control_fd,
+            descriptors->oom_command,
+            descriptors->oom_command_len) == -1) {
+    print_error(descriptors->command, "Could not write to %s errno:%d\n",
+                descriptors->event_control_path, errno);
+    return EXIT_FAILURE;
+  }
+
+  if (close(descriptors->event_control_fd) == -1) {
+    print_error(descriptors->command, "Could not close %s errno:%d\n",
+                descriptors->event_control_path, errno);
+    return EXIT_FAILURE;
+  }
+  descriptors->event_control_fd = -1;
+
+  /*
+   * Listen to events as long as the cgroup exists
+   * and forward them to the fd in the argument.
+   */
+  for (;;) {
+    uint64_t u;
+    ssize_t ret = 0;
+    struct stat stat_buffer = {0};
+    struct pollfd poll_fd = {
+        .fd = descriptors->event_fd,
+        .events = POLLIN
+    };
+
+    ret = poll(&poll_fd, 1, descriptors->watch_timeout);
+    if (ret < 0) {
+      /* Error calling poll */
+      print_error(descriptors->command,
+                  "Could not poll eventfd %d errno:%d %s\n", ret,
+                  errno, strerror(errno));
+      return EXIT_FAILURE;
+    }
+
+    if (ret > 0) {
+      /* Event counter values are always 8 bytes */
+      if ((ret = read(descriptors->event_fd, &u, sizeof(u))) != sizeof(u)) {
+        print_error(descriptors->command,
+                    "Could not read from eventfd %d errno:%d %s\n", ret,
+                    errno, strerror(errno));
+        return EXIT_FAILURE;
+      }
+
+      /* Forward the value to the caller, typically stdout */
+      if ((ret = write(fd, &u, sizeof(u))) != sizeof(u)) {
+        print_error(descriptors->command,
+                    "Could not write to pipe %d errno:%d %s\n", ret,
+                    errno, strerror(errno));
+        return EXIT_FAILURE;
+      }
+    } else if (ret == 0) {
+      /* Timeout has elapsed*/
+
+      /* Quit, if the cgroup is deleted */
+      if (stat(cgroup, &stat_buffer) != 0) {
+        break;
+      }
+    }
+  }
+  return EXIT_SUCCESS;
+}
+
+#endif
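
From the Java side, consuming this listener amounts to launching the binary with a memory cgroup path and reading fixed 8-byte eventfd counters from its stdout, as CGroupElasticMemoryController does above. A hedged stand-alone driver follows; the binary and cgroup paths are illustrative, and the code assumes, like the patch, that 8-byte events are never split across reads.

import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class OomListenerDriverSketch {
  public static void main(String[] args) throws Exception {
    Process p = new ProcessBuilder(
        "/usr/local/bin/oom-listener",            // illustrative install path
        "/sys/fs/cgroup/memory/hadoop-yarn")      // illustrative cgroup path
        .start();
    InputStream events = p.getInputStream();
    byte[] event = new byte[8];
    while (events.read(event) == event.length) {
      // eventfd counters are written in native byte order.
      long counter = ByteBuffer.wrap(event)
          .order(ByteOrder.nativeOrder()).getLong();
      System.out.println("OOM events since last read: " + counter);
    }
    System.out.println("listener exited: " + p.waitFor());
  }
}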

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/impl/oom_listener.h
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/impl/oom_listener.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/impl/oom_listener.h
new file mode 100644
index 0000000..aa77cb6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/impl/oom_listener.h
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if __linux
+
+#include <fcntl.h>
+#include <errno.h>
+#include <string.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <sys/eventfd.h>
+#include <sys/stat.h>
+
+#include <linux/limits.h>
+
+/*
+This file implements a standard cgroups out of memory listener.
+*/
+
+typedef struct _oom_listener_descriptors {
+  /*
+   * Command line that was called to run this process.
+   */
+  const char *command;
+  /*
+   * Event descriptor to watch.
+   * It is filled in by the function,
+   * if not specified, yet.
+   */
+  int event_fd;
+  /*
+   * cgroup.event_control file handle
+   */
+  int event_control_fd;
+  /*
+   * memory.oom_control file handle
+   */
+  int oom_control_fd;
+  /*
+   * cgroup.event_control path
+   */
+  char event_control_path[PATH_MAX];
+  /*
+   * memory.oom_control path
+   */
+  char oom_control_path[PATH_MAX];
+  /*
+   * Control command to write to
+   * cgroup.event_control
+   * Filled by the function.
+   */
+  char oom_command[25];
+  /*
+   * Length of oom_command filled by the function.
+   */
+  size_t oom_command_len;
+  /*
+   * Directory watch timeout
+   */
+  int watch_timeout;
+} _oom_listener_descriptors;
+
+/*
+ Clean up allocated resources in a descriptor structure
+*/
+inline void cleanup(_oom_listener_descriptors *descriptors) {
+  close(descriptors->event_fd);
+  descriptors->event_fd = -1;
+  close(descriptors->event_control_fd);
+  descriptors->event_control_fd = -1;
+  close(descriptors->oom_control_fd);
+  descriptors->oom_control_fd = -1;
+  descriptors->watch_timeout = 1000;
+}
+
+/*
+ * Enable an OOM listener on the memory cgroup cgroup
+ * descriptors: Structure that holds state for testing purposes
+ * cgroup: cgroup path to watch. It has to be a memory cgroup
+ * fd: File to forward events to. Normally this is stdout
+ */
+int oom_listener(_oom_listener_descriptors *descriptors, const char *cgroup, int fd);
+
+#endif

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/impl/oom_listener_main.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/impl/oom_listener_main.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/impl/oom_listener_main.c
new file mode 100644
index 0000000..eb7fc3e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/impl/oom_listener_main.c
@@ -0,0 +1,104 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if __linux
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+
+#include "oom_listener.h"
+
+void print_usage(void) {
+  fprintf(stderr, "oom-listener");
+  fprintf(stderr, "Listen to OOM events in a cgroup");
+  fprintf(stderr, "usage to listen: oom-listener <cgroup directory>\n");
+  fprintf(stderr, "usage to test: oom-listener oom [<pgid>]\n");
+  fprintf(stderr, "example listening: oom-listener /sys/fs/cgroup/memory/hadoop-yarn | xxd -c 8\n");
+  fprintf(stderr, "example oom to test: bash -c 'echo $$ >/sys/fs/cgroup/memory/hadoop-yarn/tasks;oom-listener oom'\n");
+  fprintf(stderr, "example container overload: sudo -u <user> bash -c 'echo $$ && oom-listener oom 0' >/sys/fs/cgroup/memory/hadoop-yarn/<container>/tasks\n");
+  exit(EXIT_FAILURE);
+}
+
+/*
+  Test an OOM situation adding the pid
+  to the group pgid and calling malloc in a loop
+  This can be used to test OOM listener. See examples above.
+*/
+void test_oom_infinite(char* pgids) {
+  if (pgids != NULL) {
+    int pgid = atoi(pgids);
+    setpgid(0, pgid);
+  }
+  while(1) {
+    char* p = (char*)malloc(4096);
+    if (p != NULL) {
+      p[0] = 0xFF;
+    } else {
+      exit(1);
+    }
+  }
+}
+
+/*
+ A command that receives a memory cgroup directory and
+ listens to the events in the directory.
+ It forwards an 8-byte event counter to standard
+ output on every out-of-memory event.
+ usage:
+ oom-listener <cgroup>
+*/
+int main(int argc, char *argv[]) {
+  if (argc >= 2 &&
+      strcmp(argv[1], "oom") == 0)
+    test_oom_infinite(argc < 3 ? NULL : argv[2]);
+
+  if (argc != 2)
+    print_usage();
+
+  _oom_listener_descriptors descriptors = {
+      .command = argv[0],
+      .event_fd = -1,
+      .event_control_fd = -1,
+      .oom_control_fd = -1,
+      .event_control_path = {0},
+      .oom_control_path = {0},
+      .oom_command = {0},
+      .oom_command_len = 0,
+      .watch_timeout = 1000
+  };
+
+  int ret = oom_listener(&descriptors, argv[1], STDOUT_FILENO);
+
+  cleanup(&descriptors);
+
+  return ret;
+}
+
+#else
+
+/*
+ This tool uses Linux-specific functionality,
+ so it is not available for other operating systems.
+*/
+int main() {
+  return 1;
+}
+
+#endif
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/test/oom_listener_test_main.cc
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/test/oom_listener_test_main.cc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/test/oom_listener_test_main.cc
new file mode 100644
index 0000000..9627632
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/test/oom_listener_test_main.cc
@@ -0,0 +1,292 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if __linux
+
+extern "C" {
+#include "oom_listener.h"
+}
+
+#include <gtest/gtest.h>
+#include <fstream>
+#include <mutex>
+
+#define CGROUP_ROOT "/sys/fs/cgroup/memory/"
+#define TEST_ROOT "/tmp/test-oom-listener/"
+#define CGROUP_TASKS "tasks"
+#define CGROUP_OOM_CONTROL "memory.oom_control"
+#define CGROUP_LIMIT_PHYSICAL "memory.limit_in_bytes"
+#define CGROUP_LIMIT_SWAP "memory.memsw.limit_in_bytes"
+#define CGROUP_EVENT_CONTROL "cgroup.event_control"
+#define CGROUP_LIMIT (5 * 1024 * 1024)
+
+// We try multiple cgroup directories:
+// first the official path, so the test runs
+// against real cgroups in production.
+// If we are running as a plain user we fall
+// back to a mock cgroup under /tmp.
+static const char *cgroup_candidates[] = { CGROUP_ROOT, TEST_ROOT };
+
+int main(int argc, char **argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
+
+class OOMListenerTest : public ::testing::Test {
+private:
+  char cgroup[PATH_MAX] = {};
+  const char* cgroup_root = nullptr;
+public:
+  OOMListenerTest() = default;
+
+  virtual ~OOMListenerTest() = default;
+  virtual const char* GetCGroup() { return cgroup; }
+  virtual void SetUp() {
+    struct stat cgroup_memory = {};
+    for (unsigned int i = 0; i < GTEST_ARRAY_SIZE_(cgroup_candidates); ++i) {
+      cgroup_root = cgroup_candidates[i];
+
+      // Try to create the root.
+      // We might not have permission and
+      // it may already exist
+      mkdir(cgroup_root, 0700);
+
+      if (0 != stat(cgroup_root, &cgroup_memory)) {
+        printf("%s missing. Skipping test\n", cgroup_root);
+        continue;
+      }
+
+      timespec timespec1 = {};
+      if (0 != clock_gettime(CLOCK_MONOTONIC, &timespec1)) {
+        ASSERT_TRUE(false) << " clock_gettime failed\n";
+      }
+
+      if (snprintf(cgroup, sizeof(cgroup), "%s%lx/",
+                        cgroup_root, timespec1.tv_nsec) <= 0) {
+        cgroup[0] = '\0';
+        printf("%s snprintf failed\n", cgroup_root);
+        continue;
+      }
+
+      // Create a cgroup named after the current timestamp
+      // to make it quasi-unique
+      if (0 != mkdir(cgroup, 0700)) {
+        printf("%s not writable.\n", cgroup);
+        continue;
+      }
+      break;
+    }
+
+    ASSERT_EQ(0, stat(cgroup, &cgroup_memory))
+                  << "Cannot use or simulate cgroup " << cgroup;
+  }
+  virtual void TearDown() {
+    if (cgroup[0] != '\0') {
+      rmdir(cgroup);
+    }
+    if (cgroup_root != nullptr &&
+        cgroup_root != cgroup_candidates[0]) {
+      rmdir(cgroup_root);
+    }
+  }
+};
+
+/*
+  Unit test for cgroup testing. There are two modes.
+  If the unit test is run as root and we have cgroups
+  we try to create a cgroup and generate an OOM.
+  If we are not running as root we just sleep instead of
+  hogging memory and simulate the OOM by sending
+  an event on the mock event fd mock_oom_event_as_user.
+*/
+TEST_F(OOMListenerTest, test_oom) {
+  // Disable OOM killer
+  std::ofstream oom_control;
+  std::string oom_control_file =
+      std::string(GetCGroup()).append(CGROUP_OOM_CONTROL);
+  oom_control.open(oom_control_file.c_str(), oom_control.out);
+  oom_control << 1 << std::endl;
+  oom_control.close();
+
+  // Set a low enough limit for physical
+  std::ofstream limit;
+  std::string limit_file =
+      std::string(GetCGroup()).append(CGROUP_LIMIT_PHYSICAL);
+  limit.open(limit_file.c_str(), limit.out);
+  limit << CGROUP_LIMIT << std::endl;
+  limit.close();
+
+  // Set a low enough limit for physical + swap
+  std::ofstream limitSwap;
+  std::string limit_swap_file =
+      std::string(GetCGroup()).append(CGROUP_LIMIT_SWAP);
+  limitSwap.open(limit_swap_file.c_str(), limitSwap.out);
+  limitSwap << CGROUP_LIMIT << std::endl;
+  limitSwap.close();
+
+  // Event control file to set
+  std::string memory_control_file =
+      std::string(GetCGroup()).append(CGROUP_EVENT_CONTROL);
+
+  // Tasks file to check
+  std::string tasks_file =
+      std::string(GetCGroup()).append(CGROUP_TASKS);
+
+  int mock_oom_event_as_user = -1;
+  struct stat stat1 = {};
+  if (0 != stat(memory_control_file.c_str(), &stat1)) {
+    // We cannot tamper with cgroups when
+    // running as a user, so simulate an
+    // oom event instead
+    mock_oom_event_as_user = eventfd(0, 0);
+  }
+  const int simulate_cgroups =
+      mock_oom_event_as_user != -1;
+
+  __pid_t mem_hog_pid = fork();
+  if (!mem_hog_pid) {
+    // Child process to consume too much memory
+    if (simulate_cgroups) {
+      std::cout << "Simulating cgroups OOM" << std::endl;
+      for (;;) {
+        sleep(1);
+      }
+    } else {
+      // Wait until we are added to the cgroup
+      // so that our memory usage is accounted
+      // against it
+      __pid_t cgroupPid;
+      do {
+        std::ifstream tasks;
+        tasks.open(tasks_file.c_str(), tasks.in);
+        tasks >> cgroupPid;
+        tasks.close();
+      } while (cgroupPid != getpid());
+
+      // Start consuming as much memory as we can.
+      // cgroup will stop us at CGROUP_LIMIT
+      const int bufferSize = 1024 * 1024;
+      std::cout << "Consuming too much memory" << std::endl;
+      for (;;) {
+        auto buffer = (char *) malloc(bufferSize);
+        if (buffer != nullptr) {
+          for (int i = 0; i < bufferSize; ++i) {
+            buffer[i] = (char) std::rand();
+          }
+        }
+      }
+    }
+  } else {
+    // Parent test
+    ASSERT_GE(mem_hog_pid, 1) << "Fork failed " << errno;
+
+    // Put child into cgroup
+    std::ofstream tasks;
+    tasks.open(tasks_file.c_str(), tasks.out);
+    tasks << mem_hog_pid << std::endl;
+    tasks.close();
+
+    // Create pipe to get forwarded eventfd
+    int test_pipe[2];
+    ASSERT_EQ(0, pipe(test_pipe));
+
+    // Launch OOM listener
+    // There is no race condition with the process
+    // running out of memory. If the oom flag is already
+    // 1 at startup, oom_listener sends an initial notification
+    __pid_t listener = fork();
+    if (listener == 0) {
+      // child listener forwarding cgroup events
+      _oom_listener_descriptors descriptors = {
+          .command = "test",
+          .event_fd = mock_oom_event_as_user,
+          .event_control_fd = -1,
+          .oom_control_fd = -1,
+          .event_control_path = {0},
+          .oom_control_path = {0},
+          .oom_command = {0},
+          .oom_command_len = 0,
+          .watch_timeout = 100
+      };
+      int ret = oom_listener(&descriptors, GetCGroup(), test_pipe[1]);
+      cleanup(&descriptors);
+      close(test_pipe[0]);
+      close(test_pipe[1]);
+      exit(ret);
+    } else {
+    // Parent test
+      uint64_t event_id = 1;
+      if (simulate_cgroups) {
+        // We cannot tamper with cgroups when
+        // running as a user, so simulate an
+        // oom event instead
+        ASSERT_EQ(sizeof(event_id),
+                  write(mock_oom_event_as_user,
+                        &event_id,
+                        sizeof(event_id)));
+      }
+      ASSERT_EQ(sizeof(event_id),
+                read(test_pipe[0],
+                     &event_id,
+                     sizeof(event_id)))
+                    << "The event has not arrived";
+      close(test_pipe[0]);
+      close(test_pipe[1]);
+
+      // Simulate OOM killer
+      ASSERT_EQ(0, kill(mem_hog_pid, SIGKILL));
+
+      // Verify that process was killed
+      __WAIT_STATUS mem_hog_status = {};
+      __pid_t exited0 = wait(mem_hog_status);
+      ASSERT_EQ(mem_hog_pid, exited0)
+        << "Wrong process exited";
+      ASSERT_EQ(nullptr, mem_hog_status)
+        << "Test process killed with invalid status";
+
+      if (mock_oom_event_as_user != -1) {
+        ASSERT_EQ(0, unlink(oom_control_file.c_str()));
+        ASSERT_EQ(0, unlink(limit_file.c_str()));
+        ASSERT_EQ(0, unlink(limit_swap_file.c_str()));
+        ASSERT_EQ(0, unlink(tasks_file.c_str()));
+        ASSERT_EQ(0, unlink(memory_control_file.c_str()));
+      }
+      // Once the cgroup is empty delete it
+      ASSERT_EQ(0, rmdir(GetCGroup()))
+                << "Could not delete cgroup " << GetCGroup();
+
+      // Check that oom_listener exited on the deletion of the cgroup
+      __WAIT_STATUS oom_listener_status = {};
+      __pid_t exited1 = wait(oom_listener_status);
+      ASSERT_EQ(listener, exited1)
+        << "Wrong process exited";
+      ASSERT_EQ(nullptr, oom_listener_status)
+        << "Listener process exited with invalid status";
+    }
+  }
+}
+
+#else
+/*
+This tool uses Linux-specific functionality,
+so it is not available for other operating systems.
+*/
+int main() {
+  return 1;
+}
+#endif

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DummyRunnableWithContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DummyRunnableWithContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DummyRunnableWithContext.java
new file mode 100644
index 0000000..54bcb13
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DummyRunnableWithContext.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
+
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+
+/**
+ * Runnable that does not do anything.
+ */
+public class DummyRunnableWithContext implements Runnable {
+  public DummyRunnableWithContext(Context context, boolean virtual) {
+  }
+  @Override
+  public void run() {
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[16/50] [abbrv] hadoop git commit: YARN-6919. Add default volume mount list. Contributed by Eric Badger

Posted by bo...@apache.org.
YARN-6919. Add default volume mount list. Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1388de18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1388de18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1388de18

Branch: refs/heads/YARN-7402
Commit: 1388de18ad51434569589a8f5b0b05c38fe02ab3
Parents: 774daa8
Author: Shane Kumpf <sk...@apache.org>
Authored: Thu May 24 09:30:39 2018 -0600
Committer: Shane Kumpf <sk...@apache.org>
Committed: Thu May 24 09:30:39 2018 -0600

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     |  10 ++
 .../src/main/resources/yarn-default.xml         |  14 ++
 .../runtime/DockerLinuxContainerRuntime.java    |  38 +++++
 .../runtime/TestDockerContainerRuntime.java     | 138 +++++++++++++++++++
 4 files changed, 200 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1388de18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 004a59f..f7f82f8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2002,6 +2002,16 @@ public class YarnConfiguration extends Configuration {
    */
   public static final int DEFAULT_NM_DOCKER_STOP_GRACE_PERIOD = 10;
 
+  /** The default list of read-only mounts to be bind-mounted into all
+   *  Docker containers that use DockerContainerRuntime. */
+  public static final String NM_DOCKER_DEFAULT_RO_MOUNTS =
+      DOCKER_CONTAINER_RUNTIME_PREFIX + "default-ro-mounts";
+
+  /** The default list of read-write mounts to be bind-mounted into all
+   *  Docker containers that use DockerContainerRuntime. */
+  public static final String NM_DOCKER_DEFAULT_RW_MOUNTS =
+      DOCKER_CONTAINER_RUNTIME_PREFIX + "default-rw-mounts";
+
   /** The mode in which the Java Container Sandbox should run detailed by
    *  the JavaSandboxLinuxContainerRuntime. */
   public static final String YARN_CONTAINER_SANDBOX =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1388de18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index c82474c..b0ffc48 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1811,6 +1811,20 @@
   </property>
 
   <property>
+    <description>The default list of read-only mounts to be bind-mounted
+      into all Docker containers that use DockerContainerRuntime.</description>
+    <name>yarn.nodemanager.runtime.linux.docker.default-ro-mounts</name>
+    <value></value>
+  </property>
+
+  <property>
+    <description>The default list of read-write mounts to be bind-mounted
+      into all Docker containers that use DockerContainerRuntime.</description>
+    <name>yarn.nodemanager.runtime.linux.docker.default-rw-mounts</name>
+    <value></value>
+  </property>
+
+  <property>
     <description>The mode in which the Java Container Sandbox should run detailed by
       the JavaSandboxLinuxContainerRuntime.</description>
     <name>yarn.nodemanager.runtime.linux.sandbox-mode</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1388de18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index e131e9d..5e2233b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -229,6 +229,8 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
   private Set<String> capabilities;
   private boolean delayedRemovalAllowed;
   private int dockerStopGracePeriod;
+  private Set<String> defaultROMounts = new HashSet<>();
+  private Set<String> defaultRWMounts = new HashSet<>();
 
   /**
    * Return whether the given environment variables indicate that the operation
@@ -291,6 +293,8 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
     this.conf = conf;
     dockerClient = new DockerClient(conf);
     allowedNetworks.clear();
+    defaultROMounts.clear();
+    defaultRWMounts.clear();
     allowedNetworks.addAll(Arrays.asList(
         conf.getTrimmedStrings(
             YarnConfiguration.NM_DOCKER_ALLOWED_CONTAINER_NETWORKS,
@@ -336,6 +340,14 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
     dockerStopGracePeriod = conf.getInt(
         YarnConfiguration.NM_DOCKER_STOP_GRACE_PERIOD,
         YarnConfiguration.DEFAULT_NM_DOCKER_STOP_GRACE_PERIOD);
+
+    defaultROMounts.addAll(Arrays.asList(
+        conf.getTrimmedStrings(
+        YarnConfiguration.NM_DOCKER_DEFAULT_RO_MOUNTS)));
+
+    defaultRWMounts.addAll(Arrays.asList(
+        conf.getTrimmedStrings(
+        YarnConfiguration.NM_DOCKER_DEFAULT_RW_MOUNTS)));
   }
 
   private Set<String> getDockerCapabilitiesFromConf() throws
@@ -829,6 +841,32 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
       }
     }
 
+    if(defaultROMounts != null && !defaultROMounts.isEmpty()) {
+      for (String mount : defaultROMounts) {
+        String[] dir = StringUtils.split(mount, ':');
+        if (dir.length != 2) {
+          throw new ContainerExecutionException("Invalid mount : " +
+              mount);
+        }
+        String src = dir[0];
+        String dst = dir[1];
+        runCommand.addReadOnlyMountLocation(src, dst);
+      }
+    }
+
+    if(defaultRWMounts != null && !defaultRWMounts.isEmpty()) {
+      for (String mount : defaultRWMounts) {
+        String[] dir = StringUtils.split(mount, ':');
+        if (dir.length != 2) {
+          throw new ContainerExecutionException("Invalid mount : " +
+              mount);
+        }
+        String src = dir[0];
+        String dst = dir[1];
+        runCommand.addReadWriteMountLocation(src, dst);
+      }
+    }
+
     if (allowHostPidNamespace(container)) {
       runCommand.setPidNamespace("host");
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1388de18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index ef21ef0..b6de366 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -82,6 +82,8 @@ import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.ConcurrentMap;
 
+import static org.apache.hadoop.yarn.conf.YarnConfiguration.NM_DOCKER_DEFAULT_RO_MOUNTS;
+import static org.apache.hadoop.yarn.conf.YarnConfiguration.NM_DOCKER_DEFAULT_RW_MOUNTS;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.APPID;
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.APPLICATION_LOCAL_DIRS;
@@ -1332,6 +1334,142 @@ public class TestDockerContainerRuntime {
   }
 
   @Test
+  public void testDefaultROMounts()
+      throws ContainerExecutionException, PrivilegedOperationException,
+      IOException {
+    conf.setStrings(NM_DOCKER_DEFAULT_RO_MOUNTS,
+        "/tmp/foo:/tmp/foo,/tmp/bar:/tmp/bar");
+    DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
+        mockExecutor, mockCGroupsHandler);
+    runtime.initialize(conf, nmContext);
+
+    runtime.launchContainer(builder.build());
+    PrivilegedOperation op = capturePrivilegedOperationAndVerifyArgs();
+    List<String> args = op.getArguments();
+    String dockerCommandFile = args.get(11);
+
+    List<String> dockerCommands = Files.readAllLines(
+        Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
+
+    int expected = 14;
+    int counter = 0;
+    Assert.assertEquals(expected, dockerCommands.size());
+    Assert.assertEquals("[docker-command-execution]",
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  cap-add=SYS_CHROOT,NET_BIND_SERVICE",
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
+    Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
+    Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
+    Assert.assertEquals("  group-add=" + String.join(",", groups),
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  image=busybox:latest",
+        dockerCommands.get(counter++));
+    Assert.assertEquals(
+        "  launch-command=bash,/test_container_work_dir/launch_container.sh",
+        dockerCommands.get(counter++));
+    Assert.assertEquals(
+        "  name=container_e11_1518975676334_14532816_01_000001",
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  net=host", dockerCommands.get(counter++));
+    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
+        + "/test_user_filecache_dir:/test_user_filecache_dir,"
+        + "/tmp/foo:/tmp/foo,/tmp/bar:/tmp/bar",
+        dockerCommands.get(counter++));
+    Assert.assertEquals(
+        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
+        + "/test_application_local_dir:/test_application_local_dir",
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
+    Assert.assertEquals("  workdir=/test_container_work_dir",
+        dockerCommands.get(counter));
+  }
+
+  @Test
+  public void testDefaultROMountsInvalid() throws ContainerExecutionException {
+    conf.setStrings(NM_DOCKER_DEFAULT_RO_MOUNTS,
+        "source,target");
+    DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
+        mockExecutor, mockCGroupsHandler);
+    runtime.initialize(conf, nmContext);
+
+    try {
+      runtime.launchContainer(builder.build());
+      Assert.fail("Expected a launch container failure due to invalid mount.");
+    } catch (ContainerExecutionException e) {
+      LOG.info("Caught expected exception : " + e);
+    }
+  }
+
+  @Test
+  public void testDefaultRWMounts()
+      throws ContainerExecutionException, PrivilegedOperationException,
+      IOException {
+    conf.setStrings(NM_DOCKER_DEFAULT_RW_MOUNTS,
+        "/tmp/foo:/tmp/foo,/tmp/bar:/tmp/bar");
+    DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
+        mockExecutor, mockCGroupsHandler);
+    runtime.initialize(conf, nmContext);
+
+    runtime.launchContainer(builder.build());
+    PrivilegedOperation op = capturePrivilegedOperationAndVerifyArgs();
+    List<String> args = op.getArguments();
+    String dockerCommandFile = args.get(11);
+
+    List<String> dockerCommands = Files.readAllLines(
+        Paths.get(dockerCommandFile), Charset.forName("UTF-8"));
+
+    int expected = 14;
+    int counter = 0;
+    Assert.assertEquals(expected, dockerCommands.size());
+    Assert.assertEquals("[docker-command-execution]",
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  cap-add=SYS_CHROOT,NET_BIND_SERVICE",
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  cap-drop=ALL", dockerCommands.get(counter++));
+    Assert.assertEquals("  detach=true", dockerCommands.get(counter++));
+    Assert.assertEquals("  docker-command=run", dockerCommands.get(counter++));
+    Assert.assertEquals("  group-add=" + String.join(",", groups),
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  image=busybox:latest",
+        dockerCommands.get(counter++));
+    Assert.assertEquals(
+        "  launch-command=bash,/test_container_work_dir/launch_container.sh",
+        dockerCommands.get(counter++));
+    Assert.assertEquals(
+        "  name=container_e11_1518975676334_14532816_01_000001",
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  net=host", dockerCommands.get(counter++));
+    Assert.assertEquals("  ro-mounts=/test_filecache_dir:/test_filecache_dir,"
+        + "/test_user_filecache_dir:/test_user_filecache_dir",
+        dockerCommands.get(counter++));
+    Assert.assertEquals(
+        "  rw-mounts=/test_container_log_dir:/test_container_log_dir,"
+        + "/test_application_local_dir:/test_application_local_dir,"
+        + "/tmp/foo:/tmp/foo,/tmp/bar:/tmp/bar",
+        dockerCommands.get(counter++));
+    Assert.assertEquals("  user=" + uidGidPair, dockerCommands.get(counter++));
+    Assert.assertEquals("  workdir=/test_container_work_dir",
+        dockerCommands.get(counter));
+  }
+
+  @Test
+  public void testDefaultRWMountsInvalid() throws ContainerExecutionException {
+    conf.setStrings(NM_DOCKER_DEFAULT_RW_MOUNTS,
+        "source,target");
+    DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
+        mockExecutor, mockCGroupsHandler);
+    runtime.initialize(conf, nmContext);
+
+    try {
+      runtime.launchContainer(builder.build());
+      Assert.fail("Expected a launch container failure due to invalid mount.");
+    } catch (ContainerExecutionException e) {
+      LOG.info("Caught expected exception : " + e);
+    }
+  }
+
+  @Test
   public void testContainerLivelinessCheck()
       throws ContainerExecutionException, PrivilegedOperationException {
 




[18/50] [abbrv] hadoop git commit: HDDS-80. Remove SendContainerCommand from SCM. Contributed by Nanda Kumar.

Posted by bo...@apache.org.
HDDS-80. Remove SendContainerCommand from SCM. Contributed by Nanda Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d19e7d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d19e7d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d19e7d0

Branch: refs/heads/YARN-7402
Commit: 2d19e7d08f031341078a36fee74860c58de02993
Parents: c9b63de
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Thu May 24 11:10:30 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu May 24 11:10:30 2018 -0700

----------------------------------------------------------------------
 .../statemachine/DatanodeStateMachine.java      |   3 -
 .../commandhandler/ContainerReportHandler.java  | 114 -------------------
 .../states/endpoint/HeartbeatEndpointTask.java  |   5 -
 .../protocol/commands/SendContainerCommand.java |  80 -------------
 .../StorageContainerDatanodeProtocol.proto      |  16 ++-
 .../container/replication/InProgressPool.java   |  57 ----------
 .../scm/server/SCMDatanodeProtocolServer.java   |   7 --
 7 files changed, 7 insertions(+), 275 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d19e7d0/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index a16bfdc..a8fe494 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -26,8 +26,6 @@ import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
 import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
     .CommandDispatcher;
 import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
-    .ContainerReportHandler;
-import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
     .DeleteBlocksCommandHandler;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
@@ -88,7 +86,6 @@ public class DatanodeStateMachine implements Closeable {
      // When we add new handlers just adding a new handler here should do the
      // trick.
     commandDispatcher = CommandDispatcher.newBuilder()
-        .addHandler(new ContainerReportHandler())
         .addHandler(new CloseContainerHandler())
         .addHandler(new DeleteBlocksCommandHandler(
             container.getContainerManager(), conf))

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d19e7d0/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ContainerReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ContainerReportHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ContainerReportHandler.java
deleted file mode 100644
index fbea290..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ContainerReportHandler.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
-
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCmdType;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .EndpointStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .SCMConnectionManager;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-/**
- * Container Report handler.
- */
-public class ContainerReportHandler implements CommandHandler {
-  static final Logger LOG =
-      LoggerFactory.getLogger(ContainerReportHandler.class);
-  private int invocationCount;
-  private long totalTime;
-
-  /**
-   * Constructs a ContainerReport handler.
-   */
-  public ContainerReportHandler() {
-  }
-
-  /**
-   * Handles a given SCM command.
-   *
-   * @param command - SCM Command
-   * @param container - Ozone Container.
-   * @param context - Current Context.
-   * @param connectionManager - The SCMs that we are talking to.
-   */
-  @Override
-  public void handle(SCMCommand command, OzoneContainer container,
-      StateContext context, SCMConnectionManager connectionManager) {
-    LOG.debug("Processing Container Report.");
-    invocationCount++;
-    long startTime = Time.monotonicNow();
-    try {
-      ContainerReportsRequestProto containerReport =
-          container.getContainerReport();
-
-      // TODO : We send this report to all SCMs.Check if it is enough only to
-      // send to the leader once we have RAFT enabled SCMs.
-      for (EndpointStateMachine endPoint : connectionManager.getValues()) {
-        endPoint.getEndPoint().sendContainerReport(containerReport);
-      }
-    } catch (IOException ex) {
-      LOG.error("Unable to process the Container Report command.", ex);
-    } finally {
-      long endTime = Time.monotonicNow();
-      totalTime += endTime - startTime;
-    }
-  }
-
-  /**
-   * Returns the command type that this command handler handles.
-   *
-   * @return Type
-   */
-  @Override
-  public SCMCmdType getCommandType() {
-    return SCMCmdType.sendContainerReport;
-  }
-
-  /**
-   * Returns number of times this handler has been invoked.
-   *
-   * @return int
-   */
-  @Override
-  public int getInvocationCount() {
-    return invocationCount;
-  }
-
-  /**
-   * Returns the average time this function takes to run.
-   *
-   * @return long
-   */
-  @Override
-  public long getAverageRunTime() {
-    if (invocationCount > 0) {
-      return totalTime / invocationCount;
-    }
-    return 0;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d19e7d0/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
index 2f1db39..01b4c72 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.ozone.container.common.statemachine
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
 import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
-import org.apache.hadoop.ozone.protocol.commands.SendContainerCommand;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -133,10 +132,6 @@ public class HeartbeatEndpointTask
           .equalsIgnoreCase(datanodeDetails.getUuid()),
           "Unexpected datanode ID in the response.");
       switch (commandResponseProto.getCmdType()) {
-      case sendContainerReport:
-        this.context.addCommand(SendContainerCommand.getFromProtobuf(
-            commandResponseProto.getSendReport()));
-        break;
       case reregisterCommand:
         if (rpcEndpoint.getState() == EndPointStates.HEARTBEAT) {
           if (LOG.isDebugEnabled()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d19e7d0/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SendContainerCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SendContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SendContainerCommand.java
deleted file mode 100644
index 8431752..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SendContainerCommand.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.protocol.commands;
-
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCmdType;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SendContainerReportProto;
-
-/**
- * Allows a Datanode to send in the container report.
- */
-public class SendContainerCommand extends SCMCommand<SendContainerReportProto> {
-  /**
-   * Returns a NullCommand class from NullCommandResponse Proto.
-   * @param unused  - unused
-   * @return NullCommand
-   */
-  public static SendContainerCommand getFromProtobuf(
-      final SendContainerReportProto unused) {
-    return new SendContainerCommand();
-  }
-
-  /**
-   * returns a new builder.
-   * @return Builder
-   */
-  public static SendContainerCommand.Builder newBuilder() {
-    return new SendContainerCommand.Builder();
-  }
-
-  /**
-   * Returns the type of this command.
-   *
-   * @return Type
-   */
-  @Override
-  public SCMCmdType getType() {
-    return SCMCmdType.sendContainerReport;
-  }
-
-  /**
-   * Gets the protobuf message of this object.
-   *
-   * @return A protobuf message.
-   */
-  @Override
-  public byte[] getProtoBufMessage() {
-    return SendContainerReportProto.newBuilder().build().toByteArray();
-  }
-
-  /**
-   * A Builder class this is the standard pattern we are using for all commands.
-   */
-  public static class Builder {
-    /**
-     * Return a null command.
-     * @return - NullCommand.
-     */
-    public SendContainerCommand build() {
-      return new SendContainerCommand();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d19e7d0/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
index 91070b3..20e6af8 100644
--- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
+++ b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
@@ -186,10 +186,9 @@ Type of commands supported by SCM to datanode protocol.
 enum SCMCmdType {
   versionCommand = 2;
   registeredCommand = 3;
-  sendContainerReport = 4;
-  reregisterCommand = 5;
-  deleteBlocksCommand = 6;
-  closeContainerCommand = 7;
+  reregisterCommand = 4;
+  deleteBlocksCommand = 5;
+  closeContainerCommand = 6;
 }
 
 /*
@@ -199,11 +198,10 @@ message SCMCommandResponseProto {
   required SCMCmdType cmdType = 2; // Type of the command
   optional SCMRegisteredCmdResponseProto registeredProto = 3;
   optional SCMVersionResponseProto versionProto = 4;
-  optional SendContainerReportProto sendReport = 5;
-  optional SCMReregisterCmdResponseProto reregisterProto = 6;
-  optional SCMDeleteBlocksCmdResponseProto deleteBlocksProto = 7;
-  required string datanodeUUID = 8;
-  optional SCMCloseContainerCmdResponseProto closeContainerProto = 9;
+  optional SCMReregisterCmdResponseProto reregisterProto = 5;
+  optional SCMDeleteBlocksCmdResponseProto deleteBlocksProto = 6;
+  required string datanodeUUID = 7;
+  optional SCMCloseContainerCmdResponseProto closeContainerProto = 8;
 }
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d19e7d0/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java
index af878bf..c444e90 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java
@@ -21,12 +21,10 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.node.NodePoolManager;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerInfo;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
-import org.apache.hadoop.ozone.protocol.commands.SendContainerCommand;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -36,19 +34,10 @@ import java.util.Map;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Predicate;
 import java.util.stream.Collectors;
 
-import static com.google.common.util.concurrent.Uninterruptibles
-    .sleepUninterruptibly;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
-    .HEALTHY;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
-    .INVALID;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
-
 /**
  * These are pools that are actively checking for replication status of the
  * containers.
@@ -177,57 +166,11 @@ public final class InProgressPool {
     nodeProcessed = new AtomicInteger(0);
     containerProcessedCount = new AtomicInteger(0);
     nodeCount = new AtomicInteger(0);
-    /*
-       Ask each datanode to send us commands.
-     */
-    SendContainerCommand cmd = SendContainerCommand.newBuilder().build();
-    for (DatanodeDetails dd : datanodeDetailsList) {
-      NodeState currentState = getNodestate(dd);
-      if (currentState == HEALTHY || currentState == STALE) {
-        nodeCount.incrementAndGet();
-        // Queue commands to all datanodes in this pool to send us container
-        // report. Since we ignore dead nodes, it is possible that we would have
-        // over replicated the container if the node comes back.
-        nodeManager.addDatanodeCommand(dd.getUuid(), cmd);
-      }
-    }
     this.status = ProgressStatus.InProgress;
     this.getPool().setLastProcessedTime(Time.monotonicNow());
   }
 
   /**
-   * Gets the node state.
-   *
-   * @param datanode - datanode information.
-   * @return NodeState.
-   */
-  private NodeState getNodestate(DatanodeDetails datanode) {
-    NodeState  currentState = INVALID;
-    int maxTry = 100;
-    // We need to loop to make sure that we will retry if we get
-    // node state unknown. This can lead to infinite loop if we send
-    // in unknown node ID. So max try count is used to prevent it.
-
-    int currentTry = 0;
-    while (currentState == INVALID && currentTry < maxTry) {
-      // Retry to make sure that we deal with the case of node state not
-      // known.
-      currentState = nodeManager.getNodeState(datanode);
-      currentTry++;
-      if (currentState == INVALID) {
-        // Sleep to make sure that this is not a tight loop.
-        sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
-      }
-    }
-    if (currentState == INVALID) {
-      LOG.error("Not able to determine the state of Node: {}, Exceeded max " +
-          "try and node manager returns INVALID state. This indicates we " +
-          "are dealing with a node that we don't know about.", datanode);
-    }
-    return currentState;
-  }
-
-  /**
    * Queues a container Report for handling. This is done in a worker thread
    * since decoding a container report might be compute intensive . We don't
    * want to block since we have asked for bunch of container reports

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d19e7d0/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index 58b8c82..6e5b7de 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolPro
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SendContainerReportProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMReregisterCmdResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType;
@@ -46,7 +45,6 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolPro
 
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.versionCommand;
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.registeredCommand;
-import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.sendContainerReport;
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.reregisterCommand;
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.deleteBlocksCommand;
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType.closeContainerCommand;
@@ -318,11 +316,6 @@ public class SCMDatanodeProtocolServer implements
           .setCmdType(versionCommand)
           .setVersionProto(SCMVersionResponseProto.getDefaultInstance())
           .build();
-    case sendContainerReport:
-      return builder
-          .setCmdType(sendContainerReport)
-          .setSendReport(SendContainerReportProto.getDefaultInstance())
-          .build();
     case reregisterCommand:
       return builder
           .setCmdType(reregisterCommand)




[50/50] [abbrv] hadoop git commit: YARN-3660. [GPG] Federation Global Policy Generator (service hook only). (Contributed by Botong Huang via curino)

Posted by bo...@apache.org.
YARN-3660. [GPG] Federation Global Policy Generator (service hook only). (Contributed by Botong Huang via curino)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bca8e9bf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bca8e9bf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bca8e9bf

Branch: refs/heads/YARN-7402
Commit: bca8e9bf9d6c0d99e15d45dfb714ca5677ac4e0a
Parents: 9502b47
Author: Carlo Curino <cu...@apache.org>
Authored: Thu Jan 18 17:21:06 2018 -0800
Committer: Botong Huang <bo...@apache.org>
Committed: Tue May 29 10:48:40 2018 -0700

----------------------------------------------------------------------
 hadoop-project/pom.xml                          |   6 +
 hadoop-yarn-project/hadoop-yarn/bin/yarn        |   5 +
 hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd    |  55 +++++---
 .../hadoop-yarn/conf/yarn-env.sh                |  12 ++
 .../pom.xml                                     |  98 +++++++++++++
 .../globalpolicygenerator/GPGContext.java       |  31 +++++
 .../globalpolicygenerator/GPGContextImpl.java   |  41 ++++++
 .../GlobalPolicyGenerator.java                  | 136 +++++++++++++++++++
 .../globalpolicygenerator/package-info.java     |  19 +++
 .../TestGlobalPolicyGenerator.java              |  38 ++++++
 .../hadoop-yarn/hadoop-yarn-server/pom.xml      |   1 +
 hadoop-yarn-project/pom.xml                     |   4 +
 12 files changed, 424 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bca8e9bf/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 59a9bd2..2db538e 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -446,6 +446,12 @@
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-yarn-server-globalpolicygenerator</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-services-core</artifactId>
         <version>${hadoop.version}</version>
       </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bca8e9bf/hadoop-yarn-project/hadoop-yarn/bin/yarn
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index 69afe6f..8061859 100755
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -39,6 +39,7 @@ function hadoop_usage
   hadoop_add_subcommand "container" client "prints container(s) report"
   hadoop_add_subcommand "daemonlog" admin "get/set the log level for each daemon"
   hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables"
+  hadoop_add_subcommand "globalpolicygenerator" daemon "run the Global Policy Generator"
   hadoop_add_subcommand "jar <jar>" client "run a jar file"
   hadoop_add_subcommand "logs" client "dump container logs"
   hadoop_add_subcommand "node" admin "prints node report(s)"
@@ -103,6 +104,10 @@ ${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"
       echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'"
       exit 0
     ;;
+    globalpolicygenerator)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.globalpolicygenerator.GlobalPolicyGenerator'
+    ;;
     jar)
       HADOOP_CLASSNAME=org.apache.hadoop.util.RunJar
     ;;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bca8e9bf/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
index e1ac112..bebfd71 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
@@ -134,6 +134,10 @@ if "%1" == "--loglevel" (
     set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-router\target\classes
   )
 
+  if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-globalpolicygenerator\target\classes (
+    set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-globalpolicygenerator\target\classes
+  )
+
   if exist %HADOOP_YARN_HOME%\build\test\classes (
     set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\build\test\classes
   )
@@ -155,7 +159,7 @@ if "%1" == "--loglevel" (
 
   set yarncommands=resourcemanager nodemanager proxyserver rmadmin version jar ^
      application applicationattempt container node queue logs daemonlog historyserver ^
-     timelineserver timelinereader router classpath
+     timelineserver timelinereader router globalpolicygenerator classpath
   for %%i in ( %yarncommands% ) do (
     if %yarn-command% == %%i set yarncommand=true
   )
@@ -259,7 +263,13 @@ goto :eof
 :router
   set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\router-config\log4j.properties
   set CLASS=org.apache.hadoop.yarn.server.router.Router
-  set YARN_OPTS=%YARN_OPTS% %HADOOP_ROUTER_OPTS%
+  set YARN_OPTS=%YARN_OPTS% %YARN_ROUTER_OPTS%
+  goto :eof
+
+:globalpolicygenerator
+  set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\globalpolicygenerator-config\log4j.properties
+  set CLASS=org.apache.hadoop.yarn.server.globalpolicygenerator.GlobalPolicyGenerator
+  set YARN_OPTS=%YARN_OPTS% %YARN_GLOBALPOLICYGENERATOR_OPTS%
   goto :eof
 
 :nodemanager
@@ -336,27 +346,28 @@ goto :eof
 :print_usage
   @echo Usage: yarn [--config confdir] [--loglevel loglevel] COMMAND
   @echo        where COMMAND is one of:
-  @echo   resourcemanager      run the ResourceManager
-  @echo   nodemanager          run a nodemanager on each slave
-  @echo   router               run the Router daemon
-  @echo   timelineserver       run the timeline server
-  @echo   timelinereader       run the timeline reader server
-  @echo   rmadmin              admin tools
-  @echo   version              print the version
-  @echo   jar ^<jar^>          run a jar file
-  @echo   application          prints application(s) report/kill application
-  @echo   applicationattempt   prints applicationattempt(s) report
-  @echo   cluster              prints cluster information
-  @echo   container            prints container(s) report
-  @echo   node                 prints node report(s)
-  @echo   queue                prints queue information
-  @echo   logs                 dump container logs
-  @echo   schedulerconf        updates scheduler configuration
-  @echo   classpath            prints the class path needed to get the
-  @echo                        Hadoop jar and the required libraries
-  @echo   daemonlog            get/set the log level for each daemon
+  @echo   resourcemanager        run the ResourceManager
+  @echo   nodemanager            run a nodemanager on each slave
+  @echo   router                 run the Router daemon
+  @echo   globalpolicygenerator  run the Global Policy Generator
+  @echo   timelineserver         run the timeline server
+  @echo   timelinereader         run the timeline reader server
+  @echo   rmadmin                admin tools
+  @echo   version                print the version
+  @echo   jar ^<jar^>            run a jar file
+  @echo   application            prints application(s) report/kill application
+  @echo   applicationattempt     prints applicationattempt(s) report
+  @echo   cluster                prints cluster information
+  @echo   container              prints container(s) report
+  @echo   node                   prints node report(s)
+  @echo   queue                  prints queue information
+  @echo   logs                   dump container logs
+  @echo   schedulerconf          updates scheduler configuration
+  @echo   classpath              prints the class path needed to get the
+  @echo                          Hadoop jar and the required libraries
+  @echo   daemonlog              get/set the log level for each daemon
   @echo   or
-  @echo   CLASSNAME            run the class named CLASSNAME
+  @echo   CLASSNAME              run the class named CLASSNAME
   @echo Most commands print help when invoked w/o parameters.
 
 endlocal

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bca8e9bf/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh b/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh
index d865023..4e2030a 100644
--- a/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh
+++ b/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh
@@ -150,6 +150,18 @@
 #export YARN_ROUTER_OPTS=
 
 ###
+# Global Policy Generator specific parameters
+###
+
+# Specify the JVM options to be used when starting the GPG.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# See ResourceManager for some examples
+#
+#export YARN_GLOBALPOLICYGENERATOR_OPTS=
+
+###
 # Registry DNS specific parameters
 ###
 # For privileged registry DNS, user to run as after dropping privileges

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bca8e9bf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
new file mode 100644
index 0000000..9bbb936
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
@@ -0,0 +1,98 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <artifactId>hadoop-yarn-server</artifactId>
+    <groupId>org.apache.hadoop</groupId>
+    <version>3.1.0-SNAPSHOT</version>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-yarn-server-globalpolicygenerator</artifactId>
+  <version>3.1.0-SNAPSHOT</version>
+  <name>hadoop-yarn-server-globalpolicygenerator</name>
+
+  <properties>
+    <!-- Needed for generating FindBugs warnings using parent pom -->
+    <yarn.basedir>${project.parent.parent.basedir}</yarn.basedir>
+  </properties>
+
+  <dependencies>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-common</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-api</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-common</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-common</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.hsqldb</groupId>
+      <artifactId>hsqldb</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+      </plugin>
+    </plugins>
+  </build>
+</project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bca8e9bf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContext.java
new file mode 100644
index 0000000..da8a383
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContext.java
@@ -0,0 +1,31 @@
+/**
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator;
+
+import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+
+/**
+ * Context for Global Policy Generator.
+ */
+public interface GPGContext {
+
+  FederationStateStoreFacade getStateStoreFacade();
+
+  void setStateStoreFacade(FederationStateStoreFacade facade);
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bca8e9bf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContextImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContextImpl.java
new file mode 100644
index 0000000..3884ace
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContextImpl.java
@@ -0,0 +1,41 @@
+/**
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator;
+
+import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+
+/**
+ * Context implementation for Global Policy Generator.
+ */
+public class GPGContextImpl implements GPGContext {
+
+  private FederationStateStoreFacade facade;
+
+  @Override
+  public FederationStateStoreFacade getStateStoreFacade() {
+    return facade;
+  }
+
+  @Override
+  public void setStateStoreFacade(
+      FederationStateStoreFacade federationStateStoreFacade) {
+    this.facade = federationStateStoreFacade;
+  }
+
+}
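
A hedged sketch (not part of the patch; GPGContextWiringSketch is a hypothetical name) of how the interface and implementation above compose -- the same wiring that GlobalPolicyGenerator#serviceInit() performs further below:

import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
import org.apache.hadoop.yarn.server.globalpolicygenerator.GPGContext;
import org.apache.hadoop.yarn.server.globalpolicygenerator.GPGContextImpl;

public class GPGContextWiringSketch {
  public static void main(String[] args) {
    // The context is a plain holder object; the GPG injects the shared
    // federation state store facade into it at service-init time.
    GPGContext context = new GPGContextImpl();
    context.setStateStoreFacade(FederationStateStoreFacade.getInstance());
    System.out.println(context.getStateStoreFacade() != null); // true
  }
}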

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bca8e9bf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
new file mode 100644
index 0000000..c1f7460
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.service.CompositeService;
+import org.apache.hadoop.util.ShutdownHookManager;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Global Policy Generator (GPG) is a YARN Federation component. By tuning the
+ * Federation policies in the Federation State Store, the GPG oversees the
+ * entire federated cluster and ensures that the system stays tuned and
+ * balanced at all times.
+ *
+ * The GPG operates continuously but out-of-band from all cluster operations,
+ * which allows it to enforce global invariants, affect load balancing,
+ * trigger draining of sub-clusters that will undergo maintenance, etc.
+ */
+public class GlobalPolicyGenerator extends CompositeService {
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(GlobalPolicyGenerator.class);
+
+  // YARN Variables
+  private static CompositeServiceShutdownHook gpgShutdownHook;
+  public static final int SHUTDOWN_HOOK_PRIORITY = 30;
+  private AtomicBoolean isStopping = new AtomicBoolean(false);
+  private static final String METRICS_NAME = "Global Policy Generator";
+
+  // Federation Variables
+  private GPGContext gpgContext;
+
+  public GlobalPolicyGenerator() {
+    super(GlobalPolicyGenerator.class.getName());
+    this.gpgContext = new GPGContextImpl();
+  }
+
+  protected void initAndStart(Configuration conf, boolean hasToReboot) {
+    try {
+      // Remove the old hook if we are rebooting.
+      if (hasToReboot && null != gpgShutdownHook) {
+        ShutdownHookManager.get().removeShutdownHook(gpgShutdownHook);
+      }
+
+      gpgShutdownHook = new CompositeServiceShutdownHook(this);
+      ShutdownHookManager.get().addShutdownHook(gpgShutdownHook,
+          SHUTDOWN_HOOK_PRIORITY);
+
+      this.init(conf);
+      this.start();
+    } catch (Throwable t) {
+      LOG.error("Error starting globalpolicygenerator", t);
+      System.exit(-1);
+    }
+  }
+
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    // Set up the context
+    this.gpgContext
+        .setStateStoreFacade(FederationStateStoreFacade.getInstance());
+
+    DefaultMetricsSystem.initialize(METRICS_NAME);
+
+    // super.serviceInit after all services are added
+    super.serviceInit(conf);
+  }
+
+  @Override
+  protected void serviceStart() throws Exception {
+    super.serviceStart();
+  }
+
+  @Override
+  protected void serviceStop() throws Exception {
+    if (this.isStopping.getAndSet(true)) {
+      return;
+    }
+    DefaultMetricsSystem.shutdown();
+    super.serviceStop();
+  }
+
+  public String getName() {
+    return "FederationGlobalPolicyGenerator";
+  }
+
+  public GPGContext getGPGContext() {
+    return this.gpgContext;
+  }
+
+  @SuppressWarnings("resource")
+  public static void startGPG(String[] argv, Configuration conf) {
+    boolean federationEnabled =
+        conf.getBoolean(YarnConfiguration.FEDERATION_ENABLED,
+            YarnConfiguration.DEFAULT_FEDERATION_ENABLED);
+    if (federationEnabled) {
+      Thread.setDefaultUncaughtExceptionHandler(
+          new YarnUncaughtExceptionHandler());
+      StringUtils.startupShutdownMessage(GlobalPolicyGenerator.class, argv,
+          LOG);
+      GlobalPolicyGenerator globalPolicyGenerator = new GlobalPolicyGenerator();
+      globalPolicyGenerator.initAndStart(conf, false);
+    } else {
+      LOG.warn("Federation is not enabled. The gpg cannot start.");
+    }
+  }
+
+  public static void main(String[] argv) {
+    startGPG(argv, new YarnConfiguration());
+  }
+}
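
As a usage illustration (a sketch, not part of the patch; GPGLauncherSketch is a hypothetical name), the service hook above can be exercised programmatically as follows -- startGPG() only proceeds when federation is enabled, and does not return while the GPG is running:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.globalpolicygenerator.GlobalPolicyGenerator;

public class GPGLauncherSketch {
  public static void main(String[] args) {
    // Without this flag, startGPG() just logs a warning and returns.
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.FEDERATION_ENABLED, true);

    // Installs the shutdown hook, then runs init() and start() on the
    // underlying CompositeService.
    GlobalPolicyGenerator.startGPG(args, conf);
  }
}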

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bca8e9bf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/package-info.java
new file mode 100644
index 0000000..abaa57c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/package-info.java
@@ -0,0 +1,19 @@
+/**
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bca8e9bf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/TestGlobalPolicyGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/TestGlobalPolicyGenerator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/TestGlobalPolicyGenerator.java
new file mode 100644
index 0000000..f657b86
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/TestGlobalPolicyGenerator.java
@@ -0,0 +1,38 @@
+/**
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.junit.Test;
+
+/**
+ * Unit test for GlobalPolicyGenerator.
+ */
+public class TestGlobalPolicyGenerator {
+
+  @Test(timeout = 1000)
+  public void testNonFederation() {
+    Configuration conf = new YarnConfiguration();
+    conf.setBoolean(YarnConfiguration.FEDERATION_ENABLED, false);
+
+    // If GPG starts running, this call will not return
+    GlobalPolicyGenerator.startGPG(new String[0], conf);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bca8e9bf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
index de4484c..226407b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
@@ -46,5 +46,6 @@
     <module>hadoop-yarn-server-timelineservice-hbase</module>
     <module>hadoop-yarn-server-timelineservice-hbase-tests</module>
     <module>hadoop-yarn-server-router</module>
+    <module>hadoop-yarn-server-globalpolicygenerator</module>
   </modules>
 </project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bca8e9bf/hadoop-yarn-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/pom.xml b/hadoop-yarn-project/pom.xml
index 4593441..311b26e 100644
--- a/hadoop-yarn-project/pom.xml
+++ b/hadoop-yarn-project/pom.xml
@@ -80,6 +80,10 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-globalpolicygenerator</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-yarn-services-core</artifactId>
     </dependency>
   </dependencies>



[02/50] [abbrv] hadoop git commit: HADOOP-15486. Make NetworkTopology#netLock fair. Contributed by Nanda kumar.

Posted by bo...@apache.org.
HADOOP-15486. Make NetworkTopology#netLock fair. Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51ce02bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51ce02bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51ce02bb

Branch: refs/heads/YARN-7402
Commit: 51ce02bb54d6047a8191624a86d427b0c9445cb1
Parents: aa23d49
Author: Arpit Agarwal <ar...@apache.org>
Authored: Wed May 23 10:30:12 2018 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Wed May 23 10:30:12 2018 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/net/NetworkTopology.java       | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/51ce02bb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index 256f07b..1f077a7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -105,7 +105,7 @@ public class NetworkTopology {
   private boolean clusterEverBeenMultiRack = false;
 
   /** the lock used to manage access */
-  protected ReadWriteLock netlock = new ReentrantReadWriteLock();
+  protected ReadWriteLock netlock = new ReentrantReadWriteLock(true);
 
   // keeping the constructor because other components like MR still uses this.
   public NetworkTopology() {
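
For illustration only (a sketch, not from the patch; FairLockSketch is a hypothetical name): the one-line change above flips the fairness flag on the JDK lock. With a fair policy, waiting threads acquire the lock roughly in arrival order, so writers are no longer starved by a steady stream of barging readers, at some cost in raw throughput:

import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class FairLockSketch {
  // true selects the fair ordering policy, as in the patched field.
  private final ReadWriteLock netlock = new ReentrantReadWriteLock(true);

  public void readTopology() {
    netlock.readLock().lock();
    try {
      // read shared topology state here
    } finally {
      netlock.readLock().unlock();
    }
  }
}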



[40/50] [abbrv] hadoop git commit: HADOOP-15455. Incorrect debug message in KMSACL#hasAccess. Contributed by Yuen-Kuei Hsueh.

Posted by bo...@apache.org.
HADOOP-15455. Incorrect debug message in KMSACL#hasAccess. Contributed by Yuen-Kuei Hsueh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/438ef495
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/438ef495
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/438ef495

Branch: refs/heads/YARN-7402
Commit: 438ef4951a38171f193eaf2631da31d0f4bc3c62
Parents: 8fdc993
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Mon May 28 17:32:32 2018 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Mon May 28 17:32:32 2018 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java    | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/438ef495/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
index b02f34e..17faec2 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
@@ -247,9 +247,9 @@ public class KMSACLs implements Runnable, KeyACLs {
         if (blacklist == null) {
           LOG.debug("No blacklist for {}", type.toString());
         } else if (access) {
-          LOG.debug("user is in {}" , blacklist.getAclString());
-        } else {
           LOG.debug("user is not in {}" , blacklist.getAclString());
+        } else {
+          LOG.debug("user is in {}" , blacklist.getAclString());
         }
       }
     }



[34/50] [abbrv] hadoop git commit: HDFS-13628. Update Archival Storage doc for Provided Storage

Posted by bo...@apache.org.
HDFS-13628. Update Archival Storage doc for Provided Storage

Signed-off-by: Akira Ajisaka <aa...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04757e58
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04757e58
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04757e58

Branch: refs/heads/YARN-7402
Commit: 04757e5864bd4904fd5a59d143fff480814700e4
Parents: 88cbe57
Author: Takanobu Asanuma <ta...@yahoo-corp.jp>
Authored: Mon May 28 19:04:36 2018 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Mon May 28 19:06:34 2018 +0900

----------------------------------------------------------------------
 .../hadoop-hdfs/src/site/markdown/ArchivalStorage.md             | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04757e58/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index ab7975a..3c49cb1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -35,7 +35,7 @@ A new storage type *ARCHIVE*, which has high storage density (petabyte of storag
 
 Another new storage type *RAM\_DISK* is added for supporting writing single replica files in memory.
 
-### Storage Policies: Hot, Warm, Cold, All\_SSD, One\_SSD and Lazy\_Persist
+### Storage Policies: Hot, Warm, Cold, All\_SSD, One\_SSD, Lazy\_Persist and Provided
 
 A new concept of storage policies is introduced in order to allow files to be stored in different storage types according to the storage policy.
 
@@ -47,6 +47,7 @@ We have the following storage policies:
 * **All\_SSD** - for storing all replicas in SSD.
 * **One\_SSD** - for storing one of the replicas in SSD. The remaining replicas are stored in DISK.
 * **Lazy\_Persist** - for writing blocks with single replica in memory. The replica is first written in RAM\_DISK and then it is lazily persisted in DISK.
+* **Provided** - for storing data outside HDFS. See also [HDFS Provided Storage](./HdfsProvidedStorage.html).
 
 More formally, a storage policy consists of the following fields:
 
@@ -68,6 +69,7 @@ The following is a typical storage policy table.
 | 7 | Hot (default) | DISK: *n* | \<none\> | ARCHIVE |
 | 5 | Warm | DISK: 1, ARCHIVE: *n*-1 | ARCHIVE, DISK | ARCHIVE, DISK |
 | 2 | Cold | ARCHIVE: *n* | \<none\> | \<none\> |
+| 1 | Provided | PROVIDED: 1, DISK: *n*-1 | PROVIDED, DISK | PROVIDED, DISK |
 
 Note 1: The Lazy\_Persist policy is useful only for single replica blocks. For blocks with more than one replicas, all the replicas will be written to DISK since writing only one of the replicas to RAM\_DISK does not improve the overall performance.
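
To show how a policy from this table is applied in practice (a hedged sketch, not part of the patch; the path and class name are hypothetical), the standard FileSystem storage-policy calls can assign the new Provided policy to a directory:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ProvidedPolicySketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path dir = new Path("/data/external"); // hypothetical path

    // Files under this directory now follow the Provided policy: one
    // replica in PROVIDED storage, the remaining replicas on DISK.
    fs.setStoragePolicy(dir, "PROVIDED");
    System.out.println(fs.getStoragePolicy(dir));
  }
}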
 



[48/50] [abbrv] hadoop git commit: YARN-7707. [GPG] Policy generator framework. Contributed by Young Chen

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5da8ca6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo2.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo2.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo2.json
new file mode 100644
index 0000000..2ff879e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo2.json
@@ -0,0 +1,196 @@
+ {
+      "type": "capacityScheduler",
+      "capacity": 100.0,
+      "usedCapacity": 0.0,
+      "maxCapacity": 100.0,
+      "queueName": "root",
+      "queues": {
+        "queue": [
+          {
+            "type": "capacitySchedulerLeafQueueInfo",
+            "capacity": 100.0,
+            "usedCapacity": 0.0,
+            "maxCapacity": 100.0,
+            "absoluteCapacity": 100.0,
+            "absoluteMaxCapacity": 100.0,
+            "absoluteUsedCapacity": 0.0,
+            "numApplications": 484,
+            "queueName": "default",
+            "state": "RUNNING",
+            "resourcesUsed": {
+              "memory": 0,
+              "vCores": 0
+            },
+            "hideReservationQueues": false,
+            "nodeLabels": [
+              "*"
+            ],
+            "numActiveApplications": 484,
+            "numPendingApplications": 0,
+            "numContainers": 0,
+            "maxApplications": 10000,
+            "maxApplicationsPerUser": 10000,
+            "userLimit": 100,
+            "users": {
+              "user": [
+                {
+                  "username": "Default",
+                  "resourcesUsed": {
+                    "memory": 0,
+                    "vCores": 0
+                  },
+                  "numPendingApplications": 0,
+                  "numActiveApplications": 468,
+                  "AMResourceUsed": {
+                    "memory": 30191616,
+                    "vCores": 468
+                  },
+                  "userResourceLimit": {
+                    "memory": 31490048,
+                    "vCores": 7612
+                  }
+                }
+              ]
+            },
+            "userLimitFactor": 1.0,
+            "AMResourceLimit": {
+              "memory": 31490048,
+              "vCores": 7612
+            },
+            "usedAMResource": {
+              "memory": 30388224,
+              "vCores": 532
+            },
+            "userAMResourceLimit": {
+              "memory": 31490048,
+              "vCores": 7612
+            },
+            "preemptionDisabled": true
+          },
+          {
+            "type": "capacitySchedulerLeafQueueInfo",
+            "capacity": 100.0,
+            "usedCapacity": 0.0,
+            "maxCapacity": 100.0,
+            "absoluteCapacity": 100.0,
+            "absoluteMaxCapacity": 100.0,
+            "absoluteUsedCapacity": 0.0,
+            "numApplications": 484,
+            "queueName": "default2",
+            "state": "RUNNING",
+            "resourcesUsed": {
+              "memory": 0,
+              "vCores": 0
+            },
+            "hideReservationQueues": false,
+            "nodeLabels": [
+              "*"
+            ],
+            "numActiveApplications": 484,
+            "numPendingApplications": 0,
+            "numContainers": 0,
+            "maxApplications": 10000,
+            "maxApplicationsPerUser": 10000,
+            "userLimit": 100,
+            "users": {
+              "user": [
+                {
+                  "username": "Default",
+                  "resourcesUsed": {
+                    "memory": 0,
+                    "vCores": 0
+                  },
+                  "numPendingApplications": 0,
+                  "numActiveApplications": 468,
+                  "AMResourceUsed": {
+                    "memory": 30191616,
+                    "vCores": 468
+                  },
+                  "userResourceLimit": {
+                    "memory": 31490048,
+                    "vCores": 7612
+                  }
+                }
+              ]
+            },
+            "userLimitFactor": 1.0,
+            "AMResourceLimit": {
+              "memory": 31490048,
+              "vCores": 7612
+            },
+            "usedAMResource": {
+              "memory": 30388224,
+              "vCores": 532
+            },
+            "userAMResourceLimit": {
+              "memory": 31490048,
+              "vCores": 7612
+            },
+            "preemptionDisabled": true
+          }
+        ]
+      },
+      "health": {
+        "lastrun": 1517951638085,
+        "operationsInfo": {
+          "entry": {
+            "key": "last-allocation",
+            "value": {
+              "nodeId": "node0:0",
+              "containerId": "container_e61477_1517922128312_0340_01_000001",
+              "queue": "root.default"
+            }
+          },
+          "entry": {
+            "key": "last-reservation",
+            "value": {
+              "nodeId": "node0:1",
+              "containerId": "container_e61477_1517879828320_0249_01_000001",
+              "queue": "root.default"
+            }
+          },
+          "entry": {
+            "key": "last-release",
+            "value": {
+              "nodeId": "node0:2",
+              "containerId": "container_e61477_1517922128312_0340_01_000001",
+              "queue": "root.default"
+            }
+          },
+          "entry": {
+            "key": "last-preemption",
+            "value": {
+              "nodeId": "N/A",
+              "containerId": "N/A",
+              "queue": "N/A"
+            }
+          }
+        },
+        "lastRunDetails": [
+          {
+            "operation": "releases",
+            "count": 0,
+            "resources": {
+              "memory": 0,
+              "vCores": 0
+            }
+          },
+          {
+            "operation": "allocations",
+            "count": 0,
+            "resources": {
+              "memory": 0,
+              "vCores": 0
+            }
+          },
+          {
+            "operation": "reservations",
+            "count": 0,
+            "resources": {
+              "memory": 0,
+              "vCores": 0
+            }
+          }
+        ]
+      }
+    }



[12/50] [abbrv] hadoop git commit: YARN-8346. Upgrading to 3.1 kills running containers with error 'Opportunistic container queue is full'. Contributed by Jason Lowe.

Posted by bo...@apache.org.
YARN-8346. Upgrading to 3.1 kills running containers with error 'Opportunistic container queue is full'. Contributed by Jason Lowe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4cc0c9b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4cc0c9b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4cc0c9b0

Branch: refs/heads/YARN-7402
Commit: 4cc0c9b0baa93f5a1c0623eee353874e858a7caa
Parents: 7a87add
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Thu May 24 12:23:47 2018 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Thu May 24 12:23:47 2018 +0530

----------------------------------------------------------------------
 .../yarn/security/ContainerTokenIdentifier.java |  4 ++--
 .../yarn/security/TestYARNTokenIdentifier.java  | 25 ++++++++++++++++++++
 2 files changed, 27 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cc0c9b0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
index 37c74b8..8dea65f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
@@ -292,7 +292,7 @@ public class ContainerTokenIdentifier extends TokenIdentifier {
    */
   public ContainerType getContainerType(){
     if (!proto.hasContainerType()) {
-      return null;
+      return ContainerType.TASK;
     }
     return convertFromProtoFormat(proto.getContainerType());
   }
@@ -303,7 +303,7 @@ public class ContainerTokenIdentifier extends TokenIdentifier {
    */
   public ExecutionType getExecutionType(){
     if (!proto.hasExecutionType()) {
-      return null;
+      return ExecutionType.GUARANTEED;
     }
     return convertFromProtoFormat(proto.getExecutionType());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cc0c9b0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
index 51fbe9a..8109b5e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.impl.pb.LogAggregationContextPBImpl;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
+import org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.ContainerTokenIdentifierProto;
 import org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.YARNDelegationTokenIdentifierProto;
 import org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier;
 import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
@@ -170,6 +171,30 @@ public class TestYARNTokenIdentifier {
   }
 
   @Test
+  public void testContainerTokenIdentifierProtoMissingFields()
+      throws IOException {
+    ContainerTokenIdentifierProto.Builder builder =
+        ContainerTokenIdentifierProto.newBuilder();
+    ContainerTokenIdentifierProto proto = builder.build();
+    Assert.assertFalse(proto.hasContainerType());
+    Assert.assertFalse(proto.hasExecutionType());
+    Assert.assertFalse(proto.hasNodeLabelExpression());
+
+    byte[] tokenData = proto.toByteArray();
+    DataInputBuffer dib = new DataInputBuffer();
+    dib.reset(tokenData, tokenData.length);
+    ContainerTokenIdentifier tid = new ContainerTokenIdentifier();
+    tid.readFields(dib);
+
+    Assert.assertEquals("container type",
+        ContainerType.TASK, tid.getContainerType());
+    Assert.assertEquals("execution type",
+        ExecutionType.GUARANTEED, tid.getExecutionType());
+    Assert.assertEquals("node label expression",
+        CommonNodeLabelsManager.NO_LABEL, tid.getNodeLabelExpression());
+  }
+
+  @Test
   public void testContainerTokenIdentifier() throws IOException {
     testContainerTokenIdentifier(false, false);
   }



[30/50] [abbrv] hadoop git commit: YARN-8213. Add Capacity Scheduler performance metrics. (Weiwei Yang via wangda)

Posted by bo...@apache.org.
YARN-8213. Add Capacity Scheduler performance metrics. (Weiwei Yang via wangda)

Change-Id: Ieea6f3eeb83c90cd74233fea896f0fcd0f325d5f


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f24c842d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f24c842d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f24c842d

Branch: refs/heads/YARN-7402
Commit: f24c842d52e166e8566337ef93c96438f1c870d8
Parents: 8605a38
Author: Wangda Tan <wa...@apache.org>
Authored: Fri May 25 21:53:20 2018 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Fri May 25 21:53:20 2018 -0700

----------------------------------------------------------------------
 .../server/resourcemanager/ResourceManager.java |   1 +
 .../scheduler/AbstractYarnScheduler.java        |   5 +
 .../scheduler/ResourceScheduler.java            |   5 +
 .../scheduler/capacity/CapacityScheduler.java   |  31 ++++-
 .../capacity/CapacitySchedulerMetrics.java      | 119 +++++++++++++++++++
 .../TestCapacitySchedulerMetrics.java           | 110 +++++++++++++++++
 6 files changed, 269 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24c842d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 05745ec..c533111 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -1216,6 +1216,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
   void reinitialize(boolean initialize) {
     ClusterMetrics.destroy();
     QueueMetrics.clearQueueMetrics();
+    getResourceScheduler().resetSchedulerMetrics();
     if (initialize) {
       resetRMContext();
       createAndInitActiveServices(true);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24c842d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index b2747f7..18c7b4e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -1464,4 +1464,9 @@ public abstract class AbstractYarnScheduler
       SchedulingRequest schedulingRequest, SchedulerNode schedulerNode) {
     return false;
   }
+
+  @Override
+  public void resetSchedulerMetrics() {
+    // reset scheduler metrics
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24c842d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
index 5a56ac7..dcb6edd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
@@ -71,4 +71,9 @@ public interface ResourceScheduler extends YarnScheduler, Recoverable {
    */
   boolean attemptAllocationOnNode(SchedulerApplicationAttempt appAttempt,
       SchedulingRequest schedulingRequest, SchedulerNode schedulerNode);
+
+  /**
+   * Reset scheduler metrics.
+   */
+  void resetSchedulerMetrics();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24c842d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 162d3bb..1c9bf6b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -1252,6 +1252,7 @@ public class CapacityScheduler extends
 
   @Override
   protected void nodeUpdate(RMNode rmNode) {
+    long begin = System.nanoTime();
     try {
       readLock.lock();
       setLastNodeUpdateTime(Time.now());
@@ -1279,6 +1280,9 @@ public class CapacityScheduler extends
         writeLock.unlock();
       }
     }
+
+    long latency = System.nanoTime() - begin;
+    CapacitySchedulerMetrics.getMetrics().addNodeUpdate(latency);
   }
 
   /**
@@ -1643,17 +1647,28 @@ public class CapacityScheduler extends
       return null;
     }
 
+    long startTime = System.nanoTime();
+
     // Backward compatible way to make sure previous behavior which allocation
     // driven by node heartbeat works.
     FiCaSchedulerNode node = CandidateNodeSetUtils.getSingleNode(candidates);
 
     // We have two different logics to handle allocation on single node / multi
     // nodes.
+    CSAssignment assignment;
     if (null != node) {
-      return allocateContainerOnSingleNode(candidates, node, withNodeHeartbeat);
+      assignment = allocateContainerOnSingleNode(candidates,
+          node, withNodeHeartbeat);
     } else{
-      return allocateContainersOnMultiNodes(candidates);
+      assignment = allocateContainersOnMultiNodes(candidates);
+    }
+
+    if (assignment != null && assignment.getAssignmentInformation() != null
+        && assignment.getAssignmentInformation().getNumAllocations() > 0) {
+      long allocateTime = System.nanoTime() - startTime;
+      CapacitySchedulerMetrics.getMetrics().addAllocate(allocateTime);
     }
+    return assignment;
   }
 
   @Override
@@ -2806,6 +2821,7 @@ public class CapacityScheduler extends
   @Override
   public boolean tryCommit(Resource cluster, ResourceCommitRequest r,
       boolean updatePending) {
+    long commitStart = System.nanoTime();
     ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode> request =
         (ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode>) r;
 
@@ -2844,9 +2860,15 @@ public class CapacityScheduler extends
       if (app != null && attemptId.equals(app.getApplicationAttemptId())) {
         if (app.accept(cluster, request, updatePending)
             && app.apply(cluster, request, updatePending)) {
+          long commitSuccess = System.nanoTime() - commitStart;
+          CapacitySchedulerMetrics.getMetrics()
+              .addCommitSuccess(commitSuccess);
           LOG.info("Allocation proposal accepted");
           isSuccess = true;
         } else{
+          long commitFailed = System.nanoTime() - commitStart;
+          CapacitySchedulerMetrics.getMetrics()
+              .addCommitFailure(commitFailed);
           LOG.info("Failed to accept allocation proposal");
         }
 
@@ -3029,4 +3051,9 @@ public class CapacityScheduler extends
     }
     return autoCreatedLeafQueue;
   }
+
+  @Override
+  public void resetSchedulerMetrics() {
+    CapacitySchedulerMetrics.destroy();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24c842d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerMetrics.java
new file mode 100644
index 0000000..5f8988b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerMetrics.java
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableRate;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.apache.hadoop.metrics2.lib.Interns.info;
+
+/**
+ * Metrics for capacity scheduler.
+ */
+@InterfaceAudience.Private
+@Metrics(context="yarn")
+public class CapacitySchedulerMetrics {
+
+  private static AtomicBoolean isInitialized = new AtomicBoolean(false);
+
+  private static final MetricsInfo RECORD_INFO =
+      info("CapacitySchedulerMetrics",
+          "Metrics for the Yarn Capacity Scheduler");
+
+  @Metric("Scheduler allocate containers") MutableRate allocate;
+  @Metric("Scheduler commit success") MutableRate commitSuccess;
+  @Metric("Scheduler commit failure") MutableRate commitFailure;
+  @Metric("Scheduler node update") MutableRate nodeUpdate;
+
+  private static volatile CapacitySchedulerMetrics INSTANCE = null;
+  private static MetricsRegistry registry;
+
+  public static CapacitySchedulerMetrics getMetrics() {
+    if (!isInitialized.get()) {
+      synchronized (CapacitySchedulerMetrics.class) {
+        if (INSTANCE == null) {
+          INSTANCE = new CapacitySchedulerMetrics();
+          registerMetrics();
+          isInitialized.set(true);
+        }
+      }
+    }
+    return INSTANCE;
+  }
+
+  private static void registerMetrics() {
+    registry = new MetricsRegistry(RECORD_INFO);
+    registry.tag(RECORD_INFO, "ResourceManager");
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    if (ms != null) {
+      ms.register("CapacitySchedulerMetrics",
+          "Metrics for the Yarn Capacity Scheduler", INSTANCE);
+    }
+  }
+
+  @VisibleForTesting
+  public static synchronized void destroy() {
+    isInitialized.set(false);
+    INSTANCE = null;
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    if (ms != null) {
+      ms.unregisterSource("CapacitySchedulerMetrics");
+    }
+  }
+
+  public void addAllocate(long latency) {
+    this.allocate.add(latency);
+  }
+
+  public void addCommitSuccess(long latency) {
+    this.commitSuccess.add(latency);
+  }
+
+  public void addCommitFailure(long latency) {
+    this.commitFailure.add(latency);
+  }
+
+  public void addNodeUpdate(long latency) {
+    this.nodeUpdate.add(latency);
+  }
+
+  @VisibleForTesting
+  public long getNumOfNodeUpdate() {
+    return this.nodeUpdate.lastStat().numSamples();
+  }
+
+  @VisibleForTesting
+  public long getNumOfAllocates() {
+    return this.allocate.lastStat().numSamples();
+  }
+
+  @VisibleForTesting
+  public long getNumOfCommitSuccess() {
+    return this.commitSuccess.lastStat().numSamples();
+  }
+}

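CapacitySchedulerMetrics is a lazily initialized, destroyable singleton: INSTANCE is volatile, checked once outside the lock and again inside it, and destroy() unregisters the source so each test starts from zero samples. A hedged usage sketch; the timed operation is hypothetical:

    // Hypothetical caller: time an operation and record it on the singleton.
    long begin = System.nanoTime();
    try {
      doSchedulerWork();  // hypothetical work being measured
    } finally {
      CapacitySchedulerMetrics.getMetrics()
          .addNodeUpdate(System.nanoTime() - begin);
    }

    // In test teardown, drop the singleton so samples do not leak across
    // test cases:
    CapacitySchedulerMetrics.destroy();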
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24c842d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestCapacitySchedulerMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestCapacitySchedulerMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestCapacitySchedulerMetrics.java
new file mode 100644
index 0000000..eaa966a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestCapacitySchedulerMetrics.java
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerMetrics;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Test class for CS metrics.
+ */
+public class TestCapacitySchedulerMetrics {
+
+  private MockRM rm;
+
+  @Test
+  public void testCSMetrics() throws Exception {
+    YarnConfiguration conf = new YarnConfiguration();
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
+    conf.setBoolean(
+        CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
+
+    RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
+    mgr.init(conf);
+    rm = new MockRM(conf) {
+      @Override
+      public RMNodeLabelsManager createNodeLabelManager() {
+        return mgr;
+      }
+    };
+
+    rm.getRMContext().setNodeLabelManager(mgr);
+    rm.start();
+
+    MockNM nm1 = rm.registerNode("host1:1234", 2048);
+    MockNM nm2 = rm.registerNode("host2:1234", 2048);
+    nm1.nodeHeartbeat(true);
+    nm2.nodeHeartbeat(true);
+
+    CapacitySchedulerMetrics csMetrics = CapacitySchedulerMetrics.getMetrics();
+    Assert.assertNotNull(csMetrics);
+    try {
+      GenericTestUtils.waitFor(
+          () -> csMetrics.getNumOfNodeUpdate() == 2, 100, 3000);
+    } catch (TimeoutException e) {
+      Assert.fail("CS metrics not updated on node-update events.");
+    }
+
+    Assert.assertEquals(0, csMetrics.getNumOfAllocates());
+    Assert.assertEquals(0, csMetrics.getNumOfCommitSuccess());
+
+    RMApp rmApp = rm.submitApp(1024, "app", "user", null, false,
+        "default", 1, null, null, false);
+    MockAM am = MockRM.launchAMWhenAsyncSchedulingEnabled(rmApp, rm);
+    am.registerAppAttempt();
+    am.allocate("*", 1024, 1, new ArrayList<>());
+
+    nm1.nodeHeartbeat(true);
+    nm2.nodeHeartbeat(true);
+
+    // Verify HB metrics updated
+    try {
+      GenericTestUtils.waitFor(
+          () -> csMetrics.getNumOfNodeUpdate() == 4, 100, 3000);
+    } catch (TimeoutException e) {
+      Assert.fail("CS metrics not updated on node-update events.");
+    }
+
+    // In async mode, the number of allocates may be bigger than 1.
+    Assert.assertTrue(csMetrics.getNumOfAllocates() > 0);
+    // But there will be only 2 successful commits (1 AM + 1 task).
+    Assert.assertEquals(2, csMetrics.getNumOfCommitSuccess());
+  }
+
+  @After
+  public void tearDown() {
+    if (rm != null) {
+      rm.stop();
+    }
+  }
+}

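The test leans on GenericTestUtils.waitFor because metric updates arrive from scheduler threads, not from the test thread. waitFor polls a Supplier<Boolean> at a fixed interval and throws TimeoutException when the deadline passes; sketched below as a fragment lifted from the test above:

    // Poll every 100 ms; give up (TimeoutException) after 3000 ms.
    GenericTestUtils.waitFor(
        () -> csMetrics.getNumOfNodeUpdate() == 2,
        100,    // poll interval, ms
        3000);  // overall timeout, ms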



[08/50] [abbrv] hadoop git commit: YARN-8327. Fix TestAggregatedLogFormat#testReadAcontainerLogs1 on Windows. Contributed by Giovanni Matteo Fumarola.

Posted by bo...@apache.org.
YARN-8327. Fix TestAggregatedLogFormat#testReadAcontainerLogs1 on Windows. Contributed by Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f09dc730
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f09dc730
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f09dc730

Branch: refs/heads/YARN-7402
Commit: f09dc73001fd5f3319765fa997f4b0ca9e8f2aff
Parents: d726156
Author: Inigo Goiri <in...@apache.org>
Authored: Wed May 23 15:59:30 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Wed May 23 15:59:30 2018 -0700

----------------------------------------------------------------------
 .../logaggregation/TestAggregatedLogFormat.java  | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f09dc730/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
index efbaa4c..f85445e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java
@@ -254,13 +254,18 @@ public class TestAggregatedLogFormat {
     // Since we could not open the fileInputStream for stderr, this file is not
     // aggregated.
     String s = writer.toString();
-    int expectedLength =
-        "LogType:stdout".length()
-            + (logUploadedTime ? ("\nLog Upload Time:" + Times.format(System
-              .currentTimeMillis())).length() : 0)
-            + ("\nLogLength:" + numChars).length()
-            + "\nLog Contents:\n".length() + numChars + "\n".length()
-            + "\nEnd of LogType:stdout\n".length();
+
+    int expectedLength = "LogType:stdout".length()
+        + (logUploadedTime
+            ? (System.lineSeparator() + "Log Upload Time:"
+                + Times.format(System.currentTimeMillis())).length()
+            : 0)
+        + (System.lineSeparator() + "LogLength:" + numChars).length()
+        + (System.lineSeparator() + "Log Contents:" + System.lineSeparator())
+            .length()
+        + numChars + ("\n").length() + ("End of LogType:stdout"
+            + System.lineSeparator() + System.lineSeparator()).length();
+
     Assert.assertTrue("LogType not matched", s.contains("LogType:stdout"));
     Assert.assertTrue("log file:stderr should not be aggregated.", !s.contains("LogType:stderr"));
     Assert.assertTrue("log file:logs should not be aggregated.", !s.contains("LogType:logs"));

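The fix substitutes System.lineSeparator() only where the log writer emits framing text (the upload-time, length, contents, and end-of-log markers); the single "\n" after the payload stays literal because the writer appends it explicitly. A reduced sketch of the same accounting, with a hypothetical payload size and the upload-time term omitted:

    // Platform-independent expected length: framing uses the platform
    // separator, the payload terminator is a literal "\n".
    String sep = System.lineSeparator();
    int numChars = 23;  // hypothetical payload size
    int expectedLength = "LogType:stdout".length()
        + (sep + "LogLength:" + numChars).length()
        + (sep + "Log Contents:" + sep).length()
        + numChars + "\n".length()
        + ("End of LogType:stdout" + sep + sep).length();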



[36/50] [abbrv] hadoop git commit: YARN-4781. Support intra-queue preemption for fairness ordering policy. Contributed by Eric Payne.

Posted by bo...@apache.org.
YARN-4781. Support intra-queue preemption for fairness ordering policy. Contributed by Eric Payne.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c343669
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c343669
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c343669

Branch: refs/heads/YARN-7402
Commit: 7c343669baf660df3b70d58987d6e68aec54d6fa
Parents: 61df174
Author: Sunil G <su...@apache.org>
Authored: Mon May 28 16:32:53 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Mon May 28 16:32:53 2018 +0530

----------------------------------------------------------------------
 .../FifoIntraQueuePreemptionPlugin.java         |  37 ++-
 .../capacity/IntraQueueCandidatesSelector.java  |  40 +++
 .../monitor/capacity/TempAppPerPartition.java   |   9 +
 .../AbstractComparatorOrderingPolicy.java       |   2 -
 ...alCapacityPreemptionPolicyMockFramework.java |  12 +-
 ...yPreemptionPolicyIntraQueueFairOrdering.java | 276 +++++++++++++++++++
 6 files changed, 366 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c343669/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
index 40f333f..12c178c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
@@ -34,6 +34,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.IntraQueueCandidatesSelector.TAFairOrderingComparator;
 import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.IntraQueueCandidatesSelector.TAPriorityComparator;
 import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.IntraQueuePreemptionOrderPolicy;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
@@ -41,6 +42,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FairOrderingPolicy;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.OrderingPolicy;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
@@ -263,8 +266,17 @@ public class FifoIntraQueuePreemptionPlugin
       Resource queueReassignableResource,
       PriorityQueue<TempAppPerPartition> orderedByPriority) {
 
-    Comparator<TempAppPerPartition> reverseComp = Collections
-        .reverseOrder(new TAPriorityComparator());
+    Comparator<TempAppPerPartition> reverseComp;
+    OrderingPolicy<FiCaSchedulerApp> queueOrderingPolicy =
+        tq.leafQueue.getOrderingPolicy();
+    if (queueOrderingPolicy instanceof FairOrderingPolicy
+        && (context.getIntraQueuePreemptionOrderPolicy()
+            == IntraQueuePreemptionOrderPolicy.USERLIMIT_FIRST)) {
+      reverseComp = Collections.reverseOrder(
+          new TAFairOrderingComparator(this.rc, clusterResource));
+    } else {
+      reverseComp = Collections.reverseOrder(new TAPriorityComparator());
+    }
     TreeSet<TempAppPerPartition> orderedApps = new TreeSet<>(reverseComp);
 
     String partition = tq.partition;
@@ -355,7 +367,16 @@ public class FifoIntraQueuePreemptionPlugin
       TempQueuePerPartition tq, Collection<FiCaSchedulerApp> apps,
       Resource clusterResource,
       Map<String, Resource> perUserAMUsed) {
-    TAPriorityComparator taComparator = new TAPriorityComparator();
+    Comparator<TempAppPerPartition> taComparator;
+    OrderingPolicy<FiCaSchedulerApp> orderingPolicy =
+        tq.leafQueue.getOrderingPolicy();
+    if (orderingPolicy instanceof FairOrderingPolicy
+        && (context.getIntraQueuePreemptionOrderPolicy()
+            == IntraQueuePreemptionOrderPolicy.USERLIMIT_FIRST)) {
+      taComparator = new TAFairOrderingComparator(this.rc, clusterResource);
+    } else {
+      taComparator = new TAPriorityComparator();
+    }
     PriorityQueue<TempAppPerPartition> orderedByPriority = new PriorityQueue<>(
         100, taComparator);
 
@@ -393,13 +414,12 @@ public class FifoIntraQueuePreemptionPlugin
       // Set ideal allocation of app as 0.
       tmpApp.idealAssigned = Resources.createResource(0, 0);
 
-      orderedByPriority.add(tmpApp);
-
       // Create a TempUserPerPartition structure to hold more information
       // regarding each user's entities such as UserLimit etc. This could
       // be kept in a user to TempUserPerPartition map for further reference.
       String userName = app.getUser();
-      if (!usersPerPartition.containsKey(userName)) {
+      TempUserPerPartition tmpUser = usersPerPartition.get(userName);
+      if (tmpUser == null) {
         ResourceUsage userResourceUsage = tq.leafQueue.getUser(userName)
             .getResourceUsage();
 
@@ -409,7 +429,7 @@ public class FifoIntraQueuePreemptionPlugin
         amUsed = (userSpecificAmUsed == null)
             ? Resources.none() : userSpecificAmUsed;
 
-        TempUserPerPartition tmpUser = new TempUserPerPartition(
+        tmpUser = new TempUserPerPartition(
             tq.leafQueue.getUser(userName), tq.queueName,
             Resources.clone(userResourceUsage.getUsed(partition)),
             Resources.clone(amUsed),
@@ -432,7 +452,10 @@ public class FifoIntraQueuePreemptionPlugin
         tmpUser.idealAssigned = Resources.createResource(0, 0);
         tq.addUserPerPartition(userName, tmpUser);
       }
+      tmpApp.setTempUserPerPartition(tmpUser);
+      orderedByPriority.add(tmpApp);
     }
+
     return orderedByPriority;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c343669/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
index a91fac7..8ab9507 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
@@ -28,6 +28,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsMana
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.AbstractComparatorOrderingPolicy;
+import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
 import java.io.Serializable;
@@ -64,6 +66,44 @@ public class IntraQueueCandidatesSelector extends PreemptionCandidatesSelector {
     }
   }
 
+  /*
+   * Order first by amount used from least to most. Then order from oldest to
+   * youngest if amount used is the same.
+   */
+  static class TAFairOrderingComparator
+      implements Comparator<TempAppPerPartition> {
+
+    private ResourceCalculator rc;
+    private Resource clusterRes;
+
+    TAFairOrderingComparator(ResourceCalculator rc, Resource clusterRes) {
+      this.rc = rc;
+      this.clusterRes = clusterRes;
+    }
+
+    @Override
+    public int compare(TempAppPerPartition ta1, TempAppPerPartition ta2) {
+      if (ta1.getUser().equals(ta2.getUser())) {
+        AbstractComparatorOrderingPolicy<FiCaSchedulerApp> acop =
+            (AbstractComparatorOrderingPolicy<FiCaSchedulerApp>)
+            ta1.getFiCaSchedulerApp().getCSLeafQueue().getOrderingPolicy();
+        return acop.getComparator()
+                  .compare(ta1.getFiCaSchedulerApp(), ta2.getFiCaSchedulerApp());
+      } else {
+        Resource usedByUser1 = ta1.getTempUserPerPartition().getUsedDeductAM();
+        Resource usedByUser2 = ta2.getTempUserPerPartition().getUsedDeductAM();
+        if (Resources.equals(usedByUser1, usedByUser2)) {
+          return ta1.getApplicationId().compareTo(ta2.getApplicationId());
+        }
+        if (Resources.lessThan(rc, clusterRes, usedByUser1, usedByUser2)) {
+          return -1;
+        } else {
+          return 1;
+        }
+      }
+    }
+  }
+
   IntraQueuePreemptionComputePlugin fifoPreemptionComputePlugin = null;
   final CapacitySchedulerPreemptionContext context;
 

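TAFairOrderingComparator is a two-level comparison: across users it orders by the user's usage with AM resources deducted (least first, application id as tie-breaker), and within a single user it defers to the queue's own ordering comparator; the selector then reverses it so the most over-served user's apps are preempted first. A hedged sketch of the same shape using plain JDK types; UserApp is a hypothetical stand-in for TempAppPerPartition:

    import java.util.Comparator;
    import java.util.List;

    public class FairPreemptionOrderDemo {
      // Hypothetical stand-in: app id, owner, owner's usage minus AM.
      record UserApp(int appId, String user, long userUsedMinusAm) {}

      public static void main(String[] args) {
        Comparator<UserApp> fairOrder = (a, b) -> {
          if (a.user().equals(b.user())) {
            // Same user: defer to the queue's policy (here, oldest first).
            return Integer.compare(a.appId(), b.appId());
          }
          int byUsage = Long.compare(a.userUsedMinusAm(), b.userUsedMinusAm());
          // Equal usage across users: break the tie by application id.
          return byUsage != 0 ? byUsage : Integer.compare(a.appId(), b.appId());
        };

        // Reversed, so the most over-served user's app is preempted first.
        List<UserApp> apps = List.of(
            new UserApp(1, "user1", 60), new UserApp(2, "user2", 40));
        apps.stream().sorted(fairOrder.reversed())
            .forEach(a -> System.out.println("preempt from app" + a.appId()));
      }
    }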
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c343669/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempAppPerPartition.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempAppPerPartition.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempAppPerPartition.java
index e9a934b..05d8096 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempAppPerPartition.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempAppPerPartition.java
@@ -34,6 +34,7 @@ public class TempAppPerPartition extends AbstractPreemptionEntity {
   // Following fields are settled and used by candidate selection policies
   private final int priority;
   private final ApplicationId applicationId;
+  private TempUserPerPartition tempUser;
 
   FiCaSchedulerApp app;
 
@@ -102,4 +103,12 @@ public class TempAppPerPartition extends AbstractPreemptionEntity {
       Resources.subtractFrom(getActuallyToBePreempted(), toBeDeduct);
     }
   }
+
+  public void setTempUserPerPartition(TempUserPerPartition tu) {
+    tempUser = tu;
+  }
+
+  public TempUserPerPartition getTempUserPerPartition() {
+    return tempUser;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c343669/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/AbstractComparatorOrderingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/AbstractComparatorOrderingPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/AbstractComparatorOrderingPolicy.java
index b7cb1bf..09dd3bf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/AbstractComparatorOrderingPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/AbstractComparatorOrderingPolicy.java
@@ -26,7 +26,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.*;
 import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
-import com.google.common.annotations.VisibleForTesting;
 
 
 /**
@@ -89,7 +88,6 @@ public abstract class AbstractComparatorOrderingPolicy<S extends SchedulableEnti
     }
   }
 
-  @VisibleForTesting
   public Comparator<SchedulableEntity> getComparator() {
     return comparator; 
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c343669/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
index a972584..64b56fb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preempti
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerPreemptEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FairOrderingPolicy;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.OrderingPolicy;
 import org.apache.hadoop.yarn.util.Clock;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
@@ -64,6 +65,7 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.mockito.ArgumentMatcher;
+import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
@@ -337,9 +339,11 @@ public class ProportionalCapacityPreemptionPolicyMockFramework {
           .thenReturn(pendingForDefaultPartition);
 
       // need to set pending resource in resource usage as well
-      ResourceUsage ru = new ResourceUsage();
+      ResourceUsage ru = Mockito.spy(new ResourceUsage());
       ru.setUsed(label, used);
+      when(ru.getCachedUsed(anyString())).thenReturn(used);
       when(app.getAppAttemptResourceUsage()).thenReturn(ru);
+      when(app.getSchedulingResourceUsage()).thenReturn(ru);
 
       start = end + 1;
     }
@@ -637,6 +641,12 @@ public class ProportionalCapacityPreemptionPolicyMockFramework {
         when(leafQueue.getApplications()).thenReturn(apps);
         when(leafQueue.getAllApplications()).thenReturn(apps);
         OrderingPolicy<FiCaSchedulerApp> so = mock(OrderingPolicy.class);
+        String opName = conf.get(CapacitySchedulerConfiguration.PREFIX
+            + CapacitySchedulerConfiguration.ROOT + "." + getQueueName(q)
+            + ".ordering-policy", "fifo");
+        if (opName.equals("fair")) {
+          so = Mockito.spy(new FairOrderingPolicy<FiCaSchedulerApp>());
+        }
         when(so.getPreemptionIterator()).thenAnswer(new Answer() {
           public Object answer(InvocationOnMock invocation) {
             return apps.descendingIterator();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c343669/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueFairOrdering.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueFairOrdering.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueFairOrdering.java
new file mode 100644
index 0000000..1678651
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueFairOrdering.java
@@ -0,0 +1,276 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
+
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import java.io.IOException;
+
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.junit.Before;
+import org.junit.Test;
+
+/*
+ * Test class for testing intra-queue preemption when the fair ordering policy
+ * is enabled on a capacity queue.
+ */
+public class TestProportionalCapacityPreemptionPolicyIntraQueueFairOrdering
+    extends ProportionalCapacityPreemptionPolicyMockFramework {
+  @Before
+  public void setup() {
+    super.setup();
+    conf.setBoolean(
+        CapacitySchedulerConfiguration.INTRAQUEUE_PREEMPTION_ENABLED, true);
+    policy = new ProportionalCapacityPreemptionPolicy(rmContext, cs, mClock);
+  }
+
+  /*
+   * When the capacity scheduler fair ordering policy is enabled, preempt first
+   * from the application owned by the user that is the farthest over their
+   * user limit.
+   */
+  @Test
+  public void testIntraQueuePreemptionFairOrderingPolicyEnabledOneAppPerUser()
+      throws IOException {
+    // Enable FairOrderingPolicy for yarn.scheduler.capacity.root.a
+    conf.set(CapacitySchedulerConfiguration.PREFIX
+        + CapacitySchedulerConfiguration.ROOT + ".a.ordering-policy", "fair");
+    // Make sure all containers will be preempted in a single round.
+    conf.setFloat(CapacitySchedulerConfiguration.
+        INTRAQUEUE_PREEMPTION_MAX_ALLOWABLE_LIMIT,
+        (float) 1.0);
+
+    String labelsConfig = "=100,true;";
+    String nodesConfig = // n1 has no label
+        "n1= res=100";
+    String queuesConfig =
+        // guaranteed,max,used,pending,reserved
+        "root(=[100 100 100 1 0]);" + // root
+            "-a(=[100 100 100 1 0])"; // a
+
+    // user1/app1 has 60 resources in queue a
+    // user2/app2 has 40 resources in queue a
+    // user3/app3 is requesting 20 resources in queue a
+    // With 3 users, preemptable user limit should be around 35 resources each.
+    // With FairOrderingPolicy enabled on queue a, all 20 resources should be
+    // preempted from app1
+    String appsConfig =
+    // queueName\t(prio,resource,host,expression,#repeat,reserved,pending,user)
+        "a\t" // app1, user1 in a
+            + "(1,1,n1,,60,false,0,user1);" +
+            "a\t" // app2, user2 in a
+            + "(1,1,n1,,40,false,0,user2);" +
+            "a\t" // app3, user3 in a
+            + "(1,1,n1,,0,false,20,user3)"
+            ;
+
+    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
+    policy.editSchedule();
+
+    verify(mDisp, times(20)).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(1))));
+  }
+
+  /*
+   * When the capacity scheduler fifo ordering policy is enabled, preempt first
+   * from the youngest application until it is reduced to its user limit,
+   * then preempt from the next youngest app.
+   */
+  @Test
+  public void testIntraQueuePreemptionFifoOrderingPolicyEnabled()
+      throws IOException {
+    // Enable FifoOrderingPolicy for yarn.scheduler.capacity.root.a
+    conf.set(CapacitySchedulerConfiguration.PREFIX
+        + CapacitySchedulerConfiguration.ROOT + ".a.ordering-policy", "fifo");
+    // Make sure all containers will be preempted in a single round.
+    conf.setFloat(CapacitySchedulerConfiguration.
+        INTRAQUEUE_PREEMPTION_MAX_ALLOWABLE_LIMIT,
+        (float) 1.0);
+
+    String labelsConfig = "=100,true;";
+    String nodesConfig = // n1 has no label
+        "n1= res=100";
+    String queuesConfig =
+        // guaranteed,max,used,pending,reserved
+        "root(=[100 100 100 1 0]);" + // root
+            "-a(=[100 100 100 1 0])"; // a
+
+    // user1/app1 has 60 resources in queue a
+    // user2/app2 has 40 resources in queue a
+    // user3/app3 is requesting 20 resources in queue a
+    // With 3 users, preemptable user limit should be around 35 resources each.
+    // With FifoOrderingPolicy enabled on queue a, the first 5 should come from
+    // the youngest app, app2, until app2 is reduced to the user limit of 35.
+    String appsConfig =
+    // queueName\t(prio,resource,host,expression,#repeat,reserved,pending,user)
+        "a\t" // app1, user1 in a
+            + "(1,1,n1,,60,false,0,user1);" +
+            "a\t" // app2, user2 in a
+            + "(1,1,n1,,40,false,0,user2);" +
+            "a\t" // app3, user3 in a
+            + "(1,1,n1,,0,false,5,user3)"
+            ;
+
+    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
+    policy.editSchedule();
+
+    verify(mDisp, times(5)).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(2))));
+
+    // user1/app1 has 60 resources in queue a
+    // user2/app2 has 35 resources in queue a
+    // user3/app3 has 5 resources and is requesting 15 resources in queue a
+    // With 3 users, preemptable user limit should be around 35 resources each.
+    // The next 15 should come from app1 even though app2 is younger since app2
+    // has already been reduced to its user limit.
+    appsConfig =
+    // queueName\t(prio,resource,host,expression,#repeat,reserved,pending,user)
+        "a\t" // app1, user1 in a
+            + "(1,1,n1,,60,false,0,user1);" +
+            "a\t" // app2, user2 in a
+            + "(1,1,n1,,35,false,0,user2);" +
+            "a\t" // app3, user3 in a
+            + "(1,1,n1,,5,false,15,user3)"
+            ;
+
+    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
+    policy.editSchedule();
+
+    verify(mDisp, times(15)).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(1))));
+  }
+
+  /*
+   * When the capacity scheduler fair ordering policy is enabled, preempt first
+   * from the youngest application of the user that is the farthest over its
+   * user limit.
+   */
+  @Test
+  public void testIntraQueuePreemptionFairOrderingPolicyMultipleAppsPerUser()
+      throws IOException {
+    // Enable FairOrderingPolicy for yarn.scheduler.capacity.root.a
+    conf.set(CapacitySchedulerConfiguration.PREFIX
+        + CapacitySchedulerConfiguration.ROOT + ".a.ordering-policy", "fair");
+    // Make sure all containers will be preempted in a single round.
+    conf.setFloat(CapacitySchedulerConfiguration.
+        INTRAQUEUE_PREEMPTION_MAX_ALLOWABLE_LIMIT,
+        (float) 1.0);
+
+    String labelsConfig = "=100,true;";
+    String nodesConfig = // n1 has no label
+        "n1= res=100";
+    String queuesConfig =
+        // guaranteed,max,used,pending,reserved
+        "root(=[100 100 100 1 0]);" + // root
+            "-a(=[100 100 100 1 0])"; // a
+
+    // user1/app1 has 35 resources in queue a
+    // user1/app2 has 25 resources in queue a
+    // user2/app3 has 40 resources in queue a
+    // user3/app4 is requesting 20 resources in queue a
+    // With 3 users, preemptable user limit should be around 35 resources each.
+    // With FairOrderingPolicy enabled on queue a, all 20 resources should be
+    // preempted from app1 since it is the most over-served app of the most
+    // over-served user.
+    String appsConfig =
+    // queueName\t(prio,resource,host,expression,#repeat,reserved,pending,user)
+        "a\t" // app1 and app2, user1 in a
+            + "(1,1,n1,,35,false,0,user1);" +
+            "a\t"
+            + "(1,1,n1,,25,false,0,user1);" +
+            "a\t" // app3, user2 in a
+            + "(1,1,n1,,40,false,0,user2);" +
+            "a\t" // app4, user3 in a
+            + "(1,1,n1,,0,false,20,user3)"
+            ;
+
+    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
+    policy.editSchedule();
+
+    verify(mDisp, times(20)).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(1))));
+  }
+
+  /*
+   * When the capacity scheduler fifo ordering policy is enabled and a user has
+   * multiple apps, preempt first from the youngest application.
+   */
+  @Test
+  public void testIntraQueuePreemptionFifoOrderingPolicyMultipleAppsPerUser()
+      throws IOException {
+    // Enable FifoOrderingPolicy for yarn.scheduler.capacity.root.a
+    conf.set(CapacitySchedulerConfiguration.PREFIX
+        + CapacitySchedulerConfiguration.ROOT + ".a.ordering-policy", "fifo");
+    // Make sure all containers will be preempted in a single round.
+    conf.setFloat(CapacitySchedulerConfiguration.
+        INTRAQUEUE_PREEMPTION_MAX_ALLOWABLE_LIMIT,
+        (float) 1.0);
+
+    String labelsConfig = "=100,true;";
+    String nodesConfig = // n1 has no label
+        "n1= res=100";
+    String queuesConfig =
+        // guaranteed,max,used,pending,reserved
+        "root(=[100 100 100 1 0]);" + // root
+            "-a(=[100 100 100 1 0])"; // a
+
+    // user1/app1 has 40 resources in queue a
+    // user1/app2 has 20 resources in queue a
+    // user3/app3 has 40 resources in queue a
+    // user4/app4 is requesting 25 resources in queue a
+    // With 3 users, preemptable user limit should be around 35 resources each.
+    String appsConfig =
+    // queueName\t(prio,resource,host,expression,#repeat,reserved,pending,user)
+        "a\t" // app1, user1 in a
+            + "(1,1,n1,,40,false,0,user1);" +
+        "a\t" // app2, user1 in a
+            + "(1,1,n1,,20,false,0,user1);" +
+        "a\t" // app3, user3 in a
+            + "(1,1,n1,,40,false,0,user3);" +
+        "a\t" // app4, user4 in a
+            + "(1,1,n1,,0,false,25,user4)"
+            ;
+
+    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
+    policy.editSchedule();
+
+    // app3 is the youngest and is also over its user limit. 5 should be
+    // preempted from app3 until it comes down to user3's user limit.
+    verify(mDisp, times(5)).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(3))));
+
+    // User1's app2 is its youngest. 19 should be preempted from app2, leaving
+    // only the AM
+    verify(mDisp, times(19)).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(2))));
+
+    // Preempt the remaining resource from User1's oldest app1.
+    verify(mDisp, times(1)).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(1))));
+  }
+}




[29/50] [abbrv] hadoop git commit: HDFS-13620. Randomize the test directory path for TestHDFSFileSystemContract. Contributed by Anbang Hu.

Posted by bo...@apache.org.
HDFS-13620. Randomize the test directory path for TestHDFSFileSystemContract. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8605a385
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8605a385
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8605a385

Branch: refs/heads/YARN-7402
Commit: 8605a38514b4f7a2a549c7ecf8e1421e61bb4d67
Parents: 2a9652e
Author: Inigo Goiri <in...@apache.org>
Authored: Fri May 25 19:43:33 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Fri May 25 19:43:33 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java     | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8605a385/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
index 50d1e75..6da46de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs;
 
+import java.io.File;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
@@ -25,6 +26,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -39,7 +41,9 @@ public class TestHDFSFileSystemContract extends FileSystemContractBaseTest {
     Configuration conf = new HdfsConfiguration();
     conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,
         FileSystemContractBaseTest.TEST_UMASK);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    File basedir = GenericTestUtils.getRandomizedTestDir();
+    cluster = new MiniDFSCluster.Builder(conf, basedir).numDataNodes(2)
+        .build();
     fs = cluster.getFileSystem();
     defaultWorkingDirectory = "/user/" + 
            UserGroupInformation.getCurrentUser().getShortUserName();

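Randomizing the base directory keeps parallel test JVMs, and leftovers from aborted runs, from colliding on a shared MiniDFSCluster path. A sketch of the same idea with plain JDK calls; this is a hypothetical stand-in, not the actual GenericTestUtils.getRandomizedTestDir() implementation:

    import java.io.File;
    import java.io.IOException;
    import java.nio.file.Files;

    final class TestDirs {
      // Unique per-run directory under the build's test data root.
      static File randomizedTestDir() throws IOException {
        File root = new File(
            System.getProperty("test.build.data", "target/test/data"));
        root.mkdirs();
        return Files.createTempDirectory(root.toPath(), "test-").toFile();
      }
    }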



[38/50] [abbrv] hadoop git commit: HDFS-13591. TestDFSShell#testSetrepLow fails on Windows. Contributed by Anbang Hu.

Posted by bo...@apache.org.
HDFS-13591. TestDFSShell#testSetrepLow fails on Windows. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9dbf4f01
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9dbf4f01
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9dbf4f01

Branch: refs/heads/YARN-7402
Commit: 9dbf4f01665d5480a70395a24519cbab5d4db0c5
Parents: 91d7c74
Author: Inigo Goiri <in...@apache.org>
Authored: Mon May 28 16:34:02 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Mon May 28 16:34:02 2018 -0700

----------------------------------------------------------------------
 .../test/java/org/apache/hadoop/hdfs/TestDFSShell.java    | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dbf4f01/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index e82863a..c352dc9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -2829,11 +2829,11 @@ public class TestDFSShell {
         System.setErr(origErr);
       }
 
-      assertEquals("Error message is not the expected error message",
-          "setrep: Requested replication factor of 1 is less than "
-              + "the required minimum of 2 for /tmp/TestDFSShell-"
-              + "testSetrepLow/testFileForSetrepLow\n",
-          bao.toString());
+      assertTrue("Error message is not the expected error message"
+          + bao.toString(), bao.toString().startsWith(
+              "setrep: Requested replication factor of 1 is less than "
+                  + "the required minimum of 2 for /tmp/TestDFSShell-"
+                  + "testSetrepLow/testFileForSetrepLow"));
     } finally {
       shell.close();
       cluster.shutdown();




[05/50] [abbrv] hadoop git commit: HDFS-13493. Reduce the HttpServer2 thread count on DataNodes. Contributed by Erik Krogen.

Posted by bo...@apache.org.
HDFS-13493. Reduce the HttpServer2 thread count on DataNodes. Contributed by Erik Krogen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cddbbe5f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cddbbe5f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cddbbe5f

Branch: refs/heads/YARN-7402
Commit: cddbbe5f690e4617413f6e986adc6fa900629f03
Parents: e30938a
Author: Inigo Goiri <in...@apache.org>
Authored: Wed May 23 12:12:08 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Wed May 23 12:12:08 2018 -0700

----------------------------------------------------------------------
 .../hdfs/server/datanode/web/DatanodeHttpServer.java  | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cddbbe5f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index 0ce327a..4349c26 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@ -89,6 +89,13 @@ public class DatanodeHttpServer implements Closeable {
   private InetSocketAddress httpsAddress;
   static final Log LOG = LogFactory.getLog(DatanodeHttpServer.class);
 
+  // HttpServer threads are only used for the web UI and basic servlets, so
+  // set them to the minimum possible
+  private static final int HTTP_SELECTOR_THREADS = 1;
+  private static final int HTTP_ACCEPTOR_THREADS = 1;
+  private static final int HTTP_MAX_THREADS =
+      HTTP_SELECTOR_THREADS + HTTP_ACCEPTOR_THREADS + 1;
+
   public DatanodeHttpServer(final Configuration conf,
       final DataNode datanode,
       final ServerSocketChannel externalHttpChannel)
@@ -97,7 +104,12 @@ public class DatanodeHttpServer implements Closeable {
     this.conf = conf;
 
     Configuration confForInfoServer = new Configuration(conf);
-    confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
+    confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS_KEY,
+        HTTP_MAX_THREADS);
+    confForInfoServer.setInt(HttpServer2.HTTP_SELECTOR_COUNT_KEY,
+        HTTP_SELECTOR_THREADS);
+    confForInfoServer.setInt(HttpServer2.HTTP_ACCEPTOR_COUNT_KEY,
+        HTTP_ACCEPTOR_THREADS);
     int proxyPort =
         confForInfoServer.getInt(DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT, 0);
     HttpServer2.Builder builder = new HttpServer2.Builder()

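The arithmetic encoded in the constants matters: a Jetty-backed HttpServer2 pool must be larger than its selector and acceptor threads combined, or no worker is left to serve requests, so HTTP_MAX_THREADS is derived as selectors + acceptors + 1 rather than hard-coded. A fragment showing the same sizing applied through the configuration keys the patch uses; the surrounding setup is assumed:

    // Minimum viable pool: one selector, one acceptor, one worker thread.
    int selectors = 1;
    int acceptors = 1;
    Configuration confForInfoServer = new Configuration(conf);
    confForInfoServer.setInt(HttpServer2.HTTP_SELECTOR_COUNT_KEY, selectors);
    confForInfoServer.setInt(HttpServer2.HTTP_ACCEPTOR_COUNT_KEY, acceptors);
    confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS_KEY,
        selectors + acceptors + 1);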



[46/50] [abbrv] hadoop git commit: YARN-6648. [GPG] Add SubClusterCleaner in Global Policy Generator. (botong)

Posted by bo...@apache.org.
YARN-6648. [GPG] Add SubClusterCleaner in Global Policy Generator. (botong)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46a4a945
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46a4a945
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46a4a945

Branch: refs/heads/YARN-7402
Commit: 46a4a945732afdefec9828d1c43b77d32609bb8a
Parents: bca8e9b
Author: Botong Huang <bo...@apache.org>
Authored: Thu Feb 1 14:43:48 2018 -0800
Committer: Botong Huang <bo...@apache.org>
Committed: Tue May 29 10:48:40 2018 -0700

----------------------------------------------------------------------
 .../dev-support/findbugs-exclude.xml            |   5 +
 .../hadoop/yarn/conf/YarnConfiguration.java     |  18 +++
 .../src/main/resources/yarn-default.xml         |  24 ++++
 .../store/impl/MemoryFederationStateStore.java  |  13 ++
 .../utils/FederationStateStoreFacade.java       |  41 ++++++-
 .../GlobalPolicyGenerator.java                  |  92 ++++++++++-----
 .../subclustercleaner/SubClusterCleaner.java    | 109 +++++++++++++++++
 .../subclustercleaner/package-info.java         |  19 +++
 .../TestSubClusterCleaner.java                  | 118 +++++++++++++++++++
 9 files changed, 409 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46a4a945/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 5841361..bf2e376 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -380,6 +380,11 @@
     <Method name="initAndStartNodeManager" />
     <Bug pattern="DM_EXIT" />
   </Match>
+  <Match>
+    <Class name="org.apache.hadoop.yarn.server.globalpolicygenerator.GlobalPolicyGenerator" />
+    <Method name="startGPG" />
+    <Bug pattern="DM_EXIT" />
+  </Match>
  
   <!-- Ignore heartbeat exception when killing localizer -->
   <Match>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46a4a945/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index f7f82f8..7c78e0d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3326,6 +3326,24 @@ public class YarnConfiguration extends Configuration {
   public static final boolean DEFAULT_ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED =
       false;
 
+  private static final String FEDERATION_GPG_PREFIX =
+      FEDERATION_PREFIX + "gpg.";
+
+  // The number of threads to use for the GPG scheduled executor service
+  public static final String GPG_SCHEDULED_EXECUTOR_THREADS =
+      FEDERATION_GPG_PREFIX + "scheduled.executor.threads";
+  public static final int DEFAULT_GPG_SCHEDULED_EXECUTOR_THREADS = 10;
+
+  // The interval at which the subcluster cleaner runs, -1 means disabled
+  public static final String GPG_SUBCLUSTER_CLEANER_INTERVAL_MS =
+      FEDERATION_GPG_PREFIX + "subcluster.cleaner.interval-ms";
+  public static final long DEFAULT_GPG_SUBCLUSTER_CLEANER_INTERVAL_MS = -1;
+
+  // The expiration time for a subcluster heartbeat, default is 30 minutes
+  public static final String GPG_SUBCLUSTER_EXPIRATION_MS =
+      FEDERATION_GPG_PREFIX + "subcluster.heartbeat.expiration-ms";
+  public static final long DEFAULT_GPG_SUBCLUSTER_EXPIRATION_MS = 1800000;
+
   ////////////////////////////////
   // Other Configs
   ////////////////////////////////

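The three settings above drive the subcluster cleaner added later in this patch. As a minimal sketch of how a deployment might enable it programmatically (the sixty-second interval is a hypothetical value chosen for illustration, not a recommended default):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class GpgCleanerConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        // Enable the subcluster cleaner; it is off by default (-1).
        conf.setLong(YarnConfiguration.GPG_SUBCLUSTER_CLEANER_INTERVAL_MS, 60000L);
        // State the default 30-minute heartbeat expiration explicitly.
        conf.setLong(YarnConfiguration.GPG_SUBCLUSTER_EXPIRATION_MS, 1800000L);
        System.out.println(
            conf.getLong(YarnConfiguration.GPG_SUBCLUSTER_CLEANER_INTERVAL_MS, -1));
      }
    }
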
http://git-wip-us.apache.org/repos/asf/hadoop/blob/46a4a945/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index b0ffc48..8a450d3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3524,6 +3524,30 @@
 
   <property>
     <description>
+      The number of threads to use for the GPG scheduled executor service.
+    </description>
+    <name>yarn.federation.gpg.scheduled.executor.threads</name>
+    <value>10</value>
+  </property>
+
+  <property>
+    <description>
+      The interval at which the subcluster cleaner runs, -1 means disabled.
+    </description>
+    <name>yarn.federation.gpg.subcluster.cleaner.interval-ms</name>
+    <value>-1</value>
+  </property>
+
+  <property>
+    <description>
+      The expiration time for a subcluster heartbeat, default is 30 minutes.
+    </description>
+    <name>yarn.federation.gpg.subcluster.heartbeat.expiration-ms</name>
+    <value>1800000</value>
+  </property>
+
+  <property>
+    <description>
        It is TimelineClient 1.5 configuration whether to store active
        application’s timeline data with in user directory i.e
        ${yarn.timeline-service.entity-group-fs-store.active-dir}/${user.name}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46a4a945/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
index 7c06256..b42fc79 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
@@ -68,6 +68,8 @@ import org.apache.hadoop.yarn.util.MonotonicClock;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * In-memory implementation of {@link FederationStateStore}.
  */
@@ -158,6 +160,17 @@ public class MemoryFederationStateStore implements FederationStateStore {
     return SubClusterHeartbeatResponse.newInstance();
   }
 
+  @VisibleForTesting
+  public void setSubClusterLastHeartbeat(SubClusterId subClusterId,
+      long lastHeartbeat) throws YarnException {
+    SubClusterInfo subClusterInfo = membership.get(subClusterId);
+    if (subClusterInfo == null) {
+      throw new YarnException(
+          "Subcluster " + subClusterId.toString() + " does not exist");
+    }
+    subClusterInfo.setLastHeartBeat(lastHeartbeat);
+  }
+
   @Override
   public GetSubClusterInfoResponse getSubCluster(
       GetSubClusterInfoRequest request) throws YarnException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46a4a945/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
index 682eb14..ef77114 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
@@ -62,9 +62,11 @@ import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolic
 import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationResponse;
 import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoRequest;
 import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState;
 import org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterRequest;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -221,6 +223,22 @@ public final class FederationStateStoreFacade {
   }
 
   /**
+   * Deregisters a <em>subcluster</em> identified by {@code SubClusterId},
+   * changing its state in the federation. This can be used to mark the
+   * sub-cluster lost, deregistered, or decommissioned.
+   *
+   * @param subClusterId the target subclusterId
+   * @param subClusterState the state to update it to
+   * @throws YarnException if the request is invalid/fails
+   */
+  public void deregisterSubCluster(SubClusterId subClusterId,
+      SubClusterState subClusterState) throws YarnException {
+    stateStore.deregisterSubCluster(
+        SubClusterDeregisterRequest.newInstance(subClusterId, subClusterState));
+    return;
+  }
+
+  /**
    * Returns the {@link SubClusterInfo} for the specified {@link SubClusterId}.
    *
    * @param subClusterId the identifier of the sub-cluster
@@ -255,8 +273,7 @@ public final class FederationStateStoreFacade {
   public SubClusterInfo getSubCluster(final SubClusterId subClusterId,
       final boolean flushCache) throws YarnException {
     if (flushCache && isCachingEnabled()) {
-      LOG.info("Flushing subClusters from cache and rehydrating from store,"
-          + " most likely on account of RM failover.");
+      LOG.info("Flushing subClusters from cache and rehydrating from store.");
       cache.remove(buildGetSubClustersCacheRequest(false));
     }
     return getSubCluster(subClusterId);
@@ -287,6 +304,26 @@ public final class FederationStateStoreFacade {
   }
 
   /**
+   * Updates the cache from the central {@link FederationStateStore} and
+   * returns the {@link SubClusterInfo} of all known sub-clusters.
+   *
+   * @param filterInactiveSubClusters whether to filter out inactive
+   *          sub-clusters
+   * @param flushCache flag to indicate if the cache should be flushed or not
+   * @return the sub cluster information
+   * @throws YarnException if the call to the state store is unsuccessful
+   */
+  public Map<SubClusterId, SubClusterInfo> getSubClusters(
+      final boolean filterInactiveSubClusters, final boolean flushCache)
+      throws YarnException {
+    if (flushCache && isCachingEnabled()) {
+      LOG.info("Flushing subClusters from cache and rehydrating from store.");
+      cache.remove(buildGetSubClustersCacheRequest(filterInactiveSubClusters));
+    }
+    return getSubClusters(filterInactiveSubClusters);
+  }
+
+  /**
    * Returns the {@link SubClusterPolicyConfiguration} for the specified queue.
    *
    * @param queue the queue whose policy is required

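For context, a hedged sketch of the new deregistration entry point; it assumes a facade already initialized against a configured state store, and the subcluster id is hypothetical:

    import org.apache.hadoop.yarn.exceptions.YarnException;
    import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
    import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState;
    import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;

    public class DeregisterSketch {
      public static void main(String[] args) throws YarnException {
        FederationStateStoreFacade facade = FederationStateStoreFacade.getInstance();
        // Hypothetical subcluster id, for illustration only.
        SubClusterId subClusterId = SubClusterId.newInstance("SUBCLUSTER-0");
        // Flush the cache first so the decision uses fresh membership data.
        facade.getSubClusters(false, true);
        // Mark the subcluster LOST in the central membership table.
        facade.deregisterSubCluster(subClusterId, SubClusterState.SC_LOST);
      }
    }
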
http://git-wip-us.apache.org/repos/asf/hadoop/blob/46a4a945/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
index c1f7460..f6cfba0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
@@ -18,8 +18,11 @@
 
 package org.apache.hadoop.yarn.server.globalpolicygenerator;
 
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
+import org.apache.commons.lang.time.DurationFormatUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.service.CompositeService;
@@ -28,6 +31,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+import org.apache.hadoop.yarn.server.globalpolicygenerator.subclustercleaner.SubClusterCleaner;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -55,36 +59,26 @@ public class GlobalPolicyGenerator extends CompositeService {
   // Federation Variables
   private GPGContext gpgContext;
 
+  // Scheduler service that runs tasks periodically
+  private ScheduledThreadPoolExecutor scheduledExecutorService;
+  private SubClusterCleaner subClusterCleaner;
+
   public GlobalPolicyGenerator() {
     super(GlobalPolicyGenerator.class.getName());
     this.gpgContext = new GPGContextImpl();
   }
 
-  protected void initAndStart(Configuration conf, boolean hasToReboot) {
-    try {
-      // Remove the old hook if we are rebooting.
-      if (hasToReboot && null != gpgShutdownHook) {
-        ShutdownHookManager.get().removeShutdownHook(gpgShutdownHook);
-      }
-
-      gpgShutdownHook = new CompositeServiceShutdownHook(this);
-      ShutdownHookManager.get().addShutdownHook(gpgShutdownHook,
-          SHUTDOWN_HOOK_PRIORITY);
-
-      this.init(conf);
-      this.start();
-    } catch (Throwable t) {
-      LOG.error("Error starting globalpolicygenerator", t);
-      System.exit(-1);
-    }
-  }
-
   @Override
   protected void serviceInit(Configuration conf) throws Exception {
     // Set up the context
     this.gpgContext
         .setStateStoreFacade(FederationStateStoreFacade.getInstance());
 
+    this.scheduledExecutorService = new ScheduledThreadPoolExecutor(
+        conf.getInt(YarnConfiguration.GPG_SCHEDULED_EXECUTOR_THREADS,
+            YarnConfiguration.DEFAULT_GPG_SCHEDULED_EXECUTOR_THREADS));
+    this.subClusterCleaner = new SubClusterCleaner(conf, this.gpgContext);
+
     DefaultMetricsSystem.initialize(METRICS_NAME);
 
     // super.serviceInit after all services are added
@@ -94,10 +88,32 @@ public class GlobalPolicyGenerator extends CompositeService {
   @Override
   protected void serviceStart() throws Exception {
     super.serviceStart();
+
+    // Schedule the SubClusterCleaner service
+    long scCleanerIntervalMs = getConfig().getLong(
+        YarnConfiguration.GPG_SUBCLUSTER_CLEANER_INTERVAL_MS,
+        YarnConfiguration.DEFAULT_GPG_SUBCLUSTER_CLEANER_INTERVAL_MS);
+    if (scCleanerIntervalMs > 0) {
+      this.scheduledExecutorService.scheduleAtFixedRate(this.subClusterCleaner,
+          0, scCleanerIntervalMs, TimeUnit.MILLISECONDS);
+      LOG.info("Scheduled sub-cluster cleaner with interval: {}",
+          DurationFormatUtils.formatDurationISO(scCleanerIntervalMs));
+    }
   }
 
   @Override
   protected void serviceStop() throws Exception {
+    try {
+      if (this.scheduledExecutorService != null
+          && !this.scheduledExecutorService.isShutdown()) {
+        this.scheduledExecutorService.shutdown();
+        LOG.info("Stopped ScheduledExecutorService");
+      }
+    } catch (Exception e) {
+      LOG.error("Failed to shutdown ScheduledExecutorService", e);
+      throw e;
+    }
+
     if (this.isStopping.getAndSet(true)) {
       return;
     }
@@ -113,20 +129,40 @@ public class GlobalPolicyGenerator extends CompositeService {
     return this.gpgContext;
   }
 
+  private void initAndStart(Configuration conf, boolean hasToReboot) {
+    // Remove the old hook if we are rebooting.
+    if (hasToReboot && null != gpgShutdownHook) {
+      ShutdownHookManager.get().removeShutdownHook(gpgShutdownHook);
+    }
+
+    gpgShutdownHook = new CompositeServiceShutdownHook(this);
+    ShutdownHookManager.get().addShutdownHook(gpgShutdownHook,
+        SHUTDOWN_HOOK_PRIORITY);
+
+    this.init(conf);
+    this.start();
+  }
+
   @SuppressWarnings("resource")
   public static void startGPG(String[] argv, Configuration conf) {
     boolean federationEnabled =
         conf.getBoolean(YarnConfiguration.FEDERATION_ENABLED,
             YarnConfiguration.DEFAULT_FEDERATION_ENABLED);
-    if (federationEnabled) {
-      Thread.setDefaultUncaughtExceptionHandler(
-          new YarnUncaughtExceptionHandler());
-      StringUtils.startupShutdownMessage(GlobalPolicyGenerator.class, argv,
-          LOG);
-      GlobalPolicyGenerator globalPolicyGenerator = new GlobalPolicyGenerator();
-      globalPolicyGenerator.initAndStart(conf, false);
-    } else {
-      LOG.warn("Federation is not enabled. The gpg cannot start.");
+    try {
+      if (federationEnabled) {
+        Thread.setDefaultUncaughtExceptionHandler(
+            new YarnUncaughtExceptionHandler());
+        StringUtils.startupShutdownMessage(GlobalPolicyGenerator.class, argv,
+            LOG);
+        GlobalPolicyGenerator globalPolicyGenerator =
+            new GlobalPolicyGenerator();
+        globalPolicyGenerator.initAndStart(conf, false);
+      } else {
+        LOG.warn("Federation is not enabled. The gpg cannot start.");
+      }
+    } catch (Throwable t) {
+      LOG.error("Error starting globalpolicygenerator", t);
+      System.exit(-1);
     }
   }
 

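The scheduling contract above is plain java.util.concurrent: an initial delay of zero, a fixed period, and a non-positive interval meaning the task is never scheduled. A self-contained sketch of the same pattern (the 200 ms interval is hypothetical; the GPG reads its interval from configuration):

    import java.util.concurrent.ScheduledThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class FixedRateSketch {
      public static void main(String[] args) throws InterruptedException {
        ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1);
        long intervalMs = 200L;
        if (intervalMs > 0) {  // a non-positive interval leaves the task unscheduled
          executor.scheduleAtFixedRate(
              () -> System.out.println("periodic task ran"),
              0, intervalMs, TimeUnit.MILLISECONDS);
        }
        Thread.sleep(1000L);
        // Orderly shutdown, as in serviceStop() above.
        executor.shutdown();
      }
    }
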
http://git-wip-us.apache.org/repos/asf/hadoop/blob/46a4a945/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java
new file mode 100644
index 0000000..dad5121
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator.subclustercleaner;
+
+import java.util.Date;
+import java.util.Map;
+
+import org.apache.commons.lang.time.DurationFormatUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState;
+import org.apache.hadoop.yarn.server.globalpolicygenerator.GPGContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The sub-cluster cleaner is one of the GPG's services. It periodically
+ * checks the membership table in FederationStateStore and marks sub-clusters
+ * that have not sent a heartbeat in a certain amount of time as LOST.
+ */
+public class SubClusterCleaner implements Runnable {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SubClusterCleaner.class);
+
+  private GPGContext gpgContext;
+  private long heartbeatExpirationMillis;
+
+  /**
+   * The sub-cluster cleaner runnable is invoked by the GPG's scheduled
+   * executor to check the membership table and mark sub-clusters that have
+   * not sent a heartbeat within the expiration period as LOST.
+   */
+  public SubClusterCleaner(Configuration conf, GPGContext gpgContext) {
+    this.heartbeatExpirationMillis =
+        conf.getLong(YarnConfiguration.GPG_SUBCLUSTER_EXPIRATION_MS,
+            YarnConfiguration.DEFAULT_GPG_SUBCLUSTER_EXPIRATION_MS);
+    this.gpgContext = gpgContext;
+    LOG.info("Initialized SubClusterCleaner with heartbeat expiration of {}",
+        DurationFormatUtils.formatDurationISO(this.heartbeatExpirationMillis));
+  }
+
+  @Override
+  public void run() {
+    try {
+      Date now = new Date();
+      LOG.info("SubClusterCleaner at {}", now);
+
+      Map<SubClusterId, SubClusterInfo> infoMap =
+          this.gpgContext.getStateStoreFacade().getSubClusters(false, true);
+
+      // Iterate over each sub cluster and check last heartbeat
+      for (Map.Entry<SubClusterId, SubClusterInfo> entry : infoMap.entrySet()) {
+        SubClusterInfo subClusterInfo = entry.getValue();
+
+        Date lastHeartBeat = new Date(subClusterInfo.getLastHeartBeat());
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Checking subcluster {} in state {}, last heartbeat at {}",
+              subClusterInfo.getSubClusterId(), subClusterInfo.getState(),
+              lastHeartBeat);
+        }
+
+        if (!subClusterInfo.getState().isUnusable()) {
+          long timeUntilDeregister = this.heartbeatExpirationMillis
+              - (now.getTime() - lastHeartBeat.getTime());
+          // Deregister sub-cluster as SC_LOST if last heartbeat too old
+          if (timeUntilDeregister < 0) {
+            LOG.warn(
+                "Deregistering subcluster {} in state {} last heartbeat at {}",
+                subClusterInfo.getSubClusterId(), subClusterInfo.getState(),
+                new Date(subClusterInfo.getLastHeartBeat()));
+            try {
+              this.gpgContext.getStateStoreFacade().deregisterSubCluster(
+                  subClusterInfo.getSubClusterId(), SubClusterState.SC_LOST);
+            } catch (Exception e) {
+              LOG.error("deregisterSubCluster failed on subcluster "
+                  + subClusterInfo.getSubClusterId(), e);
+            }
+          } else if (LOG.isDebugEnabled()) {
+            LOG.debug("Time until deregister for subcluster {}: {}",
+                entry.getKey(),
+                DurationFormatUtils.formatDurationISO(timeUntilDeregister));
+          }
+        }
+      }
+    } catch (Throwable e) {
+      LOG.error("Subcluster cleaner fails: ", e);
+    }
+  }
+
+}

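The deregistration test in run() reduces to a single subtraction. A small sketch of that arithmetic, with hypothetical numbers:

    public class HeartbeatExpirySketch {
      // Mirrors the check in SubClusterCleaner.run(): a negative result means
      // the last heartbeat is older than the expiration window.
      static long timeUntilDeregister(long expirationMs, long nowMs,
          long lastHeartbeatMs) {
        return expirationMs - (nowMs - lastHeartbeatMs);
      }

      public static void main(String[] args) {
        long now = System.currentTimeMillis();
        // Hypothetical: 30-minute expiration, last heartbeat 31 minutes ago.
        long remaining =
            timeUntilDeregister(1800000L, now, now - 31L * 60L * 1000L);
        System.out.println(remaining < 0 ? "deregister as SC_LOST" : "still alive");
      }
    }
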
http://git-wip-us.apache.org/repos/asf/hadoop/blob/46a4a945/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/package-info.java
new file mode 100644
index 0000000..f65444a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/package-info.java
@@ -0,0 +1,19 @@
+/**
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator.subclustercleaner;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46a4a945/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/TestSubClusterCleaner.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/TestSubClusterCleaner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/TestSubClusterCleaner.java
new file mode 100644
index 0000000..19b8802
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/TestSubClusterCleaner.java
@@ -0,0 +1,118 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator.subclustercleaner;
+
+import java.util.ArrayList;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.federation.store.impl.MemoryFederationStateStore;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState;
+import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+import org.apache.hadoop.yarn.server.globalpolicygenerator.GPGContext;
+import org.apache.hadoop.yarn.server.globalpolicygenerator.GPGContextImpl;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Unit test for Sub-cluster Cleaner in GPG.
+ */
+public class TestSubClusterCleaner {
+
+  private Configuration conf;
+  private MemoryFederationStateStore stateStore;
+  private FederationStateStoreFacade facade;
+  private SubClusterCleaner cleaner;
+  private GPGContext gpgContext;
+
+  private ArrayList<SubClusterId> subClusterIds;
+
+  @Before
+  public void setup() throws YarnException {
+    conf = new YarnConfiguration();
+
+    // subcluster expires in one second
+    conf.setLong(YarnConfiguration.GPG_SUBCLUSTER_EXPIRATION_MS, 1000);
+
+    stateStore = new MemoryFederationStateStore();
+    stateStore.init(conf);
+
+    facade = FederationStateStoreFacade.getInstance();
+    facade.reinitialize(stateStore, conf);
+
+    gpgContext = new GPGContextImpl();
+    gpgContext.setStateStoreFacade(facade);
+
+    cleaner = new SubClusterCleaner(conf, gpgContext);
+
+    // Create and register three sub-clusters
+    subClusterIds = new ArrayList<SubClusterId>();
+    for (int i = 0; i < 3; i++) {
+      // Create sub cluster id and info
+      SubClusterId subClusterId =
+          SubClusterId.newInstance("SUBCLUSTER-" + Integer.toString(i));
+
+      SubClusterInfo subClusterInfo = SubClusterInfo.newInstance(subClusterId,
+          "1.2.3.4:1", "1.2.3.4:2", "1.2.3.4:3", "1.2.3.4:4",
+          SubClusterState.SC_RUNNING, System.currentTimeMillis(), "");
+      // Register the sub cluster
+      stateStore.registerSubCluster(
+          SubClusterRegisterRequest.newInstance(subClusterInfo));
+      // Append the id to a local list
+      subClusterIds.add(subClusterId);
+    }
+  }
+
+  @After
+  public void breakDown() throws Exception {
+    stateStore.close();
+  }
+
+  @Test
+  public void testSubClusterRegisterHeartBeatTime() throws YarnException {
+    cleaner.run();
+    Assert.assertEquals(3, facade.getSubClusters(true, true).size());
+  }
+
+  /**
+   * Test the base use case.
+   */
+  @Test
+  public void testSubClusterHeartBeat() throws YarnException {
+    // The first subcluster reports as Unhealthy
+    SubClusterId subClusterId = subClusterIds.get(0);
+    stateStore.subClusterHeartbeat(SubClusterHeartbeatRequest
+        .newInstance(subClusterId, SubClusterState.SC_UNHEALTHY, "capacity"));
+
+    // The second subcluster has not heartbeated for two seconds; mark it lost
+    subClusterId = subClusterIds.get(1);
+    stateStore.setSubClusterLastHeartbeat(subClusterId,
+        System.currentTimeMillis() - 2000);
+
+    cleaner.run();
+    Assert.assertEquals(1, facade.getSubClusters(true, true).size());
+  }
+}
\ No newline at end of file


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[09/50] [abbrv] hadoop git commit: YARN-4599. Set OOM control for memory cgroups. (Miklos Szegedi via Haibo Chen)

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java
new file mode 100644
index 0000000..118d172
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java
@@ -0,0 +1,319 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.junit.Test;
+
+import java.io.File;
+import java.nio.charset.Charset;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test for elastic non-strict memory controller based on cgroups.
+ */
+public class TestCGroupElasticMemoryController {
+  private YarnConfiguration conf = new YarnConfiguration();
+  private File script = new File("target/" +
+      TestCGroupElasticMemoryController.class.getName());
+
+  /**
+   * Test that at least one memory type is requested.
+   * @throws YarnException on exception
+   */
+  @Test(expected = YarnException.class)
+  public void testConstructorOff()
+      throws YarnException {
+    CGroupElasticMemoryController controller =
+        new CGroupElasticMemoryController(
+            conf,
+            null,
+            null,
+            false,
+            false,
+            10000
+        );
+  }
+
+  /**
+   * Test that the OOM logic is pluggable.
+   * @throws YarnException on exception
+   */
+  @Test
+  public void testConstructorHandler()
+      throws YarnException {
+    conf.setClass(YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_HANDLER,
+        DummyRunnableWithContext.class, Runnable.class);
+    CGroupsHandler handler = mock(CGroupsHandler.class);
+    when(handler.getPathForCGroup(any(), any())).thenReturn("");
+    CGroupElasticMemoryController controller =
+        new CGroupElasticMemoryController(
+            conf,
+            null,
+            handler,
+            true,
+            false,
+            10000
+        );
+  }
+
+  /**
+   * Test that the handler is notified about multiple OOM events.
+   * @throws Exception on exception
+   */
+  @Test
+  public void testMultipleOOMEvents() throws Exception {
+    conf.set(YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_LISTENER_PATH,
+        script.getAbsolutePath());
+    try {
+      FileUtils.writeStringToFile(script,
+          "#!/bin/bash\nprintf oomevent;printf oomevent;\n",
+          Charset.defaultCharset(), false);
+      assertTrue("Could not set executable",
+          script.setExecutable(true));
+
+      CGroupsHandler cgroups = mock(CGroupsHandler.class);
+      when(cgroups.getPathForCGroup(any(), any())).thenReturn("");
+      when(cgroups.getCGroupParam(any(), any(), any()))
+          .thenReturn("under_oom 0");
+
+      Runnable handler = mock(Runnable.class);
+      doNothing().when(handler).run();
+
+      CGroupElasticMemoryController controller =
+          new CGroupElasticMemoryController(
+              conf,
+              null,
+              cgroups,
+              true,
+              false,
+              10000,
+              handler
+          );
+      controller.run();
+      verify(handler, times(2)).run();
+    } finally {
+      assertTrue(String.format("Could not clean up script %s",
+          script.getAbsolutePath()), script.delete());
+    }
+  }
+
+  /**
+   * Test the scenario where the controller is stopped before
+   * the child process starts.
+   * @throws Exception on exception
+   */
+  @Test
+  public void testStopBeforeStart() throws Exception {
+    conf.set(YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_LISTENER_PATH,
+        script.getAbsolutePath());
+    try {
+      FileUtils.writeStringToFile(script,
+          "#!/bin/bash\nprintf oomevent;printf oomevent;\n",
+          Charset.defaultCharset(), false);
+      assertTrue("Could not set executable",
+          script.setExecutable(true));
+
+      CGroupsHandler cgroups = mock(CGroupsHandler.class);
+      when(cgroups.getPathForCGroup(any(), any())).thenReturn("");
+      when(cgroups.getCGroupParam(any(), any(), any()))
+          .thenReturn("under_oom 0");
+
+      Runnable handler = mock(Runnable.class);
+      doNothing().when(handler).run();
+
+      CGroupElasticMemoryController controller =
+          new CGroupElasticMemoryController(
+              conf,
+              null,
+              cgroups,
+              true,
+              false,
+              10000,
+              handler
+          );
+      controller.stopListening();
+      controller.run();
+      verify(handler, times(0)).run();
+    } finally {
+      assertTrue(String.format("Could not clean up script %s",
+          script.getAbsolutePath()), script.delete());
+    }
+  }
+
+  /**
+   * Test the edge case that OOM is never resolved.
+   * @throws Exception on exception
+   */
+  @Test(expected = YarnRuntimeException.class)
+  public void testInfiniteOOM() throws Exception {
+    conf.set(YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_LISTENER_PATH,
+        script.getAbsolutePath());
+    Runnable handler = mock(Runnable.class);
+    try {
+      FileUtils.writeStringToFile(script,
+          "#!/bin/bash\nprintf oomevent;sleep 1000;\n",
+          Charset.defaultCharset(), false);
+      assertTrue("Could not set executable",
+          script.setExecutable(true));
+
+      CGroupsHandler cgroups = mock(CGroupsHandler.class);
+      when(cgroups.getPathForCGroup(any(), any())).thenReturn("");
+      when(cgroups.getCGroupParam(any(), any(), any()))
+          .thenReturn("under_oom 1");
+
+      doNothing().when(handler).run();
+
+      CGroupElasticMemoryController controller =
+          new CGroupElasticMemoryController(
+              conf,
+              null,
+              cgroups,
+              true,
+              false,
+              10000,
+              handler
+          );
+      controller.run();
+    } finally {
+      verify(handler, times(1)).run();
+      assertTrue(String.format("Could not clean up script %s",
+          script.getAbsolutePath()), script.delete());
+    }
+  }
+
+  /**
+   * Test the edge case that OOM cannot be resolved due to the lack of
+   * containers.
+   * @throws Exception on exception
+   */
+  @Test(expected = YarnRuntimeException.class)
+  public void testNothingToKill() throws Exception {
+    conf.set(YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_LISTENER_PATH,
+        script.getAbsolutePath());
+    Runnable handler = mock(Runnable.class);
+    try {
+      FileUtils.writeStringToFile(script,
+          "#!/bin/bash\nprintf oomevent;sleep 1000;\n",
+          Charset.defaultCharset(), false);
+      assertTrue("Could not set executable",
+          script.setExecutable(true));
+
+      CGroupsHandler cgroups = mock(CGroupsHandler.class);
+      when(cgroups.getPathForCGroup(any(), any())).thenReturn("");
+      when(cgroups.getCGroupParam(any(), any(), any()))
+          .thenReturn("under_oom 1");
+
+      doThrow(new YarnRuntimeException("Expected")).when(handler).run();
+
+      CGroupElasticMemoryController controller =
+          new CGroupElasticMemoryController(
+              conf,
+              null,
+              cgroups,
+              true,
+              false,
+              10000,
+              handler
+          );
+      controller.run();
+    } finally {
+      verify(handler, times(1)).run();
+      assertTrue(String.format("Could not clean up script %s",
+          script.getAbsolutePath()), script.delete());
+    }
+  }
+
+  /**
+   * Test that the node manager can exit listening.
+   * This is done by starting a long-running listener, waiting
+   * two seconds, and then stopping listening.
+   * @throws Exception exception occurred
+   */
+  @Test
+  public void testNormalExit() throws Exception {
+    conf.set(YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_LISTENER_PATH,
+        script.getAbsolutePath());
+    try {
+      FileUtils.writeStringToFile(script,
+          "#!/bin/bash\nsleep 10000;\n",
+          Charset.defaultCharset(), false);
+      assertTrue("Could not set executable",
+          script.setExecutable(true));
+
+      CGroupsHandler cgroups = mock(CGroupsHandler.class);
+      when(cgroups.getPathForCGroup(any(), any())).thenReturn("");
+      when(cgroups.getCGroupParam(any(), any(), any()))
+          .thenReturn("under_oom 0");
+
+      Runnable handler = mock(Runnable.class);
+      doNothing().when(handler).run();
+
+      CGroupElasticMemoryController controller =
+          new CGroupElasticMemoryController(
+              conf,
+              null,
+              cgroups,
+              true,
+              false,
+              10000,
+              handler
+          );
+      ExecutorService service = Executors.newFixedThreadPool(1);
+      service.submit(() -> {
+        try {
+          Thread.sleep(2000);
+        } catch (InterruptedException ex) {
+          assertTrue("Wait interrupted.", false);
+        }
+        controller.stopListening();
+      });
+      controller.run();
+    } finally {
+      assertTrue(String.format("Could not clean up script %s",
+          script.getAbsolutePath()), script.delete());
+    }
+  }
+
+  /**
+   * Test that DefaultOOMHandler is instantiated correctly in
+   * the elastic constructor.
+   * @throws YarnException Could not set up elastic memory control.
+   */
+  @Test
+  public void testDefaultConstructor() throws YarnException {
+    CGroupsHandler handler = mock(CGroupsHandler.class);
+    when(handler.getPathForCGroup(any(), any())).thenReturn("");
+    CGroupElasticMemoryController controller =
+        new CGroupElasticMemoryController(
+            conf, null, handler, true, false, 10);
+  }
+}

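The tests above stand in for the real OOM listener with short bash scripts; the contract they encode is that the listener prints one "oomevent" token on stdout per OOM notification. A hedged sketch of a consumer of that contract (the inline script is a hypothetical stand-in for the listener binary):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.nio.charset.StandardCharsets;

    public class OomListenerSketch {
      public static void main(String[] args) throws Exception {
        Process listener = new ProcessBuilder("/bin/bash", "-c",
            "printf oomevent; printf oomevent").start();
        Runnable oomHandler = () -> System.out.println("OOM handler invoked");
        try (BufferedReader out = new BufferedReader(new InputStreamReader(
            listener.getInputStream(), StandardCharsets.UTF_8))) {
          StringBuilder events = new StringBuilder();
          int ch;
          while ((ch = out.read()) != -1) {
            events.append((char) ch);
          }
          // One handler invocation per complete token, as
          // testMultipleOOMEvents expects.
          int count = events.toString().split("oomevent", -1).length - 1;
          for (int i = 0; i < count; i++) {
            oomHandler.run();
          }
        }
      }
    }
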
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java
index 416b4fd..5c7e233 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java
@@ -65,17 +65,15 @@ public class TestCGroupsMemoryResourceHandlerImpl {
     conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, true);
     try {
       cGroupsMemoryResourceHandler.bootstrap(conf);
-      Assert.fail("Pmem check should not be allowed to run with cgroups");
     } catch(ResourceHandlerException re) {
-      // do nothing
+      Assert.fail("Pmem check should be allowed to run with cgroups");
     }
     conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
     conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, true);
     try {
       cGroupsMemoryResourceHandler.bootstrap(conf);
-      Assert.fail("Vmem check should not be allowed to run with cgroups");
     } catch(ResourceHandlerException re) {
-      // do nothing
+      Assert.fail("Vmem check should be allowed to run with cgroups");
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestDefaultOOMHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestDefaultOOMHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestDefaultOOMHandler.java
new file mode 100644
index 0000000..60c38fe
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestDefaultOOMHandler.java
@@ -0,0 +1,307 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.LinkedHashMap;
+import java.util.concurrent.ConcurrentHashMap;
+
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_FILE_TASKS;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_PARAM_MEMORY_OOM_CONTROL;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_PARAM_MEMORY_USAGE_BYTES;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test default out of memory handler.
+ */
+public class TestDefaultOOMHandler {
+
+  /**
+   * Test an OOM situation where no containers are running.
+   */
+  @Test(expected = YarnRuntimeException.class)
+  public void testNoContainers() throws Exception {
+    Context context = mock(Context.class);
+
+    when(context.getContainers()).thenReturn(new ConcurrentHashMap<>());
+
+    CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class);
+    when(cGroupsHandler.getCGroupParam(
+        CGroupsHandler.CGroupController.MEMORY,
+        "",
+        CGROUP_PARAM_MEMORY_OOM_CONTROL))
+        .thenReturn("under_oom 1").thenReturn("under_oom 0");
+
+    DefaultOOMHandler handler = new DefaultOOMHandler(context, false);
+    handler.setCGroupsHandler(cGroupsHandler);
+
+    handler.run();
+  }
+
+  /**
+   * We have two containers, both out of limit. We should kill the later one.
+   *
+   * @throws Exception exception
+   */
+  @Test
+  public void testBothContainersOOM() throws Exception {
+    ConcurrentHashMap<ContainerId, Container> containers =
+        new ConcurrentHashMap<>(new LinkedHashMap<>());
+
+    Container c1 = mock(Container.class);
+    ContainerId cid1 = createContainerId(1);
+    when(c1.getContainerId()).thenReturn(cid1);
+    when(c1.getResource()).thenReturn(Resource.newInstance(10, 1));
+    when(c1.getContainerStartTime()).thenReturn((long) 1);
+    containers.put(createContainerId(1), c1);
+
+    Container c2 = mock(Container.class);
+    ContainerId cid2 = createContainerId(2);
+    when(c2.getContainerId()).thenReturn(cid2);
+    when(c2.getResource()).thenReturn(Resource.newInstance(10, 1));
+    when(c2.getContainerStartTime()).thenReturn((long) 2);
+    containers.put(cid2, c2);
+
+    CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class);
+    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
+        cid1.toString(), CGROUP_FILE_TASKS))
+        .thenReturn("1234").thenReturn("");
+    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
+        cid1.toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES))
+        .thenReturn(getMB(11));
+    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
+        cid1.toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES))
+        .thenReturn(getMB(11));
+    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
+        cid2.toString(), CGROUP_FILE_TASKS))
+        .thenReturn("1235").thenReturn("");
+    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
+        cid2.toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES))
+        .thenReturn(getMB(11));
+    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
+        cid2.toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES))
+        .thenReturn(getMB(11));
+
+    ContainerExecutor ex = mock(ContainerExecutor.class);
+
+    runOOMHandler(containers, cGroupsHandler, ex);
+
+    verify(ex, times(1)).signalContainer(
+        new ContainerSignalContext.Builder()
+            .setPid("1235")
+            .setContainer(c2)
+            .setSignal(ContainerExecutor.Signal.KILL)
+            .build()
+    );
+    verify(ex, times(1)).signalContainer(any());
+  }
+
+  /**
+   * We have two containers, one out of limit. We should kill that one.
+   * This should happen even if it was started earlier.
+   *
+   * @throws Exception exception
+   */
+  @Test
+  public void testOneContainerOOM() throws Exception {
+    ConcurrentHashMap<ContainerId, Container> containers =
+        new ConcurrentHashMap<>(new LinkedHashMap<>());
+
+    Container c1 = mock(Container.class);
+    ContainerId cid1 = createContainerId(1);
+    when(c1.getContainerId()).thenReturn(cid1);
+    when(c1.getResource()).thenReturn(Resource.newInstance(10, 1));
+    when(c1.getContainerStartTime()).thenReturn((long) 2);
+    containers.put(createContainerId(1), c1);
+
+    Container c2 = mock(Container.class);
+    ContainerId cid2 = createContainerId(2);
+    when(c2.getContainerId()).thenReturn(cid2);
+    when(c2.getResource()).thenReturn(Resource.newInstance(10, 1));
+    when(c2.getContainerStartTime()).thenReturn((long) 1);
+    containers.put(cid2, c2);
+
+    CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class);
+    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
+        cid1.toString(), CGROUP_FILE_TASKS))
+        .thenReturn("1234").thenReturn("");
+    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
+        cid1.toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES))
+        .thenReturn(getMB(9));
+    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
+        cid1.toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES))
+        .thenReturn(getMB(9));
+    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
+        cid2.toString(), CGROUP_FILE_TASKS))
+        .thenReturn("1235").thenReturn("");
+    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
+        cid2.toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES))
+        .thenReturn(getMB(11));
+    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
+        cid2.toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES))
+        .thenReturn(getMB(11));
+
+    ContainerExecutor ex = mock(ContainerExecutor.class);
+    runOOMHandler(containers, cGroupsHandler, ex);
+
+    verify(ex, times(1)).signalContainer(
+        new ContainerSignalContext.Builder()
+            .setPid("1235")
+            .setContainer(c2)
+            .setSignal(ContainerExecutor.Signal.KILL)
+            .build()
+    );
+    verify(ex, times(1)).signalContainer(any());
+  }
+
+  /**
+   * We have two containers, neither out of limit. We should kill the later one.
+   *
+   * @throws Exception exception
+   */
+  @Test
+  public void testNoContainerOOM() throws Exception {
+    ConcurrentHashMap<ContainerId, Container> containers =
+        new ConcurrentHashMap<>(new LinkedHashMap<>());
+
+    Container c1 = mock(Container.class);
+    ContainerId cid1 = createContainerId(1);
+    when(c1.getContainerId()).thenReturn(cid1);
+    when(c1.getResource()).thenReturn(Resource.newInstance(10, 1));
+    when(c1.getContainerStartTime()).thenReturn((long) 1);
+    containers.put(createContainerId(1), c1);
+
+    Container c2 = mock(Container.class);
+    ContainerId cid2 = createContainerId(2);
+    when(c2.getContainerId()).thenReturn(cid2);
+    when(c2.getResource()).thenReturn(Resource.newInstance(10, 1));
+    when(c2.getContainerStartTime()).thenReturn((long) 2);
+    containers.put(cid2, c2);
+
+    CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class);
+    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
+        cid1.toString(), CGROUP_FILE_TASKS))
+        .thenReturn("1234").thenReturn("");
+    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
+        cid1.toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES))
+        .thenReturn(getMB(9));
+    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
+        cid1.toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES))
+        .thenReturn(getMB(9));
+    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
+        cid2.toString(), CGROUP_FILE_TASKS))
+        .thenReturn("1235").thenReturn("");
+    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
+        cid2.toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES))
+        .thenReturn(getMB(9));
+    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
+        cid2.toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES))
+        .thenReturn(getMB(9));
+
+    ContainerExecutor ex = mock(ContainerExecutor.class);
+    runOOMHandler(containers, cGroupsHandler, ex);
+
+    verify(ex, times(1)).signalContainer(
+        new ContainerSignalContext.Builder()
+            .setPid("1235")
+            .setContainer(c2)
+            .setSignal(ContainerExecutor.Signal.KILL)
+            .build()
+    );
+    verify(ex, times(1)).signalContainer(any());
+  }
+
+  private void runOOMHandler(
+      ConcurrentHashMap<ContainerId, Container> containers,
+      CGroupsHandler cGroupsHandler, ContainerExecutor ex)
+      throws IOException, ResourceHandlerException {
+    Context context = mock(Context.class);
+    when(context.getContainers()).thenReturn(containers);
+
+    when(ex.signalContainer(any()))
+        .thenAnswer(invocation -> {
+          assertEquals("Wrong pid killed", "1235",
+              ((ContainerSignalContext) invocation.getArguments()[0]).getPid());
+          return true;
+        });
+
+    when(cGroupsHandler.getCGroupParam(
+        CGroupsHandler.CGroupController.MEMORY,
+        "",
+        CGROUP_PARAM_MEMORY_OOM_CONTROL))
+        .thenReturn("under_oom 1").thenReturn("under_oom 0");
+
+    when(context.getContainerExecutor()).thenReturn(ex);
+
+    DefaultOOMHandler handler = new DefaultOOMHandler(context, false);
+    handler.setCGroupsHandler(cGroupsHandler);
+
+    handler.run();
+  }
+
+  private class AppId extends ApplicationIdPBImpl {
+    AppId(long clusterTs, int appId) {
+      this.setClusterTimestamp(clusterTs);
+      this.setId(appId);
+    }
+  }
+
+  private ContainerId createContainerId(int id) {
+    ApplicationId applicationId = new AppId(1, 1);
+
+    ApplicationAttemptId applicationAttemptId
+        = mock(ApplicationAttemptId.class);
+    when(applicationAttemptId.getApplicationId()).thenReturn(applicationId);
+    when(applicationAttemptId.getAttemptId()).thenReturn(1);
+
+    ContainerId containerId = mock(ContainerId.class);
+    when(containerId.toString()).thenReturn(Integer.toString(id));
+    when(containerId.getContainerId()).thenReturn(1L);
+
+    return containerId;
+  }
+
+  ContainerTokenIdentifier getToken() {
+    ContainerTokenIdentifier id = mock(ContainerTokenIdentifier.class);
+    when(id.getVersion()).thenReturn(1);
+    return id;
+  }
+
+  String getMB(long mb) {
+    return Long.toString(mb * 1024 * 1024);
+  }
+}
\ No newline at end of file

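DefaultOOMHandler itself is not part of this hunk, but the three positive tests pin down its victim selection: among containers over their limit, kill the most recently started; if none is over its limit, kill the most recently started overall. A sketch of that ordering, with a hypothetical container stand-in:

    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;
    import java.util.Optional;

    public class KillPolicySketch {
      // Hypothetical stand-in for a container: a start time plus an
      // over-limit flag derived from cgroup usage versus requested resource.
      static class C {
        final String id;
        final long startTime;
        final boolean overLimit;

        C(String id, long startTime, boolean overLimit) {
          this.id = id;
          this.startTime = startTime;
          this.overLimit = overLimit;
        }
      }

      static C pickVictim(List<C> containers) {
        Comparator<C> byStart = Comparator.comparingLong(c -> c.startTime);
        Optional<C> latestOverLimit =
            containers.stream().filter(c -> c.overLimit).max(byStart);
        // Fall back to the most recently started container when none is over
        // its limit; assumes a non-empty list.
        return latestOverLimit.orElseGet(
            () -> containers.stream().max(byStart).get());
      }

      public static void main(String[] args) {
        // Mirrors testOneContainerOOM: the over-limit container is chosen
        // even though it started earlier.
        List<C> cs = Arrays.asList(new C("c1", 2, false), new C("c2", 1, true));
        System.out.println(pickVictim(cs).id); // prints c2
      }
    }
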
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
index 412b8cd..2882b32 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
@@ -94,6 +94,7 @@ public class TestContainersMonitor extends BaseContainerManagerTest {
         YarnConfiguration.NM_MON_RESOURCE_CALCULATOR,
         LinuxResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
     conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, true);
+    conf.setBoolean(YarnConfiguration.NM_MEMORY_RESOURCE_ENFORCED, false);
     super.setup();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java
index c5fdccd..8aee532 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java
@@ -174,9 +174,10 @@ public class TestContainersMonitorResourceChange {
   }
 
   @Test
-  public void testContainersResourceChange() throws Exception {
+  public void testContainersResourceChangePolling() throws Exception {
     // set container monitor interval to be 20ms
     conf.setLong(YarnConfiguration.NM_CONTAINER_MON_INTERVAL_MS, 20L);
+    conf.setBoolean(YarnConfiguration.NM_MEMORY_RESOURCE_ENFORCED, false);
     containersMonitor = createContainersMonitor(executor, dispatcher, context);
     containersMonitor.init(conf);
     containersMonitor.start();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManagerCGroupsMemory.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManagerCGroupsMemory.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManagerCGroupsMemory.md
new file mode 100644
index 0000000..ec93234
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManagerCGroupsMemory.md
@@ -0,0 +1,133 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+Using Memory Control in YARN
+=======================
+
+YARN has multiple features to enforce container memory limits. There are three types of controls that can be used:
+1. The polling feature periodically measures container memory usage and kills containers that exceed their limits. This is a legacy feature with some issues, notably a measurement delay that may lead to a node shutdown.
+2. Strict memory control kills each container that exceeds its limits, using the OOM killer capability of the Linux kernel's cgroups feature.
+3. Elastic memory control is also based on cgroups. It allows bursting and starts killing containers only if the overall system memory usage reaches a limit.
+
+If you use feature 2 or 3, feature 1 is disabled.
+
+Strict Memory Feature
+---------------------
+
+cgroups can be used to preempt containers in case of out-of-memory events. This feature leverages the kernel's cgroups OOM killer to clean up such containers. If your container exited with exit code `137`, you can verify the cause in `/var/log/messages`.
+
+Elastic Memory Feature
+----------------------
+
+The cgroups kernel feature can notify the node manager if the parent cgroup of all containers, specified by `yarn.nodemanager.linux-container-executor.cgroups.hierarchy`, goes over a memory limit. The YARN feature that uses this ability is called elastic memory control. The benefit is that containers can burst, using more memory than they reserved, as long as the overall memory limit is not exceeded. When the limit is reached, the kernel freezes all the containers and notifies the node manager. The node manager chooses a container and preempts it, repeating this step until the node recovers from the OOM condition.
+
+The Limit for Elastic Memory Control
+---------
+
+The limit is the amount of memory that can be allocated to all the containers on the node. The limit is specified by `yarn.nodemanager.resource.memory-mb` and `yarn.nodemanager.vmem-pmem-ratio`. For example, with `yarn.nodemanager.resource.memory-mb` set to 8192 and `yarn.nodemanager.vmem-pmem-ratio` set to 2.1, the physical memory limit is 8192 MB and the virtual memory limit is 8192 * 2.1 MB. If these are not set, the limit is set based on the available resources. See `yarn.nodemanager.resource.detect-hardware-capabilities` for details.
+
+The pluggable preemption logic
+------------------------------
+
+The preemption logic specifies which container to preempt in a node-wide out-of-memory situation. The default logic is the `DefaultOOMHandler`. It picks the latest container that exceeded its memory limit. In the unlikely case that no such container is found, it preempts the container that was launched most recently, and it continues until the OOM condition is resolved. This logic supports bursting: containers may use more memory than they reserved as long as memory is available, which helps to improve overall cluster utilization. The logic ensures that a container is never preempted while it stays within its limit; once it bursts, it may be preempted. It can also happen that all containers are within their limits but the node is still out of memory, for example in case of oversubscription. We prefer preempting the most recently launched containers to minimize the cost and value lost, since the data in a preempted container is lost.
+
+The default out-of-memory handler can be replaced using `yarn.nodemanager.elastic-memory-control.oom-handler`. The class named in this configuration entry has to implement `java.lang.Runnable`. The `run()` function is called in a node-level out-of-memory situation. The constructor should accept an `NmContext` object.
+
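+A minimal, hypothetical sketch of such a handler is shown below. The class name and the logging are illustrative only, and it assumes the `Context` interface (`org.apache.hadoop.yarn.server.nodemanager.Context`) that the built-in `DefaultOOMHandler` also receives:
+
+```java
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+
+// Hypothetical handler: invoked when the node-level cgroup reports an
+// out-of-memory condition. A real handler would select and signal
+// containers until the node recovers; this one only logs.
+public class LoggingOOMHandler implements Runnable {
+  private final Context context;
+
+  public LoggingOOMHandler(Context context) {
+    this.context = context;
+  }
+
+  @Override
+  public void run() {
+    System.err.println("Node under OOM with "
+        + context.getContainers().size() + " running containers");
+  }
+}
+```
+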
+Physical and virtual memory control
+----------------------------------
+
+In case of Elastic Memory Control, the limit applies to the physical or virtual (rss+swap in cgroups) memory depending on whether `yarn.nodemanager.pmem-check-enabled` or `yarn.nodemanager.vmem-check-enabled` is set.
+
+There is no reason to set both. If the system runs with swap disabled, both report the same number. If swap is enabled, the virtual memory counter accounts for pages in physical memory and on disk; this is what the application allocated and has control over, so the limit should be applied to the virtual memory in this case. With swapping enabled, physical memory usage never exceeds virtual memory usage, and it is adjusted by the kernel, not just by the container, so there is no point preempting a container just because it exceeds a physical memory limit. The system will simply swap out some memory when needed.
+
+Virtual memory measurement and swapping
+--------------------------------------------
+
+There is a difference between the virtual memory reported by the container monitor and the virtual memory limit specified in the elastic memory control feature. The container monitor uses `ProcfsBasedProcessTree` by default for measurements, which returns values from the `proc` file system. The virtual memory returned is the size of the address space of all the processes in each container. This includes anonymous pages, pages swapped out to disk, mapped files and reserved pages, among others. Reserved pages are not backed by either physical or swapped memory. They can be a large part of the virtual memory usage. The reservable address space was limited on 32-bit processors but it is very large on 64-bit ones, making this metric less useful. Some Java Virtual Machines reserve large amounts of pages that they never actually use, which can show up as gigabytes of virtual memory usage. However, this does not mean that anything is wrong with the container.
+
+Because of this, you can now use `CGroupsResourceCalculator`. It reports only the sum of physical memory usage and swapped pages as virtual memory usage, excluding the reserved address space. This reflects much better what the application and the container actually allocated.
+
+In order to enable cgroups-based resource calculation, set `yarn.nodemanager.resource-calculator.class` to `org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsResourceCalculator`.
+
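+A hypothetical programmatic sketch of this setting is shown below; in a real deployment the property is set in `yarn-site.xml`:
+
+```java
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+// Sketch only: switch container measurements to cgroups statistics.
+public class EnableCGroupsCalculator {
+  public static void main(String[] args) {
+    YarnConfiguration conf = new YarnConfiguration();
+    conf.set("yarn.nodemanager.resource-calculator.class",
+        "org.apache.hadoop.yarn.server.nodemanager.containermanager"
+            + ".linux.resources.CGroupsResourceCalculator");
+    System.out.println(conf.get("yarn.nodemanager.resource-calculator.class"));
+  }
+}
+```
+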
+Configuration quickstart
+------------------------
+
+The following levels of memory enforcement are available and supported:
+
+Level | Configuration type | Options
+---|---|---
+0 | No memory control | All settings below are false
+1 | Strict Container Memory enforcement through polling | P or V
+2 | Strict Container Memory enforcement through cgroups | CG, C and (P or V)
+3 | Elastic Memory Control through cgroups | CG, E and (P or V)
+
+The symbols above mean that the respective configuration entries are `true`:
+
+P: `yarn.nodemanager.pmem-check-enabled`
+
+V: `yarn.nodemanager.vmem-check-enabled`
+
+C: `yarn.nodemanager.resource.memory.enforced`
+
+E: `yarn.nodemanager.elastic-memory-control.enabled`
+
+cgroups prerequisites
+---------------------
+
+CG: both C and E require the following prerequisites (a combined sketch follows the list):
+1. `yarn.nodemanager.container-executor.class` should be `org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor`.
+2. `yarn.nodemanager.runtime.linux.allowed-runtimes` should at least be `default`.
+3. `yarn.nodemanager.resource.memory.enabled` should be `true`.
+
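+A hypothetical programmatic sketch of these prerequisites (in a real deployment they live in `yarn-site.xml`):
+
+```java
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+// Sketch of the cgroups prerequisites (CG) from the list above.
+public class CGroupsPrerequisites {
+  public static void main(String[] args) {
+    YarnConfiguration conf = new YarnConfiguration();
+    conf.set("yarn.nodemanager.container-executor.class",
+        "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor");
+    conf.set("yarn.nodemanager.runtime.linux.allowed-runtimes", "default");
+    conf.setBoolean("yarn.nodemanager.resource.memory.enabled", true);
+  }
+}
+```
+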
+Configuring no memory control
+-----------------------------
+
+`yarn.nodemanager.pmem-check-enabled` and `yarn.nodemanager.vmem-check-enabled` should be `false`.
+
+`yarn.nodemanager.resource.memory.enforced` should be `false`.
+
+`yarn.nodemanager.elastic-memory-control.enabled` should be `false`.
+
+Configuring strict container memory enforcement with polling without cgroups
+----------------------------------------------------------------
+
+`yarn.nodemanager.pmem-check-enabled` or `yarn.nodemanager.vmem-check-enabled` should be `true`.
+
+`yarn.nodemanager.resource.memory.enforced` should be `false`.
+
+`yarn.nodemanager.elastic-memory-control.enabled` should be `false`.
+
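+A hypothetical sketch of this level, enabling the physical memory check (the same properties go into `yarn-site.xml` in practice):
+
+```java
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+// Level 1: strict container memory enforcement through polling.
+public class PollingEnforcementConfig {
+  public static void main(String[] args) {
+    YarnConfiguration conf = new YarnConfiguration();
+    conf.setBoolean("yarn.nodemanager.pmem-check-enabled", true);
+    conf.setBoolean("yarn.nodemanager.resource.memory.enforced", false);
+    conf.setBoolean("yarn.nodemanager.elastic-memory-control.enabled", false);
+  }
+}
+```
+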
+Configuring strict container memory enforcement with cgroups
+------------------------------------------------------------
+
+Strict memory control preempts containers right away, using the OOM killer feature of the kernel, when they reach their physical or virtual memory limits. To use strict memory control, set the following options on top of the prerequisites above.
+
+Configure the cgroups prerequisites mentioned above.
+
+`yarn.nodemanager.pmem-check-enabled` or `yarn.nodemanager.vmem-check-enabled` should be `true`. You can set them both. **Currently this is ignored by the code, only physical limits can be selected.**
+
+`yarn.nodemanager.resource.memory.enforced` should be `true`.
+
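+A hypothetical sketch of this level, on top of the cgroups prerequisites (again, real deployments use `yarn-site.xml`):
+
+```java
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+// Level 2: strict container memory enforcement through cgroups.
+public class StrictCGroupsMemoryConfig {
+  public static void main(String[] args) {
+    YarnConfiguration conf = new YarnConfiguration();
+    conf.setBoolean("yarn.nodemanager.pmem-check-enabled", true);
+    conf.setBoolean("yarn.nodemanager.resource.memory.enforced", true);
+    conf.setBoolean("yarn.nodemanager.elastic-memory-control.enabled", false);
+  }
+}
+```
+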
+Configuring elastic memory resource control
+------------------------------------------
+
+The cgroups-based elastic memory control allows bursting and preempts containers only if the overall system memory usage reaches its limit. This feature requires setting the following options on top of the prerequisites.
+
+Configure the cgroups prerequisites mentioned above.
+
+`yarn.nodemanager.elastic-memory-control.enabled` should be `true`.
+
+`yarn.nodemanager.resource.memory.enforced` should be `false`.
+
+`yarn.nodemanager.pmem-check-enabled` or `yarn.nodemanager.vmem-check-enabled` should be `true`. If swapping is turned off, set the former; otherwise, set the latter.
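+
+A hypothetical sketch of this level, on top of the cgroups prerequisites, assuming swap is disabled (hence the physical memory check):
+
+```java
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+// Level 3: elastic memory control through cgroups.
+public class ElasticMemoryControlConfig {
+  public static void main(String[] args) {
+    YarnConfiguration conf = new YarnConfiguration();
+    conf.setBoolean("yarn.nodemanager.elastic-memory-control.enabled", true);
+    conf.setBoolean("yarn.nodemanager.resource.memory.enforced", false);
+    conf.setBoolean("yarn.nodemanager.pmem-check-enabled", true);
+  }
+}
+```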


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[17/50] [abbrv] hadoop git commit: HDFS-13611. Unsafe use of Text as a ConcurrentHashMap key in PBHelperClient.

Posted by bo...@apache.org.
HDFS-13611. Unsafe use of Text as a ConcurrentHashMap key in PBHelperClient.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9b63deb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9b63deb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9b63deb

Branch: refs/heads/YARN-7402
Commit: c9b63deb533274ca8ef4939f6cd13f728a067f7b
Parents: 1388de1
Author: Andrew Wang <wa...@apache.org>
Authored: Thu May 24 09:56:23 2018 -0700
Committer: Andrew Wang <wa...@apache.org>
Committed: Thu May 24 09:56:23 2018 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java     | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9b63deb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 579ac43..490ccb4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -247,7 +247,7 @@ public class PBHelperClient {
     ByteString value = fixedByteStringCache.get(key);
     if (value == null) {
       value = ByteString.copyFromUtf8(key.toString());
-      fixedByteStringCache.put(key, value);
+      fixedByteStringCache.put(new Text(key.copyBytes()), value);
     }
     return value;
   }
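
The fix makes a defensive copy of the key because Text is mutable: reusing
an instance as a hash-map key can strand entries under a stale hash bucket.
A minimal, hypothetical illustration (class name invented):

    import java.util.concurrent.ConcurrentHashMap;
    import org.apache.hadoop.io.Text;

    // Why a mutable Text makes an unsafe map key.
    public class MutableKeyDemo {
      public static void main(String[] args) {
        ConcurrentHashMap<Text, String> cache = new ConcurrentHashMap<>();
        Text key = new Text("alice");
        cache.put(key, "cached");
        key.set("bob");  // mutates the key object stored in the map
        System.out.println(cache.get(new Text("alice")));  // null: stranded
      }
    }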


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[23/50] [abbrv] hadoop git commit: YARN-8292: Fix the dominant resource preemption cannot happen when some of the resource vector becomes negative. Contributed by Wangda Tan.

Posted by bo...@apache.org.
YARN-8292: Fix the dominant resource preemption cannot happen when some of the resource vector becomes negative. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d5509c6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d5509c6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d5509c6

Branch: refs/heads/YARN-7402
Commit: 8d5509c68156faaa6641f4e747fc9ff80adccf88
Parents: bddfe79
Author: Eric E Payne <er...@oath.com>
Authored: Fri May 25 16:06:09 2018 +0000
Committer: Eric E Payne <er...@oath.com>
Committed: Fri May 25 16:06:09 2018 +0000

----------------------------------------------------------------------
 .../resource/DefaultResourceCalculator.java     |  15 ++-
 .../resource/DominantResourceCalculator.java    |  39 ++++---
 .../yarn/util/resource/ResourceCalculator.java  |  13 ++-
 .../hadoop/yarn/util/resource/Resources.java    |   5 -
 .../AbstractPreemptableResourceCalculator.java  |  58 ++++++++---
 .../CapacitySchedulerPreemptionUtils.java       |  61 +++++++++--
 .../capacity/FifoCandidatesSelector.java        |   8 +-
 .../FifoIntraQueuePreemptionPlugin.java         |   4 +-
 .../capacity/IntraQueueCandidatesSelector.java  |   2 +-
 .../capacity/PreemptableResourceCalculator.java |   6 +-
 .../monitor/capacity/TempQueuePerPartition.java |   8 +-
 ...alCapacityPreemptionPolicyMockFramework.java |  30 ++++++
 .../TestPreemptionForQueueWithPriorities.java   | 103 ++++++++++++-------
 ...pacityPreemptionPolicyInterQueueWithDRF.java |  60 ++++++++++-
 14 files changed, 312 insertions(+), 100 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
index 6375c4a..ab6d7f5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
@@ -136,13 +136,18 @@ public class DefaultResourceCalculator extends ResourceCalculator {
   }
 
   @Override
-  public boolean isAnyMajorResourceZero(Resource resource) {
-    return resource.getMemorySize() == 0f;
-  }
-
-  @Override
   public Resource normalizeDown(Resource r, Resource stepFactor) {
     return Resources.createResource(
         roundDown((r.getMemorySize()), stepFactor.getMemorySize()));
   }
+
+  @Override
+  public boolean isAnyMajorResourceZeroOrNegative(Resource resource) {
+    return resource.getMemorySize() <= 0;
+  }
+
+  @Override
+  public boolean isAnyMajorResourceAboveZero(Resource resource) {
+    return resource.getMemorySize() > 0;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 6fed23b..2e85ebc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -577,19 +577,6 @@ public class DominantResourceCalculator extends ResourceCalculator {
   }
 
   @Override
-  public boolean isAnyMajorResourceZero(Resource resource) {
-    int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
-    for (int i = 0; i < maxLength; i++) {
-      ResourceInformation resourceInformation = resource
-          .getResourceInformation(i);
-      if (resourceInformation.getValue() == 0L) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  @Override
   public Resource normalizeDown(Resource r, Resource stepFactor) {
     Resource ret = Resource.newInstance(r);
     int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
@@ -613,4 +600,30 @@ public class DominantResourceCalculator extends ResourceCalculator {
     }
     return ret;
   }
+
+  @Override
+  public boolean isAnyMajorResourceZeroOrNegative(Resource resource) {
+    int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+    for (int i = 0; i < maxLength; i++) {
+      ResourceInformation resourceInformation = resource.getResourceInformation(
+          i);
+      if (resourceInformation.getValue() <= 0L) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  @Override
+  public boolean isAnyMajorResourceAboveZero(Resource resource) {
+    int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+    for (int i = 0; i < maxLength; i++) {
+      ResourceInformation resourceInformation = resource.getResourceInformation(
+          i);
+      if (resourceInformation.getValue() > 0) {
+        return true;
+      }
+    }
+    return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
index 1c42126..51078cd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
@@ -239,12 +239,12 @@ public abstract class ResourceCalculator {
 
   /**
    * Check if resource has any major resource types (which are all NodeManagers
-   * included) a zero value.
+   * included) with a zero or negative value.
    *
    * @param resource resource
    * @return returns true if any resource is zero.
    */
-  public abstract boolean isAnyMajorResourceZero(Resource resource);
+  public abstract boolean isAnyMajorResourceZeroOrNegative(Resource resource);
 
   /**
    * Get resource <code>r</code>and normalize down using step-factor
@@ -257,4 +257,13 @@ public abstract class ResourceCalculator {
    * @return resulting normalized resource
    */
   public abstract Resource normalizeDown(Resource r, Resource stepFactor);
+
+  /**
+   * Check if any major resource type of the resource (major resource types
+   * are those tracked by all NodeManagers) has a value above zero.
+   *
+   * @param resource resource
+   * @return true if any major resource type is above zero
+   */
+  public abstract boolean isAnyMajorResourceAboveZero(Resource resource);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index 1c08844..7826f51 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -547,11 +547,6 @@ public class Resources {
     return ret;
   }
 
-  public static boolean isAnyMajorResourceZero(ResourceCalculator rc,
-      Resource resource) {
-    return rc.isAnyMajorResourceZero(resource);
-  }
-
   public static Resource normalizeDown(ResourceCalculator calculator,
       Resource resource, Resource factor) {
     return calculator.normalizeDown(resource, factor);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java
index 2589970..64b3615 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java
@@ -18,12 +18,6 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Comparator;
-import java.util.Iterator;
-import java.util.PriorityQueue;
-
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.policy.PriorityUtilizationQueueOrderingPolicy;
@@ -32,6 +26,12 @@ import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.PriorityQueue;
+
 /**
  * Calculate how much resources need to be preempted for each queue,
  * will be used by {@link PreemptionCandidatesSelector}.
@@ -40,7 +40,8 @@ public class AbstractPreemptableResourceCalculator {
 
   protected final CapacitySchedulerPreemptionContext context;
   protected final ResourceCalculator rc;
-  private boolean isReservedPreemptionCandidatesSelector;
+  protected boolean isReservedPreemptionCandidatesSelector;
+  private Resource stepFactor;
 
   static class TQComparator implements Comparator<TempQueuePerPartition> {
     private ResourceCalculator rc;
@@ -90,6 +91,11 @@ public class AbstractPreemptableResourceCalculator {
     rc = preemptionContext.getResourceCalculator();
     this.isReservedPreemptionCandidatesSelector =
         isReservedPreemptionCandidatesSelector;
+
+    stepFactor = Resource.newInstance(0, 0);
+    for (ResourceInformation ri : stepFactor.getResources()) {
+      ri.setValue(1);
+    }
   }
 
   /**
@@ -122,23 +128,24 @@ public class AbstractPreemptableResourceCalculator {
     TQComparator tqComparator = new TQComparator(rc, totGuarant);
     PriorityQueue<TempQueuePerPartition> orderedByNeed = new PriorityQueue<>(10,
         tqComparator);
-    for (Iterator<TempQueuePerPartition> i = qAlloc.iterator(); i.hasNext();) {
+    for (Iterator<TempQueuePerPartition> i = qAlloc.iterator(); i.hasNext(); ) {
       TempQueuePerPartition q = i.next();
       Resource used = q.getUsed();
 
       Resource initIdealAssigned;
       if (Resources.greaterThan(rc, totGuarant, used, q.getGuaranteed())) {
-        initIdealAssigned =
-            Resources.add(q.getGuaranteed(), q.untouchableExtra);
-      } else {
+        initIdealAssigned = Resources.add(
+            Resources.componentwiseMin(q.getGuaranteed(), q.getUsed()),
+            q.untouchableExtra);
+      } else {
         initIdealAssigned = Resources.clone(used);
       }
 
       // perform initial assignment
       initIdealAssignment(totGuarant, q, initIdealAssigned);
 
-
       Resources.subtractFrom(unassigned, q.idealAssigned);
+
       // If idealAssigned < (allocated + used + pending), q needs more
       // resources, so
       // add it to the list of underserved queues, ordered by need.
@@ -152,7 +159,6 @@ public class AbstractPreemptableResourceCalculator {
     // left
     while (!orderedByNeed.isEmpty() && Resources.greaterThan(rc, totGuarant,
         unassigned, Resources.none())) {
-      Resource wQassigned = Resource.newInstance(0, 0);
       // we compute normalizedGuarantees capacity based on currently active
       // queues
       resetCapacity(unassigned, orderedByNeed, ignoreGuarantee);
@@ -166,11 +172,26 @@ public class AbstractPreemptableResourceCalculator {
       Collection<TempQueuePerPartition> underserved = getMostUnderservedQueues(
           orderedByNeed, tqComparator);
 
+      // This value will be used in every round to calculate ideal allocation.
+      // So make a copy to avoid it changing during the calculation.
+      Resource dupUnassignedForTheRound = Resources.clone(unassigned);
+
       for (Iterator<TempQueuePerPartition> i = underserved.iterator(); i
           .hasNext();) {
+        if (!rc.isAnyMajorResourceAboveZero(unassigned)) {
+          break;
+        }
+
         TempQueuePerPartition sub = i.next();
-        Resource wQavail = Resources.multiplyAndNormalizeUp(rc, unassigned,
-            sub.normalizedGuarantee, Resource.newInstance(1, 1));
+
+        // How much resource we offer to the queue (to increase its ideal_alloc).
+        Resource wQavail = Resources.multiplyAndNormalizeUp(rc,
+            dupUnassignedForTheRound,
+            sub.normalizedGuarantee, this.stepFactor);
+
+        // Make sure it is not beyond unassigned
+        wQavail = Resources.componentwiseMin(wQavail, unassigned);
+
         Resource wQidle = sub.offer(wQavail, rc, totGuarant,
             isReservedPreemptionCandidatesSelector);
         Resource wQdone = Resources.subtract(wQavail, wQidle);
@@ -180,9 +201,12 @@ public class AbstractPreemptableResourceCalculator {
           // queue, recalculating its order based on need.
           orderedByNeed.add(sub);
         }
-        Resources.addTo(wQassigned, wQdone);
+
+        Resources.subtractFrom(unassigned, wQdone);
+
+        // Make sure unassigned is always larger than 0
+        unassigned = Resources.componentwiseMax(unassigned, Resources.none());
       }
-      Resources.subtractFrom(unassigned, wQassigned);
     }
 
     // Sometimes its possible that, all queues are properly served. So intra

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
index f097e9c..5396d61 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
@@ -132,6 +133,16 @@ public class CapacitySchedulerPreemptionUtils {
    *          map to hold preempted containers
    * @param totalPreemptionAllowed
    *          total preemption allowed per round
+   * @param conservativeDRF
+   *          whether to do conservative DRF preemption.
+   *          When true:
+   *            stop preempting containers once any major resource type of the
+   *            to-preempt resource reaches <= 0.
+   *            This is the default behavior of intra-queue preemption.
+   *          When false:
+   *            stop preempting containers once all major resource types of the
+   *            to-preempt resource reach <= 0.
+   *            This is the default behavior of inter-queue preemption.
    * @return should we preempt rmContainer. If we should, deduct from
    *         <code>resourceToObtainByPartition</code>
    */
@@ -140,7 +151,7 @@ public class CapacitySchedulerPreemptionUtils {
       Map<String, Resource> resourceToObtainByPartitions,
       RMContainer rmContainer, Resource clusterResource,
       Map<ApplicationAttemptId, Set<RMContainer>> preemptMap,
-      Resource totalPreemptionAllowed) {
+      Resource totalPreemptionAllowed, boolean conservativeDRF) {
     ApplicationAttemptId attemptId = rmContainer.getApplicationAttemptId();
 
     // We will not account resource of a container twice or more
@@ -152,13 +163,49 @@ public class CapacitySchedulerPreemptionUtils {
         rmContainer.getAllocatedNode());
     Resource toObtainByPartition = resourceToObtainByPartitions
         .get(nodePartition);
+    if (null == toObtainByPartition) {
+      return false;
+    }
+
+    // If a toObtain resource type == 0, set it to -1 to avoid 0 resource
+    // type affect following doPreemption check: isAnyMajorResourceZero
+    for (ResourceInformation ri : toObtainByPartition.getResources()) {
+      if (ri.getValue() == 0) {
+        ri.setValue(-1);
+      }
+    }
+
+    if (rc.isAnyMajorResourceAboveZero(toObtainByPartition) && Resources.fitsIn(
+        rc, rmContainer.getAllocatedResource(), totalPreemptionAllowed)) {
+      boolean doPreempt;
+
+      // How much resource left after preemption happen.
+      Resource toObtainAfterPreemption = Resources.subtract(toObtainByPartition,
+          rmContainer.getAllocatedResource());
+
+      if (conservativeDRF) {
+        doPreempt = !rc.isAnyMajorResourceZeroOrNegative(toObtainByPartition);
+      } else {
+        // When we want to do more aggressive preemption, we will do preemption
+        // only if:
+        // - The preempt of the container makes positive contribution to the
+        //   to-obtain resource. Positive contribution means any positive
+        //   resource type decreases.
+        //
+        //   This is an example of a positive contribution:
+        //     * before: <30, 10, 5>, after <20, 10, -10>
+        //   But this is not a positive contribution:
+        //     * before: <30, 10, 0>, after <30, 10, -15>
+        doPreempt = Resources.lessThan(rc, clusterResource,
+            Resources
+                .componentwiseMax(toObtainAfterPreemption, Resources.none()),
+            Resources.componentwiseMax(toObtainByPartition, Resources.none()));
+      }
+
+      if (!doPreempt) {
+        return false;
+      }
 
-    if (null != toObtainByPartition
-        && Resources.greaterThan(rc, clusterResource, toObtainByPartition,
-            Resources.none())
-        && Resources.fitsIn(rc, rmContainer.getAllocatedResource(),
-            totalPreemptionAllowed)
-        && !Resources.isAnyMajorResourceZero(rc, toObtainByPartition)) {
       Resources.subtractFrom(toObtainByPartition,
           rmContainer.getAllocatedResource());
       Resources.subtractFrom(totalPreemptionAllowed,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
index 748548a..3b2fcbb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
@@ -111,7 +111,7 @@ public class FifoCandidatesSelector
                   .tryPreemptContainerAndDeductResToObtain(rc,
                       preemptionContext, resToObtainByPartition, c,
                       clusterResource, selectedCandidates,
-                      totalPreemptionAllowed);
+                      totalPreemptionAllowed, false);
               if (!preempted) {
                 continue;
               }
@@ -187,7 +187,7 @@ public class FifoCandidatesSelector
       boolean preempted = CapacitySchedulerPreemptionUtils
           .tryPreemptContainerAndDeductResToObtain(rc, preemptionContext,
               resToObtainByPartition, c, clusterResource, preemptMap,
-              totalPreemptionAllowed);
+              totalPreemptionAllowed, false);
       if (preempted) {
         Resources.subtractFrom(skippedAMSize, c.getAllocatedResource());
       }
@@ -221,7 +221,7 @@ public class FifoCandidatesSelector
       // Try to preempt this container
       CapacitySchedulerPreemptionUtils.tryPreemptContainerAndDeductResToObtain(
           rc, preemptionContext, resToObtainByPartition, c, clusterResource,
-          selectedContainers, totalPreemptionAllowed);
+          selectedContainers, totalPreemptionAllowed, false);
 
       if (!preemptionContext.isObserveOnly()) {
         preemptionContext.getRMContext().getDispatcher().getEventHandler()
@@ -264,7 +264,7 @@ public class FifoCandidatesSelector
       // Try to preempt this container
       CapacitySchedulerPreemptionUtils.tryPreemptContainerAndDeductResToObtain(
           rc, preemptionContext, resToObtainByPartition, c, clusterResource,
-          selectedContainers, totalPreemptionAllowed);
+          selectedContainers, totalPreemptionAllowed, false);
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
index 1776bd4..40f333f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
@@ -278,8 +278,8 @@ public class FifoIntraQueuePreemptionPlugin
 
       // Once unallocated resource is 0, we can stop assigning ideal per app.
       if (Resources.lessThanOrEqual(rc, clusterResource,
-          queueReassignableResource, Resources.none())
-          || Resources.isAnyMajorResourceZero(rc, queueReassignableResource)) {
+          queueReassignableResource, Resources.none()) || rc
+          .isAnyMajorResourceZeroOrNegative(queueReassignableResource)) {
         continue;
       }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
index 5b6932e..a91fac7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
@@ -230,7 +230,7 @@ public class IntraQueueCandidatesSelector extends PreemptionCandidatesSelector {
       boolean ret = CapacitySchedulerPreemptionUtils
           .tryPreemptContainerAndDeductResToObtain(rc, preemptionContext,
               resToObtainByPartition, c, clusterResource, selectedCandidates,
-              totalPreemptedResourceAllowed);
+              totalPreemptedResourceAllowed, true);
 
       // Subtract from respective user's resource usage once a container is
       // selected for preemption.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
index 676c14f..08d834e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
@@ -41,8 +41,6 @@ public class PreemptableResourceCalculator
   private static final Log LOG =
       LogFactory.getLog(PreemptableResourceCalculator.class);
 
-  private boolean isReservedPreemptionCandidatesSelector;
-
   /**
    * PreemptableResourceCalculator constructor
    *
@@ -95,8 +93,8 @@ public class PreemptableResourceCalculator
     }
 
     // first compute the allocation as a fixpoint based on guaranteed capacity
-    computeFixpointAllocation(tot_guarant, nonZeroGuarQueues, unassigned,
-        false);
+    computeFixpointAllocation(tot_guarant, new HashSet<>(nonZeroGuarQueues),
+        unassigned, false);
 
     // if any capacity is left unassigned, distributed among zero-guarantee
     // queues uniformly (i.e., not based on guaranteed capacity, as this is zero)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
index 9d8297d..4214acc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
@@ -151,7 +151,7 @@ public class TempQueuePerPartition extends AbstractPreemptionEntity {
     //               # This is for leaf queue only.
     //               max(guaranteed, used) - assigned}
     // remain = avail - accepted
-    Resource accepted = Resources.min(rc, clusterResource,
+    Resource accepted = Resources.componentwiseMin(
         absMaxCapIdealAssignedDelta,
         Resources.min(rc, clusterResource, avail, Resources
             /*
@@ -186,6 +186,12 @@ public class TempQueuePerPartition extends AbstractPreemptionEntity {
 
     accepted = acceptedByLocality(rc, accepted);
 
+    // accepted should never be < 0
+    accepted = Resources.componentwiseMax(accepted, Resources.none());
+
+    // or more than offered
+    accepted = Resources.componentwiseMin(accepted, avail);
+
     Resource remain = Resources.subtract(avail, accepted);
     Resources.addTo(idealAssigned, accepted);
     return remain;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
index a8e2697..a972584 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
@@ -59,6 +60,7 @@ import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
+import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.mockito.ArgumentMatcher;
@@ -104,10 +106,32 @@ public class ProportionalCapacityPreemptionPolicyMockFramework {
   EventHandler<Event> mDisp = null;
   ProportionalCapacityPreemptionPolicy policy = null;
   Resource clusterResource = null;
+  // Initialize resource map
+  Map<String, ResourceInformation> riMap = new HashMap<>();
+
+  private void resetResourceInformationMap() {
+    // Initialize mandatory resources
+    ResourceInformation memory = ResourceInformation.newInstance(
+        ResourceInformation.MEMORY_MB.getName(),
+        ResourceInformation.MEMORY_MB.getUnits(),
+        YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
+        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
+    ResourceInformation vcores = ResourceInformation.newInstance(
+        ResourceInformation.VCORES.getName(),
+        ResourceInformation.VCORES.getUnits(),
+        YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
+        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
+    riMap.put(ResourceInformation.MEMORY_URI, memory);
+    riMap.put(ResourceInformation.VCORES_URI, vcores);
+
+    ResourceUtils.initializeResourcesFromResourceInformationMap(riMap);
+  }
 
   @SuppressWarnings("unchecked")
   @Before
   public void setup() {
+    resetResourceInformationMap();
+
     org.apache.log4j.Logger.getRootLogger().setLevel(
         org.apache.log4j.Level.DEBUG);
 
@@ -142,6 +166,12 @@ public class ProportionalCapacityPreemptionPolicyMockFramework {
     partitionToResource = new HashMap<>();
     nodeIdToSchedulerNodes = new HashMap<>();
     nameToCSQueues = new HashMap<>();
+    clusterResource = Resource.newInstance(0, 0);
+  }
+
+  @After
+  public void cleanup() {
+    resetResourceInformationMap();
   }
 
   public void buildEnv(String labelsConfig, String nodesConfig,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java
index e9a8116..6a953cf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java
@@ -20,44 +20,25 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 
 import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
+import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.junit.Before;
 import org.junit.Test;
 
 import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
 
 import static org.mockito.Matchers.argThat;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 public class TestPreemptionForQueueWithPriorities
     extends ProportionalCapacityPreemptionPolicyMockFramework {
-  // Initialize resource map
-  private Map<String, ResourceInformation> riMap = new HashMap<>();
-
   @Before
   public void setup() {
-
-    // Initialize mandatory resources
-    ResourceInformation memory = ResourceInformation.newInstance(
-        ResourceInformation.MEMORY_MB.getName(),
-        ResourceInformation.MEMORY_MB.getUnits(),
-        YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
-        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
-    ResourceInformation vcores = ResourceInformation.newInstance(
-        ResourceInformation.VCORES.getName(),
-        ResourceInformation.VCORES.getUnits(),
-        YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
-        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
-    riMap.put(ResourceInformation.MEMORY_URI, memory);
-    riMap.put(ResourceInformation.VCORES_URI, vcores);
-
-    ResourceUtils.initializeResourcesFromResourceInformationMap(riMap);
-
+    rc = new DefaultResourceCalculator();
     super.setup();
     policy = new ProportionalCapacityPreemptionPolicy(rmContext, cs, mClock);
   }
@@ -340,8 +321,8 @@ public class TestPreemptionForQueueWithPriorities
      *   - a2 (capacity=60), p=1
      * - b (capacity=30), p=1
      *   - b1 (capacity=50), p=1
-     *   - b1 (capacity=50), p=2
-     * - c (capacity=40), p=2
+     *   - b2 (capacity=50), p=2
+     * - c (capacity=40), p=1
      * </pre>
      */
     String labelsConfig = "=100,true"; // default partition
@@ -349,11 +330,11 @@ public class TestPreemptionForQueueWithPriorities
     String queuesConfig =
         // guaranteed,max,used,pending
         "root(=[100 100 100 100]);" + //root
-            "-a(=[30 100 40 50]){priority=1};" + // a
+            "-a(=[29 100 40 50]){priority=1};" + // a
             "--a1(=[12 100 20 50]){priority=1};" + // a1
-            "--a2(=[18 100 20 50]){priority=1};" + // a2
-            "-b(=[30 100 59 50]){priority=1};" + // b
-            "--b1(=[15 100 30 50]){priority=1};" + // b1
+            "--a2(=[17 100 20 50]){priority=1};" + // a2
+            "-b(=[31 100 59 50]){priority=1};" + // b
+            "--b1(=[16 100 30 50]){priority=1};" + // b1
             "--b2(=[15 100 29 50]){priority=2};" + // b2
             "-c(=[40 100 1 30]){priority=1}";   // c
     String appsConfig =
@@ -362,7 +343,7 @@ public class TestPreemptionForQueueWithPriorities
             "a2\t(1,1,n1,,20,false);" + // app2 in a2
             "b1\t(1,1,n1,,30,false);" + // app3 in b1
             "b2\t(1,1,n1,,29,false);" + // app4 in b2
-            "c\t(1,1,n1,,29,false)"; // app5 in c
+            "c\t(1,1,n1,,1,false)"; // app5 in c
 
 
     buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
@@ -370,16 +351,16 @@ public class TestPreemptionForQueueWithPriorities
 
     // Preemption should first divide capacities between a / b, and b2 should
     // get less preemption than b1 (because b2 has higher priority)
-    verify(mDisp, times(5)).handle(argThat(
+    verify(mDisp, times(6)).handle(argThat(
         new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
             getAppAttemptId(1))));
-    verify(mDisp, never()).handle(argThat(
+    verify(mDisp, times(1)).handle(argThat(
         new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
             getAppAttemptId(2))));
-    verify(mDisp, times(15)).handle(argThat(
+    verify(mDisp, times(13)).handle(argThat(
         new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
             getAppAttemptId(3))));
-    verify(mDisp, times(9)).handle(argThat(
+    verify(mDisp, times(10)).handle(argThat(
         new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
             getAppAttemptId(4))));
   }
@@ -426,7 +407,7 @@ public class TestPreemptionForQueueWithPriorities
 
     // Preemption should first divide capacities between a / b, and b1 should
     // get less preemption than b2 (because b1 has higher priority)
-    verify(mDisp, never()).handle(argThat(
+    verify(mDisp, times(3)).handle(argThat(
         new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
             getAppAttemptId(1))));
     verify(mDisp, never()).handle(argThat(
@@ -505,4 +486,56 @@ public class TestPreemptionForQueueWithPriorities
             getAppAttemptId(3))));
   }
 
+  @Test
+  public void test3ResourceTypesInterQueuePreemption() throws IOException {
+    rc = new DominantResourceCalculator();
+    when(cs.getResourceCalculator()).thenReturn(rc);
+
+    // Initialize resource map
+    String RESOURCE_1 = "res1";
+    riMap.put(RESOURCE_1, ResourceInformation.newInstance(RESOURCE_1, "", 0,
+        ResourceTypes.COUNTABLE, 0, Integer.MAX_VALUE));
+
+    ResourceUtils.initializeResourcesFromResourceInformationMap(riMap);
+
+    /**
+     * Queue structure is:
+     *
+     * <pre>
+     *              root
+     *           /  \  \
+     *          a    b  c
+     * </pre>
+     *  A / B / C have 33.3 / 33.3 / 33.4 resources
+     *  Total cluster resource has mem=30, cpu=18, GPU=6
+     *  A uses mem=6, cpu=3, GPU=3
+     *  B uses mem=6, cpu=3, GPU=3
+     *  C is asking for mem=1, cpu=1, GPU=1
+     *
+     *  We expect it to preempt from one of the jobs
+     */
+    String labelsConfig =
+        "=30:18:6,true;";
+    String nodesConfig =
+        "n1= res=30:18:6;"; // n1 is default partition
+    String queuesConfig =
+        // guaranteed,max,used,pending
+        "root(=[30:18:6 30:18:6 12:12:6 1:1:1]){priority=1};" + //root
+            "-a(=[10:6:2 10:6:2 6:6:3 0:0:0]){priority=1};" + // a
+            "-b(=[10:6:2 10:6:2 6:6:3 0:0:0]){priority=1};" + // b
+            "-c(=[10:6:2 10:6:2 0:0:0 1:1:1]){priority=2}"; // c
+    String appsConfig=
+        //queueName\t(priority,resource,host,expression,#repeat,reserved)
+        "a\t" // app1 in a1
+            + "(1,2:2:1,n1,,3,false);" +
+            "b\t" // app2 in b2
+            + "(1,2:2:1,n1,,3,false)";
+
+    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
+    policy.editSchedule();
+
+    verify(mDisp, times(1)).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(1))));
+  }
 }
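
For readers new to this preemption test harness, the config strings decode as
the in-line comments indicate: each queuesConfig entry has the shape

    queueName(=[guaranteed max used pending]){priority=N}

with multi-resource vectors written mem:cpu:res1, and each appsConfig entry is

    queueName\t(priority,resource,host,expression,#repeat,reserved)

So "-c(=[10:6:2 10:6:2 0:0:0 1:1:1]){priority=2}" above declares queue c with
guaranteed and maximum capacity mem=10/cpu=6/res1=2, nothing used, and one
unit of each resource pending.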

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
index c8a1f0f..14a3a9a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java
@@ -18,11 +18,16 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.junit.Before;
 import org.junit.Test;
 
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import java.io.IOException;
+
 import static org.mockito.Matchers.argThat;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.times;
@@ -41,8 +46,7 @@ public class TestProportionalCapacityPreemptionPolicyInterQueueWithDRF
   }
 
   @Test
-  public void testInterQueuePreemptionWithMultipleResource()
-      throws Exception {
+  public void testInterQueuePreemptionWithMultipleResource() throws Exception {
     /**
      * Queue structure is:
      *
@@ -121,4 +125,52 @@ public class TestProportionalCapacityPreemptionPolicyInterQueueWithDRF
         new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
             getAppAttemptId(1))));
   }
-}
+
+  @Test
+  public void test3ResourceTypesInterQueuePreemption() throws IOException {
+    // Initialize resource map
+    String RESOURCE_1 = "res1";
+    riMap.put(RESOURCE_1, ResourceInformation
+        .newInstance(RESOURCE_1, "", 0, ResourceTypes.COUNTABLE, 0,
+            Integer.MAX_VALUE));
+
+    ResourceUtils.initializeResourcesFromResourceInformationMap(riMap);
+
+    /*
+     *              root
+     *           /  \  \
+     *          a    b  c
+     *
+     *  A / B / C have 33.3 / 33.3 / 33.4 resources
+     *  Total cluster resource has mem=30, cpu=18, GPU=6
+     *  A uses mem=6, cpu=3, GPU=3
+     *  B uses mem=6, cpu=3, GPU=3
+     *  C is asking for mem=1, cpu=1, GPU=1
+     *
+     *  We expect it to preempt from one of the jobs
+     */
+    String labelsConfig = "=30:18:6,true;";
+    String nodesConfig = "n1= res=30:18:6;"; // n1 is default partition
+    String queuesConfig =
+        // guaranteed,max,used,pending
+        "root(=[30:18:6 30:18:6 12:12:6 1:1:1]);" + //root
+            "-a(=[10:7:2 10:6:3 6:6:3 0:0:0]);" + // a
+            "-b(=[10:6:2 10:6:3 6:6:3 0:0:0]);" + // b
+            "-c(=[10:5:2 10:6:2 0:0:0 1:1:1])"; // c
+    String appsConfig =
+        //queueName\t(priority,resource,host,expression,#repeat,reserved)
+        "a\t" // app1 in a1
+            + "(1,2:2:1,n1,,3,false);" + "b\t" // app2 in b2
+            + "(1,2:2:1,n1,,3,false)";
+
+    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
+    policy.editSchedule();
+
+    verify(mDisp, times(0)).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(1))));
+    verify(mDisp, times(1)).handle(argThat(
+        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+            getAppAttemptId(2))));
+  }
+}
\ No newline at end of file


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[49/50] [abbrv] hadoop git commit: YARN-7707. [GPG] Policy generator framework. Contributed by Young Chen

Posted by bo...@apache.org.
YARN-7707. [GPG] Policy generator framework. Contributed by Young Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5da8ca6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5da8ca6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5da8ca6

Branch: refs/heads/YARN-7402
Commit: f5da8ca6f04b7db40fccfd00cc4ff8ca1b2da74b
Parents: 46a4a94
Author: Botong Huang <bo...@apache.org>
Authored: Fri Mar 23 17:07:10 2018 -0700
Committer: Botong Huang <bo...@apache.org>
Committed: Tue May 29 10:48:40 2018 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     |  36 +-
 .../src/main/resources/yarn-default.xml         |  40 +++
 .../utils/FederationStateStoreFacade.java       |  13 +
 .../pom.xml                                     |  18 +
 .../globalpolicygenerator/GPGContext.java       |   4 +
 .../globalpolicygenerator/GPGContextImpl.java   |  10 +
 .../globalpolicygenerator/GPGPolicyFacade.java  | 220 ++++++++++++
 .../server/globalpolicygenerator/GPGUtils.java  |  80 +++++
 .../GlobalPolicyGenerator.java                  |  17 +
 .../policygenerator/GlobalPolicy.java           |  76 +++++
 .../policygenerator/NoOpGlobalPolicy.java       |  36 ++
 .../policygenerator/PolicyGenerator.java        | 261 ++++++++++++++
 .../UniformWeightedLocalityGlobalPolicy.java    |  71 ++++
 .../policygenerator/package-info.java           |  24 ++
 .../TestGPGPolicyFacade.java                    | 202 +++++++++++
 .../policygenerator/TestPolicyGenerator.java    | 338 +++++++++++++++++++
 .../src/test/resources/schedulerInfo1.json      | 134 ++++++++
 .../src/test/resources/schedulerInfo2.json      | 196 +++++++++++
 18 files changed, 1775 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5da8ca6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 7c78e0d..b224818 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3326,7 +3326,7 @@ public class YarnConfiguration extends Configuration {
   public static final boolean DEFAULT_ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED =
       false;
 
-  private static final String FEDERATION_GPG_PREFIX =
+  public static final String FEDERATION_GPG_PREFIX =
       FEDERATION_PREFIX + "gpg.";
 
   // The number of threads to use for the GPG scheduled executor service
@@ -3344,6 +3344,40 @@ public class YarnConfiguration extends Configuration {
       FEDERATION_GPG_PREFIX + "subcluster.heartbeat.expiration-ms";
   public static final long DEFAULT_GPG_SUBCLUSTER_EXPIRATION_MS = 1800000;
 
+  public static final String FEDERATION_GPG_POLICY_PREFIX =
+      FEDERATION_GPG_PREFIX + "policy.generator.";
+
+  /** The policy generator interval; -1 (the code default) disables it. */
+  public static final String GPG_POLICY_GENERATOR_INTERVAL_MS =
+      FEDERATION_GPG_POLICY_PREFIX + "interval-ms";
+  public static final long DEFAULT_GPG_POLICY_GENERATOR_INTERVAL_MS = -1;
+
+  /**
+   * The configured policy generator class; runs NoOpGlobalPolicy by
+   * default.
+   */
+  public static final String GPG_GLOBAL_POLICY_CLASS =
+      FEDERATION_GPG_POLICY_PREFIX + "class";
+  public static final String DEFAULT_GPG_GLOBAL_POLICY_CLASS =
+      "org.apache.hadoop.yarn.server.globalpolicygenerator.policygenerator."
+          + "NoOpGlobalPolicy";
+
+  /**
+   * Whether the policy generator runs in read-only mode (it will not
+   * modify policies); default is false.
+   */
+  public static final String GPG_POLICY_GENERATOR_READONLY =
+      FEDERATION_GPG_POLICY_PREFIX + "readonly";
+  public static final boolean DEFAULT_GPG_POLICY_GENERATOR_READONLY =
+      false;
+
+  /**
+   * Which sub-clusters the policy generator should blacklist.
+   */
+  public static final String GPG_POLICY_GENERATOR_BLACKLIST =
+      FEDERATION_GPG_POLICY_PREFIX + "blacklist";
+
+
   ////////////////////////////////
   // Other Configs
   ////////////////////////////////

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5da8ca6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 8a450d3..2a33019 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3548,6 +3548,38 @@
 
   <property>
     <description>
+      The interval at which the policy generator runs; default is one hour.
+    </description>
+    <name>yarn.federation.gpg.policy.generator.interval-ms</name>
+    <value>3600000</value>
+  </property>
+
+  <property>
+    <description>
+      The configured policy generator class; runs NoOpGlobalPolicy by default.
+    </description>
+    <name>yarn.federation.gpg.policy.generator.class</name>
+    <value>org.apache.hadoop.yarn.server.globalpolicygenerator.policygenerator.NoOpGlobalPolicy</value>
+  </property>
+
+  <property>
+    <description>
+      Whether the policy generator runs in read-only mode (it will not modify policies); default is false.
+    </description>
+    <name>yarn.federation.gpg.policy.generator.readonly</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>
+      Which sub-clusters the GPG should blacklist; default is none.
+    </description>
+    <name>yarn.federation.gpg.policy.generator.blacklist</name>
+    <value></value>
+  </property>
+
+  <property>
+    <description>
        It is TimelineClient 1.5 configuration whether to store active
        application’s timeline data with in user directory i.e
        ${yarn.timeline-service.entity-group-fs-store.active-dir}/${user.name}
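
For operators, note that the code-level default interval is -1 (disabled), so
turning the generator on requires a positive interval in yarn-site.xml. A
minimal sketch; the interval value and the choice of policy class are
illustrative, while both property names come from the patch above:

  <property>
    <name>yarn.federation.gpg.policy.generator.interval-ms</name>
    <value>3600000</value>
  </property>
  <property>
    <name>yarn.federation.gpg.policy.generator.class</name>
    <value>org.apache.hadoop.yarn.server.globalpolicygenerator.policygenerator.UniformWeightedLocalityGlobalPolicy</value>
  </property>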

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5da8ca6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
index ef77114..e8ab8b8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolic
 import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationResponse;
 import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoRequest;
 import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationRequest;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
@@ -373,6 +374,18 @@ public final class FederationStateStoreFacade {
   }
 
   /**
+   * Set a policy configuration into the state store.
+   *
+   * @param policyConf the policy configuration to set
+   * @throws YarnException if the request is invalid/fails
+   */
+  public void setPolicyConfiguration(SubClusterPolicyConfiguration policyConf)
+      throws YarnException {
+    stateStore.setPolicyConfiguration(
+        SetSubClusterPolicyConfigurationRequest.newInstance(policyConf));
+  }
+
+  /**
    * Adds the home {@link SubClusterId} for the specified {@link ApplicationId}.
    *
   * @param appHomeSubCluster the mapping of the application to its home
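
A caller sketch for the new setter (hypothetical; assumes a
FederationPolicyManager named manager built elsewhere):

  FederationStateStoreFacade facade = FederationStateStoreFacade.getInstance();
  // serializeConf() throws FederationPolicyInitializationException
  SubClusterPolicyConfiguration policyConf = manager.serializeConf();
  // throws YarnException if the request is invalid or fails
  facade.setPolicyConfiguration(policyConf);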

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5da8ca6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
index 9bbb936..9398b0b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
@@ -63,6 +63,12 @@
 
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-timelineservice</artifactId>
+      <scope>provided</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
     </dependency>
 
@@ -73,6 +79,12 @@
     </dependency>
 
     <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-yarn-server-common</artifactId>
       <type>test-jar</type>
@@ -92,6 +104,12 @@
       <plugin>
         <groupId>org.apache.rat</groupId>
         <artifactId>apache-rat-plugin</artifactId>
+        <configuration>
+          <excludes>
+            <exclude>src/test/resources/schedulerInfo1.json</exclude>
+            <exclude>src/test/resources/schedulerInfo2.json</exclude>
+          </excludes>
+        </configuration>
       </plugin>
     </plugins>
   </build>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5da8ca6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContext.java
index da8a383..6b0a5a4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContext.java
@@ -28,4 +28,8 @@ public interface GPGContext {
   FederationStateStoreFacade getStateStoreFacade();
 
   void setStateStoreFacade(FederationStateStoreFacade facade);
+
+  GPGPolicyFacade getPolicyFacade();
+
+  void setPolicyFacade(GPGPolicyFacade facade);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5da8ca6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContextImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContextImpl.java
index 3884ace..bb49844 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContextImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContextImpl.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade
 public class GPGContextImpl implements GPGContext {
 
   private FederationStateStoreFacade facade;
+  private GPGPolicyFacade policyFacade;
 
   @Override
   public FederationStateStoreFacade getStateStoreFacade() {
@@ -38,4 +39,13 @@ public class GPGContextImpl implements GPGContext {
     this.facade = federationStateStoreFacade;
   }
 
+  @Override
+  public GPGPolicyFacade getPolicyFacade() {
+    return policyFacade;
+  }
+
+  @Override
+  public void setPolicyFacade(GPGPolicyFacade gpgPolicyFacade) {
+    policyFacade = gpgPolicyFacade;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5da8ca6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGPolicyFacade.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGPolicyFacade.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGPolicyFacade.java
new file mode 100644
index 0000000..4c61a14
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGPolicyFacade.java
@@ -0,0 +1,220 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyUtils;
+import org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo;
+import org.apache.hadoop.yarn.server.federation.policies.manager.WeightedLocalityPolicyManager;
+import org.apache.hadoop.yarn.server.federation.policies.router.FederationRouterPolicy;
+import org.apache.hadoop.yarn.server.federation.policies.amrmproxy.FederationAMRMProxyPolicy;
+import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+import org.apache.hadoop.yarn.server.federation.policies.manager.FederationPolicyManager;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration;
+import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A utility class for the GPG Policy Generator to read and write policies
+ * into the FederationStateStore. Policy specific logic is abstracted away in
+ * this class, so the PolicyGenerator can avoid dealing with policy
+ * construction, reinitialization, and serialization.
+ *
+ * There are only two exposed methods:
+ *
+ * {@link #getPolicyManager(String)}
+ * Gets the PolicyManager via queue name. Null if there is no policy
+ * configured for the specified queue. The PolicyManager can be used to
+ * extract the {@link FederationRouterPolicy} and
+ * {@link FederationAMRMProxyPolicy}, as well as any policy specific parameters
+ *
+ * {@link #setPolicyManager(FederationPolicyManager)}
+ * Sets the PolicyManager. If the policy configuration is the same, no change
+ * occurs. Otherwise, the internal cache is updated and the new configuration
+ * is written into the FederationStateStore
+ *
+ * This class assumes that the GPG is the only service
+ * writing policies. Thus, the only FederationStateStore reads occur the first
+ * time a queue policy is retrieved - after that, the GPG only writes to the
+ * FederationStateStore.
+ *
+ * The class uses a PolicyManager cache and a SubClusterPolicyConfiguration
+ * cache. The primary use for these caches is to serve reads and to
+ * identify when the PolicyGenerator has actually changed the policy
+ * so unnecessary FederationStateStore policy writes can be avoided.
+ */
+
+public class GPGPolicyFacade {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(GPGPolicyFacade.class);
+
+  private FederationStateStoreFacade stateStore;
+
+  private Map<String, FederationPolicyManager> policyManagerMap;
+  private Map<String, SubClusterPolicyConfiguration> policyConfMap;
+
+  private boolean readOnly;
+
+  public GPGPolicyFacade(FederationStateStoreFacade stateStore,
+      Configuration conf) {
+    this.stateStore = stateStore;
+    this.policyManagerMap = new HashMap<>();
+    this.policyConfMap = new HashMap<>();
+    this.readOnly =
+        conf.getBoolean(YarnConfiguration.GPG_POLICY_GENERATOR_READONLY,
+            YarnConfiguration.DEFAULT_GPG_POLICY_GENERATOR_READONLY);
+  }
+
+  /**
+   * Provides a utility for the policy generator to read the policy manager
+   * from the FederationStateStore. Because the policy generator should be the
+   * only component updating the policy, this implementation does not use the
+   * reinitialization feature.
+   *
+   * @param queueName the name of the queue we want the policy manager for.
+   * @return the policy manager responsible for the queue policy.
+   */
+  public FederationPolicyManager getPolicyManager(String queueName)
+      throws YarnException {
+    FederationPolicyManager policyManager = policyManagerMap.get(queueName);
+    // If we don't have the policy manager cached, pull configuration
+    // from the FederationStateStore to create and cache it
+    if (policyManager == null) {
+      try {
+        // If we don't have the configuration cached, pull it
+        // from the stateStore
+        SubClusterPolicyConfiguration conf = policyConfMap.get(queueName);
+        if (conf == null) {
+          conf = stateStore.getPolicyConfiguration(queueName);
+        }
+        // If configuration is still null, it does not exist in the
+        // FederationStateStore
+        if (conf == null) {
+          LOG.info("Read null policy for queue {}", queueName);
+          return null;
+        }
+        policyManager =
+            FederationPolicyUtils.instantiatePolicyManager(conf.getType());
+        policyManager.setQueue(queueName);
+
+        // TODO there is currently no way to cleanly deserialize a policy
+        // manager sub type from just the configuration
+        if (policyManager instanceof WeightedLocalityPolicyManager) {
+          WeightedPolicyInfo wpinfo =
+              WeightedPolicyInfo.fromByteBuffer(conf.getParams());
+          WeightedLocalityPolicyManager wlpmanager =
+              (WeightedLocalityPolicyManager) policyManager;
+          LOG.info("Updating policy for queue {} to configured weights router: "
+                  + "{}, amrmproxy: {}", queueName,
+              wpinfo.getRouterPolicyWeights(),
+              wpinfo.getAMRMPolicyWeights());
+          wlpmanager.setWeightedPolicyInfo(wpinfo);
+        } else {
+          LOG.warn("Warning: FederationPolicyManager of unsupported type {}, "
+              + "initialization may be incomplete ", policyManager.getClass());
+        }
+
+        policyManagerMap.put(queueName, policyManager);
+        policyConfMap.put(queueName, conf);
+      } catch (YarnException e) {
+        LOG.error("Error reading SubClusterPolicyConfiguration from state "
+            + "store for queue: {}", queueName);
+        throw e;
+      }
+    }
+    return policyManager;
+  }
+
+  /**
+   * Provides a utility for the policy generator to write a policy manager
+   * into the FederationStateStore. The facade keeps a cache and will only write
+   * into the FederationStateStore if the policy configuration has changed.
+   *
+   * @param policyManager The policy manager we want to update into the state
+   *                      store. It contains policy information as well as
+   *                      the queue name we will update for.
+   */
+  public void setPolicyManager(FederationPolicyManager policyManager)
+      throws YarnException {
+    if (policyManager == null) {
+      LOG.warn("Attempting to set null policy manager");
+      return;
+    }
+    // Extract the configuration from the policy manager
+    String queue = policyManager.getQueue();
+    SubClusterPolicyConfiguration conf;
+    try {
+      conf = policyManager.serializeConf();
+    } catch (FederationPolicyInitializationException e) {
+      LOG.warn("Error serializing policy for queue {}", queue);
+      throw e;
+    }
+    if (conf == null) {
+      // State store does not currently support setting a policy back to null
+      // because it reads the queue name to set from the policy!
+      LOG.warn("Skip setting policy to null for queue {} into state store",
+          queue);
+      return;
+    }
+    // Compare with configuration cache, if different, write the conf into
+    // store and update our conf and manager cache
+    if (!confCacheEqual(queue, conf)) {
+      try {
+        if (readOnly) {
+          LOG.info("[read-only] Skipping policy update for queue {}", queue);
+          return;
+        }
+        LOG.info("Updating policy for queue {} into state store", queue);
+        stateStore.setPolicyConfiguration(conf);
+        policyConfMap.put(queue, conf);
+        policyManagerMap.put(queue, policyManager);
+      } catch (YarnException e) {
+        LOG.warn("Error writing SubClusterPolicyConfiguration to state "
+            + "store for queue: {}", queue);
+        throw e;
+      }
+    } else {
+      LOG.info("Setting unchanged policy - state store write skipped");
+    }
+  }
+
+  /**
+   * @param queue the queue to check the cached policy configuration for
+   * @param conf the new policy configuration
+   * @return whether or not the conf is equal to the cached conf
+   */
+  private boolean confCacheEqual(String queue,
+      SubClusterPolicyConfiguration conf) {
+    SubClusterPolicyConfiguration cachedConf = policyConfMap.get(queue);
+    if (conf == null && cachedConf == null) {
+      return true;
+    } else if (conf != null && cachedConf != null) {
+      if (conf.equals(cachedConf)) {
+        return true;
+      }
+    }
+    return false;
+  }
+}
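
A read-modify-write sketch against the facade (hypothetical caller; assumes a
Configuration named conf is in scope, and "root.queueA" is a placeholder):

  GPGPolicyFacade policies = new GPGPolicyFacade(
      FederationStateStoreFacade.getInstance(), conf);
  // both calls below throw YarnException on state store errors
  FederationPolicyManager manager = policies.getPolicyManager("root.queueA");
  if (manager != null) {
    // ... mutate the manager's policy info here ...
    // the facade skips the store write if the serialized conf is unchanged
    policies.setPolicyManager(manager);
  }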

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5da8ca6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
new file mode 100644
index 0000000..429bec4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.MediaType;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+
+import com.sun.jersey.api.client.Client;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo;
+
+/**
+ * GPGUtils contains utility functions for the GPG.
+ *
+ */
+public final class GPGUtils {
+
+  // hide constructor
+  private GPGUtils() {
+  }
+
+  /**
+   * Performs an invocation of the remote RMWebService.
+   */
+  public static <T> T invokeRMWebService(Configuration conf, String webAddr,
+      String path, final Class<T> returnType) {
+    Client client = Client.create();
+    T obj = null;
+
+    WebResource webResource = client.resource(webAddr);
+    ClientResponse response = webResource.path("ws/v1/cluster").path(path)
+        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+    if (response.getStatus() == HttpServletResponse.SC_OK) {
+      obj = response.getEntity(returnType);
+    } else {
+      throw new YarnRuntimeException("Bad response from remote web service: "
+          + response.getStatus());
+    }
+    return obj;
+  }
+
+  /**
+   * Creates a uniform weighting of 1.0 for each sub-cluster.
+   */
+  public static Map<SubClusterIdInfo, Float> createUniformWeights(
+      Set<SubClusterId> ids) {
+    Map<SubClusterIdInfo, Float> weights =
+        new HashMap<>();
+    for(SubClusterId id : ids) {
+      weights.put(new SubClusterIdInfo(id), 1.0f);
+    }
+    return weights;
+  }
+
+}
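
A usage sketch for the helper (hypothetical; "http://rm-host:8088" is a
placeholder address, and RMWSConsts.SCHEDULER is the same path the
PolicyGenerator below actually queries):

  Configuration conf = new Configuration();
  SchedulerTypeInfo info = GPGUtils.invokeRMWebService(conf,
      "http://rm-host:8088", RMWSConsts.SCHEDULER, SchedulerTypeInfo.class);
  // a non-200 response raises YarnRuntimeException rather than returning null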

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5da8ca6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
index f6cfba0..88b9f2b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+import org.apache.hadoop.yarn.server.globalpolicygenerator.policygenerator.PolicyGenerator;
 import org.apache.hadoop.yarn.server.globalpolicygenerator.subclustercleaner.SubClusterCleaner;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -62,6 +63,7 @@ public class GlobalPolicyGenerator extends CompositeService {
   // Scheduler service that runs tasks periodically
   private ScheduledThreadPoolExecutor scheduledExecutorService;
   private SubClusterCleaner subClusterCleaner;
+  private PolicyGenerator policyGenerator;
 
   public GlobalPolicyGenerator() {
     super(GlobalPolicyGenerator.class.getName());
@@ -73,11 +75,15 @@ public class GlobalPolicyGenerator extends CompositeService {
     // Set up the context
     this.gpgContext
         .setStateStoreFacade(FederationStateStoreFacade.getInstance());
+    this.gpgContext
+        .setPolicyFacade(new GPGPolicyFacade(
+            this.gpgContext.getStateStoreFacade(), conf));
 
     this.scheduledExecutorService = new ScheduledThreadPoolExecutor(
         conf.getInt(YarnConfiguration.GPG_SCHEDULED_EXECUTOR_THREADS,
             YarnConfiguration.DEFAULT_GPG_SCHEDULED_EXECUTOR_THREADS));
     this.subClusterCleaner = new SubClusterCleaner(conf, this.gpgContext);
+    this.policyGenerator = new PolicyGenerator(conf, this.gpgContext);
 
     DefaultMetricsSystem.initialize(METRICS_NAME);
 
@@ -99,6 +105,17 @@ public class GlobalPolicyGenerator extends CompositeService {
       LOG.info("Scheduled sub-cluster cleaner with interval: {}",
           DurationFormatUtils.formatDurationISO(scCleanerIntervalMs));
     }
+
+    // Schedule PolicyGenerator
+    long policyGeneratorIntervalMillis = getConfig().getLong(
+        YarnConfiguration.GPG_POLICY_GENERATOR_INTERVAL_MS,
+        YarnConfiguration.DEFAULT_GPG_POLICY_GENERATOR_INTERVAL_MS);
+    if (policyGeneratorIntervalMillis > 0) {
+      this.scheduledExecutorService.scheduleAtFixedRate(this.policyGenerator,
+          0, policyGeneratorIntervalMillis, TimeUnit.MILLISECONDS);
+      LOG.info("Scheduled PolicyGenerator with interval: {}",
+          DurationFormatUtils.formatDurationISO(policyGeneratorIntervalMillis));
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5da8ca6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/GlobalPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/GlobalPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/GlobalPolicy.java
new file mode 100644
index 0000000..38d762d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/GlobalPolicy.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.globalpolicygenerator.policygenerator;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.federation.policies.manager.FederationPolicyManager;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+
+import java.util.Collections;
+import java.util.Map;
+
+/**
+ * This interface defines the plug-able policy that the PolicyGenerator uses
+ * to update policies into the state store.
+ */
+
+public abstract class GlobalPolicy implements Configurable {
+
+  private Configuration conf;
+
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
+  /**
+   * Return a map from object type to the RM path to request it from; the
+   * framework will query these paths and provide the objects to the policy.
+   * Delegating this responsibility to the PolicyGenerator enables us to avoid
+   * duplicate calls to the same endpoints, as the GlobalPolicy is invoked
+   * once per queue.
+   */
+  protected Map<Class, String> registerPaths() {
+    // Default register nothing
+    return Collections.emptyMap();
+  }
+
+  /**
+   * Given a queue, cluster metrics, and policy manager, update the policy
+   * to account for the cluster status. This method defines the policy generator
+   * behavior.
+   *
+   * @param queueName   name of the queue
+   * @param clusterInfo map from SubClusterId to the cluster information
+   *                    used to make policy decisions
+   * @param manager     the FederationPolicyManager for the queue's existing
+   *                    policy; may be null, in which case the policy
+   *                    will need to be created
+   * @return policy manager that handles the updated (or created) policy
+   */
+  protected abstract FederationPolicyManager updatePolicy(String queueName,
+      Map<SubClusterId, Map<Class, Object>> clusterInfo,
+      FederationPolicyManager manager);
+
+}
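
A minimal subclass sketch (hypothetical plug-in, not part of this patch): a
policy that asks the framework to fetch ClusterMetricsInfo for every
sub-cluster via registerPaths(), then decides in updatePolicy():

  import java.util.HashMap;
  import java.util.Map;
  import org.apache.hadoop.yarn.server.federation.policies.manager.FederationPolicyManager;
  import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
  import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts;
  import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;

  public class LoadAwareGlobalPolicy extends GlobalPolicy {
    @Override
    protected Map<Class, String> registerPaths() {
      // the PolicyGenerator will GET this path on each RM and hand the
      // parsed object back through the clusterInfo map
      Map<Class, String> paths = new HashMap<>();
      paths.put(ClusterMetricsInfo.class, RMWSConsts.METRICS);
      return paths;
    }

    @Override
    protected FederationPolicyManager updatePolicy(String queueName,
        Map<SubClusterId, Map<Class, Object>> clusterInfo,
        FederationPolicyManager manager) {
      // inspect clusterInfo.get(scId).get(ClusterMetricsInfo.class) here;
      // returning the input unchanged makes this a no-op, like
      // NoOpGlobalPolicy above
      return manager;
    }
  }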

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5da8ca6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/NoOpGlobalPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/NoOpGlobalPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/NoOpGlobalPolicy.java
new file mode 100644
index 0000000..c2d578f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/NoOpGlobalPolicy.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.globalpolicygenerator.policygenerator;
+
+import org.apache.hadoop.yarn.server.federation.policies.manager.FederationPolicyManager;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+
+import java.util.Map;
+
+/**
+ * Default policy that does not update any policy configurations.
+ */
+public class NoOpGlobalPolicy extends GlobalPolicy {
+
+  @Override
+  public FederationPolicyManager updatePolicy(String queueName,
+      Map<SubClusterId, Map<Class, Object>> clusterInfo,
+      FederationPolicyManager manager) {
+    return null;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5da8ca6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/PolicyGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/PolicyGenerator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/PolicyGenerator.java
new file mode 100644
index 0000000..5681ff0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/PolicyGenerator.java
@@ -0,0 +1,261 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator.policygenerator;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.federation.policies.manager.FederationPolicyManager;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
+import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+import org.apache.hadoop.yarn.server.globalpolicygenerator.GPGContext;
+import org.apache.hadoop.yarn.server.globalpolicygenerator.GPGUtils;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerQueueInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * The PolicyGenerator runs periodically and updates the policy configuration
+ * for each queue into the FederationStateStore. The policy update behavior is
+ * defined by the GlobalPolicy instance that is used.
+ */
+
+public class PolicyGenerator implements Runnable, Configurable {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(PolicyGenerator.class);
+
+  private GPGContext gpgContext;
+  private Configuration conf;
+
+  // Information request map
+  private Map<Class, String> pathMap = new HashMap<>();
+
+  // Global policy instance
+  @VisibleForTesting
+  protected GlobalPolicy policy;
+
+  /**
+   * The PolicyGenerator periodically reads SubCluster load and updates
+   * policies into the FederationStateStore.
+   */
+  public PolicyGenerator(Configuration conf, GPGContext context) {
+    setConf(conf);
+    init(context);
+  }
+
+  private void init(GPGContext context) {
+    this.gpgContext = context;
+    LOG.info("Initialized PolicyGenerator");
+  }
+
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+    this.policy = FederationStateStoreFacade
+        .createInstance(conf, YarnConfiguration.GPG_GLOBAL_POLICY_CLASS,
+            YarnConfiguration.DEFAULT_GPG_GLOBAL_POLICY_CLASS,
+            GlobalPolicy.class);
+    policy.setConf(conf);
+    pathMap.putAll(policy.registerPaths());
+  }
+
+  @Override
+  public Configuration getConf() {
+    return this.conf;
+  }
+
+  @Override
+  public final void run() {
+    Map<SubClusterId, SubClusterInfo> activeSubClusters;
+    try {
+      activeSubClusters = gpgContext.getStateStoreFacade().getSubClusters(true);
+    } catch (YarnException e) {
+      LOG.error("Error retrieving active sub-clusters", e);
+      return;
+    }
+
+    // Parse the scheduler information from all the SCs
+    Map<SubClusterId, SchedulerInfo> schedInfo =
+        getSchedulerInfo(activeSubClusters);
+
+    // Extract the union of queue names (capacity scheduler only)
+    Set<String> queueNames = extractQueues(schedInfo);
+
+    // Remove blacklisted SubClusters
+    activeSubClusters.keySet().removeAll(getBlackList());
+    LOG.info("Active non-blacklist sub-clusters: {}",
+        activeSubClusters.keySet());
+
+    // Get cluster metrics information from non-blacklisted RMs; used later
+    // to evaluate SubCluster load
+    Map<SubClusterId, Map<Class, Object>> clusterInfo =
+        getInfos(activeSubClusters);
+
+    // Update into the FederationStateStore
+    for (String queueName : queueNames) {
+      // Retrieve the manager from the policy facade
+      FederationPolicyManager manager;
+      try {
+        manager = this.gpgContext.getPolicyFacade().getPolicyManager(queueName);
+      } catch (YarnException e) {
+        LOG.error("GetPolicy for queue {} failed", queueName, e);
+        continue;
+      }
+      LOG.info("Updating policy for queue {}", queueName);
+      manager = policy.updatePolicy(queueName, clusterInfo, manager);
+      try {
+        this.gpgContext.getPolicyFacade().setPolicyManager(manager);
+      } catch (YarnException e) {
+        LOG.error("SetPolicy for queue {} failed", queueName, e);
+      }
+    }
+  }
+
+  /**
+   * Helper to retrieve metrics from the RM REST endpoints.
+   *
+   * @param activeSubClusters A map of active SubCluster IDs to info
+   */
+  @VisibleForTesting
+  protected Map<SubClusterId, Map<Class, Object>> getInfos(
+      Map<SubClusterId, SubClusterInfo> activeSubClusters) {
+
+    Map<SubClusterId, Map<Class, Object>> clusterInfo = new HashMap<>();
+    for (SubClusterInfo sci : activeSubClusters.values()) {
+      for (Map.Entry<Class, String> e : this.pathMap.entrySet()) {
+        if (!clusterInfo.containsKey(sci.getSubClusterId())) {
+          clusterInfo.put(sci.getSubClusterId(), new HashMap<Class, Object>());
+        }
+        Object ret = GPGUtils
+            .invokeRMWebService(conf, sci.getRMWebServiceAddress(),
+                e.getValue(), e.getKey());
+        clusterInfo.get(sci.getSubClusterId()).put(e.getKey(), ret);
+      }
+    }
+
+    return clusterInfo;
+  }
+
+  /**
+   * Helper to retrieve SchedulerInfos.
+   *
+   * @param activeSubClusters A map of active SubCluster IDs to info
+   */
+  @VisibleForTesting
+  protected Map<SubClusterId, SchedulerInfo> getSchedulerInfo(
+      Map<SubClusterId, SubClusterInfo> activeSubClusters) {
+    Map<SubClusterId, SchedulerInfo> schedInfo =
+        new HashMap<>();
+    for (SubClusterInfo sci : activeSubClusters.values()) {
+      SchedulerTypeInfo sti = GPGUtils
+          .invokeRMWebService(conf, sci.getRMWebServiceAddress(),
+              RMWSConsts.SCHEDULER, SchedulerTypeInfo.class);
+      if (sti != null) {
+        schedInfo.put(sci.getSubClusterId(), sti.getSchedulerInfo());
+      } else {
+        LOG.warn("Skipped null scheduler info from SubCluster " + sci
+            .getSubClusterId().toString());
+      }
+    }
+    return schedInfo;
+  }
+
+  /**
+   * Helper to get a set of blacklisted SubCluster Ids from configuration.
+   */
+  private Set<SubClusterId> getBlackList() {
+    String blackListParam =
+        conf.get(YarnConfiguration.GPG_POLICY_GENERATOR_BLACKLIST);
+    if (blackListParam == null) {
+      return Collections.emptySet();
+    }
+    Set<SubClusterId> blackList = new HashSet<>();
+    for (String id : blackListParam.split(",")) {
+      blackList.add(SubClusterId.newInstance(id));
+    }
+    return blackList;
+  }
+
+  /**
+   * Given the scheduler information for all RMs, extract the union of
+   * queue names - right now we only consider instances of capacity scheduler.
+   *
+   * @param schedInfo the scheduler information
+   * @return a set of queue names
+   */
+  private Set<String> extractQueues(
+      Map<SubClusterId, SchedulerInfo> schedInfo) {
+    Set<String> queueNames = new HashSet<String>();
+    for (Map.Entry<SubClusterId, SchedulerInfo> entry : schedInfo.entrySet()) {
+      if (entry.getValue() instanceof CapacitySchedulerInfo) {
+        // Flatten the queue structure and get only non-leaf queues
+        queueNames.addAll(flattenQueue((CapacitySchedulerInfo) entry.getValue())
+            .get(CapacitySchedulerQueueInfo.class));
+      } else {
+        LOG.warn("Skipping SubCluster {}, not configured with capacity "
+            + "scheduler", entry.getKey());
+      }
+    }
+    return queueNames;
+  }
+
+  // Helpers to flatten the queue structure into a multimap of
+  // queue type to set of queue names
+  private Map<Class, Set<String>> flattenQueue(CapacitySchedulerInfo csi) {
+    Map<Class, Set<String>> flattened = new HashMap<Class, Set<String>>();
+    addOrAppend(flattened, csi.getClass(), csi.getQueueName());
+    for (CapacitySchedulerQueueInfo csqi : csi.getQueues().getQueueInfoList()) {
+      flattenQueue(csqi, flattened);
+    }
+    return flattened;
+  }
+
+  private void flattenQueue(CapacitySchedulerQueueInfo csi,
+      Map<Class, Set<String>> flattened) {
+    addOrAppend(flattened, csi.getClass(), csi.getQueueName());
+    if (csi.getQueues() != null) {
+      for (CapacitySchedulerQueueInfo csqi : csi.getQueues()
+          .getQueueInfoList()) {
+        flattenQueue(csqi, flattened);
+      }
+    }
+  }
+
+  private <K, V> void addOrAppend(Map<K, Set<V>> multimap, K key, V value) {
+    if (!multimap.containsKey(key)) {
+      multimap.put(key, new HashSet<V>());
+    }
+    multimap.get(key).add(value);
+  }
+
+}
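
For the blacklist, the generator expects a comma-separated list of sub-cluster
ids; a sketch of what getBlackList() above accepts (values hypothetical):

  conf.set(YarnConfiguration.GPG_POLICY_GENERATOR_BLACKLIST, "sc1,sc2");
  // getBlackList() then yields SubClusterId("sc1") and SubClusterId("sc2"),
  // which run() removes from the active set before querying any RM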

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5da8ca6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/UniformWeightedLocalityGlobalPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/UniformWeightedLocalityGlobalPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/UniformWeightedLocalityGlobalPolicy.java
new file mode 100644
index 0000000..826cb02
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/UniformWeightedLocalityGlobalPolicy.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.globalpolicygenerator.policygenerator;
+
+import org.apache.hadoop.yarn.server.federation.policies.manager.FederationPolicyManager;
+import org.apache.hadoop.yarn.server.federation.policies.manager.WeightedLocalityPolicyManager;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo;
+import org.apache.hadoop.yarn.server.globalpolicygenerator.GPGUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Map;
+
+/**
+ * Simple policy that generates and updates uniform weighted locality
+ * policies.
+ */
+public class UniformWeightedLocalityGlobalPolicy extends GlobalPolicy {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(UniformWeightedLocalityGlobalPolicy.class);
+
+  @Override
+  protected FederationPolicyManager updatePolicy(String queueName,
+      Map<SubClusterId, Map<Class, Object>> clusterInfo,
+      FederationPolicyManager currentManager) {
+    if (currentManager == null) {
+      // Set uniform weights for all SubClusters
+      LOG.info("Creating uniform weighted policy queue {}", queueName);
+      WeightedLocalityPolicyManager manager =
+          new WeightedLocalityPolicyManager();
+      manager.setQueue(queueName);
+      Map<SubClusterIdInfo, Float> policyWeights =
+          GPGUtils.createUniformWeights(clusterInfo.keySet());
+      manager.getWeightedPolicyInfo().setAMRMPolicyWeights(policyWeights);
+      manager.getWeightedPolicyInfo().setRouterPolicyWeights(policyWeights);
+      currentManager = manager;
+    }
+    if (currentManager instanceof WeightedLocalityPolicyManager) {
+      LOG.info("Updating policy for queue {} to default weights", queueName);
+      WeightedLocalityPolicyManager wlpmanager =
+          (WeightedLocalityPolicyManager) currentManager;
+      wlpmanager.getWeightedPolicyInfo().setAMRMPolicyWeights(
+          GPGUtils.createUniformWeights(clusterInfo.keySet()));
+      wlpmanager.getWeightedPolicyInfo().setRouterPolicyWeights(
+          GPGUtils.createUniformWeights(clusterInfo.keySet()));
+    } else {
+      LOG.warn("Policy for queue {} is of type {}, expected {}",
+          queueName, currentManager.getClass(),
+          WeightedLocalityPolicyManager.class);
+    }
+    return currentManager;
+  }
+
+}

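GPGUtils.createUniformWeights itself is not part of this diff. A plausible
reading, consistent with the name and with both call sites above reusing one
result for the AMRM and Router weights, is an equal share per active
SubCluster; the sketch below assumes a 1/N split, which this patch does not
confirm:

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo;

public final class UniformWeightsSketch {
  private UniformWeightsSketch() {
  }

  // Assumed shape of GPGUtils.createUniformWeights: every active
  // SubCluster gets the same share. Whether that share is 1/N or a flat
  // 1.0 per cluster is not visible in this patch; 1/N is assumed here.
  public static Map<SubClusterIdInfo, Float> createUniformWeights(
      Set<SubClusterId> ids) {
    Map<SubClusterIdInfo, Float> weights = new HashMap<>();
    for (SubClusterId id : ids) {
      weights.put(new SubClusterIdInfo(id), 1.0f / ids.size());
    }
    return weights;
  }
}
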
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5da8ca6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/package-info.java
new file mode 100644
index 0000000..e8ff436
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/package-info.java
@@ -0,0 +1,24 @@
+/**
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Classes comprising the policy generator for the GPG. Responsibilities include
+ * generating and updating policies based on the cluster status.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator.policygenerator;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5da8ca6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/TestGPGPolicyFacade.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/TestGPGPolicyFacade.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/TestGPGPolicyFacade.java
new file mode 100644
index 0000000..d78c11f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/TestGPGPolicyFacade.java
@@ -0,0 +1,202 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.federation.policies.manager.FederationPolicyManager;
+import org.apache.hadoop.yarn.server.federation.policies.manager.WeightedLocalityPolicyManager;
+import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
+import org.apache.hadoop.yarn.server.federation.store.impl.MemoryFederationStateStore;
+import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration;
+import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Matchers;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * Unit test for GPG Policy Facade.
+ */
+public class TestGPGPolicyFacade {
+
+  private Configuration conf;
+  private FederationStateStore stateStore;
+  private FederationStateStoreFacade facade =
+      FederationStateStoreFacade.getInstance();
+  private GPGPolicyFacade policyFacade;
+
+  private Set<SubClusterId> subClusterIds;
+
+  private SubClusterPolicyConfiguration testConf;
+
+  private static final String TEST_QUEUE = "test-queue";
+
+  public TestGPGPolicyFacade() {
+    conf = new Configuration();
+    conf.setInt(YarnConfiguration.FEDERATION_CACHE_TIME_TO_LIVE_SECS, 0);
+    subClusterIds = new HashSet<>();
+    subClusterIds.add(SubClusterId.newInstance("sc0"));
+    subClusterIds.add(SubClusterId.newInstance("sc1"));
+    subClusterIds.add(SubClusterId.newInstance("sc2"));
+  }
+
+  @Before
+  public void setUp() throws IOException, YarnException {
+    stateStore = new MemoryFederationStateStore();
+    stateStore.init(conf);
+    facade.reinitialize(stateStore, conf);
+    policyFacade = new GPGPolicyFacade(facade, conf);
+    WeightedLocalityPolicyManager manager =
+        new WeightedLocalityPolicyManager();
+    // Add a test policy for test queue
+    manager.setQueue(TEST_QUEUE);
+    manager.getWeightedPolicyInfo().setAMRMPolicyWeights(
+        GPGUtils.createUniformWeights(subClusterIds));
+    manager.getWeightedPolicyInfo().setRouterPolicyWeights(
+        GPGUtils.createUniformWeights(subClusterIds));
+    testConf = manager.serializeConf();
+    stateStore.setPolicyConfiguration(SetSubClusterPolicyConfigurationRequest
+        .newInstance(testConf));
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    stateStore.close();
+    stateStore = null;
+  }
+
+  @Test
+  public void testGetPolicy() throws YarnException {
+    WeightedLocalityPolicyManager manager =
+        (WeightedLocalityPolicyManager) policyFacade
+            .getPolicyManager(TEST_QUEUE);
+    Assert.assertEquals(testConf, manager.serializeConf());
+  }
+
+  /**
+   * Test that new policies are written into the state store.
+   */
+  @Test
+  public void testSetNewPolicy() throws YarnException {
+    WeightedLocalityPolicyManager manager =
+        new WeightedLocalityPolicyManager();
+    manager.setQueue(TEST_QUEUE + 0);
+    manager.getWeightedPolicyInfo().setAMRMPolicyWeights(
+        GPGUtils.createUniformWeights(subClusterIds));
+    manager.getWeightedPolicyInfo().setRouterPolicyWeights(
+        GPGUtils.createUniformWeights(subClusterIds));
+    SubClusterPolicyConfiguration policyConf = manager.serializeConf();
+    policyFacade.setPolicyManager(manager);
+
+    manager =
+        (WeightedLocalityPolicyManager) policyFacade
+            .getPolicyManager(TEST_QUEUE + 0);
+    Assert.assertEquals(policyConf, manager.serializeConf());
+  }
+
+  /**
+   * Test that overwriting policies are updated in the state store.
+   */
+  @Test
+  public void testOverwritePolicy() throws YarnException {
+    subClusterIds.add(SubClusterId.newInstance("sc3"));
+    WeightedLocalityPolicyManager manager =
+        new WeightedLocalityPolicyManager();
+    manager.setQueue(TEST_QUEUE);
+    manager.getWeightedPolicyInfo().setAMRMPolicyWeights(
+        GPGUtils.createUniformWeights(subClusterIds));
+    manager.getWeightedPolicyInfo().setRouterPolicyWeights(
+        GPGUtils.createUniformWeights(subClusterIds));
+    SubClusterPolicyConfiguration policyConf = manager.serializeConf();
+    policyFacade.setPolicyManager(manager);
+
+    manager =
+        (WeightedLocalityPolicyManager) policyFacade
+            .getPolicyManager(TEST_QUEUE);
+    Assert.assertEquals(policyConf, manager.serializeConf());
+  }
+
+  /**
+   * Test that the write through cache works.
+   */
+  @Test
+  public void testWriteCache() throws YarnException {
+    stateStore = mock(MemoryFederationStateStore.class);
+    facade.reinitialize(stateStore, conf);
+    when(stateStore.getPolicyConfiguration(Matchers.any(
+        GetSubClusterPolicyConfigurationRequest.class))).thenReturn(
+        GetSubClusterPolicyConfigurationResponse.newInstance(testConf));
+    policyFacade = new GPGPolicyFacade(facade, conf);
+
+    // Query once to fill the cache
+    FederationPolicyManager manager = policyFacade.getPolicyManager(TEST_QUEUE);
+    // State store should be contacted once
+    verify(stateStore, times(1)).getPolicyConfiguration(
+        Matchers.any(GetSubClusterPolicyConfigurationRequest.class));
+
+    // If we set the same policy, the state store should be untouched
+    policyFacade.setPolicyManager(manager);
+    verify(stateStore, times(0)).setPolicyConfiguration(
+        Matchers.any(SetSubClusterPolicyConfigurationRequest.class));
+  }
+
+  /**
+   * Test that when read only is enabled, the state store is not changed.
+   */
+  @Test
+  public void testReadOnly() throws YarnException {
+    conf.setBoolean(YarnConfiguration.GPG_POLICY_GENERATOR_READONLY, true);
+    stateStore = mock(MemoryFederationStateStore.class);
+    facade.reinitialize(stateStore, conf);
+    when(stateStore.getPolicyConfiguration(Matchers.any(
+        GetSubClusterPolicyConfigurationRequest.class))).thenReturn(
+        GetSubClusterPolicyConfigurationResponse.newInstance(testConf));
+    policyFacade = new GPGPolicyFacade(facade, conf);
+
+    // If we set a policy, the state store should be untouched
+    WeightedLocalityPolicyManager manager =
+        new WeightedLocalityPolicyManager();
+    // Add a test policy for test queue
+    manager.setQueue(TEST_QUEUE);
+    manager.getWeightedPolicyInfo().setAMRMPolicyWeights(
+        GPGUtils.createUniformWeights(subClusterIds));
+    manager.getWeightedPolicyInfo().setRouterPolicyWeights(
+        GPGUtils.createUniformWeights(subClusterIds));
+    policyFacade.setPolicyManager(manager);
+    verify(stateStore, times(0)).setPolicyConfiguration(
+        Matchers.any(SetSubClusterPolicyConfigurationRequest.class));
+  }
+
+}

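testWriteCache and testReadOnly only pass if GPGPolicyFacade keeps a
write-through cache and honors the read-only flag. A minimal sketch of the
behavior the two tests pin down; the facade is not in this patch, so the
member names used here (cachedConfs, readOnly, stateStore) are assumptions:

// Sketch of the setPolicyManager() behavior the two tests verify;
// cachedConfs, readOnly and stateStore are assumed names, and
// getPolicyManager() is assumed to populate the cache on reads too.
private final Map<String, SubClusterPolicyConfiguration> cachedConfs =
    new HashMap<>();

public void setPolicyManager(FederationPolicyManager manager)
    throws YarnException {
  String queue = manager.getQueue();
  SubClusterPolicyConfiguration newConf = manager.serializeConf();
  // Write-through cache: skip the state store when nothing changed.
  if (newConf.equals(cachedConfs.get(queue))) {
    return;
  }
  cachedConfs.put(queue, newConf);
  // Read-only mode: keep the cache warm but never mutate the store.
  if (readOnly) {
    return;
  }
  stateStore.setPolicyConfiguration(
      SetSubClusterPolicyConfigurationRequest.newInstance(newConf));
}
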
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5da8ca6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/TestPolicyGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/TestPolicyGenerator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/TestPolicyGenerator.java
new file mode 100644
index 0000000..9d27b3b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/TestPolicyGenerator.java
@@ -0,0 +1,338 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator.policygenerator;
+
+import com.sun.jersey.api.json.JSONConfiguration;
+import com.sun.jersey.api.json.JSONJAXBContext;
+import com.sun.jersey.api.json.JSONUnmarshaller;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.federation.policies.manager.FederationPolicyManager;
+import org.apache.hadoop.yarn.server.federation.policies.manager.WeightedLocalityPolicyManager;
+import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
+import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState;
+import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+import org.apache.hadoop.yarn.server.globalpolicygenerator.GPGContext;
+import org.apache.hadoop.yarn.server.globalpolicygenerator.GPGContextImpl;
+import org.apache.hadoop.yarn.server.globalpolicygenerator.GPGPolicyFacade;
+import org.apache.hadoop.yarn.server.globalpolicygenerator.GPGUtils;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+
+import javax.xml.bind.JAXBException;
+import java.io.IOException;
+import java.io.StringReader;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+/**
+ * Unit test for GPG Policy Generator.
+ */
+public class TestPolicyGenerator {
+
+  private static final int NUM_SC = 3;
+
+  private Configuration conf;
+  private FederationStateStore stateStore;
+  private FederationStateStoreFacade facade =
+      FederationStateStoreFacade.getInstance();
+
+  private List<SubClusterId> subClusterIds;
+  private Map<SubClusterId, SubClusterInfo> subClusterInfos;
+  private Map<SubClusterId, Map<Class, Object>> clusterInfos;
+  private Map<SubClusterId, SchedulerInfo> schedulerInfos;
+
+  private GPGContext gpgContext;
+
+  private PolicyGenerator policyGenerator;
+
+  public TestPolicyGenerator() {
+    conf = new Configuration();
+    conf.setInt(YarnConfiguration.FEDERATION_CACHE_TIME_TO_LIVE_SECS, 0);
+
+    gpgContext = new GPGContextImpl();
+    gpgContext.setPolicyFacade(new GPGPolicyFacade(facade, conf));
+    gpgContext.setStateStoreFacade(facade);
+  }
+
+  @Before
+  public void setUp() throws IOException, YarnException, JAXBException {
+    subClusterIds = new ArrayList<>();
+    subClusterInfos = new HashMap<>();
+    clusterInfos = new HashMap<>();
+    schedulerInfos = new HashMap<>();
+
+    CapacitySchedulerInfo sti1 =
+        readJSON("src/test/resources/schedulerInfo1.json",
+            CapacitySchedulerInfo.class);
+    CapacitySchedulerInfo sti2 =
+        readJSON("src/test/resources/schedulerInfo2.json",
+            CapacitySchedulerInfo.class);
+
+    // Set up sub clusters
+    for (int i = 0; i < NUM_SC; ++i) {
+      // Sub cluster Id
+      SubClusterId id = SubClusterId.newInstance("sc" + i);
+      subClusterIds.add(id);
+
+      // Sub cluster info
+      SubClusterInfo cluster = SubClusterInfo
+          .newInstance(id, "amrm:" + i, "clientrm:" + i, "rmadmin:" + i,
+              "rmweb:" + i, SubClusterState.SC_RUNNING, 0, "");
+      subClusterInfos.put(id, cluster);
+
+      // Cluster metrics info
+      ClusterMetricsInfo metricsInfo = new ClusterMetricsInfo();
+      metricsInfo.setAppsPending(2000);
+      if (!clusterInfos.containsKey(id)) {
+        clusterInfos.put(id, new HashMap<Class, Object>());
+      }
+      clusterInfos.get(id).put(ClusterMetricsInfo.class, metricsInfo);
+
+      schedulerInfos.put(id, sti1);
+    }
+
+    // Change one of the sub cluster schedulers
+    schedulerInfos.put(subClusterIds.get(0), sti2);
+
+    stateStore = mock(FederationStateStore.class);
+    when(stateStore.getSubClusters((GetSubClustersInfoRequest) any()))
+        .thenReturn(GetSubClustersInfoResponse.newInstance(
+            new ArrayList<SubClusterInfo>(subClusterInfos.values())));
+    facade.reinitialize(stateStore, conf);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    stateStore.close();
+    stateStore = null;
+  }
+
+  private <T> T readJSON(String pathname, Class<T> classy)
+      throws IOException, JAXBException {
+
+    JSONJAXBContext jc =
+        new JSONJAXBContext(JSONConfiguration.mapped().build(), classy);
+    JSONUnmarshaller unmarshaller = jc.createJSONUnmarshaller();
+    String contents = new String(Files.readAllBytes(Paths.get(pathname)));
+    return unmarshaller.unmarshalFromJSON(new StringReader(contents), classy);
+
+  }
+
+  @Test
+  public void testPolicyGenerator() throws YarnException {
+    policyGenerator = new TestablePolicyGenerator();
+    policyGenerator.policy = mock(GlobalPolicy.class);
+    policyGenerator.run();
+    verify(policyGenerator.policy, times(1))
+        .updatePolicy("default", clusterInfos, null);
+    verify(policyGenerator.policy, times(1))
+        .updatePolicy("default2", clusterInfos, null);
+  }
+
+  @Test
+  public void testBlacklist() throws YarnException {
+    conf.set(YarnConfiguration.GPG_POLICY_GENERATOR_BLACKLIST,
+        subClusterIds.get(0).toString());
+    Map<SubClusterId, Map<Class, Object>> blacklistedCMI =
+        new HashMap<>(clusterInfos);
+    blacklistedCMI.remove(subClusterIds.get(0));
+    policyGenerator = new TestablePolicyGenerator();
+    policyGenerator.policy = mock(GlobalPolicy.class);
+    policyGenerator.run();
+    verify(policyGenerator.policy, times(1))
+        .updatePolicy("default", blacklistedCMI, null);
+    verify(policyGenerator.policy, times(0))
+        .updatePolicy("default", clusterInfos, null);
+  }
+
+  @Test
+  public void testBlacklistTwo() throws YarnException {
+    conf.set(YarnConfiguration.GPG_POLICY_GENERATOR_BLACKLIST,
+        subClusterIds.get(0).toString() + "," + subClusterIds.get(1)
+            .toString());
+    Map<SubClusterId, Map<Class, Object>> blacklistedCMI =
+        new HashMap<>(clusterInfos);
+    blacklistedCMI.remove(subClusterIds.get(0));
+    blacklistedCMI.remove(subClusterIds.get(1));
+    policyGenerator = new TestablePolicyGenerator();
+    policyGenerator.policy = mock(GlobalPolicy.class);
+    policyGenerator.run();
+    verify(policyGenerator.policy, times(1))
+        .updatePolicy("default", blacklistedCMI, null);
+    verify(policyGenerator.policy, times(0))
+        .updatePolicy("default", clusterInfos, null);
+  }
+
+  @Test
+  public void testExistingPolicy() throws YarnException {
+    WeightedLocalityPolicyManager manager = new WeightedLocalityPolicyManager();
+    // Add a test policy for test queue
+    manager.setQueue("default");
+    manager.getWeightedPolicyInfo().setAMRMPolicyWeights(GPGUtils
+        .createUniformWeights(new HashSet<SubClusterId>(subClusterIds)));
+    manager.getWeightedPolicyInfo().setRouterPolicyWeights(GPGUtils
+        .createUniformWeights(new HashSet<SubClusterId>(subClusterIds)));
+    SubClusterPolicyConfiguration testConf = manager.serializeConf();
+    when(stateStore.getPolicyConfiguration(
+        GetSubClusterPolicyConfigurationRequest.newInstance("default")))
+        .thenReturn(
+            GetSubClusterPolicyConfigurationResponse.newInstance(testConf));
+
+    policyGenerator = new TestablePolicyGenerator();
+    policyGenerator.policy = mock(GlobalPolicy.class);
+    policyGenerator.run();
+
+    ArgumentCaptor<FederationPolicyManager> argCaptor =
+        ArgumentCaptor.forClass(FederationPolicyManager.class);
+    verify(policyGenerator.policy, times(1))
+        .updatePolicy(eq("default"), eq(clusterInfos), argCaptor.capture());
+    assertEquals(argCaptor.getValue().getClass(), manager.getClass());
+    assertEquals(argCaptor.getValue().serializeConf(), manager.serializeConf());
+  }
+
+  @Test
+  public void testCallRM() {
+
+    CapacitySchedulerConfiguration csConf =
+        new CapacitySchedulerConfiguration();
+
+    final String a = CapacitySchedulerConfiguration.ROOT + ".a";
+    final String b = CapacitySchedulerConfiguration.ROOT + ".b";
+    final String a1 = a + ".a1";
+    final String a2 = a + ".a2";
+    final String b1 = b + ".b1";
+    final String b2 = b + ".b2";
+    final String b3 = b + ".b3";
+    float aCapacity = 10.5f;
+    float bCapacity = 89.5f;
+    float a1Capacity = 30;
+    float a2Capacity = 70;
+    float b1Capacity = 79.2f;
+    float b2Capacity = 0.8f;
+    float b3Capacity = 20;
+
+    // Define top-level queues
+    csConf.setQueues(CapacitySchedulerConfiguration.ROOT,
+        new String[] {"a", "b"});
+
+    csConf.setCapacity(a, aCapacity);
+    csConf.setCapacity(b, bCapacity);
+
+    // Define 2nd-level queues
+    csConf.setQueues(a, new String[] {"a1", "a2"});
+    csConf.setCapacity(a1, a1Capacity);
+    csConf.setUserLimitFactor(a1, 100.0f);
+    csConf.setCapacity(a2, a2Capacity);
+    csConf.setUserLimitFactor(a2, 100.0f);
+
+    csConf.setQueues(b, new String[] {"b1", "b2", "b3"});
+    csConf.setCapacity(b1, b1Capacity);
+    csConf.setUserLimitFactor(b1, 100.0f);
+    csConf.setCapacity(b2, b2Capacity);
+    csConf.setUserLimitFactor(b2, 100.0f);
+    csConf.setCapacity(b3, b3Capacity);
+    csConf.setUserLimitFactor(b3, 100.0f);
+
+    YarnConfiguration rmConf = new YarnConfiguration(csConf);
+
+    ResourceManager resourceManager = new ResourceManager();
+    rmConf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
+    resourceManager.init(rmConf);
+    resourceManager.start();
+    try {
+      String rmAddress = WebAppUtils.getRMWebAppURLWithScheme(this.conf);
+      SchedulerTypeInfo sti = GPGUtils
+          .invokeRMWebService(conf, rmAddress, RMWSConsts.SCHEDULER,
+              SchedulerTypeInfo.class);
+      Assert.assertNotNull(sti);
+    } finally {
+      // Stop the RM so a failed web service call does not leak it.
+      resourceManager.stop();
+    }
+  }
+
+  /**
+   * Testable policy generator overrides the methods that communicate
+   * with the RM REST endpoint, allowing us to inject faked responses.
+   */
+  class TestablePolicyGenerator extends PolicyGenerator {
+
+    TestablePolicyGenerator() {
+      super(conf, gpgContext);
+    }
+
+    @Override
+    protected Map<SubClusterId, Map<Class, Object>> getInfos(
+        Map<SubClusterId, SubClusterInfo> activeSubClusters) {
+      Map<SubClusterId, Map<Class, Object>> ret = new HashMap<>();
+      for (SubClusterId id : activeSubClusters.keySet()) {
+        if (!ret.containsKey(id)) {
+          ret.put(id, new HashMap<Class, Object>());
+        }
+        ret.get(id).put(ClusterMetricsInfo.class,
+            clusterInfos.get(id).get(ClusterMetricsInfo.class));
+      }
+      return ret;
+    }
+
+    @Override
+    protected Map<SubClusterId, SchedulerInfo> getSchedulerInfo(
+        Map<SubClusterId, SubClusterInfo> activeSubClusters) {
+      Map<SubClusterId, SchedulerInfo> ret =
+          new HashMap<SubClusterId, SchedulerInfo>();
+      for (SubClusterId id : activeSubClusters.keySet()) {
+        ret.put(id, schedulerInfos.get(id));
+      }
+      return ret;
+    }
+  }
+}

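The TestablePolicyGenerator idiom above, subclassing to override the two
protected REST-facing methods, is a classic test seam. The same pattern in
miniature, with a hypothetical Fetcher in place of PolicyGenerator:

// A minimal, self-contained version of the override-for-injection seam
// used by TestablePolicyGenerator; Fetcher is an invented example class.
public final class SeamSketch {

  static class Fetcher {
    // In production this would call out over the network.
    protected String fetch() {
      throw new UnsupportedOperationException("no network in unit tests");
    }

    final String describe() {
      return "got: " + fetch();
    }
  }

  public static void main(String[] args) {
    Fetcher testable = new Fetcher() {
      @Override
      protected String fetch() {
        return "canned-scheduler-info"; // injected response
      }
    };
    System.out.println(testable.describe()); // prints: got: canned-scheduler-info
  }
}
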
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5da8ca6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo1.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo1.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo1.json
new file mode 100644
index 0000000..3ad4594
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo1.json
@@ -0,0 +1,134 @@
+{
+  "capacity": 100.0,
+  "usedCapacity": 0.0,
+  "maxCapacity": 100.0,
+  "queueName": "root",
+  "queues": {
+    "queue": [
+      {
+        "type": "capacitySchedulerLeafQueueInfo",
+        "capacity": 100.0,
+        "usedCapacity": 0.0,
+        "maxCapacity": 100.0,
+        "absoluteCapacity": 100.0,
+        "absoluteMaxCapacity": 100.0,
+        "absoluteUsedCapacity": 0.0,
+        "numApplications": 484,
+        "queueName": "default",
+        "state": "RUNNING",
+        "resourcesUsed": {
+          "memory": 0,
+          "vCores": 0
+        },
+        "hideReservationQueues": false,
+        "nodeLabels": [
+          "*"
+        ],
+        "numActiveApplications": 484,
+        "numPendingApplications": 0,
+        "numContainers": 0,
+        "maxApplications": 10000,
+        "maxApplicationsPerUser": 10000,
+        "userLimit": 100,
+        "users": {
+          "user": [
+            {
+              "username": "Default",
+              "resourcesUsed": {
+                "memory": 0,
+                "vCores": 0
+              },
+              "numPendingApplications": 0,
+              "numActiveApplications": 468,
+              "AMResourceUsed": {
+                "memory": 30191616,
+                "vCores": 468
+              },
+              "userResourceLimit": {
+                "memory": 31490048,
+                "vCores": 7612
+              }
+            }
+          ]
+        },
+        "userLimitFactor": 1.0,
+        "AMResourceLimit": {
+          "memory": 31490048,
+          "vCores": 7612
+        },
+        "usedAMResource": {
+          "memory": 30388224,
+          "vCores": 532
+        },
+        "userAMResourceLimit": {
+          "memory": 31490048,
+          "vCores": 7612
+        },
+        "preemptionDisabled": true
+      }
+    ]
+  },
+  "health": {
+    "lastrun": 1517951638085,
+    "operationsInfo": {
+      "entry": {
+        "key": "last-allocation",
+        "value": {
+          "nodeId": "node0:0",
+          "containerId": "container_e61477_1517922128312_0340_01_000001",
+          "queue": "root.default"
+        }
+      },
+      "entry": {
+        "key": "last-reservation",
+        "value": {
+          "nodeId": "node0:1",
+          "containerId": "container_e61477_1517879828320_0249_01_000001",
+          "queue": "root.default"
+        }
+      },
+      "entry": {
+        "key": "last-release",
+        "value": {
+          "nodeId": "node0:2",
+          "containerId": "container_e61477_1517922128312_0340_01_000001",
+          "queue": "root.default"
+        }
+      },
+      "entry": {
+        "key": "last-preemption",
+        "value": {
+          "nodeId": "N/A",
+          "containerId": "N/A",
+          "queue": "N/A"
+        }
+      }
+    },
+    "lastRunDetails": [
+      {
+        "operation": "releases",
+        "count": 0,
+        "resources": {
+          "memory": 0,
+          "vCores": 0
+        }
+      },
+      {
+        "operation": "allocations",
+        "count": 0,
+        "resources": {
+          "memory": 0,
+          "vCores": 0
+        }
+      },
+      {
+        "operation": "reservations",
+        "count": 0,
+        "resources": {
+          "memory": 0,
+          "vCores": 0
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file




[27/50] [abbrv] hadoop git commit: HDFS-13619. TestAuditLoggerWithCommands fails on Windows. Contributed by Anbang Hu.

Posted by bo...@apache.org.
HDFS-13619. TestAuditLoggerWithCommands fails on Windows. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13d25289
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13d25289
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13d25289

Branch: refs/heads/YARN-7402
Commit: 13d25289076b39daf481fb1ee15939dbfe4a6b23
Parents: 8733012
Author: Inigo Goiri <in...@apache.org>
Authored: Fri May 25 13:32:34 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Fri May 25 13:32:34 2018 -0700

----------------------------------------------------------------------
 .../hdfs/server/namenode/TestAuditLoggerWithCommands.java       | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13d25289/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
index 41ee03f..222a1de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
@@ -1264,8 +1264,9 @@ public class TestAuditLoggerWithCommands {
   }
 
   private int verifyAuditLogs(String pattern) {
-    int length = auditlog.getOutput().split("\n").length;
-    String lastAudit = auditlog.getOutput().split("\n")[length - 1];
+    int length = auditlog.getOutput().split(System.lineSeparator()).length;
+    String lastAudit = auditlog.getOutput()
+        .split(System.lineSeparator())[length - 1];
     assertTrue("Unexpected log!", lastAudit.matches(pattern));
     return length;
   }

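The root cause is the trailing carriage return: on Windows the captured audit
log terminates lines with \r\n, so splitting on "\n" leaves a '\r' on every
entry, and the final matches() call fails because '.' does not match line
terminators. A small self-contained demonstration:

public final class LineSeparatorDemo {
  public static void main(String[] args) {
    // A log as captured on Windows: lines terminated by \r\n.
    String audit = "cmd=open\tsrc=/a\r\ncmd=delete\tsrc=/b\r\n";

    String[] naive = audit.split("\n");
    String last = naive[naive.length - 1];   // "cmd=delete\tsrc=/b\r"
    // '.' does not match '\r', so the full-string match fails.
    System.out.println(last.matches("cmd=delete.*"));   // false

    // System.lineSeparator() is "\r\n" on Windows; hard-coded here so the
    // demo behaves the same on any platform.
    String[] fixed = audit.split("\r\n");
    System.out.println(fixed[fixed.length - 1].matches("cmd=delete.*")); // true
  }
}
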



[07/50] [abbrv] hadoop git commit: YARN-8348. Incorrect and missing AfterClass in HBase-tests to fix NPE failures. Contributed by Giovanni Matteo Fumarola.

Posted by bo...@apache.org.
YARN-8348. Incorrect and missing AfterClass in HBase-tests to fix NPE failures. Contributed by Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d7261561
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d7261561
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d7261561

Branch: refs/heads/YARN-7402
Commit: d72615611cfa6bd82756270d4b10136ec1e56741
Parents: e99e5bf
Author: Inigo Goiri <in...@apache.org>
Authored: Wed May 23 14:43:59 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Wed May 23 14:43:59 2018 -0700

----------------------------------------------------------------------
 .../storage/TestHBaseTimelineStorageApps.java                | 4 +++-
 .../storage/TestHBaseTimelineStorageDomain.java              | 8 ++++++++
 .../storage/TestHBaseTimelineStorageEntities.java            | 4 +++-
 .../storage/TestHBaseTimelineStorageSchema.java              | 8 ++++++++
 .../storage/flow/TestHBaseStorageFlowActivity.java           | 4 +++-
 .../storage/flow/TestHBaseStorageFlowRun.java                | 4 +++-
 .../storage/flow/TestHBaseStorageFlowRunCompaction.java      | 4 +++-
 7 files changed, 31 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7261561/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
index bc33427..0dee442 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
@@ -1936,6 +1936,8 @@ public class TestHBaseTimelineStorageApps {
 
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
-    util.shutdownMiniCluster();
+    if (util != null) {
+      util.shutdownMiniCluster();
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7261561/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageDomain.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageDomain.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageDomain.java
index 2932e0c..1f59088 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageDomain.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageDomain.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelp
 import org.apache.hadoop.yarn.server.timelineservice.storage.domain.DomainColumn;
 import org.apache.hadoop.yarn.server.timelineservice.storage.domain.DomainRowKey;
 import org.apache.hadoop.yarn.server.timelineservice.storage.domain.DomainTableRW;
+import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -123,4 +124,11 @@ public class TestHBaseTimelineStorageDomain {
     assertEquals("user1,user2 group1,group2", readers);
     assertEquals("writer1,writer2", writers);
   }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    if (util != null) {
+      util.shutdownMiniCluster();
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7261561/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
index 6932a316..116285c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
@@ -1879,7 +1879,9 @@ public class TestHBaseTimelineStorageEntities {
 
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
-    util.shutdownMiniCluster();
+    if (util != null) {
+      util.shutdownMiniCluster();
+    }
   }
 
   private boolean verifyRowKeyForSubApplication(byte[] rowKey, String suAppUser,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7261561/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageSchema.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageSchema.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageSchema.java
index f838178..950ce62 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageSchema.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageSchema.java
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertTrue;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
 import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTableRW;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTableRW;
+import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -137,4 +138,11 @@ public class TestHBaseTimelineStorageSchema {
     hbaseConf
     .unset(YarnConfiguration.TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX_NAME);
   }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    if (util != null) {
+      util.shutdownMiniCluster();
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7261561/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java
index 645b7d5..3143463 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java
@@ -492,6 +492,8 @@ public class TestHBaseStorageFlowActivity {
 
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
-    util.shutdownMiniCluster();
+    if (util != null) {
+      util.shutdownMiniCluster();
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7261561/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
index c7d0d4e..c3ee758 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
@@ -1075,6 +1075,8 @@ public class TestHBaseStorageFlowRun {
 
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
-    util.shutdownMiniCluster();
+    if (util != null) {
+      util.shutdownMiniCluster();
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7261561/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRunCompaction.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRunCompaction.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRunCompaction.java
index 2ff37af..e0a58da 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRunCompaction.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRunCompaction.java
@@ -850,6 +850,8 @@ public class TestHBaseStorageFlowRunCompaction {
 
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
-    util.shutdownMiniCluster();
+    if (util != null) {
+      util.shutdownMiniCluster();
+    }
   }
 }

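All seven fixes are the same guard. JUnit runs @AfterClass even when
@BeforeClass threw, so if setup failed before util was assigned, the
unguarded shutdown raises an NPE that masks the original failure. The pattern
in miniature; the HBaseTestingUtility usage is modeled on the tests above:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class GuardedTeardownSketch {

  private static HBaseTestingUtility util;

  @BeforeClass
  public static void setup() throws Exception {
    // If startup throws before the next line completes, 'util' stays null.
    util = new HBaseTestingUtility();
    util.startMiniCluster();
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // JUnit invokes this even after a @BeforeClass failure; the null
    // check keeps the teardown NPE from hiding the real error.
    if (util != null) {
      util.shutdownMiniCluster();
    }
  }

  @Test
  public void smoke() {
    // intentionally empty; the sketch is about setup/teardown ordering
  }
}
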



[47/50] [abbrv] hadoop git commit: YARN-7402. [GPG] Fix potential connection leak in GPGUtils. Contributed by Giovanni Matteo Fumarola.

Posted by bo...@apache.org.
YARN-7402. [GPG] Fix potential connection leak in GPGUtils. Contributed by Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5bf22dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5bf22dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5bf22dc

Branch: refs/heads/YARN-7402
Commit: c5bf22dc13b5bbe57b45fe81dd2d912af3b87602
Parents: f5da8ca
Author: Botong Huang <bo...@apache.org>
Authored: Wed May 23 12:45:32 2018 -0700
Committer: Botong Huang <bo...@apache.org>
Committed: Tue May 29 10:48:40 2018 -0700

----------------------------------------------------------------------
 .../server/globalpolicygenerator/GPGUtils.java  | 31 +++++++++++++-------
 1 file changed, 20 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5bf22dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
index 429bec4..31cee1c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
@@ -18,21 +18,22 @@
 
 package org.apache.hadoop.yarn.server.globalpolicygenerator;
 
+import static javax.servlet.http.HttpServletResponse.SC_OK;
+
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
 
-import javax.servlet.http.HttpServletResponse;
 import javax.ws.rs.core.MediaType;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo;
 
 import com.sun.jersey.api.client.Client;
 import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.api.client.WebResource;
-import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
-import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo;
 
 /**
  * GPGUtils contains utility functions for the GPG.
@@ -53,15 +54,23 @@ public final class GPGUtils {
     T obj = null;
 
     WebResource webResource = client.resource(webAddr);
-    ClientResponse response = webResource.path("ws/v1/cluster").path(path)
-        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
-    if (response.getStatus() == HttpServletResponse.SC_OK) {
-      obj = response.getEntity(returnType);
-    } else {
-      throw new YarnRuntimeException("Bad response from remote web service: "
-          + response.getStatus());
+    ClientResponse response = null;
+    try {
+      response = webResource.path("ws/v1/cluster").path(path)
+          .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+      if (response.getStatus() == SC_OK) {
+        obj = response.getEntity(returnType);
+      } else {
+        throw new YarnRuntimeException(
+            "Bad response from remote web service: " + response.getStatus());
+      }
+      return obj;
+    } finally {
+      if (response != null) {
+        response.close();
+      }
+      client.destroy();
     }
-    return obj;
   }
 
   /**


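A note on the shape of this fix: Jersey 1.x's ClientResponse exposes close()
but, as far as I can tell, does not implement AutoCloseable, so
try-with-resources is not an option and the explicit finally is the right
tool. The same pattern factored into a reusable helper; getChecked is a
hypothetical name:

import static javax.servlet.http.HttpServletResponse.SC_OK;

import javax.ws.rs.core.MediaType;

import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;

import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;

public final class JerseyCleanupSketch {
  private JerseyCleanupSketch() {
  }

  // Same pattern as the patched invokeRMWebService: the response and the
  // client are released on every path, including the error throw.
  public static <T> T getChecked(Client client, WebResource resource,
      Class<T> returnType) {
    ClientResponse response = null;
    try {
      response = resource.accept(MediaType.APPLICATION_XML)
          .get(ClientResponse.class);
      if (response.getStatus() != SC_OK) {
        throw new YarnRuntimeException(
            "Bad response from remote web service: " + response.getStatus());
      }
      return response.getEntity(returnType);
    } finally {
      if (response != null) {
        response.close();
      }
      client.destroy();
    }
  }
}
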


[28/50] [abbrv] hadoop git commit: HDDS-113. Rest and Rpc Client should verify resource name using HddsClientUtils. Contributed by Lokesh Jain.

Posted by bo...@apache.org.
HDDS-113. Rest and Rpc Client should verify resource name using HddsClientUtils.
Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2a9652e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2a9652e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2a9652e6

Branch: refs/heads/YARN-7402
Commit: 2a9652e69650973f6158b60ff131215827738db6
Parents: 13d2528
Author: Anu Engineer <ae...@apache.org>
Authored: Fri May 25 15:40:46 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Fri May 25 15:45:50 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hdds/scm/client/HddsClientUtils.java | 23 +++++++++
 .../apache/hadoop/ozone/client/ObjectStore.java |  9 ----
 .../apache/hadoop/ozone/client/OzoneBucket.java | 24 +--------
 .../apache/hadoop/ozone/client/OzoneVolume.java | 18 +------
 .../hadoop/ozone/client/rest/RestClient.java    | 52 ++++++++------------
 .../hadoop/ozone/client/rpc/RpcClient.java      | 46 +++++++----------
 6 files changed, 64 insertions(+), 108 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a9652e6/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
index bc5f8d6..a6813eb 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
@@ -170,6 +170,29 @@ public final class HddsClientUtils {
   }
 
   /**
+   * Verifies that every bucket / volume name passed is a valid DNS name.
+   *
+   * @param resourceNames Array of bucket / volume names to be verified.
+   */
+  public static void verifyResourceName(String... resourceNames) {
+    for (String resourceName : resourceNames) {
+      HddsClientUtils.verifyResourceName(resourceName);
+    }
+  }
+
+  /**
+   * Checks that none of the object references passed in is null.
+   *
+   * @param references Array of object references to be checked.
+   * @param <T> type of the references.
+   */
+  public static <T> void checkNotNull(T... references) {
+    for (T ref: references) {
+      Preconditions.checkNotNull(ref);
+    }
+  }
+
+  /**
    * Returns the cache value to be used for list calls.
    * @param conf Configuration object
    * @return list cache size

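With the two varargs helpers in place, each call site in RestClient and
RpcClient collapses to a single validation line. An illustrative use; the
names are placeholders:

import org.apache.hadoop.hdds.scm.client.HddsClientUtils;

public final class ValidationSketch {
  public static void main(String[] args) {
    String volumeName = "vol-one";
    String bucketName = "bucket-one";
    // One call null-checks every reference, one call DNS-validates every
    // name; each throws on the first offending argument.
    HddsClientUtils.checkNotNull(volumeName, bucketName);
    HddsClientUtils.verifyResourceName(volumeName, bucketName);
    System.out.println("names are valid");
  }
}
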
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a9652e6/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
index d8b3011..c5f0689 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
@@ -63,8 +63,6 @@ public class ObjectStore {
    * @throws IOException
    */
   public void createVolume(String volumeName) throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    HddsClientUtils.verifyResourceName(volumeName);
     proxy.createVolume(volumeName);
   }
 
@@ -76,9 +74,6 @@ public class ObjectStore {
    */
   public void createVolume(String volumeName, VolumeArgs volumeArgs)
       throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(volumeArgs);
-    HddsClientUtils.verifyResourceName(volumeName);
     proxy.createVolume(volumeName, volumeArgs);
   }
 
@@ -89,8 +84,6 @@ public class ObjectStore {
    * @throws IOException
    */
   public OzoneVolume getVolume(String volumeName) throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    HddsClientUtils.verifyResourceName(volumeName);
     OzoneVolume volume = proxy.getVolumeDetails(volumeName);
     return volume;
   }
@@ -150,8 +143,6 @@ public class ObjectStore {
    * @throws IOException
    */
   public void deleteVolume(String volumeName) throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    HddsClientUtils.verifyResourceName(volumeName);
     proxy.deleteVolume(volumeName);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a9652e6/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index 5df0254..2f3cff6 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -104,6 +104,7 @@ public class OzoneBucket {
                      String volumeName, String bucketName,
                      List<OzoneAcl> acls, StorageType storageType,
                      Boolean versioning, long creationTime) {
+    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
     this.proxy = proxy;
     this.volumeName = volumeName;
     this.name = bucketName;
@@ -180,8 +181,6 @@ public class OzoneBucket {
    * @throws IOException
    */
   public void addAcls(List<OzoneAcl> addAcls) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(addAcls);
     proxy.addBucketAcls(volumeName, name, addAcls);
     addAcls.stream().filter(acl -> !acls.contains(acl)).forEach(
         acls::add);
@@ -193,8 +192,6 @@ public class OzoneBucket {
    * @throws IOException
    */
   public void removeAcls(List<OzoneAcl> removeAcls) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(removeAcls);
     proxy.removeBucketAcls(volumeName, name, removeAcls);
     acls.removeAll(removeAcls);
   }
@@ -205,8 +202,6 @@ public class OzoneBucket {
    * @throws IOException
    */
   public void setStorageType(StorageType newStorageType) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(newStorageType);
     proxy.setBucketStorageType(volumeName, name, newStorageType);
     storageType = newStorageType;
   }
@@ -217,8 +212,6 @@ public class OzoneBucket {
    * @throws IOException
    */
   public void setVersioning(Boolean newVersioning) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(newVersioning);
     proxy.setBucketVersioning(volumeName, name, newVersioning);
     versioning = newVersioning;
   }
@@ -233,8 +226,6 @@ public class OzoneBucket {
    */
   public OzoneOutputStream createKey(String key, long size)
       throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(key);
     return createKey(key, size, defaultReplicationType, defaultReplication);
   }
 
@@ -251,10 +242,6 @@ public class OzoneBucket {
                                      ReplicationType type,
                                      ReplicationFactor factor)
       throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(key);
-    Preconditions.checkNotNull(type);
-    Preconditions.checkNotNull(factor);
     return proxy.createKey(volumeName, name, key, size, type, factor);
   }
 
@@ -265,8 +252,6 @@ public class OzoneBucket {
    * @throws IOException
    */
   public OzoneInputStream readKey(String key) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(key);
     return proxy.getKey(volumeName, name, key);
   }
 
@@ -277,8 +262,6 @@ public class OzoneBucket {
    * @throws IOException
    */
   public OzoneKey getKey(String key) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(key);
     return proxy.getKeyDetails(volumeName, name, key);
   }
 
@@ -314,16 +297,11 @@ public class OzoneBucket {
    * @throws IOException
    */
   public void deleteKey(String key) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(key);
     proxy.deleteKey(volumeName, name, key);
   }
 
   public void renameKey(String fromKeyName, String toKeyName)
       throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(fromKeyName);
-    Preconditions.checkNotNull(toKeyName);
     proxy.renameKey(volumeName, name, fromKeyName, toKeyName);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a9652e6/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
index 4601f1a..77f882a 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
@@ -83,6 +83,7 @@ public class OzoneVolume {
   public OzoneVolume(Configuration conf, ClientProtocol proxy, String name,
                      String admin, String owner, long quotaInBytes,
                      long creationTime, List<OzoneAcl> acls) {
+    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
     this.proxy = proxy;
     this.name = name;
     this.admin = admin;
@@ -153,8 +154,6 @@ public class OzoneVolume {
    * @throws IOException
    */
   public void setOwner(String owner) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(owner);
     proxy.setVolumeOwner(name, owner);
     this.owner = owner;
   }
@@ -165,8 +164,6 @@ public class OzoneVolume {
    * @throws IOException
    */
   public void setQuota(OzoneQuota  quota) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(quota);
     proxy.setVolumeQuota(name, quota);
     this.quotaInBytes = quota.sizeInBytes();
   }
@@ -178,9 +175,6 @@ public class OzoneVolume {
    */
   public void createBucket(String bucketName)
       throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(bucketName);
-    HddsClientUtils.verifyResourceName(bucketName);
     proxy.createBucket(name, bucketName);
   }
 
@@ -192,10 +186,6 @@ public class OzoneVolume {
    */
   public void createBucket(String bucketName, BucketArgs bucketArgs)
       throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(bucketName);
-    Preconditions.checkNotNull(bucketArgs);
-    HddsClientUtils.verifyResourceName(bucketName);
     proxy.createBucket(name, bucketName, bucketArgs);
   }
 
@@ -206,9 +196,6 @@ public class OzoneVolume {
    * @throws IOException
    */
   public OzoneBucket getBucket(String bucketName) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(bucketName);
-    HddsClientUtils.verifyResourceName(bucketName);
     OzoneBucket bucket = proxy.getBucketDetails(name, bucketName);
     return bucket;
   }
@@ -246,9 +233,6 @@ public class OzoneVolume {
    * @throws IOException
    */
   public void deleteBucket(String bucketName) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(bucketName);
-    HddsClientUtils.verifyResourceName(bucketName);
     proxy.deleteBucket(name, bucketName);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a9652e6/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
index ac71abe..1169820 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
@@ -211,7 +211,8 @@ public class RestClient implements ClientProtocol {
   public void createVolume(String volumeName, VolumeArgs volArgs)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
+      HddsClientUtils.verifyResourceName(volumeName);
+      Preconditions.checkNotNull(volArgs);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
       String owner = volArgs.getOwner() == null ?
           ugi.getUserName() : volArgs.getOwner();
@@ -256,7 +257,7 @@ public class RestClient implements ClientProtocol {
   public void setVolumeOwner(String volumeName, String owner)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
+      HddsClientUtils.verifyResourceName(volumeName);
       Preconditions.checkNotNull(owner);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
       builder.setPath(PATH_SEPARATOR + volumeName);
@@ -273,7 +274,7 @@ public class RestClient implements ClientProtocol {
   public void setVolumeQuota(String volumeName, OzoneQuota quota)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
+      HddsClientUtils.verifyResourceName(volumeName);
       Preconditions.checkNotNull(quota);
       String quotaString = quota.toString();
       URIBuilder builder = new URIBuilder(ozoneRestUri);
@@ -291,7 +292,7 @@ public class RestClient implements ClientProtocol {
   public OzoneVolume getVolumeDetails(String volumeName)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
+      HddsClientUtils.verifyResourceName(volumeName);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
       builder.setPath(PATH_SEPARATOR + volumeName);
       builder.setParameter(Header.OZONE_INFO_QUERY_TAG,
@@ -326,7 +327,7 @@ public class RestClient implements ClientProtocol {
   @Override
   public void deleteVolume(String volumeName) throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
+      HddsClientUtils.verifyResourceName(volumeName);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
       builder.setPath(PATH_SEPARATOR + volumeName);
       HttpDelete httpDelete = new HttpDelete(builder.build());
@@ -362,8 +363,7 @@ public class RestClient implements ClientProtocol {
       String volumeName, String bucketName, BucketArgs bucketArgs)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
+      HddsClientUtils.verifyResourceName(volumeName, bucketName);
       Preconditions.checkNotNull(bucketArgs);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
       OzoneConsts.Versioning versioning = OzoneConsts.Versioning.DISABLED;
@@ -404,8 +404,7 @@ public class RestClient implements ClientProtocol {
       String volumeName, String bucketName, List<OzoneAcl> addAcls)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
+      HddsClientUtils.verifyResourceName(volumeName, bucketName);
       Preconditions.checkNotNull(addAcls);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
 
@@ -429,8 +428,7 @@ public class RestClient implements ClientProtocol {
       String volumeName, String bucketName, List<OzoneAcl> removeAcls)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
+      HddsClientUtils.verifyResourceName(volumeName, bucketName);
       Preconditions.checkNotNull(removeAcls);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
 
@@ -454,8 +452,7 @@ public class RestClient implements ClientProtocol {
       String volumeName, String bucketName, Boolean versioning)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
+      HddsClientUtils.verifyResourceName(volumeName, bucketName);
       Preconditions.checkNotNull(versioning);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
 
@@ -477,8 +474,7 @@ public class RestClient implements ClientProtocol {
       String volumeName, String bucketName, StorageType storageType)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
+      HddsClientUtils.verifyResourceName(volumeName, bucketName);
       Preconditions.checkNotNull(storageType);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
 
@@ -498,8 +494,7 @@ public class RestClient implements ClientProtocol {
   public void deleteBucket(String volumeName, String bucketName)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
+      HddsClientUtils.verifyResourceName(volumeName, bucketName);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
       builder.setPath(PATH_SEPARATOR + volumeName +
           PATH_SEPARATOR + bucketName);
@@ -521,8 +516,7 @@ public class RestClient implements ClientProtocol {
   public OzoneBucket getBucketDetails(String volumeName, String bucketName)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
+      HddsClientUtils.verifyResourceName(volumeName, bucketName);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
       builder.setPath(PATH_SEPARATOR + volumeName +
           PATH_SEPARATOR + bucketName);
@@ -573,9 +567,8 @@ public class RestClient implements ClientProtocol {
     // TODO: Once ReplicationType and ReplicationFactor are supported in
     // OzoneHandler (in Datanode), set them in header.
     try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
-      Preconditions.checkNotNull(keyName);
+      HddsClientUtils.verifyResourceName(volumeName, bucketName);
+      HddsClientUtils.checkNotNull(keyName, type, factor);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
       builder.setPath(PATH_SEPARATOR + volumeName +
           PATH_SEPARATOR + bucketName +
@@ -617,8 +610,7 @@ public class RestClient implements ClientProtocol {
       String volumeName, String bucketName, String keyName)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
+      HddsClientUtils.verifyResourceName(volumeName, bucketName);
       Preconditions.checkNotNull(keyName);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
       builder.setPath(PATH_SEPARATOR + volumeName +
@@ -661,8 +653,7 @@ public class RestClient implements ClientProtocol {
   public void deleteKey(String volumeName, String bucketName, String keyName)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
+      HddsClientUtils.verifyResourceName(volumeName, bucketName);
       Preconditions.checkNotNull(keyName);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
       builder.setPath(PATH_SEPARATOR + volumeName +
@@ -679,10 +670,8 @@ public class RestClient implements ClientProtocol {
   public void renameKey(String volumeName, String bucketName,
       String fromKeyName, String toKeyName) throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
-      Preconditions.checkNotNull(fromKeyName);
-      Preconditions.checkNotNull(toKeyName);
+      HddsClientUtils.verifyResourceName(volumeName, bucketName);
+      HddsClientUtils.checkNotNull(fromKeyName, toKeyName);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
       builder.setPath(PATH_SEPARATOR + volumeName + PATH_SEPARATOR + bucketName
           + PATH_SEPARATOR + fromKeyName);
@@ -708,8 +697,7 @@ public class RestClient implements ClientProtocol {
       String volumeName, String bucketName, String keyName)
       throws IOException {
     try {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
+      HddsClientUtils.verifyResourceName(volumeName, bucketName);
       Preconditions.checkNotNull(keyName);
       URIBuilder builder = new URIBuilder(ozoneRestUri);
       builder.setPath(PATH_SEPARATOR + volumeName +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a9652e6/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index ffe93dd..43b94a1 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -22,6 +22,7 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
@@ -170,7 +171,7 @@ public class RpcClient implements ClientProtocol {
   @Override
   public void createVolume(String volumeName, VolumeArgs volArgs)
       throws IOException {
-    Preconditions.checkNotNull(volumeName);
+    HddsClientUtils.verifyResourceName(volumeName);
     Preconditions.checkNotNull(volArgs);
 
     String admin = volArgs.getAdmin() == null ?
@@ -214,7 +215,7 @@ public class RpcClient implements ClientProtocol {
   @Override
   public void setVolumeOwner(String volumeName, String owner)
       throws IOException {
-    Preconditions.checkNotNull(volumeName);
+    HddsClientUtils.verifyResourceName(volumeName);
     Preconditions.checkNotNull(owner);
     keySpaceManagerClient.setOwner(volumeName, owner);
   }
@@ -222,7 +223,7 @@ public class RpcClient implements ClientProtocol {
   @Override
   public void setVolumeQuota(String volumeName, OzoneQuota quota)
       throws IOException {
-    Preconditions.checkNotNull(volumeName);
+    HddsClientUtils.verifyResourceName(volumeName);
     Preconditions.checkNotNull(quota);
     long quotaInBytes = quota.sizeInBytes();
     keySpaceManagerClient.setQuota(volumeName, quotaInBytes);
@@ -231,7 +232,7 @@ public class RpcClient implements ClientProtocol {
   @Override
   public OzoneVolume getVolumeDetails(String volumeName)
       throws IOException {
-    Preconditions.checkNotNull(volumeName);
+    HddsClientUtils.verifyResourceName(volumeName);
     KsmVolumeArgs volume = keySpaceManagerClient.getVolumeInfo(volumeName);
     return new OzoneVolume(
         conf,
@@ -253,7 +254,7 @@ public class RpcClient implements ClientProtocol {
 
   @Override
   public void deleteVolume(String volumeName) throws IOException {
-    Preconditions.checkNotNull(volumeName);
+    HddsClientUtils.verifyResourceName(volumeName);
     keySpaceManagerClient.deleteVolume(volumeName);
   }
 
@@ -307,8 +308,7 @@ public class RpcClient implements ClientProtocol {
   public void createBucket(
       String volumeName, String bucketName, BucketArgs bucketArgs)
       throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
+    HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(bucketArgs);
 
     Boolean isVersionEnabled = bucketArgs.getVersioning() == null ?
@@ -346,8 +346,7 @@ public class RpcClient implements ClientProtocol {
   public void addBucketAcls(
       String volumeName, String bucketName, List<OzoneAcl> addAcls)
       throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
+    HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(addAcls);
     KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
     builder.setVolumeName(volumeName)
@@ -360,8 +359,7 @@ public class RpcClient implements ClientProtocol {
   public void removeBucketAcls(
       String volumeName, String bucketName, List<OzoneAcl> removeAcls)
       throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
+    HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(removeAcls);
     KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
     builder.setVolumeName(volumeName)
@@ -374,8 +372,7 @@ public class RpcClient implements ClientProtocol {
   public void setBucketVersioning(
       String volumeName, String bucketName, Boolean versioning)
       throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
+    HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(versioning);
     KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
     builder.setVolumeName(volumeName)
@@ -388,8 +385,7 @@ public class RpcClient implements ClientProtocol {
   public void setBucketStorageType(
       String volumeName, String bucketName, StorageType storageType)
       throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
+    HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(storageType);
     KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
     builder.setVolumeName(volumeName)
@@ -401,8 +397,7 @@ public class RpcClient implements ClientProtocol {
   @Override
   public void deleteBucket(
       String volumeName, String bucketName) throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
+    HddsClientUtils.verifyResourceName(volumeName, bucketName);
     keySpaceManagerClient.deleteBucket(volumeName, bucketName);
   }
 
@@ -415,8 +410,7 @@ public class RpcClient implements ClientProtocol {
   @Override
   public OzoneBucket getBucketDetails(
       String volumeName, String bucketName) throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
+    HddsClientUtils.verifyResourceName(volumeName, bucketName);
     KsmBucketInfo bucketArgs =
         keySpaceManagerClient.getBucketInfo(volumeName, bucketName);
     return new OzoneBucket(
@@ -454,6 +448,8 @@ public class RpcClient implements ClientProtocol {
       String volumeName, String bucketName, String keyName, long size,
       ReplicationType type, ReplicationFactor factor)
       throws IOException {
+    HddsClientUtils.verifyResourceName(volumeName, bucketName);
+    HddsClientUtils.checkNotNull(keyName, type, factor);
     String requestId = UUID.randomUUID().toString();
     KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
         .setVolumeName(volumeName)
@@ -486,8 +482,7 @@ public class RpcClient implements ClientProtocol {
   public OzoneInputStream getKey(
       String volumeName, String bucketName, String keyName)
       throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
+    HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(keyName);
     String requestId = UUID.randomUUID().toString();
     KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
@@ -508,8 +503,7 @@ public class RpcClient implements ClientProtocol {
   public void deleteKey(
       String volumeName, String bucketName, String keyName)
       throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
+    HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(keyName);
     KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
         .setVolumeName(volumeName)
@@ -522,10 +516,8 @@ public class RpcClient implements ClientProtocol {
   @Override
   public void renameKey(String volumeName, String bucketName,
       String fromKeyName, String toKeyName) throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
-    Preconditions.checkNotNull(fromKeyName);
-    Preconditions.checkNotNull(toKeyName);
+    HddsClientUtils.verifyResourceName(volumeName, bucketName);
+    HddsClientUtils.checkNotNull(fromKeyName, toKeyName);
     KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
         .setVolumeName(volumeName)
         .setBucketName(bucketName)




[21/50] [abbrv] hadoop git commit: YARN-8191. Fair scheduler: queue deletion without RM restart. (Gergo Repas via Haibo Chen)

Posted by bo...@apache.org.
YARN-8191. Fair scheduler: queue deletion without RM restart. (Gergo Repas via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86bc6425
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86bc6425
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86bc6425

Branch: refs/heads/YARN-7402
Commit: 86bc6425d425913899f1d951498bd040e453b3d0
Parents: d9852eb
Author: Haibo Chen <ha...@apache.org>
Authored: Thu May 24 17:07:21 2018 -0700
Committer: Haibo Chen <ha...@apache.org>
Committed: Thu May 24 17:12:34 2018 -0700

----------------------------------------------------------------------
 .../fair/AllocationFileLoaderService.java       |  16 +-
 .../scheduler/fair/FSLeafQueue.java             |  31 ++
 .../resourcemanager/scheduler/fair/FSQueue.java |   9 +
 .../scheduler/fair/FairScheduler.java           |  29 +-
 .../scheduler/fair/QueueManager.java            | 155 +++++++--
 .../fair/TestAllocationFileLoaderService.java   | 100 +++---
 .../scheduler/fair/TestQueueManager.java        | 337 +++++++++++++++++++
 7 files changed, 596 insertions(+), 81 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86bc6425/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
index d8d9051..7a40b6a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
@@ -87,7 +87,7 @@ public class AllocationFileLoaderService extends AbstractService {
   private Path allocFile;
   private FileSystem fs;
 
-  private Listener reloadListener;
+  private final Listener reloadListener;
 
   @VisibleForTesting
   long reloadIntervalMs = ALLOC_RELOAD_INTERVAL_MS;
@@ -95,15 +95,16 @@ public class AllocationFileLoaderService extends AbstractService {
   private Thread reloadThread;
   private volatile boolean running = true;
 
-  public AllocationFileLoaderService() {
-    this(SystemClock.getInstance());
+  public AllocationFileLoaderService(Listener reloadListener) {
+    this(reloadListener, SystemClock.getInstance());
   }
 
   private List<Permission> defaultPermissions;
 
-  public AllocationFileLoaderService(Clock clock) {
+  public AllocationFileLoaderService(Listener reloadListener, Clock clock) {
     super(AllocationFileLoaderService.class.getName());
     this.clock = clock;
+    this.reloadListener = reloadListener;
   }
 
   @Override
@@ -114,6 +115,7 @@ public class AllocationFileLoaderService extends AbstractService {
       reloadThread = new Thread(() -> {
         while (running) {
           try {
+            reloadListener.onCheck();
             long time = clock.getTime();
             long lastModified =
                 fs.getFileStatus(allocFile).getModificationTime();
@@ -207,10 +209,6 @@ public class AllocationFileLoaderService extends AbstractService {
     return allocPath;
   }
 
-  public synchronized void setReloadListener(Listener reloadListener) {
-    this.reloadListener = reloadListener;
-  }
-
   /**
    * Updates the allocation list from the allocation config file. This file is
    * expected to be in the XML format specified in the design doc.
@@ -351,5 +349,7 @@ public class AllocationFileLoaderService extends AbstractService {
 
   public interface Listener {
     void onReload(AllocationConfiguration info) throws IOException;
+
+    void onCheck();
   }
 }
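
A hedged sketch of the new wiring (the anonymous listener below is illustrative, not from the patch): the constructor now takes the Listener up front, and onCheck() fires on every pass of the reload thread, whether or not the file changed, which is the hook the scheduler uses for periodic queue cleanup.

      AllocationFileLoaderService loader = new AllocationFileLoaderService(
          new AllocationFileLoaderService.Listener() {
            @Override
            public void onReload(AllocationConfiguration info) throws IOException {
              // apply the freshly parsed allocation configuration
            }

            @Override
            public void onCheck() {
              // runs on every poll; e.g. sweep empty dynamic queues
            }
          });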

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86bc6425/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 49d2166..e7da16f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -21,7 +21,9 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -34,6 +36,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -56,6 +59,8 @@ public class FSLeafQueue extends FSQueue {
   // apps that are runnable
   private final List<FSAppAttempt> runnableApps = new ArrayList<>();
   private final List<FSAppAttempt> nonRunnableApps = new ArrayList<>();
+  // assignedApps keeps track of applications that have no appAttempts
+  private final Set<ApplicationId> assignedApps = new HashSet<>();
   // get a lock with fair distribution for app list updates
   private final ReadWriteLock rwl = new ReentrantReadWriteLock(true);
   private final Lock readLock = rwl.readLock();
@@ -89,6 +94,9 @@ public class FSLeafQueue extends FSQueue {
       } else {
         nonRunnableApps.add(app);
       }
+      // when an appAttempt is created for an application, we'd like to move
+      // it over from assignedApps to either runnableApps or nonRunnableApps
+      assignedApps.remove(app.getApplicationId());
       incUsedResource(app.getResourceUsage());
     } finally {
       writeLock.unlock();
@@ -440,6 +448,15 @@ public class FSLeafQueue extends FSQueue {
     return numPendingApps;
   }
 
+  public int getNumAssignedApps() {
+    readLock.lock();
+    try {
+      return assignedApps.size();
+    } finally {
+      readLock.unlock();
+    }
+  }
+
   /**
    * TODO: Based on how frequently this is called, we might want to club
    * counting pending and active apps in the same method.
@@ -609,4 +626,18 @@ public class FSLeafQueue extends FSQueue {
         ", LastTimeAtMinShare: " + lastTimeAtMinShare +
         "}");
   }
+
+  /**
+   * This method is called when an application is assigned to this queue
+   * for book-keeping purposes (to be able to determine if the queue is empty).
+   * @param applicationId the application's id
+   */
+  public void addAssignedApp(ApplicationId applicationId) {
+    writeLock.lock();
+    try {
+      assignedApps.add(applicationId);
+    } finally {
+      writeLock.unlock();
+    }
+  }
 }
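
Read together with the FairScheduler hunk below, which calls addAssignedApp() as soon as a submission is accepted, this closes a window where a queue holding a just-submitted application (no attempt yet) could be judged empty and deleted. A hedged timeline, with the surrounding scheduler code elided:

      queue.addAssignedApp(appId);  // submission accepted: queue counts as non-empty
      // ... once the first appAttempt is created, the attempt-adding path in the
      // hunk above removes appId from assignedApps, and emptiness is decided by
      // the runnable/non-runnable lists plus getNumAssignedApps() == 0.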

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86bc6425/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
index 4babfd5..6b88a32 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
@@ -83,6 +83,7 @@ public abstract class FSQueue implements Queue, Schedulable {
   private long minSharePreemptionTimeout = Long.MAX_VALUE;
   private float fairSharePreemptionThreshold = 0.5f;
   private boolean preemptable = true;
+  private boolean isDynamic = true;
 
   public FSQueue(String name, FairScheduler scheduler, FSParentQueue parent) {
     this.name = name;
@@ -585,4 +586,12 @@ public abstract class FSQueue implements Queue, Schedulable {
    * @param sb the {code StringBuilder} which holds queue states
    */
   protected abstract void dumpStateInternal(StringBuilder sb);
+
+  public boolean isDynamic() {
+    return isDynamic;
+  }
+
+  public void setDynamic(boolean dynamic) {
+    this.isDynamic = dynamic;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86bc6425/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 1c4bd51..4c84aa9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -99,6 +99,7 @@ import org.apache.hadoop.yarn.util.resource.Resources;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.Comparator;
 import java.util.EnumSet;
 import java.util.HashSet;
@@ -207,7 +208,8 @@ public class FairScheduler extends
   public FairScheduler() {
     super(FairScheduler.class.getName());
     context = new FSContext(this);
-    allocsLoader = new AllocationFileLoaderService();
+    allocsLoader =
+        new AllocationFileLoaderService(new AllocationReloadListener());
     queueMgr = new QueueManager(this);
     maxRunningEnforcer = new MaxRunningAppsEnforcer(this);
   }
@@ -516,6 +518,7 @@ public class FairScheduler extends
           new SchedulerApplication<FSAppAttempt>(queue, user);
       applications.put(applicationId, application);
       queue.getMetrics().submitApp(user);
+      queue.addAssignedApp(applicationId);
 
       LOG.info("Accepted application " + applicationId + " from user: " + user
           + ", in queue: " + queue.getName()
@@ -1435,7 +1438,6 @@ public class FairScheduler extends
     }
 
     allocsLoader.init(conf);
-    allocsLoader.setReloadListener(new AllocationReloadListener());
     // If we fail to load allocations file on initialize, we want to fail
     // immediately.  After a successful load, exceptions on future reloads
     // will just result in leaving things as they are.
@@ -1589,6 +1591,7 @@ public class FairScheduler extends
       // Commit the reload; also create any queue defined in the alloc file
       // if it does not already exist, so it can be displayed on the web UI.
 
+      Set<String> removedStaticQueues = getRemovedStaticQueues(queueInfo);
       writeLock.lock();
       try {
         if (queueInfo == null) {
@@ -1599,6 +1602,7 @@ public class FairScheduler extends
           setQueueAcls(allocConf.getQueueAcls());
           allocConf.getDefaultSchedulingPolicy().initialize(getContext());
           queueMgr.updateAllocationConfiguration(allocConf);
+          queueMgr.setQueuesToDynamic(removedStaticQueues);
           applyChildDefaults();
           maxRunningEnforcer.updateRunnabilityOnReload();
         }
@@ -1606,6 +1610,27 @@ public class FairScheduler extends
         writeLock.unlock();
       }
     }
+
+    private Set<String> getRemovedStaticQueues(
+        AllocationConfiguration queueInfo) {
+      if (queueInfo == null || allocConf == null) {
+        return Collections.emptySet();
+      }
+      Set<String> removedStaticQueues = new HashSet<>();
+      for (Set<String> queues : allocConf.getConfiguredQueues().values()) {
+        removedStaticQueues.addAll(queues);
+      }
+      for (Set<String> queues : queueInfo.getConfiguredQueues().values()) {
+        removedStaticQueues.removeAll(queues);
+      }
+      return removedStaticQueues;
+    }
+
+    @Override
+    public void onCheck() {
+      queueMgr.removeEmptyDynamicQueues();
+      queueMgr.removePendingIncompatibleQueues();
+    }
   }
 
   private void setQueueAcls(
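
The static-to-dynamic flip above is plain set arithmetic; a condensed restatement, where oldQueues and newQueues stand in for the flattened values of getConfiguredQueues() before and after the reload:

      // Queues configured before the reload but absent afterwards become
      // dynamic again, so they are eligible for removal once they drain.
      Set<String> removedStatic = new HashSet<>(oldQueues);
      removedStatic.removeAll(newQueues);
      queueMgr.setQueuesToDynamic(removedStatic);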

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86bc6425/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java
index 8734877..632a842 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java
@@ -22,13 +22,17 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.concurrent.CopyOnWriteArrayList;
 
 import javax.xml.parsers.ParserConfigurationException;
 
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -52,6 +56,36 @@ public class QueueManager {
   public static final Log LOG = LogFactory.getLog(
     QueueManager.class.getName());
 
+  private final class IncompatibleQueueRemovalTask {
+
+    private final String queueToCreate;
+    private final FSQueueType queueType;
+
+    private IncompatibleQueueRemovalTask(String queueToCreate,
+        FSQueueType queueType) {
+      this.queueToCreate = queueToCreate;
+      this.queueType = queueType;
+    }
+
+    private void execute() {
+      Boolean removed =
+          removeEmptyIncompatibleQueues(queueToCreate, queueType).orElse(null);
+      if (Boolean.TRUE.equals(removed)) {
+        FSQueue queue = getQueue(queueToCreate, true, queueType, false);
+        if (queue != null &&
+            // if queueToCreate is present in the allocation config, set it
+            // to static
+            scheduler.allocConf.configuredQueues.values().stream()
+            .anyMatch(s -> s.contains(queueToCreate))) {
+          queue.setDynamic(false);
+        }
+      }
+      if (!Boolean.FALSE.equals(removed)) {
+        incompatibleQueuesPendingRemoval.remove(this);
+      }
+    }
+  }
+
   public static final String ROOT_QUEUE = "root";
   
   private final FairScheduler scheduler;
@@ -59,6 +93,8 @@ public class QueueManager {
   private final Collection<FSLeafQueue> leafQueues = 
       new CopyOnWriteArrayList<FSLeafQueue>();
   private final Map<String, FSQueue> queues = new HashMap<String, FSQueue>();
+  private Set<IncompatibleQueueRemovalTask> incompatibleQueuesPendingRemoval =
+      new HashSet<>();
   private FSParentQueue rootQueue;
 
   public QueueManager(FairScheduler scheduler) {
@@ -75,10 +111,13 @@ public class QueueManager {
     // SchedulingPolicy.DEFAULT_POLICY since the allocation file hasn't been
     // loaded yet.
     rootQueue = new FSParentQueue("root", scheduler, null);
+    rootQueue.setDynamic(false);
     queues.put(rootQueue.getName(), rootQueue);
 
     // Create the default queue
-    getLeafQueue(YarnConfiguration.DEFAULT_QUEUE_NAME, true);
+    FSLeafQueue defaultQueue =
+        getLeafQueue(YarnConfiguration.DEFAULT_QUEUE_NAME, true);
+    defaultQueue.setDynamic(false);
     // Recursively reinitialize to propagate queue properties
     rootQueue.reinit(true);
   }
@@ -121,7 +160,8 @@ public class QueueManager {
    */
   public boolean removeLeafQueue(String name) {
     name = ensureRootPrefix(name);
-    return removeEmptyIncompatibleQueues(name, FSQueueType.PARENT);
+    return !Boolean.FALSE.equals(
+        removeEmptyIncompatibleQueues(name, FSQueueType.PARENT).orElse(null));
   }
 
 
@@ -346,9 +386,13 @@ public class QueueManager {
    * 
    * We will never remove the root queue or the default queue in this way.
    *
-   * @return true if we can create queueToCreate or it already exists.
+   * @return Optional.of(Boolean.TRUE)  if there was an incompatible queue that
+   *                                    has been removed,
+   *         Optional.of(Boolean.FALSE) if there was an incompatible queue that
+   *                                    has not been removed,
+   *         Optional.empty()           if there is no incompatible queue.
    */
-  private boolean removeEmptyIncompatibleQueues(String queueToCreate,
+  private Optional<Boolean> removeEmptyIncompatibleQueues(String queueToCreate,
       FSQueueType queueType) {
     queueToCreate = ensureRootPrefix(queueToCreate);
 
@@ -357,7 +401,7 @@ public class QueueManager {
     if (queueToCreate.equals(ROOT_QUEUE) ||
         queueToCreate.startsWith(
             ROOT_QUEUE + "." + YarnConfiguration.DEFAULT_QUEUE_NAME + ".")) {
-      return false;
+      return Optional.empty();
     }
 
     FSQueue queue = queues.get(queueToCreate);
@@ -365,19 +409,18 @@ public class QueueManager {
     if (queue != null) {
       if (queue instanceof FSLeafQueue) {
         if (queueType == FSQueueType.LEAF) {
-          // if queue is already a leaf then return true
-          return true;
+          return Optional.empty();
         }
         // remove incompatibility since queue is a leaf currently
         // needs to change to a parent.
-        return removeQueueIfEmpty(queue);
+        return Optional.of(removeQueueIfEmpty(queue));
       } else {
         if (queueType == FSQueueType.PARENT) {
-          return true;
+          return Optional.empty();
         }
         // If it's an existing parent queue and needs to change to leaf, 
         // remove it if it's empty.
-        return removeQueueIfEmpty(queue);
+        return Optional.of(removeQueueIfEmpty(queue));
       }
     }
 
@@ -389,11 +432,51 @@ public class QueueManager {
       String prefixString = queueToCreate.substring(0, sepIndex);
       FSQueue prefixQueue = queues.get(prefixString);
       if (prefixQueue != null && prefixQueue instanceof FSLeafQueue) {
-        return removeQueueIfEmpty(prefixQueue);
+        return Optional.of(removeQueueIfEmpty(prefixQueue));
       }
       sepIndex = queueToCreate.lastIndexOf('.', sepIndex-1);
     }
-    return true;
+    return Optional.empty();
+  }
+
+  /**
+   * Removes all empty dynamic queues (including empty dynamic parent queues).
+   */
+  public void removeEmptyDynamicQueues() {
+    synchronized (queues) {
+      Set<FSParentQueue> parentQueuesToCheck = new HashSet<>();
+      for (FSQueue queue : getQueues()) {
+        if (queue.isDynamic() && queue.getChildQueues().isEmpty()) {
+          boolean removed = removeQueueIfEmpty(queue);
+          if (removed && queue.getParent().isDynamic()) {
+            parentQueuesToCheck.add(queue.getParent());
+          }
+        }
+      }
+      while (!parentQueuesToCheck.isEmpty()) {
+        FSParentQueue queue = parentQueuesToCheck.iterator().next();
+        if (queue.getChildQueues().isEmpty()) {
+          removeQueue(queue);
+          if (queue.getParent().isDynamic()) {
+            parentQueuesToCheck.add(queue.getParent());
+          }
+        }
+        parentQueuesToCheck.remove(queue);
+      }
+    }
+  }
+
+  /**
+   * Re-checks incompatible queues that could not be removed earlier because
+   * they were not empty, and removes those that have since become empty.
+   */
+  public void removePendingIncompatibleQueues() {
+    synchronized (queues) {
+      for (IncompatibleQueueRemovalTask removalTask :
+          ImmutableSet.copyOf(incompatibleQueuesPendingRemoval)) {
+        removalTask.execute();
+      }
+    }
   }
 
   /**
@@ -435,7 +518,8 @@ public class QueueManager {
     if (queue instanceof FSLeafQueue) {
       FSLeafQueue leafQueue = (FSLeafQueue)queue;
       return queue.getNumRunnableApps() == 0 &&
-          leafQueue.getNumNonRunnableApps() == 0;
+          leafQueue.getNumNonRunnableApps() == 0 &&
+          leafQueue.getNumAssignedApps() == 0;
     } else {
       for (FSQueue child : queue.getChildQueues()) {
         if (!isEmpty(child)) {
@@ -501,21 +585,13 @@ public class QueueManager {
         LOG.error("Setting scheduling policies for existing queues failed!");
       }
 
-      for (String name : queueConf.getConfiguredQueues().get(
-              FSQueueType.LEAF)) {
-        if (removeEmptyIncompatibleQueues(name, FSQueueType.LEAF)) {
-          getLeafQueue(name, true, false);
-        }
-      }
+      ensureQueueExistsAndIsCompatibleAndIsStatic(queueConf, FSQueueType.LEAF);
+
       // At this point all leaves and 'parents with
       // at least one child' would have been created.
       // Now create parents with no configured leaf.
-      for (String name : queueConf.getConfiguredQueues().get(
-          FSQueueType.PARENT)) {
-        if (removeEmptyIncompatibleQueues(name, FSQueueType.PARENT)) {
-          getParentQueue(name, true, false);
-        }
-      }
+      ensureQueueExistsAndIsCompatibleAndIsStatic(queueConf,
+          FSQueueType.PARENT);
     }
 
     // Initialize all queues recursively
@@ -524,6 +600,35 @@ public class QueueManager {
     rootQueue.recomputeSteadyShares();
   }
 
+  private void ensureQueueExistsAndIsCompatibleAndIsStatic(
+      AllocationConfiguration queueConf, FSQueueType queueType) {
+    for (String name : queueConf.getConfiguredQueues().get(queueType)) {
+      Boolean removed =
+          removeEmptyIncompatibleQueues(name, queueType).orElse(null);
+      if (Boolean.FALSE.equals(removed)) {
+        incompatibleQueuesPendingRemoval.add(
+            new IncompatibleQueueRemovalTask(name, queueType));
+      } else {
+        FSQueue queue = getQueue(name, true, queueType, false);
+        if (queue != null) {
+          queue.setDynamic(false);
+        }
+      }
+    }
+  }
+
+  /**
+   * Marks the given queues as dynamic.
+   * @param queueNames The names of the queues to be set to dynamic.
+   */
+  protected void setQueuesToDynamic(Set<String> queueNames) {
+    synchronized (queues) {
+      for (String queueName : queueNames) {
+        queues.get(queueName).setDynamic(true);
+      }
+    }
+  }
+
   /**
    * Check whether queue name is valid,
    * return true if it is valid, otherwise return false.
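
The Optional<Boolean> tri-state introduced above is what lets a blocked removal be retried rather than silently dropped; a condensed restatement of the caller logic in ensureQueueExistsAndIsCompatibleAndIsStatic (no new behaviour, just the same branches flattened):

      Optional<Boolean> removed = removeEmptyIncompatibleQueues(name, queueType);
      if (Boolean.FALSE.equals(removed.orElse(null))) {
        // an incompatible queue exists but is not yet empty: park a retry task
        incompatibleQueuesPendingRemoval.add(
            new IncompatibleQueueRemovalTask(name, queueType));
      } else {
        // empty() = nothing incompatible; TRUE = it was removed. Either way the
        // queue can be created now and marked static.
        FSQueue queue = getQueue(name, true, queueType, false);
        if (queue != null) {
          queue.setDynamic(false);
        }
      }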

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86bc6425/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
index 8591d67..30b8a91 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.AllocationFileLoaderService.Listener;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueuePlacementRule.NestedUserQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.allocationfile.AllocationFileWriter;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy;
@@ -32,6 +33,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.Fai
 import org.apache.hadoop.yarn.util.ControlledClock;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.Test;
+import org.mockito.Mockito;
+
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.FileWriter;
@@ -79,7 +82,8 @@ public class TestAllocationFileLoaderService {
     fs.copyFromLocalFile(new Path(fschedURL.toURI()), new Path(fsAllocPath));
     conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, fsAllocPath);
 
-    AllocationFileLoaderService allocLoader = new AllocationFileLoaderService();
+    AllocationFileLoaderService allocLoader =
+        new AllocationFileLoaderService(Mockito.mock(Listener.class));
     Path allocationFile = allocLoader.getAllocationFile(conf);
     assertEquals(fsAllocPath, allocationFile.toString());
     assertTrue(fs.exists(allocationFile));
@@ -92,7 +96,8 @@ public class TestAllocationFileLoaderService {
       throws UnsupportedFileSystemException {
     Configuration conf = new YarnConfiguration();
     conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, "badfs:///badfile");
-    AllocationFileLoaderService allocLoader = new AllocationFileLoaderService();
+    AllocationFileLoaderService allocLoader =
+        new AllocationFileLoaderService(Mockito.mock(Listener.class));
 
     allocLoader.getAllocationFile(conf);
   }
@@ -105,7 +110,7 @@ public class TestAllocationFileLoaderService {
       conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,
           TEST_FAIRSCHED_XML);
       AllocationFileLoaderService allocLoader =
-          new AllocationFileLoaderService();
+          new AllocationFileLoaderService(Mockito.mock(Listener.class));
       Path allocationFile = allocLoader.getAllocationFile(conf);
       assertEquals(TEST_FAIRSCHED_XML, allocationFile.getName());
       assertTrue(fs.exists(allocationFile));
@@ -134,12 +139,11 @@ public class TestAllocationFileLoaderService {
     Configuration conf = new Configuration();
     conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
 
-    AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(
-        clock);
+    ReloadListener confHolder = new ReloadListener();
+    AllocationFileLoaderService allocLoader =
+        new AllocationFileLoaderService(confHolder, clock);
     allocLoader.reloadIntervalMs = 5;
     allocLoader.init(conf);
-    ReloadListener confHolder = new ReloadListener();
-    allocLoader.setReloadListener(confHolder);
     allocLoader.reloadAllocations();
     AllocationConfiguration allocConf = confHolder.allocConf;
 
@@ -205,7 +209,9 @@ public class TestAllocationFileLoaderService {
   public void testAllocationFileParsing() throws Exception {
     Configuration conf = new Configuration();
     conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
-    AllocationFileLoaderService allocLoader = new AllocationFileLoaderService();
+    ReloadListener confHolder = new ReloadListener();
+    AllocationFileLoaderService allocLoader =
+        new AllocationFileLoaderService(confHolder);
 
     AllocationFileWriter
             .create()
@@ -278,8 +284,6 @@ public class TestAllocationFileLoaderService {
             .writeToFile(ALLOC_FILE);
 
     allocLoader.init(conf);
-    ReloadListener confHolder = new ReloadListener();
-    allocLoader.setReloadListener(confHolder);
     allocLoader.reloadAllocations();
     AllocationConfiguration queueConf = confHolder.allocConf;
 
@@ -427,7 +431,9 @@ public class TestAllocationFileLoaderService {
   public void testBackwardsCompatibleAllocationFileParsing() throws Exception {
     Configuration conf = new Configuration();
     conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
-    AllocationFileLoaderService allocLoader = new AllocationFileLoaderService();
+    ReloadListener confHolder = new ReloadListener();
+    AllocationFileLoaderService allocLoader =
+        new AllocationFileLoaderService(confHolder);
 
     PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
     out.println("<?xml version=\"1.0\"?>");
@@ -473,8 +479,6 @@ public class TestAllocationFileLoaderService {
     out.close();
 
     allocLoader.init(conf);
-    ReloadListener confHolder = new ReloadListener();
-    allocLoader.setReloadListener(confHolder);
     allocLoader.reloadAllocations();
     AllocationConfiguration queueConf = confHolder.allocConf;
 
@@ -550,10 +554,10 @@ public class TestAllocationFileLoaderService {
     out.println("</allocations>");
     out.close();
 
-    AllocationFileLoaderService allocLoader = new AllocationFileLoaderService();
-    allocLoader.init(conf);
     ReloadListener confHolder = new ReloadListener();
-    allocLoader.setReloadListener(confHolder);
+    AllocationFileLoaderService allocLoader =
+        new AllocationFileLoaderService(confHolder);
+    allocLoader.init(conf);
     allocLoader.reloadAllocations();
     AllocationConfiguration allocConf = confHolder.allocConf;
 
@@ -584,10 +588,10 @@ public class TestAllocationFileLoaderService {
     out.println("</allocations>");
     out.close();
 
-    AllocationFileLoaderService allocLoader = new AllocationFileLoaderService();
-    allocLoader.init(conf);
     ReloadListener confHolder = new ReloadListener();
-    allocLoader.setReloadListener(confHolder);
+    AllocationFileLoaderService allocLoader =
+        new AllocationFileLoaderService(confHolder);
+    allocLoader.init(conf);
     allocLoader.reloadAllocations();
   }
 
@@ -608,10 +612,10 @@ public class TestAllocationFileLoaderService {
     out.println("</allocations>");
     out.close();
 
-    AllocationFileLoaderService allocLoader = new AllocationFileLoaderService();
-    allocLoader.init(conf);
     ReloadListener confHolder = new ReloadListener();
-    allocLoader.setReloadListener(confHolder);
+    AllocationFileLoaderService allocLoader =
+        new AllocationFileLoaderService(confHolder);
+    allocLoader.init(conf);
     allocLoader.reloadAllocations();
   }
 
@@ -632,10 +636,10 @@ public class TestAllocationFileLoaderService {
     out.println("</allocations>");
     out.close();
 
-    AllocationFileLoaderService allocLoader = new AllocationFileLoaderService();
-    allocLoader.init(conf);
     ReloadListener confHolder = new ReloadListener();
-    allocLoader.setReloadListener(confHolder);
+    AllocationFileLoaderService allocLoader =
+        new AllocationFileLoaderService(confHolder);
+    allocLoader.init(conf);
     allocLoader.reloadAllocations();
   }
 
@@ -654,10 +658,10 @@ public class TestAllocationFileLoaderService {
     out.println("</allocations>");
     out.close();
 
-    AllocationFileLoaderService allocLoader = new AllocationFileLoaderService();
-    allocLoader.init(conf);
     ReloadListener confHolder = new ReloadListener();
-    allocLoader.setReloadListener(confHolder);
+    AllocationFileLoaderService allocLoader =
+        new AllocationFileLoaderService(confHolder);
+    allocLoader.init(conf);
     try {
       allocLoader.reloadAllocations();
     } catch (AllocationConfigurationException ex) {
@@ -685,10 +689,10 @@ public class TestAllocationFileLoaderService {
     out.println("</allocations>");
     out.close();
 
-    AllocationFileLoaderService allocLoader = new AllocationFileLoaderService();
-    allocLoader.init(conf);
     ReloadListener confHolder = new ReloadListener();
-    allocLoader.setReloadListener(confHolder);
+    AllocationFileLoaderService allocLoader =
+        new AllocationFileLoaderService(confHolder);
+    allocLoader.init(conf);
     try {
       allocLoader.reloadAllocations();
     } catch (AllocationConfigurationException ex) {
@@ -714,10 +718,10 @@ public class TestAllocationFileLoaderService {
     out.println("</allocations>");
     out.close();
 
-    AllocationFileLoaderService allocLoader = new AllocationFileLoaderService();
-    allocLoader.init(conf);
     ReloadListener confHolder = new ReloadListener();
-    allocLoader.setReloadListener(confHolder);
+    AllocationFileLoaderService allocLoader =
+        new AllocationFileLoaderService(confHolder);
+    allocLoader.init(conf);
     allocLoader.reloadAllocations();
     AllocationConfiguration queueConf = confHolder.allocConf;
     // Check whether queue 'parent' and 'child' are loaded successfully
@@ -745,10 +749,10 @@ public class TestAllocationFileLoaderService {
     out.println("</allocations>");
     out.close();
 
-    AllocationFileLoaderService allocLoader = new AllocationFileLoaderService();
-    allocLoader.init(conf);
     ReloadListener confHolder = new ReloadListener();
-    allocLoader.setReloadListener(confHolder);
+    AllocationFileLoaderService allocLoader =
+        new AllocationFileLoaderService(confHolder);
+    allocLoader.init(conf);
     allocLoader.reloadAllocations();
   }
 
@@ -767,10 +771,10 @@ public class TestAllocationFileLoaderService {
     out.println("</allocations>");
     out.close();
 
-    AllocationFileLoaderService allocLoader = new AllocationFileLoaderService();
-    allocLoader.init(conf);
     ReloadListener confHolder = new ReloadListener();
-    allocLoader.setReloadListener(confHolder);
+    AllocationFileLoaderService allocLoader =
+        new AllocationFileLoaderService(confHolder);
+    allocLoader.init(conf);
     allocLoader.reloadAllocations();
   }
 
@@ -793,10 +797,10 @@ public class TestAllocationFileLoaderService {
     out.println("</allocations>");
     out.close();
 
-    AllocationFileLoaderService allocLoader = new AllocationFileLoaderService();
-    allocLoader.init(conf);
     ReloadListener confHolder = new ReloadListener();
-    allocLoader.setReloadListener(confHolder);
+    AllocationFileLoaderService allocLoader =
+        new AllocationFileLoaderService(confHolder);
+    allocLoader.init(conf);
     allocLoader.reloadAllocations();
 
     AllocationConfiguration allocConf = confHolder.allocConf;
@@ -853,10 +857,10 @@ public class TestAllocationFileLoaderService {
     out.println("</allocations>");
     out.close();
 
-    AllocationFileLoaderService allocLoader = new AllocationFileLoaderService();
-    allocLoader.init(conf);
     ReloadListener confHolder = new ReloadListener();
-    allocLoader.setReloadListener(confHolder);
+    AllocationFileLoaderService allocLoader =
+        new AllocationFileLoaderService(confHolder);
+    allocLoader.init(conf);
     allocLoader.reloadAllocations();
   }
 
@@ -867,5 +871,9 @@ public class TestAllocationFileLoaderService {
     public void onReload(AllocationConfiguration info) {
       allocConf = info;
     }
+
+    @Override
+    public void onCheck() {
+    }
   }
 }
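
Taken together, the hunks above make one mechanical change: the reload listener moves from a post-construction setReloadListener() call into the AllocationFileLoaderService constructor, so the service never exists without a reload target. A minimal sketch of the new wiring, assuming the resourcemanager test classpath (the anonymous listener stands in for the ReloadListener stub above):

    // Sketch only: the listener is now a constructor argument.
    AllocationFileLoaderService.Listener listener =
        new AllocationFileLoaderService.Listener() {
          @Override
          public void onReload(AllocationConfiguration info) {
            // consume the freshly parsed allocation configuration
          }

          @Override
          public void onCheck() {
            // part of the widened Listener contract; a no-op here,
            // as in the ReloadListener stub above
          }
        };
    AllocationFileLoaderService allocLoader =
        new AllocationFileLoaderService(listener);
    // a clock can still be injected via the (listener, clock) overload
    allocLoader.init(conf);
    allocLoader.reloadAllocations();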

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86bc6425/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestQueueManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestQueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestQueueManager.java
index eb2d402..3674ffb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestQueueManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestQueueManager.java
@@ -20,15 +20,22 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 import static org.junit.Assert.*;
 import static org.mockito.Mockito.*;
 
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;
 
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
 import org.apache.hadoop.yarn.util.SystemClock;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 
+import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
 
 public class TestQueueManager {
@@ -305,4 +312,334 @@ public class TestQueueManager {
     assertEquals("createQueue() returned wrong queue",
         "root.queue1.queue2", q2.getName());
   }
+
+  @Test
+  public void testRemovalOfDynamicLeafQueue() {
+    AllocationConfiguration allocConf = scheduler.getAllocationConfiguration();
+
+    queueManager.updateAllocationConfiguration(allocConf);
+
+    FSQueue q1 = queueManager.getLeafQueue("root.test.childB.dynamic1", true);
+
+    assertNotNull("Queue root.test.childB.dynamic1 was not created", q1);
+    assertEquals("createQueue() returned wrong queue",
+        "root.test.childB.dynamic1", q1.getName());
+    assertTrue("root.test.childB.dynamic1 is not a dynamic queue",
+        q1.isDynamic());
+
+    // an application is submitted to root.test.childB.dynamic1
+    notEmptyQueues.add(q1);
+
+    // root.test.childB.dynamic1 is not empty and should not be removed
+    queueManager.removePendingIncompatibleQueues();
+    queueManager.removeEmptyDynamicQueues();
+    q1 = queueManager.getLeafQueue("root.test.childB.dynamic1", false);
+    assertNotNull("Queue root.test.childB.dynamic1 was deleted", q1);
+
+    // the application finishes, the next removeEmptyDynamicQueues() should
+    // clean root.test.childB.dynamic1 up, but keep its static parent
+    notEmptyQueues.remove(q1);
+
+    queueManager.removePendingIncompatibleQueues();
+    queueManager.removeEmptyDynamicQueues();
+    q1 = queueManager.getLeafQueue("root.test.childB.dynamic1", false);
+    assertNull("Queue root.test.childB.dynamic1 was not deleted", q1);
+    assertNotNull("The static parent of root.test.childB.dynamic1 was deleted",
+        queueManager.getParentQueue("root.test.childB", false));
+  }
+
+  @Test
+  public void testRemovalOfDynamicParentQueue() {
+    AllocationConfiguration allocConf = scheduler.getAllocationConfiguration();
+
+    queueManager.updateAllocationConfiguration(allocConf);
+
+    FSQueue q1 = queueManager.getLeafQueue("root.parent1.dynamic1", true);
+
+    assertNotNull("Queue root.parent1.dynamic1 was not created", q1);
+    assertEquals("createQueue() returned wrong queue",
+        "root.parent1.dynamic1", q1.getName());
+    assertTrue("root.parent1.dynamic1 is not a dynamic queue", q1.isDynamic());
+
+    FSQueue p1 = queueManager.getParentQueue("root.parent1", false);
+    assertNotNull("Queue root.parent1 was not created", p1);
+    assertTrue("root.parent1 is not a dynamic queue", p1.isDynamic());
+
+    queueManager.removePendingIncompatibleQueues();
+    queueManager.removeEmptyDynamicQueues();
+    q1 = queueManager.getLeafQueue("root.parent1.dynamic1", false);
+    p1 = queueManager.getParentQueue("root.parent1", false);
+
+    assertNull("Queue root.parent1.dynamic1 was not deleted", q1);
+    assertNull("Queue root.parent1 was not deleted", p1);
+  }
+
+  @Test
+  public void testNonEmptyDynamicQueueBecomingStaticQueue() {
+    AllocationConfiguration allocConf = scheduler.getAllocationConfiguration();
+
+    queueManager.updateAllocationConfiguration(allocConf);
+
+    FSLeafQueue q1 = queueManager.getLeafQueue("root.leaf1", true);
+
+    assertNotNull("Queue root.leaf1 was not created", q1);
+    assertEquals("createQueue() returned wrong queue",
+        "root.leaf1", q1.getName());
+    assertTrue("root.leaf1 is not a dynamic queue", q1.isDynamic());
+
+    // pretend that we submitted an app to the queue
+    notEmptyQueues.add(q1);
+
+    // non-empty queues should not be deleted
+    queueManager.removePendingIncompatibleQueues();
+    queueManager.removeEmptyDynamicQueues();
+    q1 = queueManager.getLeafQueue("root.leaf1", false);
+    assertNotNull("Queue root.leaf1 was deleted", q1);
+
+    // next we add leaf1 under root in the allocation config
+    allocConf.configuredQueues.get(FSQueueType.LEAF).add("root.leaf1");
+    queueManager.updateAllocationConfiguration(allocConf);
+
+    // updateAllocationConfiguration() should make root.leaf1 a static queue
+    assertFalse("root.leaf1 is not a static queue", q1.isDynamic());
+
+    // the application has finished and the queue is empty, but leaf1 is a
+    // static queue at this point, so it is not affected by
+    // removeEmptyDynamicQueues()
+    notEmptyQueues.clear();
+    queueManager.removePendingIncompatibleQueues();
+    queueManager.removeEmptyDynamicQueues();
+    q1 = queueManager.getLeafQueue("root.leaf1", false);
+    assertNotNull("Queue root.leaf1 was deleted", q1);
+    assertFalse("root.leaf1 is not a static queue", q1.isDynamic());
+  }
+
+  @Test
+  public void testNonEmptyStaticQueueBecomingDynamicQueue() {
+    AllocationConfiguration allocConf = scheduler.getAllocationConfiguration();
+    queueManager.updateAllocationConfiguration(allocConf);
+
+    FSLeafQueue q1 = queueManager.getLeafQueue("root.test.childA", false);
+
+    assertNotNull("Queue root.test.childA does not exist", q1);
+    assertEquals("createQueue() returned wrong queue",
+        "root.test.childA", q1.getName());
+    assertFalse("root.test.childA is not a static queue", q1.isDynamic());
+
+    // we submitted an app to the queue
+    notEmptyQueues.add(q1);
+
+    // the next removeEmptyDynamicQueues() call should not modify
+    // root.test.childA
+    queueManager.removePendingIncompatibleQueues();
+    queueManager.removeEmptyDynamicQueues();
+    q1 = queueManager.getLeafQueue("root.test.childA", false);
+    assertNotNull("Queue root.test.childA was deleted", q1);
+    assertFalse("root.test.childA is not a dynamic queue", q1.isDynamic());
+
+    // next we remove all queues from the allocation config,
+    // this causes all queues to change to dynamic
+    for (Set<String> queueNames : allocConf.configuredQueues.values()) {
+      queueManager.setQueuesToDynamic(queueNames);
+      queueNames.clear();
+    }
+    queueManager.updateAllocationConfiguration(allocConf);
+
+    q1 = queueManager.getLeafQueue("root.test.childA", false);
+    assertNotNull("Queue root.test.childA was deleted", q1);
+    assertTrue("root.test.childA is not a dynamic queue", q1.isDynamic());
+
+    // the application finished - the queue no longer has a runnable app
+    // the next removeEmptyDynamicQueues() call should remove the queues
+    notEmptyQueues.remove(q1);
+
+    queueManager.removePendingIncompatibleQueues();
+    queueManager.removeEmptyDynamicQueues();
+
+    q1 = queueManager.getLeafQueue("root.test.childA", false);
+    assertNull("Queue root.test.childA was not deleted", q1);
+
+    FSParentQueue p1 = queueManager.getParentQueue("root.test", false);
+    assertNull("Queue root.test was not deleted", p1);
+  }
+
+  @Test
+  public void testRemovalOfChildlessParentQueue() {
+    AllocationConfiguration allocConf = scheduler.getAllocationConfiguration();
+    queueManager.updateAllocationConfiguration(allocConf);
+
+    FSParentQueue q1 = queueManager.getParentQueue("root.test.childB", false);
+
+    assertNotNull("Queue root.test.childB was not created", q1);
+    assertEquals("createQueue() returned wrong queue",
+        "root.test.childB", q1.getName());
+    assertFalse("root.test.childB is a dynamic queue", q1.isDynamic());
+
+    // static queues should not be deleted
+    queueManager.removePendingIncompatibleQueues();
+    queueManager.removeEmptyDynamicQueues();
+    q1 = queueManager.getParentQueue("root.test.childB", false);
+    assertNotNull("Queue root.test.childB was deleted", q1);
+
+    // next we remove root.test.childB from the allocation config
+    allocConf.configuredQueues.get(FSQueueType.PARENT)
+        .remove("root.test.childB");
+    queueManager.updateAllocationConfiguration(allocConf);
+    queueManager.setQueuesToDynamic(Collections.singleton("root.test.childB"));
+
+    // the next removeEmptyDynamicQueues() call should clean
+    // root.test.childB up
+    queueManager.removePendingIncompatibleQueues();
+    queueManager.removeEmptyDynamicQueues();
+    q1 = queueManager.getParentQueue("root.leaf1", false);
+    assertNull("Queue root.leaf1 was not deleted", q1);
+  }
+
+  @Test
+  public void testQueueTypeChange() {
+    AllocationConfiguration allocConf = scheduler.getAllocationConfiguration();
+    queueManager.updateAllocationConfiguration(allocConf);
+
+    FSQueue q1 = queueManager.getLeafQueue("root.parent1.leaf1", true);
+    assertNotNull("Queue root.parent1.leaf1 was not created", q1);
+    assertEquals("createQueue() returned wrong queue",
+        "root.parent1.leaf1", q1.getName());
+    assertTrue("root.parent1.leaf1 is not a dynamic queue", q1.isDynamic());
+
+    FSQueue p1 = queueManager.getParentQueue("root.parent1", false);
+    assertNotNull("Queue root.parent1 was not created", p1);
+    assertTrue("root.parent1 is not a dynamic queue", p1.isDynamic());
+
+    // adding root.parent1.leaf1 and root.parent1 to the allocation config
+    allocConf.configuredQueues.get(FSQueueType.PARENT).add("root.parent1");
+    allocConf.configuredQueues.get(FSQueueType.LEAF)
+        .add("root.parent1.leaf1");
+
+    // updateAllocationConfiguration() should change both queues over to static
+    queueManager.updateAllocationConfiguration(allocConf);
+    q1 = queueManager.getLeafQueue("root.parent1.leaf1", false);
+    assertFalse("root.parent1.leaf1 is not a static queue", q1.isDynamic());
+    p1 = queueManager.getParentQueue("root.parent1", false);
+    assertFalse("root.parent1 is not a static queue", p1.isDynamic());
+
+    // removing root.parent1.leaf1 and root.parent1 from the allocation
+    // config
+    allocConf.configuredQueues.get(FSQueueType.PARENT).remove("root.parent1");
+    allocConf.configuredQueues.get(FSQueueType.LEAF)
+        .remove("root.parent1.leaf1");
+
+    // after removing them from the config, switch both queues
+    // back to dynamic
+    queueManager.updateAllocationConfiguration(allocConf);
+    queueManager.setQueuesToDynamic(
+        ImmutableSet.of("root.parent1", "root.parent1.leaf1"));
+    q1 = queueManager.getLeafQueue("root.parent1.leaf1", false);
+    assertTrue("root.parent1.leaf1 is not a dynamic queue", q1.isDynamic());
+    p1 = queueManager.getParentQueue("root.parent1", false);
+    assertTrue("root.parent1 is not a dynamic queue", p1.isDynamic());
+  }
+
+  @Test
+  public void testApplicationAssignmentPreventsRemovalOfDynamicQueue()
+      throws Exception {
+    AllocationConfiguration allocConf = scheduler.getAllocationConfiguration();
+    queueManager = new QueueManager(scheduler);
+    queueManager.initialize(conf);
+    queueManager.updateAllocationConfiguration(allocConf);
+
+    FSLeafQueue q = queueManager.getLeafQueue("root.leaf1", true);
+    assertNotNull("root.leaf1 does not exist", q);
+    assertTrue("root.leaf1 is not empty", queueManager.isEmpty(q));
+
+    // assigning an application (without an appAttempt so far) to the queue
+    // removeEmptyDynamicQueues() should not remove the queue
+    ApplicationId applicationId = ApplicationId.newInstance(1L, 0);
+    q.addAssignedApp(applicationId);
+    q = queueManager.getLeafQueue("root.leaf1", false);
+    assertFalse("root.leaf1 is empty", queueManager.isEmpty(q));
+
+    queueManager.removePendingIncompatibleQueues();
+    queueManager.removeEmptyDynamicQueues();
+    q = queueManager.getLeafQueue("root.leaf1", false);
+    assertNotNull("root.leaf1 has been removed", q);
+    assertFalse("root.leaf1 is empty", queueManager.isEmpty(q));
+
+    ApplicationAttemptId applicationAttemptId =
+        ApplicationAttemptId.newInstance(applicationId, 0);
+    ActiveUsersManager activeUsersManager =
+        Mockito.mock(ActiveUsersManager.class);
+    RMContext rmContext = Mockito.mock(RMContext.class);
+
+    // the appAttempt is created
+    // removeEmptyDynamicQueues() should not remove the queue
+    FSAppAttempt appAttempt = new FSAppAttempt(scheduler, applicationAttemptId,
+        "a_user", q, activeUsersManager, rmContext);
+    q.addApp(appAttempt, true);
+    queueManager.removeEmptyDynamicQueues();
+    q = queueManager.getLeafQueue("root.leaf1", false);
+    assertNotNull("root.leaf1 has been removed", q);
+    assertFalse("root.leaf1 is empty", queueManager.isEmpty(q));
+
+    // the appAttempt finished, the queue should be empty
+    q.removeApp(appAttempt);
+    q = queueManager.getLeafQueue("root.leaf1", false);
+    assertTrue("root.leaf1 is not empty", queueManager.isEmpty(q));
+
+    // removeEmptyDynamicQueues() should remove the queue
+    queueManager.removePendingIncompatibleQueues();
+    queueManager.removeEmptyDynamicQueues();
+    q = queueManager.getLeafQueue("root.leaf1", false);
+    assertNull("root.leaf1 has not been removed", q);
+  }
+
+  @Test
+  public void testRemovalOfIncompatibleNonEmptyQueue()
+      throws Exception {
+    AllocationConfiguration allocConf = scheduler.getAllocationConfiguration();
+    allocConf.configuredQueues.get(FSQueueType.LEAF).add("root.a");
+    scheduler.allocConf = allocConf;
+    queueManager.updateAllocationConfiguration(allocConf);
+
+    FSLeafQueue q = queueManager.getLeafQueue("root.a", true);
+    assertNotNull("root.a does not exist", q);
+    assertTrue("root.a is not empty", queueManager.isEmpty(q));
+
+    // we start to run an application on root.a
+    notEmptyQueues.add(q);
+    q = queueManager.getLeafQueue("root.a", false);
+    assertNotNull("root.a does not exist", q);
+    assertFalse("root.a is empty", queueManager.isEmpty(q));
+
+    // root.a should not be removed by removeEmptyDynamicQueues or by
+    // removePendingIncompatibleQueues
+    queueManager.removePendingIncompatibleQueues();
+    queueManager.removeEmptyDynamicQueues();
+    q = queueManager.getLeafQueue("root.a", false);
+    assertNotNull("root.a does not exist", q);
+
+    // let's introduce queue incompatibility
+    allocConf.configuredQueues.get(FSQueueType.LEAF).remove("root.a");
+    allocConf.configuredQueues.get(FSQueueType.PARENT).add("root.a");
+    allocConf.configuredQueues.get(FSQueueType.LEAF).add("root.a.b");
+    queueManager.updateAllocationConfiguration(allocConf);
+
+    // since root.a has running applications, it should still be a leaf queue
+    q = queueManager.getLeafQueue("root.a", false);
+    assertNotNull("root.a has been removed", q);
+    assertFalse("root.a is empty", queueManager.isEmpty(q));
+
+    // removePendingIncompatibleQueues should still keep root.a as a leaf queue
+    queueManager.removePendingIncompatibleQueues();
+    q = queueManager.getLeafQueue("root.a", false);
+    assertNotNull("root.a has been removed", q);
+    assertFalse("root.a is empty", queueManager.isEmpty(q));
+
+    // when the application finishes, root.a should be a parent queue
+    notEmptyQueues.clear();
+    queueManager.removePendingIncompatibleQueues();
+    queueManager.removeEmptyDynamicQueues();
+    FSParentQueue p = queueManager.getParentQueue("root.a", false);
+    assertNotNull("root.a does not exist", p);
+  }
+
 }
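
The new tests all walk one lifecycle: a queue created on demand via getLeafQueue(name, true) starts out dynamic, updateAllocationConfiguration() makes queues named in the allocation file static, setQueuesToDynamic() flips them back, and removeEmptyDynamicQueues() reaps only queues that are both dynamic and empty. A condensed sketch of that flow, restricted to the QueueManager calls exercised above (the queue name is illustrative; queueManager and allocConf are the test fixtures):

    // Sketch of the dynamic/static queue lifecycle covered above.
    FSQueue leaf = queueManager.getLeafQueue("root.adhoc.leaf", true);
    // created on demand => dynamic: leaf.isDynamic() is true

    allocConf.configuredQueues.get(FSQueueType.LEAF).add("root.adhoc.leaf");
    queueManager.updateAllocationConfiguration(allocConf);
    // now named in the allocation config => static

    allocConf.configuredQueues.get(FSQueueType.LEAF).remove("root.adhoc.leaf");
    queueManager.updateAllocationConfiguration(allocConf);
    queueManager.setQueuesToDynamic(Collections.singleton("root.adhoc.leaf"));

    // dynamic again and empty => removed on the next sweep
    queueManager.removePendingIncompatibleQueues();
    queueManager.removeEmptyDynamicQueues();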




[25/50] [abbrv] hadoop git commit: HDFS-13618. Fix TestDataNodeFaultInjector test failures on Windows. Contributed by Xiao Liang.

Posted by bo...@apache.org.
HDFS-13618. Fix TestDataNodeFaultInjector test failures on Windows. Contributed by Xiao Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e0d4b1c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e0d4b1c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e0d4b1c

Branch: refs/heads/YARN-7402
Commit: 1e0d4b1c283fb98a95c60a1723f594befb3c18a9
Parents: 02322de
Author: Inigo Goiri <in...@apache.org>
Authored: Fri May 25 09:10:32 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Fri May 25 09:14:28 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java     | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e0d4b1c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
index 1507844..4afacd9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
@@ -118,7 +118,7 @@ public class TestDataNodeFaultInjector {
       final MetricsDataNodeFaultInjector mdnFaultInjector) throws Exception {
 
     final Path baseDir = new Path(
-        PathUtils.getTestDir(getClass()).getAbsolutePath(),
+        PathUtils.getTestDir(getClass()).getPath(),
         GenericTestUtils.getMethodName());
     final DataNodeFaultInjector oldDnInjector = DataNodeFaultInjector.get();
     DataNodeFaultInjector.set(mdnFaultInjector);
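
The one-line change swaps getAbsolutePath() for getPath() when seeding the test Path. On Windows the absolute form carries a drive prefix such as C:\, and once that string ends up inside the mini cluster's namespace the colon fails DFS path validation; the relative form returned by getPath() has no drive prefix, so the same test passes on Windows. A small illustration of the difference (hypothetical directory; a Windows JVM is assumed):

    import java.io.File;

    // Illustration only: what the two accessors return on Windows.
    File testDir = new File("target\\test\\data");
    String rel = testDir.getPath();         // target\test\data
    String abs = testDir.getAbsolutePath(); // C:\...\target\test\data
    // the drive colon in `abs` is what later trips DFS path validation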




[14/50] [abbrv] hadoop git commit: HDDS-45. Removal of old OzoneRestClient. Contributed by Lokesh Jain.

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/774daa8d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java
index 5b67657..a9b8175 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java
@@ -23,23 +23,31 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
 import static org.apache.hadoop.ozone.OzoneConsts.CHUNK_SIZE;
 import static org.junit.Assert.*;
 
+import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang.RandomStringUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.web.client.OzoneRestClient;
+import org.apache.hadoop.hdds.client.OzoneQuota;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.ozone.client.VolumeArgs;
+import org.apache.hadoop.ozone.client.io.OzoneInputStream;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.rpc.RpcClient;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.web.client.OzoneBucket;
-import org.apache.hadoop.ozone.web.client.OzoneVolume;
-import org.apache.hadoop.ozone.web.request.OzoneQuota;
 import org.junit.rules.Timeout;
 
+import java.io.IOException;
+import java.io.InputStream;
+
 /**
  * End-to-end testing of Ozone REST operations.
  */
@@ -52,7 +60,9 @@ public class TestOzoneRestWithMiniCluster {
 
   private static MiniOzoneCluster cluster;
   private static OzoneConfiguration conf;
-  private static OzoneRestClient ozoneClient;
+  private static ClientProtocol client;
+  private static ReplicationFactor replicationFactor = ReplicationFactor.ONE;
+  private static ReplicationType replicationType = ReplicationType.STAND_ALONE;
 
   @Rule
   public ExpectedException exception = ExpectedException.none();
@@ -62,180 +72,125 @@ public class TestOzoneRestWithMiniCluster {
     conf = new OzoneConfiguration();
     cluster = MiniOzoneCluster.newBuilder(conf).build();
     cluster.waitForClusterToBeReady();
-    int port = cluster.getHddsDatanodes().get(0)
-        .getDatanodeDetails().getOzoneRestPort();
-    ozoneClient = new OzoneRestClient(
-        String.format("http://localhost:%d", port));
-    ozoneClient.setUserAuth(OzoneConsts.OZONE_SIMPLE_HDFS_USER);
+    client = new RpcClient(conf);
   }
 
   @AfterClass
-  public static void shutdown() throws InterruptedException {
+  public static void shutdown() throws InterruptedException, IOException {
     if (cluster != null) {
       cluster.shutdown();
     }
-    IOUtils.cleanupWithLogger(null, ozoneClient);
+    client.close();
   }
 
   @Test
   public void testCreateAndGetVolume() throws Exception {
-    String volumeName = nextId("volume");
-    OzoneVolume volume = ozoneClient.createVolume(volumeName, "bilbo", "100TB");
-    assertNotNull(volume);
-    assertEquals(volumeName, volume.getVolumeName());
-    assertEquals(ozoneClient.getUserAuth(), volume.getCreatedby());
-    assertEquals("bilbo", volume.getOwnerName());
-    assertNotNull(volume.getQuota());
-    assertEquals(OzoneQuota.parseQuota("100TB").sizeInBytes(),
-        volume.getQuota().sizeInBytes());
-    volume = ozoneClient.getVolume(volumeName);
-    assertNotNull(volume);
-    assertEquals(volumeName, volume.getVolumeName());
-    assertEquals(ozoneClient.getUserAuth(), volume.getCreatedby());
-    assertEquals("bilbo", volume.getOwnerName());
-    assertNotNull(volume.getQuota());
-    assertEquals(OzoneQuota.parseQuota("100TB").sizeInBytes(),
-        volume.getQuota().sizeInBytes());
+    createAndGetVolume();
   }
 
   @Test
   public void testCreateAndGetBucket() throws Exception {
-    String volumeName = nextId("volume");
-    String bucketName = nextId("bucket");
-    OzoneVolume volume = ozoneClient.createVolume(volumeName, "bilbo", "100TB");
-    assertNotNull(volume);
-    assertEquals(volumeName, volume.getVolumeName());
-    assertEquals(ozoneClient.getUserAuth(), volume.getCreatedby());
-    assertEquals("bilbo", volume.getOwnerName());
-    assertNotNull(volume.getQuota());
-    assertEquals(OzoneQuota.parseQuota("100TB").sizeInBytes(),
-        volume.getQuota().sizeInBytes());
-    OzoneBucket bucket = volume.createBucket(bucketName);
-    assertNotNull(bucket);
-    assertEquals(bucketName, bucket.getBucketName());
-    bucket = volume.getBucket(bucketName);
-    assertNotNull(bucket);
-    assertEquals(bucketName, bucket.getBucketName());
+    OzoneVolume volume = createAndGetVolume();
+    createAndGetBucket(volume);
   }
 
   @Test
   public void testPutAndGetKey() throws Exception {
-    String volumeName = nextId("volume");
-    String bucketName = nextId("bucket");
     String keyName = nextId("key");
     String keyData = nextId("data");
-    OzoneVolume volume = ozoneClient.createVolume(volumeName, "bilbo", "100TB");
-    assertNotNull(volume);
-    assertEquals(volumeName, volume.getVolumeName());
-    assertEquals(ozoneClient.getUserAuth(), volume.getCreatedby());
-    assertEquals("bilbo", volume.getOwnerName());
-    assertNotNull(volume.getQuota());
-    assertEquals(OzoneQuota.parseQuota("100TB").sizeInBytes(),
-        volume.getQuota().sizeInBytes());
-    OzoneBucket bucket = volume.createBucket(bucketName);
-    assertNotNull(bucket);
-    assertEquals(bucketName, bucket.getBucketName());
-    bucket.putKey(keyName, keyData);
-    assertEquals(keyData, bucket.getKey(keyName));
+    OzoneVolume volume = createAndGetVolume();
+    OzoneBucket bucket = createAndGetBucket(volume);
+    putKey(bucket, keyName, keyData);
+  }
+
+  private void putKey(OzoneBucket bucket, String keyName, String keyData)
+      throws IOException {
+    try (
+        OzoneOutputStream ozoneOutputStream = bucket
+            .createKey(keyName, 0, replicationType, replicationFactor);
+        InputStream inputStream = IOUtils.toInputStream(keyData, UTF_8)) {
+      IOUtils.copy(inputStream, ozoneOutputStream);
+    }
+    try (
+        InputStream inputStream = IOUtils.toInputStream(keyData, UTF_8);
+        OzoneInputStream ozoneInputStream = bucket.readKey(keyName)) {
+      assertTrue(IOUtils.contentEquals(ozoneInputStream, inputStream));
+    }
   }
 
   @Test
   public void testPutAndGetEmptyKey() throws Exception {
-    String volumeName = nextId("volume");
-    String bucketName = nextId("bucket");
     String keyName = nextId("key");
     String keyData = "";
-    OzoneVolume volume = ozoneClient.createVolume(volumeName, "bilbo", "100TB");
-    assertNotNull(volume);
-    assertEquals(volumeName, volume.getVolumeName());
-    assertEquals(ozoneClient.getUserAuth(), volume.getCreatedby());
-    assertEquals("bilbo", volume.getOwnerName());
-    assertNotNull(volume.getQuota());
-    assertEquals(OzoneQuota.parseQuota("100TB").sizeInBytes(),
-        volume.getQuota().sizeInBytes());
-    OzoneBucket bucket = volume.createBucket(bucketName);
-    assertNotNull(bucket);
-    assertEquals(bucketName, bucket.getBucketName());
-    bucket.putKey(keyName, keyData);
-    assertEquals(keyData, bucket.getKey(keyName));
+    OzoneVolume volume = createAndGetVolume();
+    OzoneBucket bucket = createAndGetBucket(volume);
+    putKey(bucket, keyName, keyData);
   }
 
   @Test
   public void testPutAndGetMultiChunkKey() throws Exception {
-    String volumeName = nextId("volume");
-    String bucketName = nextId("bucket");
     String keyName = nextId("key");
     int keyDataLen = 3 * CHUNK_SIZE;
     String keyData = buildKeyData(keyDataLen);
-    OzoneVolume volume = ozoneClient.createVolume(volumeName, "bilbo", "100TB");
-    assertNotNull(volume);
-    assertEquals(volumeName, volume.getVolumeName());
-    assertEquals(ozoneClient.getUserAuth(), volume.getCreatedby());
-    assertEquals("bilbo", volume.getOwnerName());
-    assertNotNull(volume.getQuota());
-    assertEquals(OzoneQuota.parseQuota("100TB").sizeInBytes(),
-        volume.getQuota().sizeInBytes());
-    OzoneBucket bucket = volume.createBucket(bucketName);
-    assertNotNull(bucket);
-    assertEquals(bucketName, bucket.getBucketName());
-    bucket.putKey(keyName, keyData);
-    assertEquals(keyData, bucket.getKey(keyName));
+    OzoneVolume volume = createAndGetVolume();
+    OzoneBucket bucket = createAndGetBucket(volume);
+    putKey(bucket, keyName, keyData);
   }
 
   @Test
   public void testPutAndGetMultiChunkKeyLastChunkPartial() throws Exception {
-    String volumeName = nextId("volume");
-    String bucketName = nextId("bucket");
     String keyName = nextId("key");
     int keyDataLen = (int)(2.5 * CHUNK_SIZE);
     String keyData = buildKeyData(keyDataLen);
-    OzoneVolume volume = ozoneClient.createVolume(volumeName, "bilbo", "100TB");
-    assertNotNull(volume);
-    assertEquals(volumeName, volume.getVolumeName());
-    assertEquals(ozoneClient.getUserAuth(), volume.getCreatedby());
-    assertEquals("bilbo", volume.getOwnerName());
-    assertNotNull(volume.getQuota());
-    assertEquals(OzoneQuota.parseQuota("100TB").sizeInBytes(),
-        volume.getQuota().sizeInBytes());
-    OzoneBucket bucket = volume.createBucket(bucketName);
-    assertNotNull(bucket);
-    assertEquals(bucketName, bucket.getBucketName());
-    bucket.putKey(keyName, keyData);
-    assertEquals(keyData, bucket.getKey(keyName));
+    OzoneVolume volume = createAndGetVolume();
+    OzoneBucket bucket = createAndGetBucket(volume);
+    putKey(bucket, keyName, keyData);
   }
 
   @Test
   public void testReplaceKey() throws Exception {
-    String volumeName = nextId("volume");
-    String bucketName = nextId("bucket");
     String keyName = nextId("key");
     int keyDataLen = (int)(2.5 * CHUNK_SIZE);
     String keyData = buildKeyData(keyDataLen);
-    OzoneVolume volume = ozoneClient.createVolume(volumeName, "bilbo", "100TB");
-    assertNotNull(volume);
-    assertEquals(volumeName, volume.getVolumeName());
-    assertEquals(ozoneClient.getUserAuth(), volume.getCreatedby());
-    assertEquals("bilbo", volume.getOwnerName());
-    assertNotNull(volume.getQuota());
-    assertEquals(OzoneQuota.parseQuota("100TB").sizeInBytes(),
-        volume.getQuota().sizeInBytes());
-    OzoneBucket bucket = volume.createBucket(bucketName);
-    assertNotNull(bucket);
-    assertEquals(bucketName, bucket.getBucketName());
-    bucket.putKey(keyName, keyData);
-    assertEquals(keyData, bucket.getKey(keyName));
+    OzoneVolume volume = createAndGetVolume();
+    OzoneBucket bucket = createAndGetBucket(volume);
+    putKey(bucket, keyName, keyData);
 
     // Replace key with data consisting of fewer chunks.
     keyDataLen = (int)(1.5 * CHUNK_SIZE);
     keyData = buildKeyData(keyDataLen);
-    bucket.putKey(keyName, keyData);
-    assertEquals(keyData, bucket.getKey(keyName));
+    putKey(bucket, keyName, keyData);
 
     // Replace key with data consisting of more chunks.
     keyDataLen = (int)(3.5 * CHUNK_SIZE);
     keyData = buildKeyData(keyDataLen);
-    bucket.putKey(keyName, keyData);
-    assertEquals(keyData, bucket.getKey(keyName));
+    putKey(bucket, keyName, keyData);
+  }
+
+  private OzoneVolume createAndGetVolume() throws IOException {
+    String volumeName = nextId("volume");
+    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
+        .setOwner("bilbo")
+        .setQuota("100TB")
+        .setAdmin("hdfs")
+        .build();
+    client.createVolume(volumeName, volumeArgs);
+    OzoneVolume volume = client.getVolumeDetails(volumeName);
+    assertNotNull(volume);
+    assertEquals(volumeName, volume.getName());
+    assertEquals("bilbo", volume.getOwner());
+    assertNotNull(volume.getQuota());
+    assertEquals(OzoneQuota.parseQuota("100TB").sizeInBytes(),
+        volume.getQuota());
+    return volume;
+  }
+
+  private OzoneBucket createAndGetBucket(OzoneVolume vol) throws IOException {
+    String bucketName = nextId("bucket");
+    vol.createBucket(bucketName);
+    OzoneBucket bucket = vol.getBucket(bucketName);
+    assertNotNull(bucket);
+    assertEquals(bucketName, bucket.getName());
+    return bucket;
   }
 
   /**

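The rewritten tests map every old REST-client call onto the ClientProtocol/RpcClient API: volumes and buckets come from builders, and key data moves through streams instead of the old putKey/getKey string pair. A condensed round trip using only calls that appear in the diff above (the names are illustrative; a running MiniOzoneCluster and its OzoneConfiguration `conf` are assumed):

    // Sketch of the new client round trip, condensed from the helpers above.
    ClientProtocol client = new RpcClient(conf);
    client.createVolume("vol1", VolumeArgs.newBuilder()
        .setOwner("bilbo").setQuota("100TB").setAdmin("hdfs").build());
    OzoneVolume volume = client.getVolumeDetails("vol1");
    volume.createBucket("bucket1");
    OzoneBucket bucket = volume.getBucket("bucket1");
    byte[] data = "hello".getBytes(StandardCharsets.UTF_8);
    try (OzoneOutputStream out = bucket.createKey(
        "key1", 0, ReplicationType.STAND_ALONE, ReplicationFactor.ONE)) {
      out.write(data);                  // write the value as a stream
    }
    try (OzoneInputStream in = bucket.readKey("key1")) {
      // read the value back, e.g. compare with IOUtils.contentEquals(...)
    }
    client.close();
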
http://git-wip-us.apache.org/repos/asf/hadoop/blob/774daa8d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java
index 64e5f71..684f4d3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java
@@ -17,12 +17,17 @@
  */
 package org.apache.hadoop.ozone.web.client;
 
-import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.client.OzoneClientException;
+import org.apache.hadoop.ozone.client.BucketArgs;
+import org.apache.hadoop.ozone.client.VolumeArgs;
+import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
+import org.apache.hadoop.ozone.client.rpc.RpcClient;
 import org.apache.hadoop.ozone.web.request.OzoneQuota;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -36,7 +41,10 @@ import org.junit.rules.Timeout;
 import java.io.IOException;
 import java.net.URISyntaxException;
 import java.text.ParseException;
+import java.util.Arrays;
+import java.util.Iterator;
 import java.util.List;
+import java.util.stream.Collectors;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
@@ -54,7 +62,7 @@ public class TestBuckets {
   public Timeout testTimeout = new Timeout(300000);
 
   private static MiniOzoneCluster cluster = null;
-  private static OzoneRestClient ozoneRestClient = null;
+  private static ClientProtocol client = null;
 
   /**
    * Create a MiniDFSCluster for testing.
@@ -76,11 +84,10 @@ public class TestBuckets {
         OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
 
     conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    final int port = cluster.getHddsDatanodes().get(0).getDatanodeDetails()
-        .getOzoneRestPort();
-    ozoneRestClient = new OzoneRestClient(
-        String.format("http://localhost:%d", port));
+    cluster = MiniOzoneCluster.newBuilder(conf)
+        .setNumDatanodes(3)
+        .build();
+    client = new RpcClient(conf);
   }
 
   /**
@@ -95,110 +102,151 @@ public class TestBuckets {
 
   @Test
   public void testCreateBucket() throws Exception {
-    runTestCreateBucket(ozoneRestClient);
+    runTestCreateBucket(client);
   }
 
-  static void runTestCreateBucket(OzoneRestClient client)
+  static void runTestCreateBucket(ClientProtocol client)
       throws OzoneException, IOException, ParseException {
     String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    client.setUserAuth("hdfs");
-    OzoneVolume vol = client.createVolume(volumeName, "bilbo", "100TB");
+    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
+        .setOwner("bilbo")
+        .setQuota("100TB")
+        .setAdmin("hdfs")
+        .build();
+    client.createVolume(volumeName, volumeArgs);
+    OzoneVolume vol = client.getVolumeDetails(volumeName);
     String[] acls = {"user:frodo:rw", "user:samwise:rw"};
 
     // create 10 buckets under same volume
     for (int x = 0; x < 10; x++) {
       long currentTime = Time.now();
       String bucketName = OzoneUtils.getRequestID().toLowerCase();
-      OzoneBucket bucket =
-          vol.createBucket(bucketName, acls, StorageType.DEFAULT);
-      assertEquals(bucket.getBucketName(), bucketName);
+
+      List<OzoneAcl> aclList =
+          Arrays.stream(acls).map(acl -> OzoneAcl.parseAcl(acl))
+              .collect(Collectors.toList());
+      BucketArgs bucketArgs = BucketArgs.newBuilder()
+          .setAcls(aclList)
+          .build();
+      vol.createBucket(bucketName, bucketArgs);
+      OzoneBucket bucket = vol.getBucket(bucketName);
+      assertEquals(bucket.getName(), bucketName);
 
       // verify the bucket creation time
-      assertTrue((OzoneUtils.formatDate(bucket.getCreatedOn())
-          / 1000) >= (currentTime / 1000));
+      assertTrue((bucket.getCreationTime() / 1000) >= (currentTime / 1000));
     }
     client.close();
 
-    assertEquals(vol.getVolumeName(), volumeName);
-    assertEquals(vol.getCreatedby(), "hdfs");
-    assertEquals(vol.getOwnerName(), "bilbo");
-    assertEquals(vol.getQuota().getUnit(), OzoneQuota.Units.TB);
-    assertEquals(vol.getQuota().getSize(), 100);
+    assertEquals(vol.getName(), volumeName);
+    assertEquals(vol.getAdmin(), "hdfs");
+    assertEquals(vol.getOwner(), "bilbo");
+    assertEquals(vol.getQuota(), OzoneQuota.parseQuota("100TB").sizeInBytes());
 
     // Test create a bucket with invalid bucket name,
     // not use Rule here because the test method is static.
     try {
       String invalidBucketName = "#" + OzoneUtils.getRequestID().toLowerCase();
-      vol.createBucket(invalidBucketName, acls, StorageType.DEFAULT);
+      vol.createBucket(invalidBucketName);
       fail("Except the bucket creation to be failed because the"
           + " bucket name starts with an invalid char #");
     } catch (Exception e) {
-      assertTrue(e instanceof OzoneClientException);
-      assertTrue(e.getMessage().contains("Bucket or Volume name"
-          + " has an unsupported character : #"));
+      assertTrue(e.getMessage()
+          .contains("Bucket or Volume name has an unsupported character : #"));
     }
   }
 
   @Test
   public void testAddBucketAcls() throws Exception {
-    runTestAddBucketAcls(ozoneRestClient);
+    runTestAddBucketAcls(client);
   }
 
-  static void runTestAddBucketAcls(OzoneRestClient client)
+  static void runTestAddBucketAcls(ClientProtocol client)
       throws OzoneException, IOException, ParseException {
     String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    client.setUserAuth("hdfs");
-    OzoneVolume vol = client.createVolume(volumeName, "bilbo", "100TB");
+    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
+        .setOwner("bilbo")
+        .setQuota("100TB")
+        .setAdmin("hdfs")
+        .build();
+    client.createVolume(volumeName, volumeArgs);
+    OzoneVolume vol = client.getVolumeDetails(volumeName);
     String[] acls = {"user:frodo:rw", "user:samwise:rw"};
     String bucketName = OzoneUtils.getRequestID().toLowerCase();
     vol.createBucket(bucketName);
-    vol.addAcls(bucketName, acls);
+    OzoneBucket bucket = vol.getBucket(bucketName);
+    List<OzoneAcl> aclList =
+        Arrays.stream(acls).map(acl -> OzoneAcl.parseAcl(acl))
+            .collect(Collectors.toList());
+    int numAcls = bucket.getAcls().size();
+    bucket.addAcls(aclList);
     OzoneBucket updatedBucket = vol.getBucket(bucketName);
-    assertEquals(updatedBucket.getAcls().size(), 2);
+    assertEquals(updatedBucket.getAcls().size(), 2 + numAcls);
     // verify that the creation time is not lost by the update operation
     assertTrue(
-        (OzoneUtils.formatDate(updatedBucket.getCreatedOn()) / 1000) >= 0);
+        (updatedBucket.getCreationTime() / 1000) >= 0);
     client.close();
   }
 
   @Test
   public void testRemoveBucketAcls() throws Exception {
-    runTestRemoveBucketAcls(ozoneRestClient);
+    runTestRemoveBucketAcls(client);
   }
 
-  static void runTestRemoveBucketAcls(OzoneRestClient client)
+  static void runTestRemoveBucketAcls(ClientProtocol client)
       throws OzoneException, IOException, ParseException {
     String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    client.setUserAuth("hdfs");
-    OzoneVolume vol = client.createVolume(volumeName, "bilbo", "100TB");
+    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
+        .setOwner("bilbo")
+        .setQuota("100TB")
+        .setAdmin("hdfs")
+        .build();
+    client.createVolume(volumeName, volumeArgs);
+    OzoneVolume vol = client.getVolumeDetails(volumeName);
     String[] acls = {"user:frodo:rw", "user:samwise:rw"};
     String bucketName = OzoneUtils.getRequestID().toLowerCase();
-    OzoneBucket bucket = vol.createBucket(bucketName, acls);
-    assertEquals(bucket.getAcls().size(), 2);
-    vol.removeAcls(bucketName, acls);
+    List<OzoneAcl> aclList =
+        Arrays.stream(acls).map(acl -> OzoneAcl.parseAcl(acl))
+            .collect(Collectors.toList());
+    vol.createBucket(bucketName);
+    OzoneBucket bucket = vol.getBucket(bucketName);
+    int numAcls = bucket.getAcls().size();
+    bucket.addAcls(aclList);
+    assertEquals(bucket.getAcls().size(), 2 + numAcls);
+    bucket.removeAcls(aclList);
     OzoneBucket updatedBucket = vol.getBucket(bucketName);
 
     // We removed the acls we added earlier
-    assertEquals(updatedBucket.getAcls().size(), 0);
+    assertEquals(updatedBucket.getAcls().size(), numAcls);
     // verify that the creation time is not lost by the update operation
     assertTrue(
-        (OzoneUtils.formatDate(updatedBucket.getCreatedOn()) / 1000) >= 0);
+        (updatedBucket.getCreationTime() / 1000) >= 0);
     client.close();
   }
 
   @Test
   public void testDeleteBucket() throws OzoneException, IOException {
-    runTestDeleteBucket(ozoneRestClient);
+    runTestDeleteBucket(client);
   }
 
-  static void runTestDeleteBucket(OzoneRestClient client)
+  static void runTestDeleteBucket(ClientProtocol client)
       throws OzoneException, IOException {
     String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    client.setUserAuth("hdfs");
-    OzoneVolume vol = client.createVolume(volumeName, "bilbo", "100TB");
+    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
+        .setOwner("bilbo")
+        .setQuota("100TB")
+        .setAdmin("hdfs")
+        .build();
+    client.createVolume(volumeName, volumeArgs);
+    OzoneVolume vol = client.getVolumeDetails(volumeName);
     String[] acls = {"user:frodo:rw", "user:samwise:rw"};
     String bucketName = OzoneUtils.getRequestID().toLowerCase();
-    vol.createBucket(bucketName, acls);
+    List<OzoneAcl> aclList =
+        Arrays.stream(acls).map(acl -> OzoneAcl.parseAcl(acl))
+            .collect(Collectors.toList());
+    BucketArgs bucketArgs = BucketArgs.newBuilder()
+        .setAcls(aclList)
+        .build();
+    vol.createBucket(bucketName, bucketArgs);
     vol.deleteBucket(bucketName);
     try {
       OzoneBucket updatedBucket = vol.getBucket(bucketName);
@@ -212,38 +260,57 @@ public class TestBuckets {
 
   @Test
   public void testListBucket() throws Exception {
-    runTestListBucket(ozoneRestClient);
+    runTestListBucket(client);
   }
 
-  static void runTestListBucket(OzoneRestClient client)
+  static void runTestListBucket(ClientProtocol client)
       throws OzoneException, IOException, ParseException {
     String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    client.setUserAuth("hdfs");
-    OzoneVolume vol = client.createVolume(volumeName, "bilbo", "100TB");
+    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
+        .setOwner("bilbo")
+        .setQuota("100TB")
+        .setAdmin("hdfs")
+        .build();
+    client.createVolume(volumeName, volumeArgs);
+    OzoneVolume vol = client.getVolumeDetails(volumeName);
     String[] acls = {"user:frodo:rw", "user:samwise:rw"};
+    List<OzoneAcl> aclList =
+        Arrays.stream(acls).map(acl -> OzoneAcl.parseAcl(acl))
+            .collect(Collectors.toList());
 
     long currentTime = Time.now();
     for (int x = 0; x < 10; x++) {
       String bucketName = "listbucket-test-" + x;
-      vol.createBucket(bucketName, acls);
+      BucketArgs bucketArgs = BucketArgs.newBuilder()
+          .setAcls(aclList)
+          .build();
+      vol.createBucket(bucketName, bucketArgs);
     }
-    List<OzoneBucket> bucketList = vol.listBuckets("100", null, null);
-    assertEquals(bucketList.size(), 10);
+    Iterator<OzoneBucket> bucketIterator = vol.listBuckets(null);
+    int count = 0;
 
-    for (OzoneBucket bucket : bucketList) {
-      assertTrue((OzoneUtils.formatDate(bucket.getCreatedOn())
+    while (bucketIterator.hasNext()) {
+      assertTrue((bucketIterator.next().getCreationTime()
           / 1000) >= (currentTime / 1000));
+      count++;
     }
+    assertEquals(count, 10);
 
-    bucketList = vol.listBuckets("3", null, null);
-    assertEquals(bucketList.size(), 3);
-
-    bucketList = vol.listBuckets("100", "listbucket-test-4", null);
-    assertEquals(bucketList.size(), 5);
+    bucketIterator = vol.listBuckets(null, "listbucket-test-4");
+    assertEquals(getSize(bucketIterator), 5);
 
-    bucketList = vol.listBuckets("100", null, "listbucket-test-3");
-    assertEquals(bucketList.size(), 1);
+    bucketIterator = vol.listBuckets(null, "listbucket-test-3");
+    assertEquals(getSize(bucketIterator), 6);
 
     client.close();
   }
+
+  private static int getSize(Iterator<OzoneBucket> bucketIterator) {
+    int count = 0;
+    while (bucketIterator.hasNext()) {
+      count++;
+      bucketIterator.next();
+    }
+    return count;
+  }
 }
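
The listBuckets() conversion is the one behavioural subtlety in this file: the REST client took a max-count plus prefix/previous-key strings and returned a List, while the new API returns an Iterator whose second argument is the bucket to resume after, exclusive. That is why resuming after listbucket-test-4 yields 5 of the 10 buckets and resuming after listbucket-test-3 yields 6. A sketch of the counting idiom the test adopts (mirrors getSize() above; vol is the test volume):

    // Count the buckets that sort after the resume key, as getSize() does.
    Iterator<OzoneBucket> it = vol.listBuckets(null, "listbucket-test-4");
    int count = 0;
    while (it.hasNext()) {
      it.next();
      count++;
    }
    // with buckets listbucket-test-0 .. listbucket-test-9, count == 5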

http://git-wip-us.apache.org/repos/asf/hadoop/blob/774daa8d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBucketsRatis.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBucketsRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBucketsRatis.java
index b913a86..9f80184 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBucketsRatis.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBucketsRatis.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.web.client;
 
 import org.apache.hadoop.ozone.RatisTestHelper;
+import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -35,12 +36,12 @@ public class TestBucketsRatis {
   public Timeout testTimeout = new Timeout(300000);
 
   private static RatisTestHelper.RatisTestSuite suite;
-  private static OzoneRestClient ozoneRestClient;
+  private static ClientProtocol client;
 
   @BeforeClass
   public static void init() throws Exception {
     suite = new RatisTestHelper.RatisTestSuite(TestBucketsRatis.class);
-    ozoneRestClient = suite.newOzoneRestClient();
+    client = suite.newOzoneClient();
   }
 
   @AfterClass
@@ -52,25 +53,25 @@ public class TestBucketsRatis {
 
   @Test
   public void testCreateBucket() throws Exception {
-    TestBuckets.runTestCreateBucket(ozoneRestClient);
+    TestBuckets.runTestCreateBucket(client);
   }
 
   @Test
   public void testAddBucketAcls() throws Exception {
-    TestBuckets.runTestAddBucketAcls(ozoneRestClient);
+    TestBuckets.runTestAddBucketAcls(client);
   }
 
   @Test
   public void testRemoveBucketAcls() throws Exception {
-    TestBuckets.runTestRemoveBucketAcls(ozoneRestClient);
+    TestBuckets.runTestRemoveBucketAcls(client);
   }
 
   @Test
   public void testDeleteBucket() throws OzoneException, IOException {
-    TestBuckets.runTestDeleteBucket(ozoneRestClient);
+    TestBuckets.runTestDeleteBucket(client);
   }
   @Test
   public void testListBucket() throws Exception {
-    TestBuckets.runTestListBucket(ozoneRestClient);
+    TestBuckets.runTestListBucket(client);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/774daa8d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
index 57d4287..f8df7dc 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
@@ -20,16 +20,29 @@ package org.apache.hadoop.ozone.web.client;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.commons.collections.IteratorUtils;
 import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.commons.lang.math.RandomUtils;
 import org.apache.commons.lang3.tuple.ImmutablePair;
 import org.apache.commons.lang3.tuple.Pair;
-import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.ozone.client.BucketArgs;
+import org.apache.hadoop.ozone.client.VolumeArgs;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneKey;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneInputStream;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
+import org.apache.hadoop.ozone.client.rpc.RpcClient;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.helpers.KeyData;
@@ -59,15 +72,18 @@ import org.junit.rules.Timeout;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
+import java.io.InputStream;
 import java.io.IOException;
 import java.net.URISyntaxException;
 import java.nio.file.Path;
 import java.nio.file.Paths;
+import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.Random;
 import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
@@ -84,10 +100,13 @@ public class TestKeys {
   @Rule
   public Timeout testTimeout = new Timeout(300000);
 
+  private static OzoneConfiguration conf;
   private static MiniOzoneCluster ozoneCluster = null;
   private static String path;
-  private static OzoneRestClient ozoneRestClient = null;
+  private static ClientProtocol client = null;
   private static long currentTime;
+  private static ReplicationFactor replicationFactor = ReplicationFactor.ONE;
+  private static ReplicationType replicationType = ReplicationType.STAND_ALONE;
 
   /**
    * Create a MiniDFSCluster for testing.
@@ -96,7 +115,7 @@ public class TestKeys {
    */
   @BeforeClass
   public static void init() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
+    conf = new OzoneConfiguration();
 
     // Set short block deleting service interval to speed up deletions.
     conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
@@ -105,12 +124,9 @@ public class TestKeys {
     path = GenericTestUtils.getTempPath(TestKeys.class.getSimpleName());
     Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
 
-    ozoneCluster = MiniOzoneCluster.newBuilder(conf).build();
+    ozoneCluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
     ozoneCluster.waitForClusterToBeReady();
-    final int port = ozoneCluster.getHddsDatanodes().get(0)
-        .getDatanodeDetails().getOzoneRestPort();
-    ozoneRestClient = new OzoneRestClient(
-        String.format("http://localhost:%d", port));
+    client = new RpcClient(conf);
     currentTime = Time.now();
   }
 
@@ -166,7 +182,7 @@ public class TestKeys {
   }
 
   static class PutHelper {
-    private final OzoneRestClient client;
+    private final ClientProtocol client;
     private final String dir;
     private final String keyName;
 
@@ -174,11 +190,11 @@ public class TestKeys {
     private OzoneBucket bucket;
     private File file;
 
-    PutHelper(OzoneRestClient client, String dir) {
+    PutHelper(ClientProtocol client, String dir) {
       this(client, dir, OzoneUtils.getRequestID().toLowerCase());
     }
 
-    PutHelper(OzoneRestClient client, String dir, String key) {
+    PutHelper(ClientProtocol client, String dir, String key) {
       this.client = client;
       this.dir = dir;
       this.keyName = key;
@@ -202,111 +218,139 @@ public class TestKeys {
      * @return Returns the name of the new key that was created.
      * @throws OzoneException
      */
-    private KsmKeyArgs putKey() throws Exception {
+    private String putKey() throws Exception {
       String volumeName = OzoneUtils.getRequestID().toLowerCase();
-      client.setUserAuth("hdfs");
 
-      vol = client.createVolume(volumeName, "bilbo", "100TB");
+      VolumeArgs volumeArgs = VolumeArgs.newBuilder()
+          .setOwner("bilbo")
+          .setQuota("100TB")
+          .setAdmin("hdfs")
+          .build();
+      client.createVolume(volumeName, volumeArgs);
+      vol = client.getVolumeDetails(volumeName);
       String[] acls = {"user:frodo:rw", "user:samwise:rw"};
 
       String bucketName = OzoneUtils.getRequestID().toLowerCase();
-      bucket = vol.createBucket(bucketName, acls, StorageType.DEFAULT);
+      List<OzoneAcl> aclList =
+          Arrays.stream(acls).map(acl -> OzoneAcl.parseAcl(acl))
+              .collect(Collectors.toList());
+      BucketArgs bucketArgs = BucketArgs.newBuilder()
+          .setAcls(aclList)
+          .build();
+      vol.createBucket(bucketName, bucketArgs);
+      bucket = vol.getBucket(bucketName);
 
       String fileName = OzoneUtils.getRequestID().toLowerCase();
 
       file = createRandomDataFile(dir, fileName, 1024);
 
-      bucket.putKey(keyName, file);
-      return new KsmKeyArgs.Builder()
-          .setKeyName(keyName)
-          .setVolumeName(volumeName)
-          .setBucketName(bucketName)
-          .setDataSize(1024)
-          .build();
+      try (
+          OzoneOutputStream ozoneOutputStream = bucket
+              .createKey(keyName, 0, replicationType, replicationFactor);
+          InputStream fileInputStream = new FileInputStream(file)) {
+        IOUtils.copy(fileInputStream, ozoneOutputStream);
+      }
+      return keyName;
     }
   }
 
   @Test
   public void testPutKey() throws Exception {
     // Test non-delimited keys
-    runTestPutKey(new PutHelper(ozoneRestClient, path));
+    runTestPutKey(new PutHelper(client, path));
     // Test key delimited by a random delimiter
     String delimiter = RandomStringUtils.randomAscii(1);
-    runTestPutKey(new PutHelper(ozoneRestClient, path,
+    runTestPutKey(new PutHelper(client, path,
         getMultiPartKey(delimiter)));
   }
 
   static void runTestPutKey(PutHelper helper) throws Exception {
-    final OzoneRestClient client = helper.client;
+    final ClientProtocol client = helper.client;
     helper.putKey();
     assertNotNull(helper.getBucket());
     assertNotNull(helper.getFile());
-    List<OzoneKey> keyList = helper.getBucket().listKeys("100", null, null);
+    List<OzoneKey> keyList = client
+        .listKeys(helper.getVol().getName(), helper.getBucket().getName(), null,
+            null, 10);
     Assert.assertEquals(1, keyList.size());
 
     // test list key using a more efficient call
     String newkeyName = OzoneUtils.getRequestID().toLowerCase();
-    client.putKey(helper.getVol().getVolumeName(),
-        helper.getBucket().getBucketName(), newkeyName, helper.getFile());
-    keyList = helper.getBucket().listKeys("100", null, null);
+    OzoneOutputStream ozoneOutputStream = client
+        .createKey(helper.getVol().getName(), helper.getBucket().getName(),
+            newkeyName, 0, replicationType, replicationFactor);
+    ozoneOutputStream.close();
+    keyList = client
+        .listKeys(helper.getVol().getName(), helper.getBucket().getName(), null,
+            null, 10);
     Assert.assertEquals(2, keyList.size());
 
     // test new put key with invalid volume/bucket name
     try {
-      client.putKey("invalid-volume",
-          helper.getBucket().getBucketName(), newkeyName, helper.getFile());
+      ozoneOutputStream = client
+          .createKey("invalid-volume", helper.getBucket().getName(), newkeyName,
+              0, replicationType, replicationFactor);
+      ozoneOutputStream.close();
       fail("Put key should have thrown"
           + " when using invalid volume name.");
-    } catch (OzoneException e) {
+    } catch (IOException e) {
       GenericTestUtils.assertExceptionContains(
           Status.VOLUME_NOT_FOUND.toString(), e);
     }
 
     try {
-      client.putKey(helper.getVol().getVolumeName(), "invalid-bucket",
-          newkeyName, helper.getFile());
+      ozoneOutputStream = client
+          .createKey(helper.getVol().getName(), "invalid-bucket", newkeyName, 0,
+              replicationType, replicationFactor);
+      ozoneOutputStream.close();
       fail("Put key should have thrown "
           + "when using invalid bucket name.");
-    } catch (OzoneException e) {
+    } catch (IOException e) {
       GenericTestUtils.assertExceptionContains(
           Status.BUCKET_NOT_FOUND.toString(), e);
     }
   }
 
-  private static void restartDatanode(
-      MiniOzoneCluster cluster, int datanodeIdx, OzoneRestClient client)
+  private static void restartDatanode(MiniOzoneCluster cluster, int datanodeIdx)
       throws OzoneException, URISyntaxException {
     cluster.restartHddsDatanode(datanodeIdx);
-    // refresh the datanode endpoint uri after datanode restart
-    final int port = ozoneCluster.getHddsDatanodes().get(0)
-        .getDatanodeDetails().getOzoneRestPort();
-    client.setEndPoint(String.format("http://localhost:%d", port));
   }
 
   @Test
   public void testPutAndGetKeyWithDnRestart() throws Exception {
     runTestPutAndGetKeyWithDnRestart(
-        new PutHelper(ozoneRestClient, path), ozoneCluster);
+        new PutHelper(client, path), ozoneCluster);
     String delimiter = RandomStringUtils.randomAscii(1);
     runTestPutAndGetKeyWithDnRestart(
-        new PutHelper(ozoneRestClient, path,
+        new PutHelper(client, path,
             getMultiPartKey(delimiter)), ozoneCluster);
   }
 
   static void runTestPutAndGetKeyWithDnRestart(
       PutHelper helper, MiniOzoneCluster cluster) throws Exception {
-    String keyName = helper.putKey().getKeyName();
+    String keyName = helper.putKey();
     assertNotNull(helper.getBucket());
     assertNotNull(helper.getFile());
 
     // restart the datanode
-    restartDatanode(cluster, 0, helper.client);
+    restartDatanode(cluster, 0);
+    // TODO: Try removing sleep and adding a join for the MiniOzoneCluster start
+    // The ozoneContainer is not started and its metrics are not initialized
+    // which leads to NullPointerException in Dispatcher.
+    Thread.sleep(1000);
+    ozoneCluster.waitForClusterToBeReady();
     // verify getKey after the datanode restart
     String newFileName = helper.dir + "/"
         + OzoneUtils.getRequestID().toLowerCase();
     Path newPath = Paths.get(newFileName);
-
-    helper.getBucket().getKey(keyName, newPath);
+    try (
+        FileOutputStream newOutputStream = new FileOutputStream(
+            newPath.toString());
+        OzoneInputStream ozoneInputStream = helper.client
+            .getKey(helper.getVol().getName(), helper.getBucket().getName(),
+                keyName)) {
+      IOUtils.copy(ozoneInputStream, newOutputStream);
+    }
 
     try (
         FileInputStream original = new FileInputStream(helper.getFile());
@@ -321,16 +365,16 @@ public class TestKeys {
 
   @Test
   public void testPutAndGetKey() throws Exception {
-    runTestPutAndGetKey(new PutHelper(ozoneRestClient, path));
+    runTestPutAndGetKey(new PutHelper(client, path));
     String delimiter = RandomStringUtils.randomAscii(1);
-    runTestPutAndGetKey(new PutHelper(ozoneRestClient, path,
+    runTestPutAndGetKey(new PutHelper(client, path,
         getMultiPartKey(delimiter)));
   }
 
   static void runTestPutAndGetKey(PutHelper helper) throws Exception {
-    final OzoneRestClient client = helper.client;
+    final ClientProtocol client = helper.client;
 
-    String keyName = helper.putKey().getKeyName();
+    String keyName = helper.putKey();
     assertNotNull(helper.getBucket());
     assertNotNull(helper.getFile());
 
@@ -342,10 +386,22 @@ public class TestKeys {
     Path newPath1 = Paths.get(newFileName1);
     Path newPath2 = Paths.get(newFileName2);
 
-    helper.getBucket().getKey(keyName, newPath1);
+    try (
+        FileOutputStream newOutputStream = new FileOutputStream(
+            newPath1.toString());
+        OzoneInputStream ozoneInputStream = helper.getBucket()
+            .readKey(keyName)) {
+      IOUtils.copy(ozoneInputStream, newOutputStream);
+    }
+
     // test get key using a more efficient call
-    client.getKey(helper.getVol().getVolumeName(),
-        helper.getBucket().getBucketName(), keyName, newPath2);
+    try (
+        FileOutputStream newOutputStream = new FileOutputStream(
+            newPath2.toString());
+        OzoneInputStream ozoneInputStream = helper.getBucket()
+            .readKey(keyName)) {
+      IOUtils.copy(ozoneInputStream, newOutputStream);
+    }
 
     try (FileInputStream original = new FileInputStream(helper.getFile());
         FileInputStream downloaded1 = new FileInputStream(newPath1.toFile());
@@ -363,19 +419,17 @@ public class TestKeys {
 
       // test new get key with invalid volume/bucket name
       try {
-        client.getKey("invalid-volume", helper.getBucket().getBucketName(),
-            keyName, newPath1);
+        client.getKey("invalid-volume", helper.getBucket().getName(), keyName);
         fail("Get key should have thrown " + "when using invalid volume name.");
-      } catch (OzoneException e) {
+      } catch (IOException e) {
         GenericTestUtils
             .assertExceptionContains(Status.KEY_NOT_FOUND.toString(), e);
       }
 
       try {
-        client.getKey(helper.getVol().getVolumeName(), "invalid-bucket",
-            keyName, newPath1);
+        client.getKey(helper.getVol().getName(), "invalid-bucket", keyName);
         fail("Get key should have thrown " + "when using invalid bucket name.");
-      } catch (OzoneException e) {
+      } catch (IOException e) {
         GenericTestUtils.assertExceptionContains(
             Status.KEY_NOT_FOUND.toString(), e);
       }
@@ -384,14 +438,14 @@ public class TestKeys {
 
   @Test
   public void testPutAndDeleteKey() throws Exception {
-    runTestPutAndDeleteKey(new PutHelper(ozoneRestClient, path));
+    runTestPutAndDeleteKey(new PutHelper(client, path));
     String delimiter = RandomStringUtils.randomAscii(1);
-    runTestPutAndDeleteKey(new PutHelper(ozoneRestClient, path,
+    runTestPutAndDeleteKey(new PutHelper(client, path,
         getMultiPartKey(delimiter)));
   }
 
   static void runTestPutAndDeleteKey(PutHelper helper) throws Exception {
-    String keyName = helper.putKey().getKeyName();
+    String keyName = helper.putKey();
     assertNotNull(helper.getBucket());
     assertNotNull(helper.getFile());
     helper.getBucket().deleteKey(keyName);
@@ -399,7 +453,7 @@ public class TestKeys {
     try {
       helper.getBucket().getKey(keyName);
       fail("Get Key on a deleted key should have thrown");
-    } catch (OzoneException ex) {
+    } catch (IOException ex) {
       GenericTestUtils.assertExceptionContains(
           Status.KEY_NOT_FOUND.toString(), ex);
     }
@@ -407,14 +461,14 @@ public class TestKeys {
 
   @Test
   public void testPutAndListKey() throws Exception {
-    runTestPutAndListKey(new PutHelper(ozoneRestClient, path));
+    runTestPutAndListKey(new PutHelper(client, path));
     String delimiter = RandomStringUtils.randomAscii(1);
-    runTestPutAndListKey(new PutHelper(ozoneRestClient, path,
+    runTestPutAndListKey(new PutHelper(client, path,
         getMultiPartKey(delimiter)));
   }
 
   static void runTestPutAndListKey(PutHelper helper) throws Exception {
-    final OzoneRestClient client = helper.client;
+    ClientProtocol client = helper.client;
     helper.putKey();
     assertNotNull(helper.getBucket());
     assertNotNull(helper.getFile());
@@ -422,13 +476,20 @@ public class TestKeys {
     // add keys [list-key0, list-key1, ..., list-key9]
     for (int x = 0; x < 10; x++) {
       String newkeyName = "list-key" + x;
-      helper.getBucket().putKey(newkeyName, helper.getFile());
+      try (
+          OzoneOutputStream ozoneOutputStream = helper.getBucket()
+              .createKey(newkeyName, 0, replicationType, replicationFactor);
+          InputStream fileInputStream = new FileInputStream(helper.getFile())) {
+        IOUtils.copy(fileInputStream, ozoneOutputStream);
+      }
     }
 
-    List<OzoneKey> keyList1 = helper.getBucket().listKeys("100", null, null);
+    List<OzoneKey> keyList1 =
+        IteratorUtils.toList(helper.getBucket().listKeys(null, null));
     // test list key using a more efficient call
-    List<OzoneKey> keyList2 = client.listKeys(helper.getVol().getVolumeName(),
-        helper.getBucket().getBucketName(), "100", null, null);
+    List<OzoneKey> keyList2 = client
+        .listKeys(helper.getVol().getName(), helper.getBucket().getName(), null,
+            null, 100);
 
     Assert.assertEquals(11, keyList1.size());
     Assert.assertEquals(11, keyList2.size());
@@ -436,57 +497,56 @@ public class TestKeys {
     // second unit since the date string reparsed to millisecond will
     // lose precision.
     for (OzoneKey key : keyList1) {
-      assertTrue((OzoneUtils.formatDate(key.getObjectInfo().getCreatedOn())
-          / 1000) >= (currentTime / 1000));
-      assertTrue((OzoneUtils.formatDate(key.getObjectInfo().getModifiedOn())
-          / 1000) >= (currentTime / 1000));
+      assertTrue((key.getCreationTime() / 1000) >= (currentTime / 1000));
+      assertTrue((key.getModificationTime() / 1000) >= (currentTime / 1000));
     }
 
     for (OzoneKey key : keyList2) {
-      assertTrue((OzoneUtils.formatDate(key.getObjectInfo().getCreatedOn())
-          / 1000) >= (currentTime / 1000));
-      assertTrue((OzoneUtils.formatDate(key.getObjectInfo().getModifiedOn())
-          / 1000) >= (currentTime / 1000));
+      assertTrue((key.getCreationTime() / 1000) >= (currentTime / 1000));
+      assertTrue((key.getModificationTime() / 1000) >= (currentTime / 1000));
     }
 
     // test maxLength parameter of list keys
-    keyList1 = helper.getBucket().listKeys("1", null, null);
-    keyList2 = client.listKeys(helper.getVol().getVolumeName(),
-        helper.getBucket().getBucketName(), "1", null, null);
-    Assert.assertEquals(1, keyList1.size());
+    keyList2 = client
+        .listKeys(helper.getVol().getName(), helper.getBucket().getName(), null,
+            null, 1);
     Assert.assertEquals(1, keyList2.size());
 
     // test startKey parameter of list keys
-    keyList1 = helper.getBucket().listKeys("100", "list-key4", "list-key");
-    keyList2 = client.listKeys(helper.getVol().getVolumeName(),
-        helper.getBucket().getBucketName(), "100", "list-key4", "list-key");
+    keyList1 = IteratorUtils
+        .toList(helper.getBucket().listKeys("list-key", "list-key4"));
+    keyList2 = client
+        .listKeys(helper.getVol().getName(), helper.getBucket().getName(),
+            "list-key", "list-key4", 100);
     Assert.assertEquals(5, keyList1.size());
     Assert.assertEquals(5, keyList2.size());
 
     // test prefix parameter of list keys
-    keyList1 = helper.getBucket().listKeys("100", null, "list-key2");
-    keyList2 = client.listKeys(helper.getVol().getVolumeName(),
-        helper.getBucket().getBucketName(), "100", null, "list-key2");
-    Assert.assertTrue(keyList1.size() == 1
-        && keyList1.get(0).getObjectInfo().getKeyName().equals("list-key2"));
-    Assert.assertTrue(keyList2.size() == 1
-        && keyList2.get(0).getObjectInfo().getKeyName().equals("list-key2"));
+    keyList1 =
+        IteratorUtils.toList(helper.getBucket().listKeys("list-key2", null));
+    keyList2 = client
+        .listKeys(helper.getVol().getName(), helper.getBucket().getName(),
+            "list-key2", null, 100);
+    Assert.assertTrue(
+        keyList1.size() == 1 && keyList1.get(0).getName().equals("list-key2"));
+    Assert.assertTrue(
+        keyList2.size() == 1 && keyList2.get(0).getName().equals("list-key2"));
 
     // test new list keys with invalid volume/bucket name
     try {
-      client.listKeys("invalid-volume", helper.getBucket().getBucketName(),
-          "100", null, null);
+      client.listKeys("invalid-volume", helper.getBucket().getName(),
+          null, null, 100);
       fail("List keys should have thrown when using invalid volume name.");
-    } catch (OzoneException e) {
+    } catch (IOException e) {
       GenericTestUtils.assertExceptionContains(
           Status.BUCKET_NOT_FOUND.toString(), e);
     }
 
     try {
-      client.listKeys(helper.getVol().getVolumeName(), "invalid-bucket", "100",
-          null, null);
+      client.listKeys(helper.getVol().getName(), "invalid-bucket", null,
+          null, 100);
       fail("List keys should have thrown when using invalid bucket name.");
-    } catch (OzoneException e) {
+    } catch (IOException e) {
       GenericTestUtils.assertExceptionContains(
           Status.BUCKET_NOT_FOUND.toString(), e);
     }
@@ -494,29 +554,27 @@ public class TestKeys {
 
   @Test
   public void testGetKeyInfo() throws Exception {
-    runTestGetKeyInfo(new PutHelper(ozoneRestClient, path));
+    runTestGetKeyInfo(new PutHelper(client, path));
     String delimiter = RandomStringUtils.randomAscii(1);
-    runTestGetKeyInfo(new PutHelper(ozoneRestClient, path,
+    runTestGetKeyInfo(new PutHelper(client, path,
         getMultiPartKey(delimiter)));
   }
 
   static void runTestGetKeyInfo(PutHelper helper) throws Exception {
-    String keyName = helper.putKey().getKeyName();
+    String keyName = helper.putKey();
     assertNotNull(helper.getBucket());
     assertNotNull(helper.getFile());
 
-    OzoneKey keyInfo = helper.getBucket().getKeyInfo(keyName);
-    assertNotNull(keyInfo.getObjectInfo());
-    assertEquals(keyName, keyInfo.getObjectInfo().getKeyName());
+    OzoneKey keyInfo = helper.getBucket().getKey(keyName);
+    assertNotNull(keyInfo);
+    assertEquals(keyName, keyInfo.getName());
 
     // Compare the time in second unit since the date string reparsed to
     // millisecond will lose precision.
+    Assert
+        .assertTrue((keyInfo.getCreationTime() / 1000) >= (currentTime / 1000));
     Assert.assertTrue(
-        (OzoneUtils.formatDate(keyInfo.getObjectInfo().getCreatedOn())
-            / 1000) >= (currentTime / 1000));
-    Assert.assertTrue(
-        (OzoneUtils.formatDate(keyInfo.getObjectInfo().getModifiedOn())
-            / 1000) >= (currentTime / 1000));
+        (keyInfo.getModificationTime() / 1000) >= (currentTime / 1000));
   }
 
   // Volume, bucket, keys info that helps for test create/delete keys.
@@ -593,12 +651,12 @@ public class TestKeys {
     int numOfExistedKeys = countKsmKeys(ksm);
 
     // Keep tracking bucket keys info while creating them
-    PutHelper helper = new PutHelper(ozoneRestClient, path);
+    PutHelper helper = new PutHelper(client, path);
     BucketKeys bucketKeys = new BucketKeys();
     for (int i = 0; i < 20; i++) {
-      KsmKeyArgs keyArgs = helper.putKey();
-      bucketKeys.addKey(keyArgs.getVolumeName(), keyArgs.getBucketName(),
-          keyArgs.getKeyName());
+      String keyName = helper.putKey();
+      bucketKeys.addKey(helper.getVol().getName(), helper.getBucket().getName(),
+          keyName);
     }
 
     // There should be 20 keys in the buckets we just created.
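
For reference, a hedged sketch of the write/read round trip the rewritten test drives through ClientProtocol (names such as volName, bucketName and localCopy are illustrative; the calls themselves are the ones used in the hunks above):

    // Write: createKey returns an OzoneOutputStream and the key becomes
    // visible once the stream is closed.
    try (OzoneOutputStream out = client.createKey(volName, bucketName,
             keyName, 0, replicationType, replicationFactor);
         InputStream in = new FileInputStream(file)) {
      IOUtils.copy(in, out);
    }

    // Read it back: getKey returns an OzoneInputStream over the key data.
    try (OzoneInputStream in = client.getKey(volName, bucketName, keyName);
         FileOutputStream out = new FileOutputStream(localCopy)) {
      IOUtils.copy(in, out);
    }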

http://git-wip-us.apache.org/repos/asf/hadoop/blob/774daa8d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java
index 802cc3d..645b866 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java
@@ -21,6 +21,7 @@ import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.RatisTestHelper;
+import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Rule;
@@ -52,7 +53,7 @@ public class TestKeysRatis {
   private static RatisTestHelper.RatisTestSuite suite;
   private static MiniOzoneCluster ozoneCluster = null;
   static private String path;
-  private static OzoneRestClient ozoneRestClient = null;
+  private static ClientProtocol client = null;
 
   @BeforeClass
   public static void init() throws Exception {
@@ -60,7 +61,7 @@ public class TestKeysRatis {
     path = suite.getConf().get(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT);
     ozoneCluster = suite.getCluster();
     ozoneCluster.waitForClusterToBeReady();
-    ozoneRestClient = suite.newOzoneRestClient();
+    client = suite.newOzoneClient();
   }
 
   /**
@@ -76,9 +77,9 @@ public class TestKeysRatis {
 
   @Test
   public void testPutKey() throws Exception {
-    runTestPutKey(new PutHelper(ozoneRestClient, path));
+    runTestPutKey(new PutHelper(client, path));
     String delimiter = RandomStringUtils.randomAlphanumeric(1);
-    runTestPutKey(new PutHelper(ozoneRestClient, path,
+    runTestPutKey(new PutHelper(client, path,
         getMultiPartKey(delimiter)));
   }
 
@@ -86,42 +87,42 @@ public class TestKeysRatis {
   @Test
   public void testPutAndGetKeyWithDnRestart() throws Exception {
     runTestPutAndGetKeyWithDnRestart(
-        new PutHelper(ozoneRestClient, path), ozoneCluster);
+        new PutHelper(client, path), ozoneCluster);
     String delimiter = RandomStringUtils.randomAlphanumeric(1);
     runTestPutAndGetKeyWithDnRestart(
-        new PutHelper(ozoneRestClient, path, getMultiPartKey(delimiter)),
+        new PutHelper(client, path, getMultiPartKey(delimiter)),
         ozoneCluster);
   }
 
   @Test
   public void testPutAndGetKey() throws Exception {
-    runTestPutAndGetKey(new PutHelper(ozoneRestClient, path));
+    runTestPutAndGetKey(new PutHelper(client, path));
     String delimiter = RandomStringUtils.randomAlphanumeric(1);
-    runTestPutAndGetKey(new PutHelper(ozoneRestClient, path,
+    runTestPutAndGetKey(new PutHelper(client, path,
         getMultiPartKey(delimiter)));
   }
 
   @Test
   public void testPutAndDeleteKey() throws Exception  {
-    runTestPutAndDeleteKey(new PutHelper(ozoneRestClient, path));
+    runTestPutAndDeleteKey(new PutHelper(client, path));
     String delimiter = RandomStringUtils.randomAlphanumeric(1);
-    runTestPutAndDeleteKey(new PutHelper(ozoneRestClient, path,
+    runTestPutAndDeleteKey(new PutHelper(client, path,
         getMultiPartKey(delimiter)));
   }
 
   @Test
   public void testPutAndListKey() throws Exception {
-    runTestPutAndListKey(new PutHelper(ozoneRestClient, path));
+    runTestPutAndListKey(new PutHelper(client, path));
     String delimiter = RandomStringUtils.randomAlphanumeric(1);
-    runTestPutAndListKey(new PutHelper(ozoneRestClient, path,
+    runTestPutAndListKey(new PutHelper(client, path,
         getMultiPartKey(delimiter)));
   }
 
   @Test
   public void testGetKeyInfo() throws Exception {
-    runTestGetKeyInfo(new PutHelper(ozoneRestClient, path));
+    runTestGetKeyInfo(new PutHelper(client, path));
     String delimiter = RandomStringUtils.randomAlphanumeric(1);
-    runTestGetKeyInfo(new PutHelper(ozoneRestClient, path,
+    runTestGetKeyInfo(new PutHelper(client, path,
         getMultiPartKey(delimiter)));
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/774daa8d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
index 2d3cea9..f8c7eec 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
@@ -20,50 +20,42 @@ package org.apache.hadoop.ozone.web.client;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang.RandomStringUtils;
-import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.hdds.client.OzoneQuota;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.OzoneClientException;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.Status;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
+import org.apache.hadoop.ozone.client.VolumeArgs;
+import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
+import org.apache.hadoop.ozone.client.rpc.RpcClient;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.web.request.OzoneQuota;
+import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
-import org.apache.http.client.methods.HttpUriRequest;
-import org.apache.http.impl.client.CloseableHttpClient;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
-import org.mockito.Mockito;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.Ignore;
 
 import java.io.File;
 import java.io.IOException;
 import java.text.ParseException;
-import java.util.ArrayList;
 import java.util.List;
-import java.util.concurrent.atomic.AtomicInteger;
 
-import org.junit.BeforeClass;
-import org.junit.AfterClass;
-import org.junit.Test;
-import org.junit.Ignore;
-import static org.junit.Assert.fail;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
 
 /**
  * Test Ozone Volumes Lifecycle.
  */
 public class TestVolume {
   private static MiniOzoneCluster cluster = null;
-  private static OzoneRestClient ozoneRestClient = null;
+  private static ClientProtocol client = null;
 
   /**
    * Create a MiniDFSCluster for testing.
@@ -92,8 +84,7 @@ public class TestVolume {
     final int port = cluster.getHddsDatanodes().get(0)
         .getDatanodeDetails().getOzoneRestPort();
 
-    ozoneRestClient = new OzoneRestClient(
-        String.format("http://localhost:%d", port));
+    client = new RpcClient(conf);
   }
 
   /**
@@ -108,128 +99,122 @@ public class TestVolume {
 
   @Test
   public void testCreateVolume() throws Exception {
-    runTestCreateVolume(ozoneRestClient);
+    runTestCreateVolume(client);
   }
 
-  static void runTestCreateVolume(OzoneRestClient client)
+  static void runTestCreateVolume(ClientProtocol client)
       throws OzoneException, IOException, ParseException {
     String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    client.setUserAuth(OzoneConsts.OZONE_SIMPLE_HDFS_USER);
 
     long currentTime = Time.now();
-    OzoneRestClient mockClient = Mockito.spy(client);
-    List<CloseableHttpClient> mockedClients = mockHttpClients(mockClient);
-    OzoneVolume vol = mockClient.createVolume(volumeName, "bilbo", "100TB");
-    // Verify http clients are properly closed.
-    verifyHttpConnectionClosed(mockedClients);
-
-    assertEquals(vol.getVolumeName(), volumeName);
-    assertEquals(vol.getCreatedby(), "hdfs");
-    assertEquals(vol.getOwnerName(), "bilbo");
-    assertEquals(vol.getQuota().getUnit(), OzoneQuota.Units.TB);
-    assertEquals(vol.getQuota().getSize(), 100);
+
+    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
+        .setOwner("bilbo")
+        .setQuota("100TB")
+        .setAdmin("hdfs")
+        .build();
+    client.createVolume(volumeName, volumeArgs);
+    OzoneVolume vol = client.getVolumeDetails(volumeName);
+
+    assertEquals(vol.getName(), volumeName);
+    assertEquals(vol.getAdmin(), "hdfs");
+    assertEquals(vol.getOwner(), "bilbo");
+    assertEquals(vol.getQuota(), OzoneQuota.parseQuota("100TB").sizeInBytes());
 
     // verify the key creation time
-    assertTrue((OzoneUtils.formatDate(vol.getCreatedOn())
+    assertTrue((vol.getCreationTime()
         / 1000) >= (currentTime / 1000));
 
     // Test create a volume with invalid volume name,
     // not use Rule here because the test method is static.
     try {
       String invalidVolumeName = "#" + OzoneUtils.getRequestID().toLowerCase();
-      client.setUserAuth(OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-      mockClient.createVolume(invalidVolumeName, "bilbo", "100TB");
+      client.createVolume(invalidVolumeName);
+      /*
+      //TODO: RestClient and RpcClient should use HddsClientUtils to verify name
       fail("Except the volume creation be failed because the"
-          + " volume name starts with an invalid char #");
+          + " volume name starts with an invalid char #");*/
     } catch (Exception e) {
-      assertTrue(e instanceof OzoneClientException);
       assertTrue(e.getMessage().contains("Bucket or Volume name"
           + " has an unsupported character : #"));
     }
   }
 
   @Test
-  public void testCreateDuplicateVolume() throws OzoneException {
-    runTestCreateDuplicateVolume(ozoneRestClient);
+  public void testCreateDuplicateVolume() throws OzoneException, IOException {
+    runTestCreateDuplicateVolume(client);
   }
 
-  static void runTestCreateDuplicateVolume(OzoneRestClient client)
-      throws OzoneException {
+  static void runTestCreateDuplicateVolume(ClientProtocol client)
+      throws OzoneException, IOException {
     try {
-      client.setUserAuth(OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-      client.createVolume("testvol", "bilbo", "100TB");
-      client.createVolume("testvol", "bilbo", "100TB");
+      client.createVolume("testvol");
+      client.createVolume("testvol");
       assertFalse(true);
-    } catch (OzoneException ex) {
-      // Ozone will throw saying volume already exists
-      GenericTestUtils.assertExceptionContains(
-          Status.VOLUME_ALREADY_EXISTS.toString(), ex);
+    } catch (IOException ioe) {
+      Assert.assertTrue(ioe.getMessage()
+          .contains("Volume creation failed, error:VOLUME_ALREADY_EXISTS"));
     }
   }
 
   @Test
-  public void testDeleteVolume() throws OzoneException {
-    runTestDeleteVolume(ozoneRestClient);
+  public void testDeleteVolume() throws OzoneException, IOException {
+    runTestDeleteVolume(client);
   }
 
-  static void runTestDeleteVolume(OzoneRestClient client)
-      throws OzoneException {
+  static void runTestDeleteVolume(ClientProtocol client)
+      throws OzoneException, IOException {
     String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    client.setUserAuth(OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-    OzoneVolume vol = client.createVolume(volumeName, "bilbo", "100TB");
-    client.deleteVolume(vol.getVolumeName());
+    client.createVolume(volumeName);
+    client.deleteVolume(volumeName);
   }
 
   @Test
   public void testChangeOwnerOnVolume() throws Exception {
-    runTestChangeOwnerOnVolume(ozoneRestClient);
+    runTestChangeOwnerOnVolume(client);
   }
 
-  static void runTestChangeOwnerOnVolume(OzoneRestClient client)
-      throws OzoneException, ParseException {
+  static void runTestChangeOwnerOnVolume(ClientProtocol client)
+      throws OzoneException, ParseException, IOException {
     String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    client.setUserAuth(OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-    OzoneVolume vol = client.createVolume(volumeName, "bilbo", "100TB");
+    client.createVolume(volumeName);
+    client.getVolumeDetails(volumeName);
     client.setVolumeOwner(volumeName, "frodo");
-    OzoneVolume newVol = client.getVolume(volumeName);
-    assertEquals(newVol.getOwnerName(), "frodo");
+    OzoneVolume newVol = client.getVolumeDetails(volumeName);
+    assertEquals(newVol.getOwner(), "frodo");
     // verify if the creation time is missing after setting owner operation
-    assertTrue(OzoneUtils.formatDate(newVol.getCreatedOn()) > 0);
+    assertTrue(newVol.getCreationTime() > 0);
   }
 
   @Test
   public void testChangeQuotaOnVolume() throws Exception {
-    runTestChangeQuotaOnVolume(ozoneRestClient);
+    runTestChangeQuotaOnVolume(client);
   }
 
-  static void runTestChangeQuotaOnVolume(OzoneRestClient client)
+  static void runTestChangeQuotaOnVolume(ClientProtocol client)
       throws OzoneException, IOException, ParseException {
     String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    client.setUserAuth(OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-    OzoneVolume vol = client.createVolume(volumeName, "bilbo", "100TB");
-    client.setVolumeQuota(volumeName, "1000MB");
-    OzoneVolume newVol = client.getVolume(volumeName);
-    assertEquals(newVol.getQuota().getSize(), 1000);
-    assertEquals(newVol.getQuota().getUnit(), OzoneQuota.Units.MB);
+    client.createVolume(volumeName);
+    client.setVolumeQuota(volumeName, OzoneQuota.parseQuota("1000MB"));
+    OzoneVolume newVol = client.getVolumeDetails(volumeName);
+    assertEquals(newVol.getQuota(), OzoneQuota.parseQuota("1000MB").sizeInBytes());
     // verify if the creation time is missing after setting quota operation
-    assertTrue(OzoneUtils.formatDate(newVol.getCreatedOn()) > 0);
+    assertTrue(newVol.getCreationTime() > 0);
   }
 
   @Test
   public void testListVolume() throws OzoneException, IOException {
-    runTestListVolume(ozoneRestClient);
+    runTestListVolume(client);
   }
 
-  static void runTestListVolume(OzoneRestClient client)
+  static void runTestListVolume(ClientProtocol client)
       throws OzoneException, IOException {
-    client.setUserAuth(OzoneConsts.OZONE_SIMPLE_HDFS_USER);
     for (int x = 0; x < 10; x++) {
       String volumeName = OzoneUtils.getRequestID().toLowerCase();
-      OzoneVolume vol = client.createVolume(volumeName, "frodo", "100TB");
-      assertNotNull(vol);
+      client.createVolume(volumeName);
     }
 
-    List<OzoneVolume> ovols = client.listVolumes("frodo");
+    List<OzoneVolume> ovols = client.listVolumes(null, null, 100);
     assertTrue(ovols.size() >= 10);
   }
 
@@ -237,27 +222,24 @@ public class TestVolume {
   @Ignore("Takes 3m to run, disable for now.")
   @Test
   public void testListVolumePagination() throws OzoneException, IOException {
-    runTestListVolumePagination(ozoneRestClient);
+    runTestListVolumePagination(client);
   }
 
-  static void runTestListVolumePagination(OzoneRestClient client)
+  static void runTestListVolumePagination(ClientProtocol client)
       throws OzoneException, IOException {
     final int volCount = 2000;
     final int step = 100;
-    client.setUserAuth(OzoneConsts.OZONE_SIMPLE_HDFS_USER);
     for (int x = 0; x < volCount; x++) {
       String volumeName = OzoneUtils.getRequestID().toLowerCase();
-      OzoneVolume vol = client.createVolume(volumeName, "frodo", "100TB");
-      assertNotNull(vol);
+      client.createVolume(volumeName);
     }
-    OzoneVolume prevKey = null;
+    String prevKey = null;
     int count = 0;
     int pagecount = 0;
     while (count < volCount) {
-      List<OzoneVolume> ovols = client.listVolumes("frodo", null, step,
-          prevKey);
+      List<OzoneVolume> ovols = client.listVolumes(null, prevKey, step);
       count += ovols.size();
-      prevKey = ovols.get(ovols.size() - 1);
+      prevKey = ovols.get(ovols.size() - 1).getName();
       pagecount++;
     }
     assertEquals(volCount / step, pagecount);
@@ -267,30 +249,35 @@ public class TestVolume {
   @Ignore
   @Test
   public void testListAllVolumes() throws OzoneException, IOException {
-    runTestListAllVolumes(ozoneRestClient);
+    runTestListAllVolumes(client);
   }
 
-  static void runTestListAllVolumes(OzoneRestClient client)
+  static void runTestListAllVolumes(ClientProtocol client)
       throws OzoneException, IOException {
     final int volCount = 200;
     final int step = 10;
-    client.setUserAuth(OzoneConsts.OZONE_SIMPLE_HDFS_USER);
     for (int x = 0; x < volCount; x++) {
       String userName =
           "frodo" + RandomStringUtils.randomAlphabetic(5).toLowerCase();
       String volumeName =
           "vol" + RandomStringUtils.randomAlphabetic(5).toLowerCase();
-      OzoneVolume vol = client.createVolume(volumeName, userName, "100TB");
+      VolumeArgs volumeArgs = VolumeArgs.newBuilder()
+          .setOwner(userName)
+          .setQuota("100TB")
+          .setAdmin("hdfs")
+          .build();
+      client.createVolume(volumeName, volumeArgs);
+      OzoneVolume vol = client.getVolumeDetails(volumeName);
       assertNotNull(vol);
     }
-    OzoneVolume prevKey = null;
+    String prevKey = null;
     int count = 0;
     int pagecount = 0;
     while (count < volCount) {
-      List<OzoneVolume> ovols = client.listAllVolumes(null, step, prevKey);
+      List<OzoneVolume> ovols = client.listVolumes(null, prevKey, step);
       count += ovols.size();
       if (ovols.size() > 0) {
-        prevKey = ovols.get(ovols.size() - 1);
+        prevKey = ovols.get(ovols.size() - 1).getName();
       }
       pagecount++;
     }
@@ -301,17 +288,16 @@ public class TestVolume {
 
   @Test
   public void testListVolumes() throws Exception {
-    runTestListVolumes(ozoneRestClient);
+    runTestListVolumes(client);
   }
 
-  static void runTestListVolumes(OzoneRestClient client)
+  static void runTestListVolumes(ClientProtocol client)
       throws OzoneException, IOException, ParseException {
     final int volCount = 20;
     final String user1 = "test-user-a";
     final String user2 = "test-user-b";
 
     long currentTime = Time.now();
-    client.setUserAuth(OzoneConsts.OZONE_SIMPLE_HDFS_USER);
     // Create 20 volumes, 10 for user1 and another 10 for user2.
     for (int x = 0; x < volCount; x++) {
       String volumeName;
@@ -326,109 +312,40 @@ public class TestVolume {
         userName = user2;
         volumeName = "test-vol" + x;
       }
-      OzoneVolume vol = client.createVolume(volumeName, userName, "100TB");
+      VolumeArgs volumeArgs = VolumeArgs.newBuilder()
+          .setOwner(userName)
+          .setQuota("100TB")
+          .setAdmin("hdfs")
+          .build();
+      client.createVolume(volumeName, volumeArgs);
+      OzoneVolume vol = client.getVolumeDetails(volumeName);
       assertNotNull(vol);
     }
 
     // list all the volumes belong to user1
-    List<OzoneVolume> volumeList = client.listVolumes(user1,
-        null, 100, StringUtils.EMPTY);
+    List<OzoneVolume> volumeList = client.listVolumes(user1, null, null, 100);
     assertEquals(10, volumeList.size());
     // verify the owner name and creation time of volume
     for (OzoneVolume vol : volumeList) {
-      assertTrue(vol.getOwnerName().equals(user1));
-      assertTrue((OzoneUtils.formatDate(vol.getCreatedOn())
+      assertTrue(vol.getOwner().equals(user1));
+      assertTrue((vol.getCreationTime()
           / 1000) >= (currentTime / 1000));
     }
 
     // test max key parameter of listing volumes
-    volumeList = client.listVolumes(user1, null, 2, StringUtils.EMPTY);
+    volumeList = client.listVolumes(user1, null, null, 2);
     assertEquals(2, volumeList.size());
 
     // test prefix parameter of listing volumes
-    volumeList = client.listVolumes(user1, "test-vol10", 100,
-        StringUtils.EMPTY);
+    volumeList = client.listVolumes(user1, "test-vol10", null, 10);
     assertTrue(volumeList.size() == 1
-        && volumeList.get(0).getVolumeName().equals("test-vol10"));
+        && volumeList.get(0).getName().equals("test-vol10"));
 
-    volumeList = client.listVolumes(user1, "test-vol1",
-        100, StringUtils.EMPTY);
+    volumeList = client.listVolumes(user1, "test-vol1", null, 10);
     assertEquals(5, volumeList.size());
 
     // test start key parameter of listing volumes
-    volumeList = client.listVolumes(user2, null, 100, "test-vol15");
+    volumeList = client.listVolumes(user2, null, "test-vol15", 10);
     assertEquals(2, volumeList.size());
   }
-
-  /**
-   * Returns a list of mocked {@link CloseableHttpClient} used for testing.
-   * The mocked client replaces the actual calls in
-   * {@link OzoneRestClient#newHttpClient()}, it is used to verify
-   * if the invocation of this client is expected. <b>Note</b>, the output
-   * of this method is always used as the input of
-   * {@link TestVolume#verifyHttpConnectionClosed(List)}.
-   *
-   * @param mockedClient mocked ozone client.
-   * @return a list of mocked {@link CloseableHttpClient}.
-   * @throws IOException
-   */
-  private static List<CloseableHttpClient> mockHttpClients(
-      OzoneRestClient mockedClient)
-      throws IOException {
-    List<CloseableHttpClient> spyHttpClients = new ArrayList<>();
-    for (int i = 0; i < 5; i++) {
-      CloseableHttpClient spyHttpClient = Mockito
-          .spy(HddsClientUtils.newHttpClient());
-      spyHttpClients.add(spyHttpClient);
-    }
-
-    List<CloseableHttpClient> nextReturns =
-        new ArrayList<>(spyHttpClients.subList(1, spyHttpClients.size()));
-    Mockito.when(mockedClient.newHttpClient()).thenReturn(
-        spyHttpClients.get(0),
-        nextReturns.toArray(new CloseableHttpClient[nextReturns.size()]));
-    return spyHttpClients;
-  }
-
-  /**
-   * This method is used together with
-   * {@link TestVolume#mockHttpClients(OzoneRestClient)} to verify
-   * if the http client is properly closed. It verifies that as long as
-   * a client calls {@link CloseableHttpClient#execute(HttpUriRequest)} to
-   * send request, then it must calls {@link CloseableHttpClient#close()}
-   * close the http connection.
-   *
-   * @param mockedHttpClients
-   */
-  private static void verifyHttpConnectionClosed(
-      List<CloseableHttpClient> mockedHttpClients) {
-    final AtomicInteger totalCalled = new AtomicInteger();
-    assertTrue(mockedHttpClients.stream().allMatch(closeableHttpClient -> {
-      boolean clientUsed = false;
-      try {
-        verify(closeableHttpClient, times(1)).execute(Mockito.any());
-        totalCalled.incrementAndGet();
-        clientUsed = true;
-      } catch (Throwable e) {
-        // There might be some redundant instances in mockedHttpClients,
-        // it is allowed that a client is not used.
-        return true;
-      }
-
-      if (clientUsed) {
-        try {
-          // If a client is used, ensure the close function is called.
-          verify(closeableHttpClient, times(1)).close();
-          return true;
-        } catch (IOException e) {
-          return false;
-        }
-      } else {
-        return true;
-      }
-    }));
-    System.out.println("Successful connections " + totalCalled.get());
-    assertTrue("The mocked http client should be called at least once.",
-        totalCalled.get() > 0);
-  }
 }
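
For reference, a condensed sketch of the volume lifecycle as rewritten above (volume name, owner and quota values are illustrative; all calls appear in the hunks):

    // Create with explicit arguments, then fetch a details handle.
    VolumeArgs args = VolumeArgs.newBuilder()
        .setOwner("bilbo")
        .setQuota("100TB")
        .setAdmin("hdfs")
        .build();
    client.createVolume(volumeName, args);
    OzoneVolume vol = client.getVolumeDetails(volumeName);

    // Quota is now exposed in bytes instead of a (size, unit) pair.
    client.setVolumeQuota(volumeName, OzoneQuota.parseQuota("1000MB"));

    // Pagination now tracks the previous volume *name*, not the object.
    String prevKey = null;
    List<OzoneVolume> page = client.listVolumes(null, prevKey, 100);
    if (!page.isEmpty()) {
      prevKey = page.get(page.size() - 1).getName();
    }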

http://git-wip-us.apache.org/repos/asf/hadoop/blob/774daa8d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java
index 8314851..bc4ba25 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java
@@ -23,7 +23,9 @@ import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 
+import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
+import org.apache.hadoop.ozone.client.rpc.RpcClient;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
@@ -38,7 +40,7 @@ import java.io.IOException;
 public class TestVolumeRatis {
   @Rule
   public Timeout testTimeout = new Timeout(300000);
-  private static OzoneRestClient ozoneClient;
+  private static ClientProtocol client;
   private static MiniOzoneCluster cluster;
 
   @BeforeClass
@@ -63,8 +65,7 @@ public class TestVolumeRatis {
     final int port = cluster.getHddsDatanodes().get(0)
         .getDatanodeDetails().getOzoneRestPort();
 
-    ozoneClient = new OzoneRestClient(
-        String.format("http://localhost:%d", port));
+    client = new RpcClient(conf);
   }
 
   @AfterClass
@@ -77,53 +78,53 @@ public class TestVolumeRatis {
 
   @Test
   public void testCreateVolume() throws Exception {
-    TestVolume.runTestCreateVolume(ozoneClient);
+    TestVolume.runTestCreateVolume(client);
   }
 
   @Test
-  public void testCreateDuplicateVolume() throws OzoneException {
-    TestVolume.runTestCreateDuplicateVolume(ozoneClient);
+  public void testCreateDuplicateVolume() throws OzoneException, IOException {
+    TestVolume.runTestCreateDuplicateVolume(client);
   }
 
   @Test
-  public void testDeleteVolume() throws OzoneException {
-    TestVolume.runTestDeleteVolume(ozoneClient);
+  public void testDeleteVolume() throws OzoneException, IOException {
+    TestVolume.runTestDeleteVolume(client);
   }
 
   @Test
   public void testChangeOwnerOnVolume() throws Exception {
-    TestVolume.runTestChangeOwnerOnVolume(ozoneClient);
+    TestVolume.runTestChangeOwnerOnVolume(client);
   }
 
   @Test
   public void testChangeQuotaOnVolume() throws Exception {
-    TestVolume.runTestChangeQuotaOnVolume(ozoneClient);
+    TestVolume.runTestChangeQuotaOnVolume(client);
   }
 
   // TODO: remove @Ignore below once the problem has been resolved.
   @Ignore("listVolumes not implemented in DistributedStorageHandler")
   @Test
   public void testListVolume() throws OzoneException, IOException {
-    TestVolume.runTestListVolume(ozoneClient);
+    TestVolume.runTestListVolume(client);
   }
 
   // TODO: remove @Ignore below once the problem has been resolved.
   @Ignore("See TestVolume.testListVolumePagination()")
   @Test
   public void testListVolumePagination() throws OzoneException, IOException {
-    TestVolume.runTestListVolumePagination(ozoneClient);
+    TestVolume.runTestListVolumePagination(client);
   }
 
   // TODO: remove @Ignore below once the problem has been resolved.
   @Ignore("See TestVolume.testListAllVolumes()")
   @Test
   public void testListAllVolumes() throws Exception {
-    TestVolume.runTestListAllVolumes(ozoneClient);
+    TestVolume.runTestListAllVolumes(client);
   }
 
   @Ignore("Disabling Ratis tests for pipeline work.")
   @Test
   public void testListVolumes() throws Exception {
-    TestVolume.runTestListVolumes(ozoneClient);
+    TestVolume.runTestListVolumes(client);
   }
 }




[39/50] [abbrv] hadoop git commit: HADOOP-15498. TestHadoopArchiveLogs (#testGenerateScript, #testPrepareWorkingDir) fails on Windows. Contributed by Anbang Hu.

Posted by bo...@apache.org.
HADOOP-15498. TestHadoopArchiveLogs (#testGenerateScript, #testPrepareWorkingDir) fails on Windows. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8fdc993a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8fdc993a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8fdc993a

Branch: refs/heads/YARN-7402
Commit: 8fdc993a993728c65084d7dc3ac469059cb1f603
Parents: 9dbf4f0
Author: Inigo Goiri <in...@apache.org>
Authored: Mon May 28 16:45:42 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Mon May 28 16:45:42 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/tools/TestHadoopArchiveLogs.java  | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fdc993a/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java b/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
index 2ddd4c5..a1b662c 100644
--- a/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
+++ b/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
@@ -278,7 +279,7 @@ public class TestHadoopArchiveLogs {
     hal.generateScript(localScript);
     Assert.assertTrue(localScript.exists());
     String script = IOUtils.toString(localScript.toURI());
-    String[] lines = script.split(System.lineSeparator());
+    String[] lines = script.split("\n");
     Assert.assertEquals(22, lines.length);
     Assert.assertEquals("#!/bin/bash", lines[0]);
     Assert.assertEquals("set -e", lines[1]);
@@ -368,7 +369,8 @@ public class TestHadoopArchiveLogs {
     Assert.assertTrue(dirPrepared);
     Assert.assertTrue(fs.exists(workingDir));
     Assert.assertEquals(
-        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true),
+        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL,
+            !Shell.WINDOWS),
         fs.getFileStatus(workingDir).getPermission());
     // Throw a file in the dir
     Path dummyFile = new Path(workingDir, "dummy.txt");
@@ -381,7 +383,8 @@ public class TestHadoopArchiveLogs {
     Assert.assertTrue(fs.exists(workingDir));
     Assert.assertTrue(fs.exists(dummyFile));
     Assert.assertEquals(
-        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true),
+        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL,
+            !Shell.WINDOWS),
         fs.getFileStatus(workingDir).getPermission());
     // -force is true and the dir exists, so it will recreate it and the dummy
     // won't exist anymore
@@ -390,7 +393,8 @@ public class TestHadoopArchiveLogs {
     Assert.assertTrue(dirPrepared);
     Assert.assertTrue(fs.exists(workingDir));
     Assert.assertEquals(
-        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true),
+        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL,
+            !Shell.WINDOWS),
         fs.getFileStatus(workingDir).getPermission());
     Assert.assertFalse(fs.exists(dummyFile));
   }
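
For reference, the two portability patterns the patch applies, as a hedged sketch (values are illustrative):

    // The generated script always uses "\n" line endings, so split on the
    // literal rather than System.lineSeparator(), which is "\r\n" on
    // Windows and would leave the whole script as a single element there.
    String[] lines = script.split("\n");

    // The local filesystem on Windows has no sticky bit, so the expected
    // permission toggles it off on that platform.
    FsPermission expected = new FsPermission(
        FsAction.ALL, FsAction.ALL, FsAction.ALL, !Shell.WINDOWS);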




[22/50] [abbrv] hadoop git commit: HADOOP-15494. TestRawLocalFileSystemContract fails on Windows. Contributed by Anbang Hu.

HADOOP-15494. TestRawLocalFileSystemContract fails on Windows.
Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bddfe796
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bddfe796
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bddfe796

Branch: refs/heads/YARN-7402
Commit: bddfe796f2f992fc1dcc8a1dd44d64ff2b3c9cf4
Parents: 86bc642
Author: Steve Loughran <st...@apache.org>
Authored: Fri May 25 11:12:47 2018 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Fri May 25 11:12:47 2018 +0100

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddfe796/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
index ebf9ea7..908e330 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
@@ -42,7 +42,7 @@ public class TestRawLocalFileSystemContract extends FileSystemContractBaseTest {
   private static final Logger LOG =
       LoggerFactory.getLogger(TestRawLocalFileSystemContract.class);
   private final static Path TEST_BASE_DIR =
-      new Path(GenericTestUtils.getTempPath(""));
+      new Path(GenericTestUtils.getRandomizedTestDir().getAbsolutePath());
 
   @Before
   public void setUp() throws Exception {
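
The fix swaps a fixed temp path for a randomized, absolute per-run directory. A rough sketch of the idea (a hypothetical helper, not GenericTestUtils' actual implementation): every run gets its own directory, so leftovers from an earlier run, which Windows can keep locked, never collide with the current one, and the absolute path sidesteps the relative-path handling that presumably failed on Windows.

  import java.io.File;
  import java.util.UUID;

  public class RandomizedTestDirDemo {
    // Hypothetical stand-in for GenericTestUtils.getRandomizedTestDir().
    static File randomizedTestDir() {
      return new File(System.getProperty("java.io.tmpdir"),
          "test-" + UUID.randomUUID());
    }

    public static void main(String[] args) {
      // Two runs never share a directory, so stale state cannot collide.
      System.out.println(randomizedTestDir().getAbsolutePath());
    }
  }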




[33/50] [abbrv] hadoop git commit: MAPREDUCE-7097. MapReduce JHS should honor yarn.webapp.filter-entity-list-by-user. Contributed by Sunil Govindan.

MAPREDUCE-7097. MapReduce JHS should honor yarn.webapp.filter-entity-list-by-user. Contributed by Sunil Govindan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88cbe57c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88cbe57c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88cbe57c

Branch: refs/heads/YARN-7402
Commit: 88cbe57c069a1d2dd3bfb32e3ad742566470a10b
Parents: d14e26b
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Mon May 28 12:45:07 2018 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Mon May 28 14:05:49 2018 +0530

----------------------------------------------------------------------
 .../mapreduce/v2/hs/webapp/HsJobBlock.java      | 18 ++++++++++++++-
 .../mapreduce/v2/hs/webapp/TestHsJobBlock.java  | 20 ++++++++++++++--
 .../apache/hadoop/yarn/webapp/Controller.java   |  4 ++++
 .../org/apache/hadoop/yarn/webapp/View.java     | 24 +++++++++++++-------
 4 files changed, 55 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88cbe57c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
index 18040f0..9b845cd 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
@@ -27,6 +27,8 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH;
 import java.util.Date;
 import java.util.List;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.TaskID;
 import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
@@ -39,8 +41,10 @@ import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.ResponseInfo;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
@@ -56,9 +60,14 @@ import com.google.inject.Inject;
  */
 public class HsJobBlock extends HtmlBlock {
   final AppContext appContext;
+  private UserGroupInformation ugi;
+  private boolean isFilterAppListByUserEnabled;
 
-  @Inject HsJobBlock(AppContext appctx) {
+  @Inject HsJobBlock(Configuration conf, AppContext appctx, ViewContext ctx) {
+    super(ctx);
     appContext = appctx;
+    isFilterAppListByUserEnabled = conf
+        .getBoolean(YarnConfiguration.FILTER_ENTITY_LIST_BY_USER, false);
   }
 
   /*
@@ -78,6 +87,13 @@ public class HsJobBlock extends HtmlBlock {
       html.p().__("Sorry, ", jid, " not found.").__();
       return;
     }
+    ugi = getCallerUGI();
+    if (isFilterAppListByUserEnabled && ugi != null
+        && !j.checkAccess(ugi, JobACL.VIEW_JOB)) {
+      html.p().__("Sorry, ", jid, " could not be viewed for '",
+          ugi.getUserName(), "'.").__();
+      return;
+    }
     if(j instanceof UnparsedJob) {
       final int taskCount = j.getTotalMaps() + j.getTotalReduces();
       UnparsedJob oversizedJob = (UnparsedJob) j;
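
The block now honors yarn.webapp.filter-entity-list-by-user: the flag is read once in the constructor, and rendering short-circuits for a caller who fails the VIEW_JOB ACL check. A minimal, self-contained sketch of the same gating pattern (generic stand-ins, not Hadoop's types):

  public class FilterByUserDemo {
    interface Job {
      boolean checkAccess(String user);
    }

    static String render(boolean filterEnabled, String caller, Job job) {
      // Deny only when filtering is on, the caller is known, and the
      // ACL check fails -- mirroring the structure of the check above.
      if (filterEnabled && caller != null && !job.checkAccess(caller)) {
        return "Sorry, this job could not be viewed for '" + caller + "'.";
      }
      return "rendering job details...";
    }

    public static void main(String[] args) {
      Job job = user -> "alice".equals(user); // only alice may view
      System.out.println(render(true, "bob", job));   // denied
      System.out.println(render(true, "alice", job)); // rendered
      System.out.println(render(false, "bob", job));  // filter disabled
    }
  }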

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88cbe57c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java
index 7fa238e..48e3d3b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java
@@ -33,8 +33,10 @@ import org.apache.hadoop.mapreduce.v2.hs.UnparsedJob;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.util.StringHelper;
+import org.apache.hadoop.yarn.webapp.Controller;
 import org.apache.hadoop.yarn.webapp.ResponseInfo;
 import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.View.ViewContext;
 import org.apache.hadoop.yarn.webapp.view.BlockForTest;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlockForTest;
@@ -49,6 +51,8 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Map;
 
+import javax.servlet.http.HttpServletRequest;
+
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -69,7 +73,13 @@ public class TestHsJobBlock {
         new JobHistoryStubWithAllOversizeJobs(maxAllowedTaskNum);
     jobHistory.init(config);
 
-    HsJobBlock jobBlock = new HsJobBlock(jobHistory) {
+    Controller.RequestContext rc = mock(Controller.RequestContext.class);
+    ViewContext view = mock(ViewContext.class);
+    HttpServletRequest req = mock(HttpServletRequest.class);
+    when(rc.getRequest()).thenReturn(req);
+    when(view.requestContext()).thenReturn(rc);
+
+    HsJobBlock jobBlock = new HsJobBlock(config, jobHistory, view) {
       // override this so that job block can fetch a job id.
       @Override
       public Map<String, String> moreParams() {
@@ -101,7 +111,13 @@ public class TestHsJobBlock {
     JobHistory jobHistory = new JobHitoryStubWithAllNormalSizeJobs();
     jobHistory.init(config);
 
-    HsJobBlock jobBlock = new HsJobBlock(jobHistory) {
+    Controller.RequestContext rc = mock(Controller.RequestContext.class);
+    ViewContext view = mock(ViewContext.class);
+    HttpServletRequest req = mock(HttpServletRequest.class);
+    when(rc.getRequest()).thenReturn(req);
+    when(view.requestContext()).thenReturn(rc);
+
+    HsJobBlock jobBlock = new HsJobBlock(config, jobHistory, view) {
       // override this so that the job block can fetch a job id.
       @Override
       public Map<String, String> moreParams() {
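
Because HsJobBlock now reaches the request through its ViewContext, the test must stub the whole chain view.requestContext().getRequest(). A minimal sketch of that chained stubbing, assuming Mockito on the classpath and using generic interfaces as stand-ins for Hadoop's ViewContext and Controller.RequestContext:

  import static org.mockito.Mockito.mock;
  import static org.mockito.Mockito.when;

  public class ChainedStubDemo {
    interface Request { }
    interface RequestContext {
      Request getRequest();
    }
    interface ViewContext {
      RequestContext requestContext();
    }

    public static void main(String[] args) {
      Request req = mock(Request.class);
      RequestContext rc = mock(RequestContext.class);
      ViewContext view = mock(ViewContext.class);
      // Each link in the chain must be stubbed; an unstubbed link
      // returns null and the code under test fails with an NPE.
      when(rc.getRequest()).thenReturn(req);
      when(view.requestContext()).thenReturn(rc);
      System.out.println(view.requestContext().getRequest() == req); // true
    }
  }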

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88cbe57c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Controller.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Controller.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Controller.java
index dc4eee2..1b25b84 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Controller.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Controller.java
@@ -108,6 +108,10 @@ public abstract class Controller implements Params {
     }
 
     public String prefix() { return prefix; }
+
+    public HttpServletRequest getRequest() {
+      return request;
+    }
   }
 
   private RequestContext context;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88cbe57c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/View.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/View.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/View.java
index c16787d..666a0bd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/View.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/View.java
@@ -96,28 +96,36 @@ public abstract class View implements Params {
     return vc;
   }
 
-  public Throwable error() { return context().rc.error; }
+  public Throwable error() {
+    return context().requestContext().error;
+  }
 
-  public int status() { return context().rc.status; }
+  public int status() {
+    return context().requestContext().status;
+  }
 
-  public boolean inDevMode() { return context().rc.devMode; }
+  public boolean inDevMode() {
+    return context().requestContext().devMode;
+  }
 
-  public Injector injector() { return context().rc.injector; }
+  public Injector injector() {
+    return context().requestContext().injector;
+  }
 
   public <T> T getInstance(Class<T> cls) {
     return injector().getInstance(cls);
   }
 
   public HttpServletRequest request() {
-    return context().rc.request;
+    return context().requestContext().getRequest();
   }
 
   public HttpServletResponse response() {
-    return context().rc.response;
+    return context().requestContext().response;
   }
 
   public Map<String, String> moreParams() {
-    return context().rc.moreParams();
+    return context().requestContext().moreParams();
   }
 
   /**
@@ -125,7 +133,7 @@ public abstract class View implements Params {
    * @return the cookies map
    */
   public Map<String, Cookie> cookies() {
-    return context().rc.cookies();
+    return context().requestContext().cookies();
   }
 
   public ServletOutputStream outputStream() {
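
All of View's convenience methods now go through requestContext() (and its new getRequest() accessor) rather than reading Controller's rc field directly. Behavior is unchanged, but the indirection is what makes the test above possible: a field read cannot be intercepted by a mock, while a method call can. A small sketch of the refactor's shape (generic classes, not the actual ones):

  public class AccessorRefactorDemo {
    static class RequestContext {
      String request = "GET /jobhistory";      // package-visible field
      String getRequest() { return request; }  // the new accessor
    }

    static class View {
      RequestContext rc = new RequestContext();
      // before: return rc.request;  -- direct field access, unmockable
      String request() { return rc.getRequest(); }  // after: via the getter
    }

    public static void main(String[] args) {
      System.out.println(new View().request());
    }
  }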




[42/50] [abbrv] hadoop git commit: HADOOP-15497. TestTrash should use proper test path to avoid failing on Windows. Contributed by Anbang Hu.

HADOOP-15497. TestTrash should use proper test path to avoid failing on Windows. Contributed by Anbang Hu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c75f8e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c75f8e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c75f8e4

Branch: refs/heads/YARN-7402
Commit: 3c75f8e4933221fa60a87e86a3db5e4727530b6f
Parents: 31ab960
Author: Inigo Goiri <in...@apache.org>
Authored: Tue May 29 09:11:08 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Tue May 29 09:11:08 2018 -0700

----------------------------------------------------------------------
 .../src/test/java/org/apache/hadoop/fs/TestTrash.java     | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c75f8e4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index 12aed29..fa2d21f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -49,9 +49,11 @@ import org.apache.hadoop.util.Time;
  */
 public class TestTrash {
 
-  private final static Path TEST_DIR = new Path(GenericTestUtils.getTempPath(
+  private final static File BASE_PATH = new File(GenericTestUtils.getTempPath(
       "testTrash"));
 
+  private final static Path TEST_DIR = new Path(BASE_PATH.getAbsolutePath());
+
   @Before
   public void setUp() throws IOException {
     // ensure each test initiates a FileSystem instance,
@@ -682,7 +684,7 @@ public class TestTrash {
   static class TestLFS extends LocalFileSystem {
     Path home;
     TestLFS() {
-      this(new Path(TEST_DIR, "user/test"));
+      this(TEST_DIR);
     }
     TestLFS(final Path home) {
       super(new RawLocalFileSystem() {
@@ -809,8 +811,8 @@ public class TestTrash {
    */
   public static void verifyTrashPermission(FileSystem fs, Configuration conf)
       throws IOException {
-    Path caseRoot = new Path(
-        GenericTestUtils.getTempPath("testTrashPermission"));
+    Path caseRoot = new Path(BASE_PATH.getPath(),
+        "testTrashPermission");
     try (FileSystem fileSystem = fs){
       Trash trash = new Trash(fileSystem, conf);
       FileSystemTestWrapper wrapper =
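
Both changes root every path the test touches under a single File-based BASE_PATH, so case-specific paths are derived with File rather than assembled from raw temp-path strings; File normalizes separators and drive-letter forms per platform, which is presumably what failed on Windows before. A rough sketch of the idea (hypothetical names, java.io.File only):

  import java.io.File;

  public class TestPathDemo {
    public static void main(String[] args) {
      // One base directory for the whole test class...
      File basePath = new File(System.getProperty("java.io.tmpdir"), "testTrash");
      // ...and every case-specific path derived from it via File, which
      // yields the platform-correct separator and root.
      File caseRoot = new File(basePath, "testTrashPermission");
      System.out.println(caseRoot.getAbsolutePath());
    }
  }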

