Posted to common-commits@hadoop.apache.org by bo...@apache.org on 2018/08/02 17:17:24 UTC

[01/50] [abbrv] hadoop git commit: YARN-8566. Add diagnostic message for unschedulable containers (snemeth via rkanter) [Forced Update!]

Repository: hadoop
Updated Branches:
  refs/heads/YARN-7402 3e1c46077 -> b8e718082 (forced update)


YARN-8566. Add diagnostic message for unschedulable containers (snemeth via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fecbac49
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fecbac49
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fecbac49

Branch: refs/heads/YARN-7402
Commit: fecbac499e2ae6b3334773a997d454a518f43e01
Parents: b429f19
Author: Robert Kanter <rk...@apache.org>
Authored: Fri Jul 27 14:32:34 2018 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Fri Jul 27 14:32:34 2018 -0700

----------------------------------------------------------------------
 .../src/site/markdown/ResourceManagerRest.md    | 285 +++++++++++++++++++
 1 file changed, 285 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fecbac49/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
index a30677c..24c2319 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
@@ -2326,6 +2326,291 @@ Response Body:
 </appAttempts>
 ```
 
+Containers for an Application Attempt API
+-----------------------------------------
+
+With the Containers for an Application Attempt API, you can obtain the list of containers that belong to an application attempt.
+
+### URI
+
+      * http://rm-http-address:port/ws/v1/cluster/apps/{appid}/appattempts/{appAttemptId}/containers
+
+### HTTP Operations Supported
+
+      * GET
+
+### Query Parameters Supported
+
+      None
+
+### Elements of the *containers* object
+
+When you make a request for the list of containers, the information will be returned as an array of container objects.
+
+containers:
+
+| Item | Data Type | Description |
+|:---- |:---- |:---- |
+| containers | array of app container objects (JSON)/zero or more container objects (XML) | The collection of app container objects |
+
+### Elements of the *container* object
+
+| Item | Data Type | Description |
+|:---- |:---- |:---- |
+| containerId | string | The container id |
+| allocatedMB | long | The amount of memory allocated for the container in MB |
+| allocatedVCores | int | The amount of virtual cores allocated for the container |
+| assignedNodeId | string | The node id of the node the container ran on |
+| priority | int | Allocated priority of the container |
+| startedTime | long | The start time of the container (in ms since epoch) |
+| finishedTime | long | The finish time of the container (in ms since epoch); 0 if not finished |
+| elapsedTime | long | The elapsed time in ms since the startedTime |
+| logUrl | string | The web URL that can be used to check the log for the container |
+| containerExitStatus | int | Final exit status of the container |
+| containerState | string | State of the container, can be NEW, RUNNING, or COMPLETE |
+| nodeHttpAddress | string | The node HTTP address of the node the container ran on |
+| nodeId | string | The node id of the node the container ran on |
+| allocatedResources | array of resource objects (JSON)/zero or more resource objects (XML) | Allocated resources for the container |
+
+### Elements of the *resource* object
+| Item | Data Type | Description |
+|:---- |:---- |:---- |
+| memory | int | The maximum memory for the container |
+| vCores | int | The maximum number of vcores for the container |
+
+**JSON response**
+
+HTTP Request:
+
+      GET http://rm-http-address:port/ws/v1/cluster/apps/{appid}/appattempts/{appAttemptId}/containers
+
+Response Header:
+
+      HTTP/1.1 200 OK
+      Content-Type: application/json
+      Transfer-Encoding: chunked
+      Server: Jetty(6.1.26)
+
+Response Body:
+
+```json
+{
+  "containers" : {
+    "container": [
+      {
+      "containerId": "container_1531404209605_0008_01_000001",
+      "allocatedMB": "1536",
+      "allocatedVCores": "1",
+      "assignedNodeId": "host.domain.com:37814",
+      "priority": "0",
+      "startedTime": "1531405909444",
+      "finishedTime": "0",
+      "elapsedTime": "4112",
+      "logUrl": "http://host.domain.com:8042/node/containerlogs/container_1531404209605_0008_01_000001/systest",
+      "containerExitStatus": "0",
+      "containerState": "RUNNING",
+      "nodeHttpAddress": "http://host.domain.com:8042",
+      "nodeId": "host.domain.com:37814",
+      "allocatedResources": [
+         {
+            "key": "memory-mb",
+            "value": "1536"
+         },
+         {
+            "key": "vcores",
+            "value": "1"
+         }
+       ]
+      }
+    ]
+  }
+}
+```
+
+**XML response**
+
+HTTP Request:
+
+      GET http://rm-http-address:port/ws/v1/cluster/apps/{appid}/appattempts/{appAttemptId}/containers
+      Accept: application/xml
+
+Response Header:
+
+      HTTP/1.1 200 OK
+      Content-Type: application/xml
+      Content-Length: 1104
+      Server: Jetty(6.1.26)
+
+Response Body:
+
+```xml
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<containers>
+  <container>
+    <containerId>container_1531404209605_0008_01_000001</containerId>
+    <allocatedMB>1536</allocatedMB>
+    <allocatedVCores>1</allocatedVCores>
+    <assignedNodeId>host.domain.com:37814</assignedNodeId>
+    <priority>0</priority>
+    <startedTime>1531405909444</startedTime>
+    <finishedTime>0</finishedTime>
+    <elapsedTime>4112</elapsedTime>
+    <logUrl>
+    http://host.domain.com:8042/node/containerlogs/container_1531404209605_0008_01_000001/systest
+    </logUrl>
+    <containerExitStatus>0</containerExitStatus>
+    <containerState>RUNNING</containerState>
+    <nodeHttpAddress>http://host.domain.com:8042</nodeHttpAddress>
+    <nodeId>host.domain.com:37814</nodeId>
+    <allocatedResources>
+      <entry>
+        <key>memory-mb</key>
+        <value>1536</value>
+      </entry>
+      <entry>
+        <key>vcores</key>
+        <value>1</value>
+      </entry>
+    </allocatedResources>
+  </container>
+</containers>
+```
+
+Specific Container for an Application Attempt API
+-------------------------------------------------
+
+With the Specific Container for an Application Attempt API, you can obtain information about a specific container that belongs to an application attempt, selected by its container id.
+
+### URI
+
+      * http://rm-http-address:port/ws/v1/cluster/apps/{appid}/appattempts/{appAttemptId}/containers/{containerId}
+
+### HTTP Operations Supported
+
+      * GET
+
+### Query Parameters Supported
+
+      None
+
+### Elements of the *container* object
+
+| Item | Data Type | Description |
+|:---- |:---- |:---- |
+| containerId | string | The container id |
+| allocatedMB | long | The amount of memory allocated for the container in MB |
+| allocatedVCores | int | The amount of virtual cores allocated for the container |
+| assignedNodeId | string | The node id of the node the container ran on |
+| priority | int | Allocated priority of the container |
+| startedTime | long | The start time of the container (in ms since epoch) |
+| finishedTime | long | The finish time of the container (in ms since epoch); 0 if not finished |
+| elapsedTime | long | The elapsed time in ms since the startedTime |
+| logUrl | string | The web URL that can be used to check the log for the container |
+| containerExitStatus | int | Final exit status of the container |
+| containerState | string | State of the container, can be NEW, RUNNING, or COMPLETE |
+| nodeHttpAddress | string | The node HTTP address of the node the container ran on |
+| nodeId | string | The node id of the node the container ran on |
+| allocatedResources | array of resource objects (JSON)/zero or more resource objects (XML) | Allocated resources for the container |
+
+### Elements of the *resource* object
+| Item | Data Type | Description |
+|:---- |:---- |:---- |
+| memory | int | The maximum memory for the container |
+| vCores | int | The maximum number of vcores for the container |
+
+**JSON response**
+
+HTTP Request:
+
+      GET http://rm-http-address:port/ws/v1/cluster/apps/{appid}/appattempts/{appAttemptId}/containers/{containerId}
+
+Response Header:
+
+      HTTP/1.1 200 OK
+      Content-Type: application/json
+      Transfer-Encoding: chunked
+      Server: Jetty(6.1.26)
+
+Response Body:
+
+```json
+{
+  "container": {
+    "containerId": "container_1531404209605_0008_01_000001",
+    "allocatedMB": "1536",
+    "allocatedVCores": "1",
+    "assignedNodeId": "host.domain.com:37814",
+    "priority": "0",
+    "startedTime": "1531405909444",
+    "finishedTime": "0",
+    "elapsedTime": "4112",
+    "logUrl": "http://host.domain.com:8042/node/containerlogs/container_1531404209605_0008_01_000001/systest",
+    "containerExitStatus": "0",
+    "containerState": "RUNNING",
+    "nodeHttpAddress": "http://host.domain.com:8042",
+    "nodeId": "host.domain.com:37814",
+    "allocatedResources": [
+       {
+          "key": "memory-mb",
+          "value": "1536"
+       },
+       {
+          "key": "vcores",
+          "value": "1"
+       }
+    ]
+  }
+}
+```
+
+**XML response**
+
+HTTP Request:
+
+      GET http://rm-http-address:port/ws/v1/cluster/apps/{appid}/appattempts/{appAttemptId}/containers/{containerId}
+      Accept: application/xml
+
+Response Header:
+
+      HTTP/1.1 200 OK
+      Content-Type: application/xml
+      Content-Length: 1104
+      Server: Jetty(6.1.26)
+
+Response Body:
+
+```xml
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+
+<container>
+  <containerId>container_1531404209605_0008_01_000001</containerId>
+  <allocatedMB>1536</allocatedMB>
+  <allocatedVCores>1</allocatedVCores>
+  <assignedNodeId>host.domain.com:37814</assignedNodeId>
+  <priority>0</priority>
+  <startedTime>1531405909444</startedTime>
+  <finishedTime>0</finishedTime>
+  <elapsedTime>4112</elapsedTime>
+  <logUrl>
+  http://host.domain.com:8042/node/containerlogs/container_1531404209605_0008_01_000001/systest
+  </logUrl>
+  <containerExitStatus>0</containerExitStatus>
+  <containerState>RUNNING</containerState>
+  <nodeHttpAddress>http://host.domain.com:8042</nodeHttpAddress>
+  <nodeId>host.domain.com:37814</nodeId>
+  <allocatedResources>
+    <entry>
+      <key>memory-mb</key>
+      <value>1536</value>
+    </entry>
+    <entry>
+      <key>vcores</key>
+      <value>1</value>
+    </entry>
+  </allocatedResources>
+</container>
+```
+
 Cluster Nodes API
 -----------------
 

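Both endpoints documented above are plain HTTP GET resources, so any HTTP client can consume them. Below is a minimal Java sketch, not part of the patch, that fetches the container list as JSON; the ResourceManager address and the application/attempt ids are placeholder values to be replaced with real ones from your cluster.

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class ListAttemptContainers {
  public static void main(String[] args) throws Exception {
    // Placeholder RM address and ids; substitute real values from your cluster.
    String base = "http://rm-http-address:8088/ws/v1/cluster";
    String appId = "application_1531404209605_0008";
    String attemptId = "appattempt_1531404209605_0008_000001";

    URL url = new URL(base + "/apps/" + appId + "/appattempts/" + attemptId + "/containers");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    conn.setRequestProperty("Accept", "application/json");   // or application/xml

    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);                            // raw containers JSON
      }
    } finally {
      conn.disconnect();
    }
  }
}
```

The single-container endpoint is reached the same way by appending the container id to the path.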

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[32/50] [abbrv] hadoop git commit: YARN-8403. Change the log level for fail to download resource from INFO to ERROR. Contributed by Eric Yang

Posted by bo...@apache.org.
YARN-8403. Change the log level for fail to download resource from INFO to ERROR. Contributed by Eric Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/67c65da2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/67c65da2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/67c65da2

Branch: refs/heads/YARN-7402
Commit: 67c65da261464a0dccb63dc27668109a52e05714
Parents: d920b9d
Author: Billie Rinaldi <bi...@apache.org>
Authored: Wed Aug 1 08:51:18 2018 -0700
Committer: Billie Rinaldi <bi...@apache.org>
Committed: Wed Aug 1 08:51:40 2018 -0700

----------------------------------------------------------------------
 .../localizer/ResourceLocalizationService.java      | 16 +++++++++++-----
 .../localizer/TestResourceLocalizationService.java  |  3 +++
 2 files changed, 14 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/67c65da2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index 4ca6720..3834ece 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -969,11 +969,17 @@ public class ResourceLocalizationService extends CompositeService
                 .getDU(new File(local.toUri()))));
               assoc.getResource().unlock();
             } catch (ExecutionException e) {
-              LOG.info("Failed to download resource " + assoc.getResource(),
-                  e.getCause());
-              LocalResourceRequest req = assoc.getResource().getRequest();
-              publicRsrc.handle(new ResourceFailedLocalizationEvent(req,
-                  e.getMessage()));
+              String user = assoc.getContext().getUser();
+              ApplicationId applicationId = assoc.getContext().getContainerId().getApplicationAttemptId().getApplicationId();
+              LocalResourcesTracker tracker =
+                getLocalResourcesTracker(LocalResourceVisibility.APPLICATION, user, applicationId);
+              final String diagnostics = "Failed to download resource " +
+                  assoc.getResource() + " " + e.getCause();
+              tracker.handle(new ResourceFailedLocalizationEvent(
+                  assoc.getResource().getRequest(), diagnostics));
+              publicRsrc.handle(new ResourceFailedLocalizationEvent(
+                  assoc.getResource().getRequest(), diagnostics));
+              LOG.error(diagnostics);
               assoc.getResource().unlock();
             } catch (CancellationException e) {
               // ignore; shutting down

http://git-wip-us.apache.org/repos/asf/hadoop/blob/67c65da2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
index 4d03f15..2b9148e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
@@ -2398,6 +2398,9 @@ public class TestResourceLocalizationService {
       // Waiting for resource to change into FAILED state.
       Assert.assertTrue(waitForResourceState(lr, spyService, req,
         LocalResourceVisibility.PUBLIC, user, null, ResourceState.FAILED, 5000));
+      Assert.assertTrue(waitForResourceState(lr, spyService, req,
+          LocalResourceVisibility.APPLICATION, user, appId, ResourceState.FAILED, 5000));
+
       // releasing lock as a part of download failed process.
       lr.unlock();
       // removing pending download request.




[22/50] [abbrv] hadoop git commit: YARN-8605. TestDominantResourceFairnessPolicy.testModWhileSorting is flaky. (Wilfred Spiegelenburg via Haibo Chen)

Posted by bo...@apache.org.
YARN-8605. TestDominantResourceFairnessPolicy.testModWhileSorting is flaky. (Wilfred Spiegelenburg via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8aa93a57
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8aa93a57
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8aa93a57

Branch: refs/heads/YARN-7402
Commit: 8aa93a575e896c609b97ddab58853b1eb95f0dee
Parents: 9fea5c9
Author: Haibo Chen <ha...@apache.org>
Authored: Tue Jul 31 11:32:40 2018 -0700
Committer: Haibo Chen <ha...@apache.org>
Committed: Tue Jul 31 11:32:40 2018 -0700

----------------------------------------------------------------------
 .../TestDominantResourceFairnessPolicy.java     | 38 +++++++-------------
 1 file changed, 12 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8aa93a57/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
index 55b7163..c963e0d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
@@ -24,7 +24,6 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
@@ -458,33 +457,20 @@ public class TestDominantResourceFairnessPolicy {
     }
     Comparator DRFComparator = createComparator(100000, 50000);
 
-    // To simulate unallocated resource changes
-    Thread modThread = modificationThread(schedulableList);
-    modThread.start();
+    /*
+     * The old sort should fail, but timing it makes the test too flaky.
+     * TimSort which is used does not handle the concurrent modification of
+     * objects it is sorting. This is the test that should fail:
+     *  modThread.start();
+     *  try {
+     *    Collections.sort(schedulableList, DRFComparator);
+     *  } catch (IllegalArgumentException iae) {
+     *    // failed sort
+     *  }
+     */
 
-    // This should fail: make sure that we do test correctly
-    // TimSort which is used does not handle the concurrent modification of
-    // objects it is sorting.
-    try {
-      Collections.sort(schedulableList, DRFComparator);
-      fail("Sorting should have failed and did not");
-    } catch (IllegalArgumentException iae) {
-      assertEquals(iae.getMessage(), "Comparison method violates its general contract!");
-    }
-    try {
-      modThread.join();
-    } catch (InterruptedException ie) {
-      fail("ModThread join failed: " + ie.getMessage());
-    }
-
-    // clean up and try again using TreeSet which should work
-    schedulableList.clear();
-    for (int i=0; i<10000; i++) {
-      schedulableList.add(
-          (FakeSchedulable)createSchedulable((i%10)*100, (i%3)*2));
-    }
     TreeSet<Schedulable> sortedSchedulable = new TreeSet<>(DRFComparator);
-    modThread = modificationThread(schedulableList);
+    Thread modThread = modificationThread(schedulableList);
     modThread.start();
     sortedSchedulable.addAll(schedulableList);
     try {
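
For background on the rewrite: Collections.sort uses TimSort, which checks that the comparator stays consistent across the whole run, so a list whose sort keys are mutated concurrently can fail with "Comparison method violates its general contract!" at an unpredictable point; that timing dependence is what made the old assertion flaky. Building a TreeSet compares each element only once, at insertion, so the add does not depend on when the mutation happens. A self-contained sketch of the difference (hypothetical Item class, not YARN code; the sort failure is probabilistic, hence only caught, not asserted):

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.TreeSet;
import java.util.concurrent.atomic.AtomicBoolean;

public class ConcurrentSortSketch {
  // A mutable value that a comparator might read while another thread updates it.
  static class Item {
    volatile int weight;
    Item(int weight) { this.weight = weight; }
  }

  public static void main(String[] args) throws Exception {
    List<Item> items = new ArrayList<>();
    for (int i = 0; i < 100_000; i++) {
      items.add(new Item(i % 100));
    }
    Comparator<Item> byWeight = Comparator.comparingInt(i -> i.weight);

    AtomicBoolean stop = new AtomicBoolean(false);
    Thread mutator = new Thread(() -> {
      while (!stop.get()) {
        for (Item it : items) {
          it.weight = (it.weight + 7) % 100;   // change sort keys during the sort
        }
      }
    });
    mutator.start();

    try {
      // TimSort verifies the comparator's consistency and may throw
      // "Comparison method violates its general contract!" here.
      Collections.sort(items, byWeight);
    } catch (IllegalArgumentException expected) {
      System.out.println("sort failed: " + expected.getMessage());
    }

    // A TreeSet compares each element only against the tree as it is inserted,
    // so addAll completes even while weights keep changing. Note that items
    // with equal weights collapse to one entry, since the comparator treats
    // them as duplicates; that is fine for this illustration.
    TreeSet<Item> sorted = new TreeSet<>(byWeight);
    sorted.addAll(items);
    System.out.println("tree size: " + sorted.size());

    stop.set(true);
    mutator.join();
  }
}
```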




[16/50] [abbrv] hadoop git commit: YARN-8584. Several typos in Log Aggregation related classes. Contributed by Szilard Nemeth.

Posted by bo...@apache.org.
YARN-8584. Several typos in Log Aggregation related classes. Contributed by Szilard Nemeth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b39ad26
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b39ad26
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b39ad26

Branch: refs/heads/YARN-7402
Commit: 2b39ad26984d641bad57db2cfcc0b7515ef95f46
Parents: e8f952e
Author: bibinchundatt <bi...@apache.org>
Authored: Mon Jul 30 23:25:19 2018 +0530
Committer: bibinchundatt <bi...@apache.org>
Committed: Mon Jul 30 23:25:19 2018 +0530

----------------------------------------------------------------------
 .../AggregatedLogDeletionService.java           |  4 +--
 .../logaggregation/AggregatedLogFormat.java     |  8 +++---
 .../LogAggregationFileController.java           |  6 ++---
 .../ifile/IndexedFileAggregatedLogsBlock.java   |  6 ++---
 .../LogAggregationIndexedFileController.java    | 26 ++++++++++----------
 .../tfile/LogAggregationTFileController.java    |  2 +-
 .../TestAggregatedLogDeletionService.java       |  6 ++---
 .../logaggregation/AppLogAggregatorImpl.java    |  2 +-
 .../logaggregation/LogAggregationService.java   |  6 ++---
 .../tracker/NMLogAggregationStatusTracker.java  |  4 +--
 10 files changed, 35 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b39ad26/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
index 562bd2c..841b870 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
@@ -258,7 +258,7 @@ public class AggregatedLogDeletionService extends AbstractService {
       return;
     }
     setLogAggCheckIntervalMsecs(retentionSecs);
-    task = new LogDeletionTask(conf, retentionSecs, creatRMClient());
+    task = new LogDeletionTask(conf, retentionSecs, createRMClient());
     timer = new Timer();
     timer.scheduleAtFixedRate(task, 0, checkIntervalMsecs);
   }
@@ -281,7 +281,7 @@ public class AggregatedLogDeletionService extends AbstractService {
   // We have already marked ApplicationClientProtocol.getApplicationReport
   // as @Idempotent, it will automatically take care of RM restart/failover.
   @VisibleForTesting
-  protected ApplicationClientProtocol creatRMClient() throws IOException {
+  protected ApplicationClientProtocol createRMClient() throws IOException {
     return ClientRMProxy.createRMProxy(getConfig(),
       ApplicationClientProtocol.class);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b39ad26/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
index 4ee5c8a..d9b4c1e4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
@@ -178,7 +178,7 @@ public class AggregatedLogFormat {
      * The set of log files that are older than retention policy that will
      * not be uploaded but ready for deletion.
      */
-    private final Set<File> obseleteRetentionLogFiles = new HashSet<File>();
+    private final Set<File> obsoleteRetentionLogFiles = new HashSet<File>();
 
     // TODO Maybe add a version string here. Instead of changing the version of
     // the entire k-v format
@@ -324,7 +324,7 @@ public class AggregatedLogFormat {
       // if log files are older than retention policy, do not upload them.
       // but schedule them for deletion.
       if(logRetentionContext != null && !logRetentionContext.shouldRetainLog()){
-        obseleteRetentionLogFiles.addAll(candidates);
+        obsoleteRetentionLogFiles.addAll(candidates);
         candidates.clear();
         return candidates;
       }
@@ -396,9 +396,9 @@ public class AggregatedLogFormat {
       return info;
     }
 
-    public Set<Path> getObseleteRetentionLogFiles() {
+    public Set<Path> getObsoleteRetentionLogFiles() {
       Set<Path> path = new HashSet<Path>();
-      for(File file: this.obseleteRetentionLogFiles) {
+      for(File file: this.obsoleteRetentionLogFiles) {
         path.add(new Path(file.getAbsolutePath()));
       }
       return path;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b39ad26/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
index 5005b39..b047b1c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
@@ -115,16 +115,16 @@ public abstract class LogAggregationFileController {
    */
   public void initialize(Configuration conf, String controllerName) {
     this.conf = conf;
-    int configuredRentionSize = conf.getInt(
+    int configuredRetentionSize = conf.getInt(
         YarnConfiguration.NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP,
         YarnConfiguration
             .DEFAULT_NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP);
-    if (configuredRentionSize <= 0) {
+    if (configuredRetentionSize <= 0) {
       this.retentionSize =
           YarnConfiguration
               .DEFAULT_NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP;
     } else {
-      this.retentionSize = configuredRentionSize;
+      this.retentionSize = configuredRetentionSize;
     }
     this.fileControllerName = controllerName;
     initInternal(conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b39ad26/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
index c53ffcc..4ef429d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
@@ -187,8 +187,8 @@ public class IndexedFileAggregatedLogsBlock extends LogAggregationHtmlBlock {
         FSDataInputStream fsin = fileContext.open(thisNodeFile.getPath());
         int bufferSize = 65536;
         for (IndexedFileLogMeta candidate : candidates) {
-          if (candidate.getLastModificatedTime() < startTime
-              || candidate.getLastModificatedTime() > endTime) {
+          if (candidate.getLastModifiedTime() < startTime
+              || candidate.getLastModifiedTime() > endTime) {
             continue;
           }
           byte[] cbuf = new byte[bufferSize];
@@ -205,7 +205,7 @@ public class IndexedFileAggregatedLogsBlock extends LogAggregationHtmlBlock {
             html.pre().__("\n\n").__();
             html.p().__("Log Type: " + candidate.getFileName()).__();
             html.p().__("Log Upload Time: " + Times.format(
-                candidate.getLastModificatedTime())).__();
+                candidate.getLastModifiedTime())).__();
             html.p().__("Log Length: " + Long.toString(
                 logLength)).__();
             long startIndex = start < 0

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b39ad26/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
index 59b8e2c..78b0c13 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
@@ -404,7 +404,7 @@ public class LogAggregationIndexedFileController
         meta.setStartIndex(outputStreamState.getStartPos());
         meta.setFileSize(fileLength);
       }
-      meta.setLastModificatedTime(logFile.lastModified());
+      meta.setLastModifiedTime(logFile.lastModified());
       metas.add(meta);
     }
     logsMetaInThisCycle.addContainerLogMeta(containerId, metas);
@@ -499,12 +499,12 @@ public class LogAggregationIndexedFileController
         .getRemoteNodeFileDir(conf, appId, logRequest.getAppOwner(),
         this.remoteRootLogDir, this.remoteRootLogDirSuffix);
     if (!nodeFiles.hasNext()) {
-      throw new IOException("There is no available log fils for "
+      throw new IOException("There is no available log file for "
           + "application:" + appId);
     }
     List<FileStatus> allFiles = getAllNodeFiles(nodeFiles, appId);
     if (allFiles.isEmpty()) {
-      throw new IOException("There is no available log fils for "
+      throw new IOException("There is no available log file for "
           + "application:" + appId);
     }
     Map<String, Long> checkSumFiles = parseCheckSumFiles(allFiles);
@@ -581,7 +581,7 @@ public class LogAggregationIndexedFileController
               decompressor, getFSInputBufferSize(conf));
           LogToolUtils.outputContainerLog(candidate.getContainerId(),
               nodeName, candidate.getFileName(), candidate.getFileSize(), size,
-              Times.format(candidate.getLastModificatedTime()),
+              Times.format(candidate.getLastModifiedTime()),
               in, os, buf, ContainerLogAggregationType.AGGREGATED);
           byte[] b = aggregatedLogSuffix(candidate.getFileName())
               .getBytes(Charset.forName("UTF-8"));
@@ -618,12 +618,12 @@ public class LogAggregationIndexedFileController
         .getRemoteNodeFileDir(conf, appId, appOwner, this.remoteRootLogDir,
         this.remoteRootLogDirSuffix);
     if (!nodeFiles.hasNext()) {
-      throw new IOException("There is no available log fils for "
+      throw new IOException("There is no available log file for "
           + "application:" + appId);
     }
     List<FileStatus> allFiles = getAllNodeFiles(nodeFiles, appId);
     if (allFiles.isEmpty()) {
-      throw new IOException("There is no available log fils for "
+      throw new IOException("There is no available log file for "
           + "application:" + appId);
     }
     Map<String, Long> checkSumFiles = parseCheckSumFiles(allFiles);
@@ -660,7 +660,7 @@ public class LogAggregationIndexedFileController
             for (IndexedFileLogMeta aMeta : log.getValue()) {
               meta.addLogMeta(aMeta.getFileName(), Long.toString(
                   aMeta.getFileSize()),
-                  Times.format(aMeta.getLastModificatedTime()));
+                  Times.format(aMeta.getLastModifiedTime()));
             }
             containersLogMeta.add(meta);
           }
@@ -671,7 +671,7 @@ public class LogAggregationIndexedFileController
               logMeta.getContainerLogMeta(containerIdStr)) {
             meta.addLogMeta(log.getFileName(), Long.toString(
                 log.getFileSize()),
-                Times.format(log.getLastModificatedTime()));
+                Times.format(log.getLastModifiedTime()));
           }
           containersLogMeta.add(meta);
         }
@@ -1002,7 +1002,7 @@ public class LogAggregationIndexedFileController
     private String fileName;
     private long fileSize;
     private long fileCompressedSize;
-    private long lastModificatedTime;
+    private long lastModifiedTime;
     private long startIndex;
 
     public String getFileName() {
@@ -1026,11 +1026,11 @@ public class LogAggregationIndexedFileController
       this.fileCompressedSize = fileCompressedSize;
     }
 
-    public long getLastModificatedTime() {
-      return lastModificatedTime;
+    public long getLastModifiedTime() {
+      return lastModifiedTime;
     }
-    public void setLastModificatedTime(long lastModificatedTime) {
-      this.lastModificatedTime = lastModificatedTime;
+    public void setLastModifiedTime(long lastModifiedTime) {
+      this.lastModifiedTime = lastModifiedTime;
     }
 
     public long getStartIndex() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b39ad26/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java
index e87af7f..b3103d2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java
@@ -275,7 +275,7 @@ public class LogAggregationTFileController
     RemoteIterator<FileStatus> nodeFiles = LogAggregationUtils
         .getRemoteNodeFileDir(conf, appId, appOwner);
     if (nodeFiles == null) {
-      throw new IOException("There is no available log fils for "
+      throw new IOException("There is no available log file for "
           + "application:" + appId);
     }
     while (nodeFiles.hasNext()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b39ad26/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java
index 4e2d302..f36ebf4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java
@@ -160,7 +160,7 @@ public class TestAggregatedLogDeletionService {
     AggregatedLogDeletionService deletionService =
         new AggregatedLogDeletionService() {
           @Override
-          protected ApplicationClientProtocol creatRMClient()
+          protected ApplicationClientProtocol createRMClient()
               throws IOException {
             try {
               return createMockRMClient(finishedApplications,
@@ -262,7 +262,7 @@ public class TestAggregatedLogDeletionService {
         return conf;
       }
       @Override
-      protected ApplicationClientProtocol creatRMClient()
+      protected ApplicationClientProtocol createRMClient()
           throws IOException {
         try {
           return createMockRMClient(finishedApplications, null);
@@ -353,7 +353,7 @@ public class TestAggregatedLogDeletionService {
     AggregatedLogDeletionService deletionSvc =
         new AggregatedLogDeletionService() {
       @Override
-      protected ApplicationClientProtocol creatRMClient()
+      protected ApplicationClientProtocol createRMClient()
           throws IOException {
         try {
           return createMockRMClient(finishedApplications, null);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b39ad26/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
index 5956823..6630ba6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
@@ -632,7 +632,7 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
 
       // need to return files uploaded or older-than-retention clean up.
       return Sets.union(logValue.getCurrentUpLoadedFilesPath(),
-          logValue.getObseleteRetentionLogFiles());
+          logValue.getObsoleteRetentionLogFiles());
 
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b39ad26/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
index 4938939..dcc165f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
@@ -117,9 +117,9 @@ public class LogAggregationService extends AbstractService implements
         LOG.info("Log aggregation debug mode enabled. rollingMonitorInterval = "
             + rollingMonitorInterval);
       } else {
-        LOG.warn("rollingMonitorIntervall should be more than or equal to "
-            + MIN_LOG_ROLLING_INTERVAL + " seconds. Using "
-            + MIN_LOG_ROLLING_INTERVAL + " seconds instead.");
+        LOG.warn("rollingMonitorInterval should be more than or equal to {} " +
+                "seconds. Using {} seconds instead.",
+                MIN_LOG_ROLLING_INTERVAL, MIN_LOG_ROLLING_INTERVAL);
         this.rollingMonitorInterval = MIN_LOG_ROLLING_INTERVAL;
       }
     } else if (rollingMonitorInterval <= 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b39ad26/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/logaggregation/tracker/NMLogAggregationStatusTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/logaggregation/tracker/NMLogAggregationStatusTracker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/logaggregation/tracker/NMLogAggregationStatusTracker.java
index 510d6d8..eb2aaf5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/logaggregation/tracker/NMLogAggregationStatusTracker.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/logaggregation/tracker/NMLogAggregationStatusTracker.java
@@ -110,7 +110,7 @@ public class NMLogAggregationStatusTracker extends CompositeService {
       LogAggregationStatus logAggregationStatus, long updateTime,
       String diagnosis, boolean finalized) {
     if (disabled) {
-      LOG.warn("The log aggregation is diabled. No need to update "
+      LOG.warn("The log aggregation is disabled. No need to update "
           + "the log aggregation status");
     }
     // In NM, each application has exactly one appLogAggregator thread
@@ -164,7 +164,7 @@ public class NMLogAggregationStatusTracker extends CompositeService {
   public List<LogAggregationReport> pullCachedLogAggregationReports() {
     List<LogAggregationReport> reports = new ArrayList<>();
     if (disabled) {
-      LOG.warn("The log aggregation is diabled."
+      LOG.warn("The log aggregation is disabled."
           + "There is no cached log aggregation status.");
       return reports;
     }
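
Besides the renames, the LogAggregationService hunk above also switches a concatenated warning to SLF4J's parameterized form. A small illustrative sketch of that style (hypothetical class, assuming slf4j-api on the classpath); the parameterized call defers message formatting until the logger confirms WARN is enabled:

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ParamLoggingSketch {
  private static final Logger LOG = LoggerFactory.getLogger(ParamLoggingSketch.class);

  public static void main(String[] args) {
    int minInterval = 3600;
    // Concatenation builds the message string even when WARN is disabled:
    LOG.warn("rollingMonitorInterval should be more than or equal to "
        + minInterval + " seconds. Using " + minInterval + " seconds instead.");
    // The parameterized form substitutes the arguments only when the level is enabled:
    LOG.warn("rollingMonitorInterval should be more than or equal to {} "
        + "seconds. Using {} seconds instead.", minInterval, minInterval);
  }
}
```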




[24/50] [abbrv] hadoop git commit: YARN-8579. Recover NMToken of previous attempted component data. Contributed by Gour Saha

Posted by bo...@apache.org.
YARN-8579.  Recover NMToken of previous attempted component data.
            Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7ebcd76
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7ebcd76
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7ebcd76

Branch: refs/heads/YARN-7402
Commit: c7ebcd76bf3dd14127336951f2be3de772e7826a
Parents: 4b540bb
Author: Eric Yang <ey...@apache.org>
Authored: Tue Jul 31 18:01:02 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Tue Jul 31 18:01:02 2018 -0400

----------------------------------------------------------------------
 .../hadoop/yarn/service/ServiceScheduler.java     |  1 +
 .../scheduler/SchedulerApplicationAttempt.java    |  3 ++-
 .../scheduler/fair/FairScheduler.java             |  8 ++++++--
 .../applicationsmanager/TestAMRestart.java        | 18 ++++++++++++++----
 4 files changed, 23 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ebcd76/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
index cfaf356..0801ad0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
@@ -649,6 +649,7 @@ public class ServiceScheduler extends CompositeService {
     @Override
     public void onContainersReceivedFromPreviousAttempts(
         List<Container> containers) {
+      LOG.info("Containers recovered after AM registered: {}", containers);
       if (containers == null || containers.isEmpty()) {
         return;
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ebcd76/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index dd6d38f..f9df2b8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -785,6 +785,7 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
       List<Container> returnContainerList = new ArrayList<>
           (recoveredPreviousAttemptContainers);
       recoveredPreviousAttemptContainers.clear();
+      updateNMTokens(returnContainerList);
       return returnContainerList;
     } finally {
       writeLock.unlock();
@@ -1466,4 +1467,4 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
   public Map<String, String> getApplicationSchedulingEnvs() {
     return this.applicationSchedulingEnvs;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ebcd76/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 20d1afe..037cebf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.NMToken;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
@@ -950,12 +951,15 @@ public class FairScheduler extends
     Resource headroom = application.getHeadroom();
     application.setApplicationHeadroomForMetrics(headroom);
 
+    List<Container> previousAttemptContainers = application
+        .pullPreviousAttemptContainers();
+    List<NMToken> updatedNMTokens = application.pullUpdatedNMTokens();
     return new Allocation(newlyAllocatedContainers, headroom,
         preemptionContainerIds, null, null,
-        application.pullUpdatedNMTokens(), null, null,
+        updatedNMTokens, null, null,
         application.pullNewlyPromotedContainers(),
         application.pullNewlyDemotedContainers(),
-        application.pullPreviousAttemptContainers());
+        previousAttemptContainers);
   }
 
   private List<MaxResourceValidationResult> validateResourceRequests(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ebcd76/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
index 4add186..9f122cb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
@@ -1048,12 +1048,12 @@ public class TestAMRestart extends ParameterizedSchedulerTestBase {
     rm1.start();
     YarnScheduler scheduler = rm1.getResourceScheduler();
 
-    MockNM nm1 = new MockNM("127.0.0.1:1234", 10240,
-        rm1.getResourceTrackerService());
+    String nm1Address = "127.0.0.1:1234";
+    MockNM nm1 = new MockNM(nm1Address, 10240, rm1.getResourceTrackerService());
     nm1.registerNode();
 
-    MockNM nm2 = new MockNM("127.0.0.1:2351", 4089,
-        rm1.getResourceTrackerService());
+    String nm2Address = "127.0.0.1:2351";
+    MockNM nm2 = new MockNM(nm2Address, 4089, rm1.getResourceTrackerService());
     nm2.registerNode();
 
     RMApp app1 = rm1.submitApp(200, "name", "user",
@@ -1120,6 +1120,11 @@ public class TestAMRestart extends ParameterizedSchedulerTestBase {
         registerResponse.getContainersFromPreviousAttempts().size());
     Assert.assertEquals("container 2", containerId2,
         registerResponse.getContainersFromPreviousAttempts().get(0).getId());
+    List<NMToken> prevNMTokens = registerResponse
+        .getNMTokensFromPreviousAttempts();
+    Assert.assertEquals(1, prevNMTokens.size());
+    // container 2 is running on node 1
+    Assert.assertEquals(nm1Address, prevNMTokens.get(0).getNodeId().toString());
 
     rm2.waitForState(app1.getApplicationId(), RMAppState.RUNNING);
 
@@ -1145,6 +1150,11 @@ public class TestAMRestart extends ParameterizedSchedulerTestBase {
               allocateResponse.getContainersFromPreviousAttempts());
           Assert.assertEquals("new containers should not be allocated",
               0, allocateResponse.getAllocatedContainers().size());
+          List<NMToken> nmTokens = allocateResponse.getNMTokens();
+          Assert.assertEquals(1, nmTokens.size());
+          // container 3 is running on node 2
+          Assert.assertEquals(nm2Address,
+              nmTokens.get(0).getNodeId().toString());
           return true;
         }
       } catch (Exception e) {
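
On the AM side, the effect of this change is that re-registration now returns NM tokens alongside the containers surviving from the previous attempt, so the AM can reach those NodeManagers without waiting for a later allocate() heartbeat. A hedged sketch of how an AM might absorb that data (helper class and method names are illustrative, not part of the patch):

```java
import java.util.List;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.NMToken;
import org.apache.hadoop.yarn.client.api.NMTokenCache;

public final class PreviousAttemptRecovery {
  private PreviousAttemptRecovery() { }

  /**
   * Cache the NM tokens handed back at registration so the AM can talk to the
   * NodeManagers that still host containers from the previous attempt.
   */
  public static void absorb(RegisterApplicationMasterResponse response,
      NMTokenCache tokenCache) {
    List<Container> recovered = response.getContainersFromPreviousAttempts();
    System.out.println("Containers recovered from previous attempt: " + recovered);
    for (NMToken token : response.getNMTokensFromPreviousAttempts()) {
      tokenCache.setToken(token.getNodeId().toString(), token.getToken());
    }
  }
}
```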




[18/50] [abbrv] hadoop git commit: YARN-7974. Allow updating application tracking url after registration. Contributed by Jonathan Hung

Posted by bo...@apache.org.
YARN-7974. Allow updating application tracking url after registration. Contributed by Jonathan Hung


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e06a5dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e06a5dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e06a5dc

Branch: refs/heads/YARN-7402
Commit: 3e06a5dcea8224ba71aec284df23b47d536bb06d
Parents: ee53602
Author: Jonathan Hung <jh...@linkedin.com>
Authored: Mon Jul 30 17:41:01 2018 -0700
Committer: Jonathan Hung <jh...@linkedin.com>
Committed: Mon Jul 30 17:44:18 2018 -0700

----------------------------------------------------------------------
 .../api/protocolrecords/AllocateRequest.java    | 47 +++++++++++-
 .../src/main/proto/yarn_service_protos.proto    |  1 +
 .../hadoop/yarn/client/api/AMRMClient.java      | 11 +++
 .../yarn/client/api/async/AMRMClientAsync.java  | 11 +++
 .../api/async/impl/AMRMClientAsyncImpl.java     |  5 ++
 .../yarn/client/api/impl/AMRMClientImpl.java    | 11 +++
 .../yarn/client/api/impl/TestAMRMClient.java    | 77 ++++++++++++++++++++
 .../impl/pb/AllocateRequestPBImpl.java          | 27 ++++++-
 .../resourcemanager/DefaultAMSProcessor.java    |  2 +-
 .../rmapp/attempt/RMAppAttemptImpl.java         | 20 +++++
 .../event/RMAppAttemptStatusupdateEvent.java    | 11 +++
 .../TestApplicationMasterService.java           | 34 +++++++++
 .../server/resourcemanager/TestRMRestart.java   | 45 ++++++++++++
 13 files changed, 298 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e06a5dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
index eee50e3..799088b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
@@ -73,7 +73,21 @@ public abstract class AllocateRequest {
         .releaseList(containersToBeReleased)
         .resourceBlacklistRequest(resourceBlacklistRequest).build();
   }
-  
+
+  @Public
+  @Unstable
+  public static AllocateRequest newInstance(int responseID, float appProgress,
+      List<ResourceRequest> resourceAsk,
+      List<ContainerId> containersToBeReleased,
+      ResourceBlacklistRequest resourceBlacklistRequest,
+      String trackingUrl) {
+    return AllocateRequest.newBuilder().responseId(responseID)
+        .progress(appProgress).askList(resourceAsk)
+        .releaseList(containersToBeReleased)
+        .resourceBlacklistRequest(resourceBlacklistRequest)
+        .trackingUrl(trackingUrl).build();
+  }
+
   @Public
   @Unstable
   public static AllocateRequest newInstance(int responseID, float appProgress,
@@ -240,6 +254,22 @@ public abstract class AllocateRequest {
       List<SchedulingRequest> schedulingRequests) {
   }
 
+  /**
+   * Get the tracking url update for this heartbeat.
+   * @return tracking url to update this application with
+   */
+  @Public
+  @Unstable
+  public abstract String getTrackingUrl();
+
+  /**
+   * Set the new tracking url for this application.
+   * @param trackingUrl the new tracking url
+   */
+  @Public
+  @Unstable
+  public abstract void setTrackingUrl(String trackingUrl);
+
   @Public
   @Unstable
   public static AllocateRequestBuilder newBuilder() {
@@ -356,6 +386,19 @@ public abstract class AllocateRequest {
     }
 
     /**
+     * Set the <code>trackingUrl</code> of the request.
+     * @see AllocateRequest#setTrackingUrl(String)
+     * @param trackingUrl new tracking url
+     * @return {@link AllocateRequestBuilder}
+     */
+    @Public
+    @Unstable
+    public AllocateRequestBuilder trackingUrl(String trackingUrl) {
+      allocateRequest.setTrackingUrl(trackingUrl);
+      return this;
+    }
+
+    /**
      * Return generated {@link AllocateRequest} object.
      * @return {@link AllocateRequest}
      */
@@ -365,4 +408,4 @@ public abstract class AllocateRequest {
       return allocateRequest;
     }
   }
-}
\ No newline at end of file
+}
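
For reference, a minimal sketch (not part of the commit) of an AM heartbeat built with the new trackingUrl builder method; lastResponseId and the URL are illustrative placeholders.

    import java.util.Collections;
    import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;

    AllocateRequest heartbeat = AllocateRequest.newBuilder()
        .responseId(lastResponseId)               // last response id echoed by the RM (assumed local variable)
        .progress(0.5f)
        .askList(Collections.emptyList())         // no new resource asks on this heartbeat
        .releaseList(Collections.emptyList())     // no containers to release
        .trackingUrl("http://am-host:8080/ui")    // new tracking URL sent with this heartbeat
        .build();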

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e06a5dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
index 92a65ad..acd452d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
@@ -92,6 +92,7 @@ message AllocateRequestProto {
   optional float progress = 5;
   repeated UpdateContainerRequestProto update_requests = 7;
   repeated SchedulingRequestProto scheduling_requests = 10;
+  optional string tracking_url = 11;
 }
 
 message NMTokenProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e06a5dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
index 32aa21d..59b3353 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
@@ -805,6 +805,17 @@ public abstract class AMRMClient<T extends AMRMClient.ContainerRequest> extends
   }
 
   /**
+   * Update application's tracking url on next heartbeat.
+   *
+   * @param trackingUrl new tracking url for this application
+   */
+  @Public
+  @InterfaceStability.Unstable
+  public void updateTrackingUrl(String trackingUrl) {
+    // Unimplemented.
+  }
+
+  /**
    * Wait for <code>check</code> to return true for each 1000 ms.
    * See also {@link #waitFor(java.util.function.Supplier, int)}
    * and {@link #waitFor(java.util.function.Supplier, int, int)}
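
A hedged usage sketch (not from the patch) of the new client-side API: the URL is staged locally and travels to the RM on the next heartbeat only, which is also what the new TestAMRMClient cases below verify; conf is assumed to be the AM's YarnConfiguration.

    AMRMClient<AMRMClient.ContainerRequest> amRMClient = AMRMClient.createAMRMClient();
    amRMClient.init(conf);
    amRMClient.start();
    amRMClient.registerApplicationMaster("am-host", 8080, "http://am-host:8080/old");
    amRMClient.updateTrackingUrl("http://am-host:8080/new");  // staged, not yet sent
    amRMClient.allocate(0.1f);                                // this heartbeat carries the new URL once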

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e06a5dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
index 0af687b..3dd2f71 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
@@ -413,6 +413,17 @@ extends AbstractService {
                                        List<String> blacklistRemovals);
 
   /**
+   * Update application's tracking url on next heartbeat.
+   *
+   * @param trackingUrl new tracking url for this application
+   */
+  @Public
+  @Unstable
+  public void updateTrackingUrl(String trackingUrl) {
+    // Unimplemented.
+  }
+
+  /**
    * Wait for <code>check</code> to return true for each 1000 ms.
    * See also {@link #waitFor(java.util.function.Supplier, int)}
    * and {@link #waitFor(java.util.function.Supplier, int, int)}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e06a5dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java
index 4f04b66..3cf2c34 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java
@@ -286,6 +286,11 @@ extends AMRMClientAsync<T> {
                               List<String> blacklistRemovals) {
     client.updateBlacklist(blacklistAdditions, blacklistRemovals);
   }
+
+  @Override
+  public void updateTrackingUrl(String trackingUrl) {
+    client.updateTrackingUrl(trackingUrl);
+  }
   
   private class HeartbeatThread extends Thread {
     public HeartbeatThread() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e06a5dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
index 7265d24..6dcecde 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
@@ -99,6 +99,7 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
   protected String appHostName;
   protected int appHostPort;
   protected String appTrackingUrl;
+  protected String newTrackingUrl;
 
   protected ApplicationMasterProtocol rmClient;
   protected Resource clusterAvailableResources;
@@ -308,6 +309,11 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
             .releaseList(releaseList).updateRequests(updateList)
             .schedulingRequests(schedulingRequestList).build();
 
+        if (this.newTrackingUrl != null) {
+          allocateRequest.setTrackingUrl(this.newTrackingUrl);
+          this.appTrackingUrl = this.newTrackingUrl;
+          this.newTrackingUrl = null;
+        }
         // clear blacklistAdditions and blacklistRemovals before
         // unsynchronized part
         blacklistAdditions.clear();
@@ -1008,6 +1014,11 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
     }
   }
 
+  @Override
+  public synchronized void updateTrackingUrl(String trackingUrl) {
+    this.newTrackingUrl = trackingUrl;
+  }
+
   private void updateAMRMToken(Token token) throws IOException {
     org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> amrmToken =
         new org.apache.hadoop.security.token.Token<AMRMTokenIdentifier>(token

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e06a5dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
index 8dda8b4..cf83779 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
@@ -20,10 +20,12 @@ package org.apache.hadoop.yarn.client.api.impl;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
 import java.io.IOException;
@@ -79,6 +81,7 @@ import org.junit.Assume;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
+import org.mockito.ArgumentCaptor;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 import org.eclipse.jetty.util.log.Log;
@@ -1994,4 +1997,78 @@ public class TestAMRMClient extends BaseAMRMClientTest{
       }
     }
   }
+
+  @Test(timeout = 60000)
+  public void testNoUpdateTrackingUrl()  {
+    try {
+      AMRMClientImpl<ContainerRequest> amClient = null;
+      amClient = new AMRMClientImpl<>();
+      amClient.init(conf);
+      amClient.start();
+      amClient.registerApplicationMaster("Host", 10000, "");
+
+      assertEquals("", amClient.appTrackingUrl);
+
+      ApplicationMasterProtocol mockRM = mock(ApplicationMasterProtocol.class);
+      AllocateResponse mockResponse = mock(AllocateResponse.class);
+      when(mockRM.allocate(any(AllocateRequest.class)))
+          .thenReturn(mockResponse);
+      ApplicationMasterProtocol realRM = amClient.rmClient;
+      amClient.rmClient = mockRM;
+      // Do allocate without updated tracking url
+      amClient.allocate(0.1f);
+      ArgumentCaptor<AllocateRequest> argument =
+          ArgumentCaptor.forClass(AllocateRequest.class);
+      verify(mockRM).allocate(argument.capture());
+      assertNull(argument.getValue().getTrackingUrl());
+
+      amClient.rmClient = realRM;
+      amClient
+          .unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null,
+              null);
+    } catch (IOException | YarnException e) {
+      throw new AssertionError(
+          "testNoUpdateTrackingUrl unexpectedly threw exception: " + e);
+    }
+  }
+
+  @Test(timeout = 60000)
+  public void testUpdateTrackingUrl() {
+    try {
+      AMRMClientImpl<ContainerRequest> amClient = null;
+      amClient = new AMRMClientImpl<>();
+      amClient.init(conf);
+      amClient.start();
+      amClient.registerApplicationMaster("Host", 10000, "");
+
+      String trackingUrl = "hadoop.apache.org";
+      assertEquals("", amClient.appTrackingUrl);
+
+      ApplicationMasterProtocol mockRM = mock(ApplicationMasterProtocol.class);
+      AllocateResponse mockResponse = mock(AllocateResponse.class);
+      when(mockRM.allocate(any(AllocateRequest.class)))
+          .thenReturn(mockResponse);
+      ApplicationMasterProtocol realRM = amClient.rmClient;
+      amClient.rmClient = mockRM;
+      // Do allocate with updated tracking url
+      amClient.updateTrackingUrl(trackingUrl);
+      assertEquals(trackingUrl, amClient.newTrackingUrl);
+      assertEquals("", amClient.appTrackingUrl);
+      amClient.allocate(0.1f);
+      assertNull(amClient.newTrackingUrl);
+      assertEquals(trackingUrl, amClient.appTrackingUrl);
+      ArgumentCaptor<AllocateRequest> argument
+          = ArgumentCaptor.forClass(AllocateRequest.class);
+      verify(mockRM).allocate(argument.capture());
+      assertEquals(trackingUrl, argument.getValue().getTrackingUrl());
+
+      amClient.rmClient = realRM;
+      amClient
+          .unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null,
+              null);
+    } catch (IOException | YarnException e) {
+      throw new AssertionError(
+          "testUpdateTrackingUrl unexpectedly threw exception: " + e);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e06a5dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
index 50672a3..b5360a5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java
@@ -58,6 +58,7 @@ public class AllocateRequestPBImpl extends AllocateRequest {
   private List<UpdateContainerRequest> updateRequests = null;
   private List<SchedulingRequest> schedulingRequests = null;
   private ResourceBlacklistRequest blacklistRequest = null;
+  private String trackingUrl = null;
   
   public AllocateRequestPBImpl() {
     builder = AllocateRequestProto.newBuilder();
@@ -111,6 +112,9 @@ public class AllocateRequestPBImpl extends AllocateRequest {
     if (this.blacklistRequest != null) {
       builder.setBlacklistRequest(convertToProtoFormat(this.blacklistRequest));
     }
+    if (this.trackingUrl != null) {
+      builder.setTrackingUrl(this.trackingUrl);
+    }
   }
 
   private void mergeLocalToProto() {
@@ -398,7 +402,28 @@ public class AllocateRequestPBImpl extends AllocateRequest {
       this.release.add(convertFromProtoFormat(c));
     }
   }
-  
+
+  @Override
+  public String getTrackingUrl() {
+    AllocateRequestProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.trackingUrl != null) {
+      return this.trackingUrl;
+    }
+    if (p.hasTrackingUrl()) {
+      this.trackingUrl = p.getTrackingUrl();
+    }
+    return this.trackingUrl;
+  }
+
+  @Override
+  public void setTrackingUrl(String trackingUrl) {
+    maybeInitBuilder();
+    if (trackingUrl == null) {
+      builder.clearTrackingUrl();
+    }
+    this.trackingUrl = trackingUrl;
+  }
+
   private void addReleasesToProto() {
     maybeInitBuilder();
     builder.clearRelease();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e06a5dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
index 43f73e4..4cd5925 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
@@ -401,7 +401,7 @@ final class DefaultAMSProcessor implements ApplicationMasterServiceProcessor {
     // Send the status update to the appAttempt.
     getRmContext().getDispatcher().getEventHandler().handle(
         new RMAppAttemptStatusupdateEvent(appAttemptId, request
-            .getProgress()));
+            .getProgress(), request.getTrackingUrl()));
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e06a5dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 32f275f..3ec9c49 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -1823,6 +1823,26 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
       // Update progress
       appAttempt.progress = statusUpdateEvent.getProgress();
 
+      // Update tracking url if changed and save it to state store
+      String newTrackingUrl = statusUpdateEvent.getTrackingUrl();
+      if (newTrackingUrl != null &&
+          !newTrackingUrl.equals(appAttempt.originalTrackingUrl)) {
+        appAttempt.originalTrackingUrl = newTrackingUrl;
+        ApplicationAttemptStateData attemptState = ApplicationAttemptStateData
+            .newInstance(appAttempt.applicationAttemptId,
+                appAttempt.getMasterContainer(),
+                appAttempt.rmContext.getStateStore()
+                    .getCredentialsFromAppAttempt(appAttempt),
+                appAttempt.startTime, appAttempt.recoveredFinalState,
+                newTrackingUrl, appAttempt.getDiagnostics(), null,
+                ContainerExitStatus.INVALID, appAttempt.getFinishTime(),
+                appAttempt.attemptMetrics.getAggregateAppResourceUsage()
+                    .getResourceUsageSecondsMap(),
+                appAttempt.attemptMetrics.getPreemptedResourceSecondsMap());
+        appAttempt.rmContext.getStateStore()
+            .updateApplicationAttemptState(attemptState);
+      }
+
       // Ping to AMLivelinessMonitor
       appAttempt.rmContext.getAMLivelinessMonitor().receivedPing(
           statusUpdateEvent.getApplicationAttemptId());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e06a5dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptStatusupdateEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptStatusupdateEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptStatusupdateEvent.java
index b1b63b1..1b7442d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptStatusupdateEvent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptStatusupdateEvent.java
@@ -25,15 +25,26 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptE
 public class RMAppAttemptStatusupdateEvent extends RMAppAttemptEvent {
 
   private final float progress;
+  private final String trackingUrl;
 
   public RMAppAttemptStatusupdateEvent(ApplicationAttemptId appAttemptId,
       float progress) {
+    this(appAttemptId, progress, null);
+  }
+
+  public RMAppAttemptStatusupdateEvent(ApplicationAttemptId appAttemptId,
+                                       float progress, String trackingUrl) {
     super(appAttemptId, RMAppAttemptEventType.STATUS_UPDATE);
     this.progress = progress;
+    this.trackingUrl = trackingUrl;
   }
 
   public float getProgress() {
     return this.progress;
   }
 
+  public String getTrackingUrl() {
+    return this.trackingUrl;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e06a5dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
index 9696741..562ba5d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
@@ -956,4 +956,38 @@ public class TestApplicationMasterService {
       fail("Cannot find RMContainer");
     }
   }
+
+  @Test(timeout = 300000)
+  public void testUpdateTrackingUrl() throws Exception {
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
+    MockRM rm = new MockRM(conf);
+    rm.start();
+
+    // Register node1
+    MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
+
+    RMApp app1 = rm.submitApp(2048);
+
+    nm1.nodeHeartbeat(true);
+    RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
+    MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
+    am1.registerAppAttempt();
+    Assert.assertEquals("N/A", rm.getRMContext().getRMApps().get(
+        app1.getApplicationId()).getOriginalTrackingUrl());
+
+    AllocateRequestPBImpl allocateRequest = new AllocateRequestPBImpl();
+    String newTrackingUrl = "hadoop.apache.org";
+    allocateRequest.setTrackingUrl(newTrackingUrl);
+
+    am1.allocate(allocateRequest);
+    Assert.assertEquals(newTrackingUrl, rm.getRMContext().getRMApps().get(
+        app1.getApplicationId()).getOriginalTrackingUrl());
+
+    // Send it again
+    am1.allocate(allocateRequest);
+    Assert.assertEquals(newTrackingUrl, rm.getRMContext().getRMApps().get(
+        app1.getApplicationId()).getOriginalTrackingUrl());
+    rm.stop();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e06a5dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
index 07c5268..9aa5c53 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
@@ -2698,6 +2698,51 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
     rm2.stop();
   }
 
+  @Test(timeout = 20000)
+  public void testRMRestartAfterUpdateTrackingUrl() throws Exception {
+    MockRM rm = new MockRM(conf);
+    rm.start();
+
+    MemoryRMStateStore memStore = (MemoryRMStateStore) rm.getRMStateStore();
+
+    // Register node1
+    MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * 1024);
+
+    RMApp app1 = rm.submitApp(2048);
+
+    nm1.nodeHeartbeat(true);
+    RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
+    MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
+    am1.registerAppAttempt();
+
+    AllocateRequestPBImpl allocateRequest = new AllocateRequestPBImpl();
+    String newTrackingUrl = "hadoop.apache.org";
+    allocateRequest.setTrackingUrl(newTrackingUrl);
+
+    am1.allocate(allocateRequest);
+    // Check in-memory and stored tracking url
+    Assert.assertEquals(newTrackingUrl, rm.getRMContext().getRMApps().get(
+        app1.getApplicationId()).getOriginalTrackingUrl());
+    Assert.assertEquals(newTrackingUrl, rm.getRMContext().getRMApps().get(
+        app1.getApplicationId()).getCurrentAppAttempt()
+        .getOriginalTrackingUrl());
+    Assert.assertEquals(newTrackingUrl, memStore.getState()
+        .getApplicationState().get(app1.getApplicationId())
+        .getAttempt(attempt1.getAppAttemptId()).getFinalTrackingUrl());
+
+    // Start new RM, should recover updated tracking url
+    MockRM rm2 = new MockRM(conf, memStore);
+    rm2.start();
+    Assert.assertEquals(newTrackingUrl, rm.getRMContext().getRMApps().get(
+        app1.getApplicationId()).getOriginalTrackingUrl());
+    Assert.assertEquals(newTrackingUrl, rm.getRMContext().getRMApps().get(
+        app1.getApplicationId()).getCurrentAppAttempt()
+        .getOriginalTrackingUrl());
+
+    rm.stop();
+    rm2.stop();
+  }
+
   private Credentials getCreds() throws IOException {
     Credentials ts = new Credentials();
     DataOutputBuffer dob = new DataOutputBuffer();




[20/50] [abbrv] hadoop git commit: HDDS-279. DeleteBlocks command should not be sent for open containers. Contributed by Lokesh Jain.

Posted by bo...@apache.org.
HDDS-279. DeleteBlocks command should not be sent for open containers. Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b28bdc7e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b28bdc7e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b28bdc7e

Branch: refs/heads/YARN-7402
Commit: b28bdc7e8b488ef0df62a92bcfe7eb74bbe177c1
Parents: 7631e0a
Author: Mukul Kumar Singh <ms...@apache.org>
Authored: Tue Jul 31 19:50:40 2018 +0530
Committer: Mukul Kumar Singh <ms...@apache.org>
Committed: Tue Jul 31 19:50:40 2018 +0530

----------------------------------------------------------------------
 .../block/DatanodeDeletedBlockTransactions.java | 18 ++--
 .../hdds/scm/block/DeletedBlockLogImpl.java     |  8 +-
 .../org/apache/hadoop/ozone/OzoneTestUtils.java | 92 ++++++++++++++++++++
 .../ozone/TestStorageContainerManager.java      |  8 ++
 .../common/TestBlockDeletingService.java        | 17 +++-
 .../commandhandler/TestBlockDeletion.java       | 47 ++++------
 .../hadoop/ozone/web/client/TestKeys.java       |  3 +
 7 files changed, 152 insertions(+), 41 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28bdc7e/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
index e33a700..25420fe 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
@@ -28,6 +28,8 @@ import java.util.List;
 import java.util.Set;
 import java.util.UUID;
 import java.util.stream.Collectors;
+
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 
 /**
@@ -53,21 +55,26 @@ public class DatanodeDeletedBlockTransactions {
     this.nodeNum = nodeNum;
   }
 
-  public void addTransaction(DeletedBlocksTransaction tx,
-      Set<UUID> dnsWithTransactionCommitted) throws IOException {
+  public boolean addTransaction(DeletedBlocksTransaction tx,
+      Set<UUID> dnsWithTransactionCommitted) {
     Pipeline pipeline = null;
     try {
-      pipeline = mappingService.getContainerWithPipeline(tx.getContainerID())
-          .getPipeline();
+      ContainerWithPipeline containerWithPipeline =
+          mappingService.getContainerWithPipeline(tx.getContainerID());
+      if (containerWithPipeline.getContainerInfo().isContainerOpen()) {
+        return false;
+      }
+      pipeline = containerWithPipeline.getPipeline();
     } catch (IOException e) {
       SCMBlockDeletingService.LOG.warn("Got container info error.", e);
+      return false;
     }
 
     if (pipeline == null) {
       SCMBlockDeletingService.LOG.warn(
           "Container {} not found, continue to process next",
           tx.getContainerID());
-      return;
+      return false;
     }
 
     for (DatanodeDetails dd : pipeline.getMachines()) {
@@ -78,6 +85,7 @@ public class DatanodeDeletedBlockTransactions {
         addTransactionToDN(dnID, tx);
       }
     }
+    return true;
   }
 
   private void addTransactionToDN(UUID dnID, DeletedBlocksTransaction tx) {
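
Summarizing the changed contract in a short sketch (illustrative, not committed code): the caller now learns whether the transaction was actually queued, and all three failure paths report false.

    // true  -> container is closed; the transaction was queued for every
    //          datanode in its pipeline that has not already committed it
    // false -> container is still open, the container lookup failed, or the
    //          pipeline is missing; the caller should skip this transaction
    boolean queued = transactions.addTransaction(tx, dnsWithTransactionCommitted);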

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28bdc7e/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
index 752c9c7..ca4e1d0 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
@@ -386,9 +386,11 @@ public class DeletedBlockLogImpl implements DeletedBlockLog {
               .parseFrom(value);
 
           if (block.getCount() > -1 && block.getCount() <= maxRetry) {
-            Set<UUID> dnsWithTransactionCommitted = transactionToDNsCommitMap
-                .putIfAbsent(block.getTxID(), new ConcurrentHashSet<>());
-            transactions.addTransaction(block, dnsWithTransactionCommitted);
+            if (transactions.addTransaction(block,
+                transactionToDNsCommitMap.get(block.getTxID()))) {
+              transactionToDNsCommitMap
+                  .putIfAbsent(block.getTxID(), new ConcurrentHashSet<>());
+            }
           }
           return !transactions.isFull();
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28bdc7e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
new file mode 100644
index 0000000..7787b53
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone;
+
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.function.Consumer;
+
+public class OzoneTestUtils {
+
+  /**
+   * Close containers which contain the blocks listed in
+   * omKeyLocationInfoGroups.
+   *
+   * @param omKeyLocationInfoGroups locationInfos for a key.
+   * @param scm StorageContainerManager instance.
+   * @return true if close containers is successful.
+   * @throws IOException
+   */
+  public static boolean closeContainers(
+      List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups,
+      StorageContainerManager scm) throws IOException {
+    return performOperationOnKeyContainers((blockID) -> {
+      try {
+        scm.getScmContainerManager()
+            .updateContainerState(blockID.getContainerID(),
+                HddsProtos.LifeCycleEvent.FINALIZE);
+        scm.getScmContainerManager()
+            .updateContainerState(blockID.getContainerID(),
+                HddsProtos.LifeCycleEvent.CLOSE);
+        Assert.assertFalse(scm.getScmContainerManager()
+            .getContainerWithPipeline(blockID.getContainerID())
+            .getContainerInfo().isContainerOpen());
+      } catch (IOException e) {
+        e.printStackTrace();
+      }
+    }, omKeyLocationInfoGroups);
+  }
+
+  /**
+   * Performs the provided consumer on containers which contain the blocks
+   * listed in omKeyLocationInfoGroups.
+   *
+   * @param consumer Consumer which accepts BlockID as argument.
+   * @param omKeyLocationInfoGroups locationInfos for a key.
+   * @return true if consumer is successful.
+   * @throws IOException
+   */
+  public static boolean performOperationOnKeyContainers(
+      Consumer<BlockID> consumer,
+      List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups) throws IOException {
+
+    try {
+      for (OmKeyLocationInfoGroup omKeyLocationInfoGroup :
+          omKeyLocationInfoGroups) {
+        List<OmKeyLocationInfo> omKeyLocationInfos =
+            omKeyLocationInfoGroup.getLocationList();
+        for (OmKeyLocationInfo omKeyLocationInfo : omKeyLocationInfos) {
+          BlockID blockID = omKeyLocationInfo.getBlockID();
+          consumer.accept(blockID);
+        }
+      }
+    } catch (Error e) {
+      e.printStackTrace();
+      return false;
+    }
+    return true;
+  }
+
+}
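
A hedged usage sketch of the generic helper (names assumed from the surrounding tests): it walks every block of a key and hands each BlockID to the supplied consumer, here just logging the container id.

    OzoneTestUtils.performOperationOnKeyContainers(
        blockID -> System.out.println("container " + blockID.getContainerID()),
        keyInfo.getKeyLocationVersions());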

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28bdc7e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 7ca5fa1..c5d8747 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -212,6 +212,10 @@ public class TestStorageContainerManager {
       TestStorageContainerManagerHelper helper =
           new TestStorageContainerManagerHelper(cluster, conf);
       Map<String, OmKeyInfo> keyLocations = helper.createKeys(numKeys, 4096);
+      for (OmKeyInfo keyInfo : keyLocations.values()) {
+        OzoneTestUtils.closeContainers(keyInfo.getKeyLocationVersions(),
+            cluster.getStorageContainerManager());
+      }
 
       Map<Long, List<Long>> containerBlocks = createDeleteTXLog(delLog,
           keyLocations, helper);
@@ -294,6 +298,10 @@ public class TestStorageContainerManager {
     TestStorageContainerManagerHelper helper =
         new TestStorageContainerManagerHelper(cluster, conf);
     Map<String, OmKeyInfo> keyLocations = helper.createKeys(numKeys, 4096);
+    for (OmKeyInfo keyInfo : keyLocations.values()) {
+      OzoneTestUtils.closeContainers(keyInfo.getKeyLocationVersions(),
+          cluster.getStorageContainerManager());
+    }
 
     createDeleteTXLog(delLog, keyLocations, helper);
     // Verify a few TX gets created in the TX log.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28bdc7e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index a6e53c2..4ca4124 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -61,6 +61,7 @@ import java.util.Map;
 import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import static org.apache.hadoop.ozone.OzoneConfigKeys
     .OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
@@ -356,10 +357,18 @@ public class TestBlockDeletingService {
       // 1st interval processes 1 container 1 block and 10 chunks
       deleteAndWait(service, 1);
       Assert.assertEquals(10, getNumberOfChunksInContainers(containerSet));
-      deleteAndWait(service, 2);
-      deleteAndWait(service, 3);
-      deleteAndWait(service, 4);
-      deleteAndWait(service, 5);
+
+      AtomicInteger timesToProcess = new AtomicInteger(1);
+      GenericTestUtils.waitFor(() -> {
+        try {
+          timesToProcess.incrementAndGet();
+          deleteAndWait(service, timesToProcess.get());
+          if (getNumberOfChunksInContainers(containerSet) == 0) {
+            return true;
+          }
+        } catch (Exception e) {}
+        return false;
+      }, 100, 100000);
       Assert.assertEquals(0, getNumberOfChunksInContainers(containerSet));
     } finally {
       service.shutdown();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28bdc7e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index 4ae827b..ee9aed2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
 
 import com.google.common.primitives.Longs;
 import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -29,6 +28,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.OzoneTestUtils;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
@@ -40,7 +40,6 @@ import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.ozShell.TestOzoneShell;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -53,13 +52,15 @@ import java.io.File;
 import java.io.IOException;
 import java.util.*;
 import java.util.concurrent.TimeUnit;
-import java.util.function.Consumer;
 
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
+import static org.apache.hadoop.ozone
+    .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
 
 public class TestBlockDeletion {
   private static OzoneConfiguration conf = null;
   private static ObjectStore store;
+  private static MiniOzoneCluster cluster = null;
   private static ContainerSet dnContainerSet = null;
   private static StorageContainerManager scm = null;
   private static OzoneManager om = null;
@@ -81,9 +82,10 @@ public class TestBlockDeletion {
     conf.setQuietMode(false);
     conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100,
         TimeUnit.MILLISECONDS);
+    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200,
+        TimeUnit.MILLISECONDS);
 
-    MiniOzoneCluster cluster =
-        MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
+    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
     cluster.waitForClusterToBeReady();
     store = OzoneClientFactory.getRpcClient(conf).getObjectStore();
     dnContainerSet = cluster.getHddsDatanodes().get(0)
@@ -127,6 +129,14 @@ public class TestBlockDeletion {
     matchContainerTransactionIds();
     om.deleteKey(keyArgs);
     Thread.sleep(5000);
+    // The blocks should not be deleted in the DN as the container is open
+    Assert.assertTrue(!verifyBlocksDeleted(omKeyLocationInfoGroupList));
+
+    // close the containers which hold the blocks for the key
+    Assert
+        .assertTrue(
+            OzoneTestUtils.closeContainers(omKeyLocationInfoGroupList, scm));
+    Thread.sleep(5000);
     // The blocks should be deleted in the DN.
     Assert.assertTrue(verifyBlocksDeleted(omKeyLocationInfoGroupList));
 
@@ -157,7 +167,7 @@ public class TestBlockDeletion {
   private boolean verifyBlocksCreated(
       List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups)
       throws IOException {
-    return performOperationOnKeyContainers((blockID) -> {
+    return OzoneTestUtils.performOperationOnKeyContainers((blockID) -> {
       try {
         MetadataStore db = KeyUtils.getDB((KeyValueContainerData)
                 dnContainerSet.getContainer(blockID.getContainerID())
@@ -172,7 +182,7 @@ public class TestBlockDeletion {
   private boolean verifyBlocksDeleted(
       List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups)
       throws IOException {
-    return performOperationOnKeyContainers((blockID) -> {
+    return OzoneTestUtils.performOperationOnKeyContainers((blockID) -> {
       try {
         MetadataStore db = KeyUtils.getDB((KeyValueContainerData)
             dnContainerSet.getContainer(blockID.getContainerID())
@@ -188,25 +198,4 @@ public class TestBlockDeletion {
       }
     }, omKeyLocationInfoGroups);
   }
-
-  private boolean performOperationOnKeyContainers(Consumer<BlockID> consumer,
-      List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups)
-      throws IOException {
-
-    try {
-      for (OmKeyLocationInfoGroup omKeyLocationInfoGroup :
-          omKeyLocationInfoGroups) {
-        List<OmKeyLocationInfo> omKeyLocationInfos =
-            omKeyLocationInfoGroup.getLocationList();
-        for (OmKeyLocationInfo omKeyLocationInfo : omKeyLocationInfos) {
-          BlockID blockID = omKeyLocationInfo.getBlockID();
-          consumer.accept(blockID);
-        }
-      }
-    } catch (Error e) {
-      e.printStackTrace();
-      return false;
-    }
-    return true;
-  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28bdc7e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
index 540a564..2d6abe0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.ozone.OzoneTestUtils;
 import org.apache.hadoop.ozone.client.BucketArgs;
 import org.apache.hadoop.ozone.client.VolumeArgs;
 import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -698,6 +699,8 @@ public class TestKeys {
       for (OmKeyInfo keyInfo : createdKeys) {
         List<OmKeyLocationInfo> locations =
             keyInfo.getLatestVersionLocations().getLocationList();
+        OzoneTestUtils.closeContainers(keyInfo.getKeyLocationVersions(),
+            ozoneCluster.getStorageContainerManager());
         for (OmKeyLocationInfo location : locations) {
           KeyValueHandler  keyValueHandler = (KeyValueHandler) cm
               .getDispatcher().getHandler(ContainerProtos.ContainerType




[31/50] [abbrv] hadoop git commit: YARN-8595. [UI2] Container diagnostic information is missing from container page. Contributed by Akhil PB.

Posted by bo...@apache.org.
YARN-8595. [UI2] Container diagnostic information is missing from container page. Contributed by Akhil PB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d920b9db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d920b9db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d920b9db

Branch: refs/heads/YARN-7402
Commit: d920b9db77be44adc4f8a2a0c2df889af82be04f
Parents: a48a0cc
Author: Sunil G <su...@apache.org>
Authored: Wed Aug 1 14:27:54 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Wed Aug 1 14:27:54 2018 +0530

----------------------------------------------------------------------
 .../main/webapp/app/models/yarn-app-attempt.js  |  1 +
 .../app/models/yarn-timeline-container.js       |  1 +
 .../webapp/app/serializers/yarn-app-attempt.js  |  3 +-
 .../app/serializers/yarn-timeline-container.js  |  6 +--
 .../src/main/webapp/app/styles/app.scss         |  9 ++++
 .../templates/components/app-attempt-table.hbs  |  6 +++
 .../app/templates/components/timeline-view.hbs  | 44 ++++++++++++++------
 7 files changed, 51 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d920b9db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
index cffe198..f483695 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
@@ -32,6 +32,7 @@ export default DS.Model.extend({
   logsLink: DS.attr('string'),
   state: DS.attr('string'),
   appAttemptId: DS.attr('string'),
+  diagnosticsInfo: DS.attr('string'),
 
   appId: Ember.computed("id",function () {
     var id = this.get("id");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d920b9db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js
index 7482a2f..9384418 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js
@@ -31,6 +31,7 @@ export default DS.Model.extend({
   containerState: DS.attr('string'),
   nodeHttpAddress: DS.attr('string'),
   nodeId: DS.attr('string'),
+  diagnosticsInfo: DS.attr('string'),
 
   startTs: function() {
     return Converter.dateToTimeStamp(this.get("startedTime"));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d920b9db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js
index f8f598b..55f484b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js
@@ -40,7 +40,8 @@ export default DS.JSONAPISerializer.extend({
           hosts: payload.host,
           state: payload.appAttemptState,
           logsLink: payload.logsLink,
-          appAttemptId: payload.appAttemptId
+          appAttemptId: payload.appAttemptId,
+          diagnosticsInfo: payload.diagnosticsInfo
         }
       };
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d920b9db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js
index 1322972..99ab6c4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js
@@ -22,11 +22,6 @@ import Converter from 'yarn-ui/utils/converter';
 
 export default DS.JSONAPISerializer.extend({
   internalNormalizeSingleResponse(store, primaryModelClass, payload) {
-    var payloadEvents = payload.events,
-        createdEvent = payloadEvents.filterBy('id', 'YARN_CONTAINER_CREATED')[0],
-        startedTime = createdEvent? createdEvent.timestamp : Date.now(),
-        finishedEvent = payloadEvents.filterBy('id', 'YARN_CONTAINER_FINISHED')[0],
-        finishedTime = finishedEvent? finishedEvent.timestamp : Date.now()
 
     var fixedPayload = {
       id: payload.id,
@@ -42,6 +37,7 @@ export default DS.JSONAPISerializer.extend({
         containerExitStatus: payload.info.YARN_CONTAINER_EXIT_STATUS,
         containerState: payload.info.YARN_CONTAINER_STATE,
         nodeId: payload.info.YARN_CONTAINER_ALLOCATED_HOST + ':' + payload.info.YARN_CONTAINER_ALLOCATED_PORT,
+        diagnosticsInfo: payload.info.YARN_CONTAINER_DIAGNOSTICS_INFO
       }
     };
     return fixedPayload;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d920b9db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.scss
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.scss b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.scss
index a85e0eb..c0aaebe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.scss
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.scss
@@ -728,3 +728,12 @@ div.service-action-mask img {
   word-wrap: nowrap;
   overflow: scroll;
 }
+
+.diagnostic-info {
+  pre {
+    margin-bottom: 0;
+    white-space: pre-wrap;
+    border: none;
+    border-radius: 2px;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d920b9db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/app-attempt-table.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/app-attempt-table.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/app-attempt-table.hbs
index c02c6f7..dc0397a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/app-attempt-table.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/app-attempt-table.hbs
@@ -62,5 +62,11 @@
       <td><a href="{{prepend-protocol attempt.logsLink}}" target="_blank">Link</a></td>
     </tr>
     {{/if}}
+    {{#if attempt.diagnosticsInfo}}
+    <tr>
+      <td>Diagnostics Info</td>
+      <td>{{attempt.diagnosticsInfo}}</td>
+    </tr>
+    {{/if}}
   </tbody>
 </table>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d920b9db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/timeline-view.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/timeline-view.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/timeline-view.hbs
index 0a1209d..7e7f783 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/timeline-view.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/timeline-view.hbs
@@ -37,24 +37,42 @@
       <div class="tab-content">
         <div role="tabpanel" class="tab-pane {{if (eq viewType "graph") "active" ""}}" id="graphViewTab">
           <br/><br/>
-          <div class="col-md-7 container-fluid" id={{parent-id}}></div>
-          <!-- diag info -->
-          <div class="col-md-5 container-fluid">
-            <div class="panel panel-default add-ellipsis attempt-info-panel">
-              <div class="panel-heading">
-                {{#if selected.link}}
-                  {{#link-to selected.linkname selected.id (query-params service=serviceName)}}{{selected.id}}{{/link-to}}
+          <div class="row">
+            <div class="col-md-7 container-fluid" id={{parent-id}}></div>
+            <!-- diag info -->
+            <div class="col-md-5 container-fluid">
+              <div class="panel panel-default add-ellipsis attempt-info-panel">
+                <div class="panel-heading">
+                  {{#if selected.link}}
+                    {{#link-to selected.linkname selected.id (query-params service=serviceName)}}{{selected.id}}{{/link-to}}
+                  {{else}}
+                    {{selected.id}}
+                  {{/if}}
+                </div>
+                {{#if attemptModel}}
+                  {{app-attempt-table attempt=selected}}
                 {{else}}
-                  {{selected.id}}
+                  {{container-table container=selected}}
                 {{/if}}
               </div>
-              {{#if attemptModel}}
-                {{app-attempt-table attempt=selected}}
-              {{else}}
-                {{container-table container=selected}}
-              {{/if}}
             </div>
           </div>
+          {{#unless attemptModel}}
+            {{#if selected.diagnosticsInfo}}
+            <div class="row">
+              <div class="col-md-12">
+                <div class="panel panel-default">
+                  <div class="panel-heading">
+                    Diagnostic Info for {{selected.id}}
+                  </div>
+                  <div class="diagnostic-info">
+                    <pre>{{selected.diagnosticsInfo}}</pre>
+                  </div>
+                </div>
+              </div>
+            </div>
+            {{/if}}
+          {{/unless}}
         </div>
         <div role="tabpanel" class="tab-pane {{if (eq viewType "grid") "active" ""}}" id="gridViewTab">
           {{em-table columns=gridColumns rows=gridRows definition=tableDefinition}}




[42/50] [abbrv] hadoop git commit: YARN-8318. [UI2] IP address in component page shows N/A. Contributed by Yesha Vora.

Posted by bo...@apache.org.
YARN-8318. [UI2] IP address in component page shows N/A. Contributed by Yesha Vora.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5033d7da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5033d7da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5033d7da

Branch: refs/heads/YARN-7402
Commit: 5033d7da8f6f703d8774492c42e31e9b9cb692a5
Parents: e83719c
Author: Sunil G <su...@apache.org>
Authored: Thu Aug 2 20:09:24 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Thu Aug 2 20:09:24 2018 +0530

----------------------------------------------------------------------
 .../src/main/webapp/app/templates/yarn-component-instance/info.hbs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5033d7da/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance/info.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance/info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance/info.hbs
index ef517d0..553f4e8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance/info.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance/info.hbs
@@ -59,7 +59,7 @@
           </tr>
           <tr>
             <td>IP Address</td>
-            <td>{{check-availability model.container.ip}}</td>
+            <td>{{check-availability model.container.ipAddr}}</td>
           </tr>
           <tr>
             <td>Exit Status Code</td>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[05/50] [abbrv] hadoop git commit: HADOOP-15636. Follow-up from HADOOP-14918; restoring test under new name. Contributed by Gabor Bota.

Posted by bo...@apache.org.
HADOOP-15636. Follow-up from HADOOP-14918; restoring test under new name. Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59adeb8d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59adeb8d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59adeb8d

Branch: refs/heads/YARN-7402
Commit: 59adeb8d7f2f04bc56d37b2a2e65596fee6e4894
Parents: ed9d60e
Author: Sean Mackrory <ma...@apache.org>
Authored: Thu Jul 26 10:25:47 2018 -0600
Committer: Sean Mackrory <ma...@apache.org>
Committed: Fri Jul 27 18:23:29 2018 -0600

----------------------------------------------------------------------
 .../s3a/s3guard/ITestDynamoDBMetadataStore.java | 649 +++++++++++++++++++
 1 file changed, 649 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/59adeb8d/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java
new file mode 100644
index 0000000..a597858
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java
@@ -0,0 +1,649 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.s3guard;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import com.amazonaws.services.dynamodbv2.document.DynamoDB;
+import com.amazonaws.services.dynamodbv2.document.Item;
+import com.amazonaws.services.dynamodbv2.document.PrimaryKey;
+import com.amazonaws.services.dynamodbv2.document.Table;
+import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughputDescription;
+import com.amazonaws.services.dynamodbv2.model.ResourceNotFoundException;
+import com.amazonaws.services.dynamodbv2.model.TableDescription;
+
+import com.google.common.collect.Lists;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.hadoop.fs.contract.s3a.S3AContract;
+import org.apache.hadoop.fs.s3a.Constants;
+import org.apache.hadoop.fs.s3a.Tristate;
+
+import org.apache.hadoop.io.IOUtils;
+import org.junit.AfterClass;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.s3a.S3AFileStatus;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import static org.apache.hadoop.fs.s3a.Constants.*;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.*;
+import static org.apache.hadoop.fs.s3a.s3guard.PathMetadataDynamoDBTranslation.*;
+import static org.apache.hadoop.fs.s3a.s3guard.DynamoDBMetadataStore.*;
+import static org.apache.hadoop.test.LambdaTestUtils.*;
+
+/**
+ * Test that {@link DynamoDBMetadataStore} implements {@link MetadataStore}.
+ *
+ * In this integration test, we use a real AWS DynamoDB. A
+ * {@link DynamoDBMetadataStore} object is created in the @BeforeClass method
+ * and shared by all tests. You will be billed for AWS S3 and DynamoDB usage
+ * when you run these tests.
+ *
+ * According to the base class, every test case has an independent contract
+ * that creates a new {@link S3AFileSystem} instance and initializes it.
+ * A single table is created and shared between the tests.
+ */
+public class ITestDynamoDBMetadataStore extends MetadataStoreTestBase {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ITestDynamoDBMetadataStore.class);
+  public static final PrimaryKey
+      VERSION_MARKER_PRIMARY_KEY = createVersionMarkerPrimaryKey(
+      DynamoDBMetadataStore.VERSION_MARKER);
+
+  private S3AFileSystem fileSystem;
+  private S3AContract s3AContract;
+
+  private URI fsUri;
+
+  private String bucket;
+
+  private static DynamoDBMetadataStore ddbmsStatic;
+
+  private static String TEST_DYNAMODB_TABLE_NAME;
+
+  /**
+   * Create a path under the test path provided by
+   * the FS contract.
+   * @param filepath path string in
+   * @return a path qualified by the test filesystem
+   */
+  protected Path path(String filepath) {
+    return getFileSystem().makeQualified(
+        new Path(s3AContract.getTestPath(), filepath));
+  }
+
+  @Override
+  public void setUp() throws Exception {
+    Configuration conf = prepareTestConfiguration(new Configuration());
+    assertThatDynamoMetadataStoreImpl(conf);
+    Assume.assumeTrue("Test DynamoDB table name should be set to run "
+            + "integration tests.", TEST_DYNAMODB_TABLE_NAME != null);
+    conf.set(S3GUARD_DDB_TABLE_NAME_KEY, TEST_DYNAMODB_TABLE_NAME);
+
+    s3AContract = new S3AContract(conf);
+    s3AContract.init();
+
+    fileSystem = (S3AFileSystem) s3AContract.getTestFileSystem();
+    assume("No test filesystem", s3AContract.isEnabled());
+    assertNotNull("No test filesystem", fileSystem);
+    fsUri = fileSystem.getUri();
+    bucket = fileSystem.getBucket();
+
+    try{
+      super.setUp();
+    } catch (FileNotFoundException e){
+      LOG.warn("MetadataStoreTestBase setup failed. Waiting for table to be "
+          + "deleted before trying again.");
+      ddbmsStatic.getTable().waitForDelete();
+      super.setUp();
+    }
+  }
+
+
+  @BeforeClass
+  public static void beforeClassSetup() throws IOException {
+    Configuration conf = prepareTestConfiguration(new Configuration());
+    assertThatDynamoMetadataStoreImpl(conf);
+    TEST_DYNAMODB_TABLE_NAME = conf.get(S3GUARD_DDB_TEST_TABLE_NAME_KEY);
+    Assume.assumeTrue("Test DynamoDB table name should be set to run "
+        + "integration tests.", TEST_DYNAMODB_TABLE_NAME != null);
+    conf.set(S3GUARD_DDB_TABLE_NAME_KEY, TEST_DYNAMODB_TABLE_NAME);
+
+    LOG.debug("Creating static ddbms which will be shared between tests.");
+    ddbmsStatic = new DynamoDBMetadataStore();
+    ddbmsStatic.initialize(conf);
+  }
+
+  @AfterClass
+  public static void afterClassTeardown() {
+    LOG.debug("Destroying static DynamoDBMetadataStore.");
+    if (ddbmsStatic != null) {
+      try {
+        ddbmsStatic.destroy();
+      } catch (Exception e) {
+        LOG.warn("Failed to destroy tables in teardown", e);
+      }
+      IOUtils.closeStream(ddbmsStatic);
+      ddbmsStatic = null;
+    }
+  }
+
+  private static void assertThatDynamoMetadataStoreImpl(Configuration conf){
+    Assume.assumeTrue("Test only applies when DynamoDB is used for S3Guard",
+        conf.get(Constants.S3_METADATA_STORE_IMPL).equals(
+            Constants.S3GUARD_METASTORE_DYNAMO));
+  }
+
+
+  @Override
+  public void tearDown() throws Exception {
+    LOG.info("Removing data from ddbms table in teardown.");
+    // The following is a way to be sure the table will be cleared and there
+    // will be no leftovers after the test.
+    PathMetadata meta = ddbmsStatic.get(strToPath("/"));
+    if (meta != null){
+      for (DescendantsIterator desc = new DescendantsIterator(ddbmsStatic, meta);
+           desc.hasNext();) {
+        ddbmsStatic.forgetMetadata(desc.next().getPath());
+      }
+    }
+
+    fileSystem.close();
+  }
+
+  /**
+   * Each contract has its own S3AFileSystem and DynamoDBMetadataStore objects.
+   */
+  private class DynamoDBMSContract extends AbstractMSContract {
+
+    DynamoDBMSContract(Configuration conf) {
+    }
+
+    DynamoDBMSContract() {
+      this(new Configuration());
+    }
+
+    @Override
+    public S3AFileSystem getFileSystem() {
+      return ITestDynamoDBMetadataStore.this.fileSystem;
+    }
+
+    @Override
+    public DynamoDBMetadataStore getMetadataStore() {
+      return ITestDynamoDBMetadataStore.ddbmsStatic;
+    }
+  }
+
+  @Override
+  public DynamoDBMSContract createContract() {
+    return new DynamoDBMSContract();
+  }
+
+  @Override
+  public DynamoDBMSContract createContract(Configuration conf) {
+    return new DynamoDBMSContract(conf);
+  }
+
+  @Override
+  FileStatus basicFileStatus(Path path, int size, boolean isDir)
+      throws IOException {
+    String owner = UserGroupInformation.getCurrentUser().getShortUserName();
+    return isDir
+        ? new S3AFileStatus(true, path, owner)
+        : new S3AFileStatus(size, getModTime(), path, BLOCK_SIZE, owner);
+  }
+
+  private DynamoDBMetadataStore getDynamoMetadataStore() throws IOException {
+    return (DynamoDBMetadataStore) getContract().getMetadataStore();
+  }
+
+  private S3AFileSystem getFileSystem() {
+    return this.fileSystem;
+  }
+
+  /**
+   * This tests that after initialize() using an S3AFileSystem object, the
+   * instance should have been initialized successfully, and tables are ACTIVE.
+   */
+  @Test
+  public void testInitialize() throws IOException {
+    final S3AFileSystem s3afs = this.fileSystem;
+    final String tableName = "testInitialize";
+    final Configuration conf = s3afs.getConf();
+    conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName);
+    try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) {
+      ddbms.initialize(s3afs);
+      verifyTableInitialized(tableName, ddbms.getDynamoDB());
+      assertNotNull(ddbms.getTable());
+      assertEquals(tableName, ddbms.getTable().getTableName());
+      String expectedRegion = conf.get(S3GUARD_DDB_REGION_KEY,
+          s3afs.getBucketLocation(bucket));
+      assertEquals("DynamoDB table should be in configured region or the same" +
+              " region as S3 bucket",
+          expectedRegion,
+          ddbms.getRegion());
+      ddbms.destroy();
+    }
+  }
+
+  /**
+   * This tests that after initialize() using a Configuration object, the
+   * instance should have been initialized successfully, and tables are ACTIVE.
+   */
+  @Test
+  public void testInitializeWithConfiguration() throws IOException {
+    final String tableName = "testInitializeWithConfiguration";
+    final Configuration conf = getFileSystem().getConf();
+    conf.unset(S3GUARD_DDB_TABLE_NAME_KEY);
+    String savedRegion = conf.get(S3GUARD_DDB_REGION_KEY,
+        getFileSystem().getBucketLocation());
+    conf.unset(S3GUARD_DDB_REGION_KEY);
+    try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) {
+      ddbms.initialize(conf);
+      fail("Should have failed because the table name is not set!");
+    } catch (IllegalArgumentException ignored) {
+    }
+    // config table name
+    conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName);
+    try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) {
+      ddbms.initialize(conf);
+      fail("Should have failed because as the region is not set!");
+    } catch (IllegalArgumentException ignored) {
+    }
+    // config region
+    conf.set(S3GUARD_DDB_REGION_KEY, savedRegion);
+    try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) {
+      ddbms.initialize(conf);
+      verifyTableInitialized(tableName, ddbms.getDynamoDB());
+      assertNotNull(ddbms.getTable());
+      assertEquals(tableName, ddbms.getTable().getTableName());
+      assertEquals("Unexpected key schema found!",
+          keySchema(),
+          ddbms.getTable().describe().getKeySchema());
+      ddbms.destroy();
+    }
+  }
+
+  /**
+   * Test that for a large batch write request, the limit is handled correctly.
+   */
+  @Test
+  public void testBatchWrite() throws IOException {
+    final int[] numMetasToDeleteOrPut = {
+        -1, // null
+        0, // empty collection
+        1, // one path
+        S3GUARD_DDB_BATCH_WRITE_REQUEST_LIMIT, // exact limit of a batch request
+        S3GUARD_DDB_BATCH_WRITE_REQUEST_LIMIT + 1 // limit + 1
+    };
+    DynamoDBMetadataStore ms = getDynamoMetadataStore();
+    for (int numOldMetas : numMetasToDeleteOrPut) {
+      for (int numNewMetas : numMetasToDeleteOrPut) {
+        doTestBatchWrite(numOldMetas, numNewMetas, ms);
+      }
+    }
+  }
+
+  private void doTestBatchWrite(int numDelete, int numPut,
+      DynamoDBMetadataStore ms) throws IOException {
+    Path path = new Path(
+        "/ITestDynamoDBMetadataStore_testBatchWrite_" + numDelete + '_'
+            + numPut);
+    final Path root = fileSystem.makeQualified(path);
+    final Path oldDir = new Path(root, "oldDir");
+    final Path newDir = new Path(root, "newDir");
+    LOG.info("doTestBatchWrite: oldDir={}, newDir={}", oldDir, newDir);
+
+    ms.put(new PathMetadata(basicFileStatus(oldDir, 0, true)));
+    ms.put(new PathMetadata(basicFileStatus(newDir, 0, true)));
+
+    final List<PathMetadata> oldMetas = numDelete < 0 ? null :
+        new ArrayList<>(numDelete);
+    for (int i = 0; i < numDelete; i++) {
+      oldMetas.add(new PathMetadata(
+          basicFileStatus(new Path(oldDir, "child" + i), i, true)));
+    }
+    final List<PathMetadata> newMetas = numPut < 0 ? null :
+        new ArrayList<>(numPut);
+    for (int i = 0; i < numPut; i++) {
+      newMetas.add(new PathMetadata(
+          basicFileStatus(new Path(newDir, "child" + i), i, false)));
+    }
+
+    Collection<Path> pathsToDelete = null;
+    if (oldMetas != null) {
+      // put all metadata of old paths and verify
+      ms.put(new DirListingMetadata(oldDir, oldMetas, false));
+      assertEquals(0, ms.listChildren(newDir).withoutTombstones().numEntries());
+      assertTrue(CollectionUtils
+          .isEqualCollection(oldMetas, ms.listChildren(oldDir).getListing()));
+
+      pathsToDelete = new ArrayList<>(oldMetas.size());
+      for (PathMetadata meta : oldMetas) {
+        pathsToDelete.add(meta.getFileStatus().getPath());
+      }
+    }
+
+    // move the old paths to new paths and verify
+    ms.move(pathsToDelete, newMetas);
+    assertEquals(0, ms.listChildren(oldDir).withoutTombstones().numEntries());
+    if (newMetas != null) {
+      assertTrue(CollectionUtils
+          .isEqualCollection(newMetas, ms.listChildren(newDir).getListing()));
+    }
+  }
+
+  @Test
+  public void testInitExistingTable() throws IOException {
+    final DynamoDBMetadataStore ddbms = getDynamoMetadataStore();
+    final String tableName = ddbms.getTable().getTableName();
+    verifyTableInitialized(tableName, ddbms.getDynamoDB());
+    // create existing table
+    ddbms.initTable();
+    verifyTableInitialized(tableName, ddbms.getDynamoDB());
+  }
+
+  /**
+   * Test the low level version check code.
+   */
+  @Test
+  public void testItemVersionCompatibility() throws Throwable {
+    verifyVersionCompatibility("table",
+        createVersionMarker(VERSION_MARKER, VERSION, 0));
+  }
+
+  /**
+   * Test that a version marker entry without the version number field
+   * is rejected as incompatible with a meaningful error message.
+   */
+  @Test
+  public void testItemLacksVersion() throws Throwable {
+    intercept(IOException.class, E_NOT_VERSION_MARKER,
+        () -> verifyVersionCompatibility("table",
+            new Item().withPrimaryKey(
+                createVersionMarkerPrimaryKey(VERSION_MARKER))));
+  }
+
+  /**
+   * Delete the version marker and verify that table init fails.
+   */
+  @Test
+  public void testTableVersionRequired() throws Exception {
+    String tableName = "testTableVersionRequired";
+    Configuration conf = getFileSystem().getConf();
+    int maxRetries = conf.getInt(S3GUARD_DDB_MAX_RETRIES,
+        S3GUARD_DDB_MAX_RETRIES_DEFAULT);
+    conf.setInt(S3GUARD_DDB_MAX_RETRIES, 3);
+    conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName);
+
+    try(DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) {
+      ddbms.initialize(conf);
+      Table table = verifyTableInitialized(tableName, ddbms.getDynamoDB());
+      table.deleteItem(VERSION_MARKER_PRIMARY_KEY);
+
+      // create existing table
+      intercept(IOException.class, E_NO_VERSION_MARKER,
+          () -> ddbms.initTable());
+
+      conf.setInt(S3GUARD_DDB_MAX_RETRIES, maxRetries);
+      ddbms.destroy();
+    }
+  }
+
+  /**
+   * Set the version value to a different number and verify that
+   * table init fails.
+   */
+  @Test
+  public void testTableVersionMismatch() throws Exception {
+    String tableName = "testTableVersionMismatch";
+    Configuration conf = getFileSystem().getConf();
+    conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName);
+
+    try(DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) {
+      ddbms.initialize(conf);
+      Table table = verifyTableInitialized(tableName, ddbms.getDynamoDB());
+      table.deleteItem(VERSION_MARKER_PRIMARY_KEY);
+      Item v200 = createVersionMarker(VERSION_MARKER, 200, 0);
+      table.putItem(v200);
+
+      // create existing table
+      intercept(IOException.class, E_INCOMPATIBLE_VERSION,
+          () -> ddbms.initTable());
+      ddbms.destroy();
+    }
+  }
+
+
+
+
+  /**
+   * Test that initTable fails with IOException when table does not exist and
+   * table auto-creation is disabled.
+   */
+  @Test
+  public void testFailNonexistentTable() throws IOException {
+    final String tableName = "testFailNonexistentTable";
+    final S3AFileSystem s3afs = getFileSystem();
+    final Configuration conf = s3afs.getConf();
+    conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName);
+    conf.unset(S3GUARD_DDB_TABLE_CREATE_KEY);
+    try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) {
+      ddbms.initialize(s3afs);
+      fail("Should have failed as table does not exist and table auto-creation"
+          + " is disabled");
+    } catch (IOException ignored) {
+    }
+  }
+
+  /**
+   * Test cases about root directory as it is not in the DynamoDB table.
+   */
+  @Test
+  public void testRootDirectory() throws IOException {
+    final DynamoDBMetadataStore ddbms = getDynamoMetadataStore();
+    Path rootPath = new Path(new Path(fsUri), "/");
+    verifyRootDirectory(ddbms.get(rootPath), true);
+
+    ddbms.put(new PathMetadata(new S3AFileStatus(true,
+        new Path(rootPath, "foo"),
+        UserGroupInformation.getCurrentUser().getShortUserName())));
+    verifyRootDirectory(ddbms.get(rootPath), false);
+  }
+
+  private void verifyRootDirectory(PathMetadata rootMeta, boolean isEmpty) {
+    assertNotNull(rootMeta);
+    final FileStatus status = rootMeta.getFileStatus();
+    assertNotNull(status);
+    assertTrue(status.isDirectory());
+    // UNKNOWN is always a valid option, but true / false should not contradict
+    if (isEmpty) {
+      assertNotSame("Should not be marked non-empty",
+          Tristate.FALSE,
+          rootMeta.isEmptyDirectory());
+    } else {
+      assertNotSame("Should not be marked empty",
+          Tristate.TRUE,
+          rootMeta.isEmptyDirectory());
+    }
+  }
+
+  /**
+   * Test that when moving nested paths, all its ancestors up to destination
+   * root will also be created.
+   * Here is the directory tree before move:
+   * <pre>
+   * testMovePopulateAncestors
+   * ├── a
+   * │   └── b
+   * │       └── src
+   * │           ├── dir1
+   * │           │   └── dir2
+   * │           └── file1.txt
+   * └── c
+   *     └── d
+   *         └── dest
+   *</pre>
+   * As part of rename(a/b/src, d/c/dest), S3A will enumerate the subtree at
+   * a/b/src.  This test verifies that after the move, the new subtree at
+   * 'dest' is reachable from the root (i.e. c/ and c/d exist in the table).
+   * DynamoDBMetadataStore depends on this property to do recursive delete
+   * without a full table scan.
+   */
+  @Test
+  public void testMovePopulatesAncestors() throws IOException {
+    final DynamoDBMetadataStore ddbms = getDynamoMetadataStore();
+    final String testRoot = "/testMovePopulatesAncestors";
+    final String srcRoot = testRoot + "/a/b/src";
+    final String destRoot = testRoot + "/c/d/e/dest";
+
+    final Path nestedPath1 = strToPath(srcRoot + "/file1.txt");
+    ddbms.put(new PathMetadata(basicFileStatus(nestedPath1, 1024, false)));
+    final Path nestedPath2 = strToPath(srcRoot + "/dir1/dir2");
+    ddbms.put(new PathMetadata(basicFileStatus(nestedPath2, 0, true)));
+
+    // We don't put the destRoot path here, since put() would create ancestor
+    // entries, and we want to ensure that move() does it, instead.
+
+    // Build enumeration of src / dest paths and do the move()
+    final Collection<Path> fullSourcePaths = Lists.newArrayList(
+        strToPath(srcRoot),
+        strToPath(srcRoot + "/dir1"),
+        strToPath(srcRoot + "/dir1/dir2"),
+        strToPath(srcRoot + "/file1.txt")
+    );
+    final Collection<PathMetadata> pathsToCreate = Lists.newArrayList(
+        new PathMetadata(basicFileStatus(strToPath(destRoot),
+            0, true)),
+        new PathMetadata(basicFileStatus(strToPath(destRoot + "/dir1"),
+            0, true)),
+        new PathMetadata(basicFileStatus(strToPath(destRoot + "/dir1/dir2"),
+            0, true)),
+        new PathMetadata(basicFileStatus(strToPath(destRoot + "/file1.txt"),
+            1024, false))
+    );
+
+    ddbms.move(fullSourcePaths, pathsToCreate);
+
+    // assert that all the ancestors should have been populated automatically
+    assertCached(testRoot + "/c");
+    assertCached(testRoot + "/c/d");
+    assertCached(testRoot + "/c/d/e");
+    assertCached(destRoot /* /c/d/e/dest */);
+
+    // Also check moved files while we're at it
+    assertCached(destRoot + "/dir1");
+    assertCached(destRoot + "/dir1/dir2");
+    assertCached(destRoot + "/file1.txt");
+  }
+
+  @Test
+  public void testProvisionTable() throws IOException {
+    final String tableName = "testProvisionTable";
+    Configuration conf = getFileSystem().getConf();
+    conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName);
+
+    try(DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) {
+      ddbms.initialize(conf);
+      DynamoDB dynamoDB = ddbms.getDynamoDB();
+      final ProvisionedThroughputDescription oldProvision =
+          dynamoDB.getTable(tableName).describe().getProvisionedThroughput();
+      ddbms.provisionTable(oldProvision.getReadCapacityUnits() * 2,
+          oldProvision.getWriteCapacityUnits() * 2);
+      ddbms.initTable();
+      final ProvisionedThroughputDescription newProvision =
+          dynamoDB.getTable(tableName).describe().getProvisionedThroughput();
+      LOG.info("Old provision = {}, new provision = {}", oldProvision,
+          newProvision);
+      assertEquals(oldProvision.getReadCapacityUnits() * 2,
+          newProvision.getReadCapacityUnits().longValue());
+      assertEquals(oldProvision.getWriteCapacityUnits() * 2,
+          newProvision.getWriteCapacityUnits().longValue());
+      ddbms.destroy();
+    }
+  }
+
+  @Test
+  public void testDeleteTable() throws Exception {
+    final String tableName = "testDeleteTable";
+    Path testPath = new Path(new Path(fsUri), "/" + tableName);
+    final S3AFileSystem s3afs = getFileSystem();
+    final Configuration conf = s3afs.getConf();
+    conf.set(S3GUARD_DDB_TABLE_NAME_KEY, tableName);
+    try (DynamoDBMetadataStore ddbms = new DynamoDBMetadataStore()) {
+      ddbms.initialize(s3afs);
+      // we can list the empty table
+      ddbms.listChildren(testPath);
+      DynamoDB dynamoDB = ddbms.getDynamoDB();
+      ddbms.destroy();
+      verifyTableNotExist(tableName, dynamoDB);
+
+      // delete the table once more; the ResourceNotFoundException is swallowed silently
+      ddbms.destroy();
+      verifyTableNotExist(tableName, dynamoDB);
+      try {
+        // we can no longer list the destroyed table
+        ddbms.listChildren(testPath);
+        fail("Should have failed after the table is destroyed!");
+      } catch (IOException ignored) {
+      }
+      ddbms.destroy();
+    }
+  }
+
+  /**
+   * This validates the table is created and ACTIVE in DynamoDB.
+   *
+   * This should not rely on the {@link DynamoDBMetadataStore} implementation.
+   * Return the table
+   */
+  private Table verifyTableInitialized(String tableName, DynamoDB dynamoDB) {
+    final Table table = dynamoDB.getTable(tableName);
+    final TableDescription td = table.describe();
+    assertEquals(tableName, td.getTableName());
+    assertEquals("ACTIVE", td.getTableStatus());
+    return table;
+  }
+
+  /**
+   * This validates the table is not found in DynamoDB.
+   *
+   * This should not rely on the {@link DynamoDBMetadataStore} implementation.
+   */
+  private void verifyTableNotExist(String tableName, DynamoDB dynamoDB) throws
+      Exception{
+    intercept(ResourceNotFoundException.class,
+        () -> dynamoDB.getTable(tableName).describe());
+  }
+
+}
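
A minimal sketch of the metadata-store lifecycle the test above exercises, assembled only from calls that appear in this diff (initialize, put, get, destroy). The table name, region, bucket and owner below are hypothetical placeholders rather than values from the commit, and AWS credentials are assumed to be configured elsewhere:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.S3AFileStatus;
import org.apache.hadoop.fs.s3a.s3guard.DynamoDBMetadataStore;
import org.apache.hadoop.fs.s3a.s3guard.PathMetadata;

import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_DDB_REGION_KEY;
import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_DDB_TABLE_NAME_KEY;

public class S3GuardDynamoSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical table and region; both must be set when initializing from a
    // bare Configuration, as testInitializeWithConfiguration demonstrates.
    conf.set(S3GUARD_DDB_TABLE_NAME_KEY, "s3guard-sketch-table");
    conf.set(S3GUARD_DDB_REGION_KEY, "us-west-2");

    try (DynamoDBMetadataStore ms = new DynamoDBMetadataStore()) {
      ms.initialize(conf);                               // create or attach to the table
      Path dir = new Path("s3a://example-bucket/data");  // hypothetical path
      ms.put(new PathMetadata(new S3AFileStatus(true, dir, "hadoop")));
      System.out.println(ms.get(dir));                   // read the entry back
      ms.destroy();                                      // drop the table when done
    }
  }
}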




[26/50] [abbrv] hadoop git commit: HDFS-13322 fuse dfs - uid persists when switching between ticket caches. Contributed by Istvan Fajth.

Posted by bo...@apache.org.
HDFS-13322 fuse dfs - uid persists when switching between ticket caches.  Contributed by Istvan Fajth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40f9b0c5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40f9b0c5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40f9b0c5

Branch: refs/heads/YARN-7402
Commit: 40f9b0c5c13f40921b6976589543a04efa489f93
Parents: c835fc0
Author: Aaron Fabbri <fa...@apache.org>
Authored: Tue Jul 31 15:21:38 2018 -0700
Committer: Aaron Fabbri <fa...@apache.org>
Committed: Tue Jul 31 18:44:49 2018 -0700

----------------------------------------------------------------------
 .../src/main/native/fuse-dfs/fuse_connect.c        | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/40f9b0c5/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.c
index 6ee4ad5..f08917a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.c
@@ -192,7 +192,7 @@ int fuseConnectInit(const char *nnUri, int port)
 }
 
 /**
- * Compare two libhdfs connections by username
+ * Compare two libhdfs connections by username and Kerberos ticket cache path
  *
  * @param a                The first libhdfs connection
  * @param b                The second libhdfs connection
@@ -201,22 +201,26 @@ int fuseConnectInit(const char *nnUri, int port)
  */
 static int hdfsConnCompare(const struct hdfsConn *a, const struct hdfsConn *b)
 {
-  return strcmp(a->usrname, b->usrname);
+  int rc = strcmp(a->usrname, b->usrname);
+  if (rc) return rc;
+  return gHdfsAuthConf == AUTH_CONF_KERBEROS && strcmp(a->kpath, b->kpath);
 }
 
 /**
  * Find a libhdfs connection by username
  *
  * @param usrname         The username to look up
+ * @param kpath           The Kerberos ticket cache file path
  *
  * @return                The connection, or NULL if none could be found
  */
-static struct hdfsConn* hdfsConnFind(const char *usrname)
+static struct hdfsConn* hdfsConnFind(const char *usrname, const char *kpath)
 {
   struct hdfsConn exemplar;
 
   memset(&exemplar, 0, sizeof(exemplar));
   exemplar.usrname = (char*)usrname;
+  exemplar.kpath = (char*)kpath;
   return RB_FIND(hdfsConnTree, &gConnTree, &exemplar);
 }
 
@@ -542,8 +546,13 @@ static int fuseConnect(const char *usrname, struct fuse_context *ctx,
   int ret;
   struct hdfsConn* conn;
 
+  char kpath[PATH_MAX] = { 0 };
+  if (gHdfsAuthConf == AUTH_CONF_KERBEROS) {
+    findKerbTicketCachePath(ctx, kpath, sizeof(kpath));
+  }
+
   pthread_mutex_lock(&gConnMutex);
-  conn = hdfsConnFind(usrname);
+  conn = hdfsConnFind(usrname, kpath);
   if (!conn) {
     ret = fuseNewConnect(usrname, ctx, &conn);
     if (ret) {




[30/50] [abbrv] hadoop git commit: YARN-8606. Opportunistic scheduling does not work post RM failover. Contributed by Bibin A Chundatt.

Posted by bo...@apache.org.
YARN-8606. Opportunistic scheduling does not work post RM failover. Contributed by Bibin A Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a48a0cc7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a48a0cc7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a48a0cc7

Branch: refs/heads/YARN-7402
Commit: a48a0cc7fd8e7ac1c07b260e6078077824f27c35
Parents: 5cc8e99
Author: Sunil G <su...@apache.org>
Authored: Wed Aug 1 12:17:18 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Wed Aug 1 12:17:18 2018 +0530

----------------------------------------------------------------------
 ...pportunisticContainerAllocatorAMService.java |  4 +-
 .../server/resourcemanager/ResourceManager.java | 37 ++++++++++------
 .../yarn/server/resourcemanager/TestRMHA.java   | 44 ++++++++++++++++++++
 3 files changed, 72 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a48a0cc7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
index 9b13627..15c2a89 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -417,7 +418,8 @@ public class OpportunisticContainerAllocatorAMService
     return nodeMonitor.getThresholdCalculator();
   }
 
-  private synchronized List<RemoteNode> getLeastLoadedNodes() {
+  @VisibleForTesting
+  synchronized List<RemoteNode> getLeastLoadedNodes() {
     long currTime = System.currentTimeMillis();
     if ((currTime - lastCacheUpdateTime > cacheRefreshInterval)
         || (cachedNodes == null)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a48a0cc7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 0b7e87c..f14d440 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -757,9 +757,11 @@ public class ResourceManager extends CompositeService implements Recoverable {
       }
 
       masterService = createApplicationMasterService();
+      createAndRegisterOpportunisticDispatcher(masterService);
       addService(masterService) ;
       rmContext.setApplicationMasterService(masterService);
 
+
       applicationACLsManager = new ApplicationACLsManager(conf);
 
       queueACLsManager = createQueueACLsManager(scheduler, conf);
@@ -807,6 +809,23 @@ public class ResourceManager extends CompositeService implements Recoverable {
       super.serviceInit(conf);
     }
 
+    private void createAndRegisterOpportunisticDispatcher(
+        ApplicationMasterService service) {
+      if (!isOpportunisticSchedulingEnabled(conf)) {
+        return;
+      }
+      EventDispatcher oppContainerAllocEventDispatcher = new EventDispatcher(
+          (OpportunisticContainerAllocatorAMService) service,
+          OpportunisticContainerAllocatorAMService.class.getName());
+      // Add an event dispatcher for the
+      // OpportunisticContainerAllocatorAMService to handle node
+      // additions, updates and removals. Since the SchedulerEvent is currently
+      // a superset of these, we register interest for it.
+      addService(oppContainerAllocEventDispatcher);
+      rmDispatcher
+          .register(SchedulerEventType.class, oppContainerAllocEventDispatcher);
+    }
+
     @Override
     protected void serviceStart() throws Exception {
       RMStateStore rmStore = rmContext.getStateStore();
@@ -1335,8 +1354,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
 
   protected ApplicationMasterService createApplicationMasterService() {
     Configuration config = this.rmContext.getYarnConfiguration();
-    if (YarnConfiguration.isOpportunisticContainerAllocationEnabled(config)
-        || YarnConfiguration.isDistSchedulingEnabled(config)) {
+    if (isOpportunisticSchedulingEnabled(conf)) {
       if (YarnConfiguration.isDistSchedulingEnabled(config) &&
           !YarnConfiguration
               .isOpportunisticContainerAllocationEnabled(config)) {
@@ -1348,16 +1366,6 @@ public class ResourceManager extends CompositeService implements Recoverable {
           oppContainerAllocatingAMService =
           new OpportunisticContainerAllocatorAMService(this.rmContext,
               scheduler);
-      EventDispatcher oppContainerAllocEventDispatcher =
-          new EventDispatcher(oppContainerAllocatingAMService,
-              OpportunisticContainerAllocatorAMService.class.getName());
-      // Add an event dispatcher for the
-      // OpportunisticContainerAllocatorAMService to handle node
-      // additions, updates and removals. Since the SchedulerEvent is currently
-      // a super set of theses, we register interest for it.
-      addService(oppContainerAllocEventDispatcher);
-      rmDispatcher.register(SchedulerEventType.class,
-          oppContainerAllocEventDispatcher);
       this.rmContext.setContainerQueueLimitCalculator(
           oppContainerAllocatingAMService.getNodeManagerQueueLimitCalculator());
       return oppContainerAllocatingAMService;
@@ -1373,6 +1381,11 @@ public class ResourceManager extends CompositeService implements Recoverable {
     return new RMSecretManagerService(conf, rmContext);
   }
 
+  private boolean isOpportunisticSchedulingEnabled(Configuration conf) {
+    return YarnConfiguration.isOpportunisticContainerAllocationEnabled(conf)
+        || YarnConfiguration.isDistSchedulingEnabled(conf);
+  }
+
   /**
    * Create RMDelegatedNodeLabelsUpdater based on configuration.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a48a0cc7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
index 385e8db..c17dee8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
@@ -18,6 +18,10 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager;
 
+import com.google.common.base.Supplier;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -659,6 +663,46 @@ public class TestRMHA {
   }
 
   @Test
+  public void testOpportunisticAllocatorAfterFailover() throws Exception {
+    configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
+    configuration.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
+    Configuration conf = new YarnConfiguration(configuration);
+    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
+    conf.setBoolean(
+        YarnConfiguration.OPPORTUNISTIC_CONTAINER_ALLOCATION_ENABLED, true);
+    // 1. start RM
+    rm = new MockRM(conf);
+    rm.init(conf);
+    rm.start();
+
+    StateChangeRequestInfo requestInfo = new StateChangeRequestInfo(
+        HAServiceProtocol.RequestSource.REQUEST_BY_USER);
+    // 2. Transition to active
+    rm.adminService.transitionToActive(requestInfo);
+    // 3. Transition to standby
+    rm.adminService.transitionToStandby(requestInfo);
+    // 4. Transition to active
+    rm.adminService.transitionToActive(requestInfo);
+
+    MockNM nm1 = rm.registerNode("h1:1234", 8 * 1024);
+    RMNode rmNode1 = rm.getRMContext().getRMNodes().get(nm1.getNodeId());
+    rmNode1.getRMContext().getDispatcher().getEventHandler()
+        .handle(new NodeUpdateSchedulerEvent(rmNode1));
+    OpportunisticContainerAllocatorAMService appMaster =
+        (OpportunisticContainerAllocatorAMService) rm.getRMContext()
+            .getApplicationMasterService();
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return appMaster.getLeastLoadedNodes().size() == 1;
+      }
+    }, 100, 3000);
+    rm.stop();
+    Assert.assertEquals(1, appMaster.getLeastLoadedNodes().size());
+
+  }
+
+  @Test
   public void testResourceProfilesManagerAfterRMWentStandbyThenBackToActive()
       throws Exception {
     configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
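
As a side note on the gating above: the new isOpportunisticSchedulingEnabled() helper only consults the two existing YarnConfiguration switches. A standalone sketch of that check, using names taken from the patch (the wrapper class itself is illustrative only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class OpportunisticSchedulingCheck {
  // Mirrors isOpportunisticSchedulingEnabled() from the patch: either flag
  // enables the OpportunisticContainerAllocatorAMService and, with this fix,
  // the registration of its event dispatcher on every transition to active.
  static boolean isEnabled(Configuration conf) {
    return YarnConfiguration.isOpportunisticContainerAllocationEnabled(conf)
        || YarnConfiguration.isDistSchedulingEnabled(conf);
  }

  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(
        YarnConfiguration.OPPORTUNISTIC_CONTAINER_ALLOCATION_ENABLED, true);
    System.out.println(isEnabled(conf));  // true
  }
}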




[17/50] [abbrv] hadoop git commit: HDDS-293. Reduce memory usage and object creation in KeyData.

Posted by bo...@apache.org.
HDDS-293. Reduce memory usage and object creation in KeyData.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee53602a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee53602a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee53602a

Branch: refs/heads/YARN-7402
Commit: ee53602a8179e76f4102d9062d0bebe8bb09d875
Parents: 2b39ad2
Author: Tsz Wo Nicholas Sze <sz...@apache.org>
Authored: Mon Jul 30 15:00:29 2018 -0700
Committer: Tsz Wo Nicholas Sze <sz...@apache.org>
Committed: Mon Jul 30 15:00:29 2018 -0700

----------------------------------------------------------------------
 .../ozone/container/common/helpers/KeyData.java |  84 +++++++++----
 .../common/impl/OpenContainerBlockMap.java      |   2 +-
 .../container/keyvalue/KeyValueHandler.java     |   3 -
 .../container/common/helpers/TestKeyData.java   | 119 +++++++++++++++++++
 4 files changed, 179 insertions(+), 29 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee53602a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
index 1919ed9..84a6f71 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.container.common.helpers;
 
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.client.BlockID;
+import com.google.common.base.Preconditions;
 
 import java.io.IOException;
 import java.util.Collections;
@@ -35,11 +36,17 @@ public class KeyData {
   private final Map<String, String> metadata;
 
   /**
+   * Represent a list of chunks.
+   * In order to reduce memory usage, chunkList is declared as an {@link Object}.
+   * When #elements == 0, chunkList is null.
+   * When #elements == 1, chunkList refers to the only element.
+   * When #elements > 1, chunkList refers to the list.
+   *
    * Please note: when we are working with keys, we don't care what they point
    * to. So we don't read chunk info nor validate it. It is the responsibility
    * of higher layers like Ozone. We just read and write data from the network.
    */
-  private List<ContainerProtos.ChunkInfo> chunks;
+  private Object chunkList;
 
   /**
    * total size of the key.
@@ -73,7 +80,7 @@ public class KeyData {
     }
     keyData.setChunks(data.getChunksList());
     if (data.hasSize()) {
-      keyData.setSize(data.getSize());
+      Preconditions.checkArgument(data.getSize() == keyData.getSize());
     }
     return keyData;
   }
@@ -86,13 +93,13 @@ public class KeyData {
     ContainerProtos.KeyData.Builder builder =
         ContainerProtos.KeyData.newBuilder();
     builder.setBlockID(this.blockID.getDatanodeBlockIDProtobuf());
-    builder.addAllChunks(this.chunks);
     for (Map.Entry<String, String> entry : metadata.entrySet()) {
       ContainerProtos.KeyValue.Builder keyValBuilder =
           ContainerProtos.KeyValue.newBuilder();
       builder.addMetadata(keyValBuilder.setKey(entry.getKey())
           .setValue(entry.getValue()).build());
     }
+    builder.addAllChunks(getChunks());
     builder.setSize(size);
     return builder.build();
   }
@@ -132,30 +139,65 @@ public class KeyData {
     metadata.remove(key);
   }
 
+  @SuppressWarnings("unchecked")
+  private List<ContainerProtos.ChunkInfo> castChunkList() {
+    return (List<ContainerProtos.ChunkInfo>)chunkList;
+  }
+
   /**
    * Returns chunks list.
    *
    * @return list of chunkinfo.
    */
   public List<ContainerProtos.ChunkInfo> getChunks() {
-    return chunks;
+    return chunkList == null? Collections.emptyList()
+        : chunkList instanceof ContainerProtos.ChunkInfo?
+            Collections.singletonList((ContainerProtos.ChunkInfo)chunkList)
+        : Collections.unmodifiableList(castChunkList());
   }
 
   /**
   * Adds the chunkInfo to the list.
    */
   public void addChunk(ContainerProtos.ChunkInfo chunkInfo) {
-    if (chunks == null) {
-      chunks = new ArrayList<>();
+    if (chunkList == null) {
+      chunkList = chunkInfo;
+    } else {
+      final List<ContainerProtos.ChunkInfo> list;
+      if (chunkList instanceof ContainerProtos.ChunkInfo) {
+        list = new ArrayList<>(2);
+        list.add((ContainerProtos.ChunkInfo)chunkList);
+        chunkList = list;
+      } else {
+        list = castChunkList();
+      }
+      list.add(chunkInfo);
     }
-    chunks.add(chunkInfo);
+    size += chunkInfo.getLen();
   }
 
   /**
    * removes the chunk.
    */
-  public void removeChunk(ContainerProtos.ChunkInfo chunkInfo) {
-    chunks.remove(chunkInfo);
+  public boolean removeChunk(ContainerProtos.ChunkInfo chunkInfo) {
+    final boolean removed;
+    if (chunkList instanceof List) {
+      final List<ContainerProtos.ChunkInfo> list = castChunkList();
+      removed = list.remove(chunkInfo);
+      if (list.size() == 1) {
+        chunkList = list.get(0);
+      }
+    } else if (chunkInfo.equals(chunkList)) {
+      chunkList = null;
+      removed = true;
+    } else {
+      removed = false;
+    }
+
+    if (removed) {
+      size -= chunkInfo.getLen();
+    }
+    return removed;
   }
 
   /**
@@ -189,15 +231,14 @@ public class KeyData {
    * @param chunks - List of chunks.
    */
   public void setChunks(List<ContainerProtos.ChunkInfo> chunks) {
-    this.chunks = chunks;
-  }
-
-  /**
-   * sets the total size of the block
-   * @param size size of the block
-   */
-  public void setSize(long size) {
-    this.size = size;
+    if (chunks == null) {
+      chunkList = null;
+      size = 0L;
+    } else {
+      final int n = chunks.size();
+      chunkList = n == 0? null: n == 1? chunks.get(0): chunks;
+      size = chunks.parallelStream().mapToLong(ContainerProtos.ChunkInfo::getLen).sum();
+    }
   }
 
   /**
@@ -207,11 +248,4 @@ public class KeyData {
   public long getSize() {
     return size;
   }
-
-  /**
-   * computes the total size of chunks allocated for the key.
-   */
-  public void computeSize() {
-    setSize(chunks.parallelStream().mapToLong(e -> e.getLen()).sum());
-  }
 }
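
The javadoc above describes the compact 0/1/N representation KeyData now uses for its chunk list. A generic, standalone sketch of the same idiom, not taken from the patch and assuming the element type is not itself a List:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

class CompactList<T> {
  // null when empty, the element itself when size == 1, a List<T> otherwise
  private Object items;

  @SuppressWarnings("unchecked")
  List<T> asList() {
    if (items == null) {
      return Collections.emptyList();
    }
    if (items instanceof List) {
      return Collections.unmodifiableList((List<T>) items);
    }
    return Collections.singletonList((T) items);
  }

  @SuppressWarnings("unchecked")
  void add(T item) {
    if (items == null) {
      items = item;                  // first element: store it directly
    } else if (items instanceof List) {
      ((List<T>) items).add(item);   // already promoted to a list
    } else {
      List<T> list = new ArrayList<>(2);
      list.add((T) items);           // promote the single element to a list
      list.add(item);
      items = list;
    }
  }
}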

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee53602a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
index 8e2667d..ab7789b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
@@ -33,7 +33,7 @@ import java.util.concurrent.ConcurrentMap;
 import java.util.function.Function;
 
 /**
- * Map: containerId -> (localId -> KeyData).
+ * Map: containerId -> (localId -> {@link KeyData}).
  * The outer container map does not entail locking, for better performance.
  * The inner {@link KeyDataMap} is synchronized.
  *
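
A standalone sketch of the two-level layout the javadoc above describes: an outer map keyed by containerId that readers use without explicit locking, and a synchronized inner map keyed by localId. The diff does not show which collection types OpenContainerBlockMap actually uses, so the classes below are illustrative assumptions only:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class TwoLevelBlockMap<V> {
  private final ConcurrentMap<Long, InnerMap<V>> containers =
      new ConcurrentHashMap<>();

  /** Inner per-container map: all access is synchronized on the instance. */
  static final class InnerMap<V> {
    private final Map<Long, V> blocks = new HashMap<>();

    synchronized V put(long localId, V value) {
      return blocks.put(localId, value);
    }

    synchronized V get(long localId) {
      return blocks.get(localId);
    }
  }

  V put(long containerId, long localId, V value) {
    // computeIfAbsent keeps outer-map access free of explicit locks
    return containers
        .computeIfAbsent(containerId, id -> new InnerMap<>())
        .put(localId, value);
  }

  V get(long containerId, long localId) {
    InnerMap<V> inner = containers.get(containerId);
    return inner == null ? null : inner.get(localId);
  }
}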

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee53602a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index a4e124b..fac3f3c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -439,8 +439,6 @@ public class KeyValueHandler extends Handler {
   private void commitKey(KeyData keyData, KeyValueContainer kvContainer)
       throws IOException {
     Preconditions.checkNotNull(keyData);
-    //sets the total size of the key before committing
-    keyData.computeSize();
     keyManager.putKey(kvContainer, keyData);
     //update the open key Map in containerManager
     this.openContainerBlockMap.removeFromKeyMap(keyData.getBlockID());
@@ -696,7 +694,6 @@ public class KeyValueHandler extends Handler {
       List<ContainerProtos.ChunkInfo> chunks = new LinkedList<>();
       chunks.add(chunkInfo.getProtoBufMessage());
       keyData.setChunks(chunks);
-      keyData.computeSize();
       keyManager.putKey(kvContainer, keyData);
       metrics.incContainerBytesStats(Type.PutSmallFile, data.length);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee53602a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestKeyData.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestKeyData.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestKeyData.java
new file mode 100644
index 0000000..f57fe99
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestKeyData.java
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.helpers;
+
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestRule;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ThreadLocalRandom;
+
+/**
+ * Tests for {@link KeyData}.
+ */
+public class TestKeyData {
+  static final Logger LOG = LoggerFactory.getLogger(TestKeyData.class);
+  @Rule
+  public TestRule timeout = new Timeout(10000);
+
+  static ContainerProtos.ChunkInfo buildChunkInfo(String name, long offset, long len) {
+    return ContainerProtos.ChunkInfo.newBuilder()
+        .setChunkName(name).setOffset(offset).setLen(len).build();
+  }
+
+  @Test
+  public void testAddAndRemove() {
+    final KeyData computed = new KeyData(null);
+    final List<ContainerProtos.ChunkInfo> expected = new ArrayList<>();
+
+    assertChunks(expected, computed);
+    long offset = 0;
+    int n = 5;
+    for(int i = 0; i < n; i++) {
+      offset += assertAddChunk(expected, computed, offset);
+    }
+
+    for(; !expected.isEmpty(); ) {
+      removeChunk(expected, computed);
+    }
+  }
+
+  private static int chunkCount = 0;
+  static ContainerProtos.ChunkInfo addChunk(List<ContainerProtos.ChunkInfo> expected, long offset) {
+    final long length = ThreadLocalRandom.current().nextLong(1000);
+    final ContainerProtos.ChunkInfo info = buildChunkInfo("c" + ++chunkCount, offset, length);
+    expected.add(info);
+    return info;
+  }
+
+  static long assertAddChunk(List<ContainerProtos.ChunkInfo> expected, KeyData computed, long offset) {
+    final ContainerProtos.ChunkInfo info = addChunk(expected, offset);
+    LOG.info("addChunk: " + toString(info));
+    computed.addChunk(info);
+    assertChunks(expected, computed);
+    return info.getLen();
+  }
+
+
+  static void removeChunk(List<ContainerProtos.ChunkInfo> expected, KeyData computed) {
+    final int i = ThreadLocalRandom.current().nextInt(expected.size());
+    final ContainerProtos.ChunkInfo info = expected.remove(i);
+    LOG.info("removeChunk: " + toString(info));
+    computed.removeChunk(info);
+    assertChunks(expected, computed);
+  }
+
+  static void assertChunks(List<ContainerProtos.ChunkInfo> expected, KeyData computed) {
+    final List<ContainerProtos.ChunkInfo> computedChunks = computed.getChunks();
+    Assert.assertEquals("expected=" + expected + "\ncomputed=" + computedChunks, expected, computedChunks);
+    Assert.assertEquals(expected.stream().mapToLong(i -> i.getLen()).sum(), computed.getSize());
+  }
+
+  static String toString(ContainerProtos.ChunkInfo info) {
+    return info.getChunkName() + ":" + info.getOffset() + "," + info.getLen();
+  }
+
+  static String toString(List<ContainerProtos.ChunkInfo> infos) {
+    return infos.stream().map(TestKeyData::toString)
+        .reduce((left, right) -> left + ", " + right)
+        .orElse("");
+  }
+
+  @Test
+  public void testSetChunks() {
+    final KeyData computed = new KeyData(null);
+    final List<ContainerProtos.ChunkInfo> expected = new ArrayList<>();
+
+    assertChunks(expected, computed);
+    long offset = 0;
+    int n = 5;
+    for(int i = 0; i < n; i++) {
+      offset += addChunk(expected, offset).getLen();
+      LOG.info("setChunk: " + toString(expected));
+      computed.setChunks(expected);
+      assertChunks(expected, computed);
+    }
+  }
+}
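
For context, the KeyData changes above replace the explicit computeSize()/setSize() calls with incremental bookkeeping: the size is adjusted whenever a chunk is added, removed, or replaced, and a single chunk is stored directly instead of in a one-element list. A stripped-down, hypothetical model of that bookkeeping (not the actual KeyData class) could look like:

    import java.util.ArrayList;
    import java.util.List;

    // Sketch only: stores one chunk length inline, promotes to a list on the
    // second chunk, and keeps the total size up to date incrementally.
    class ChunkHolderSketch {
      private Object chunkList;   // null, a single Long, or a List<Long>
      private long size;

      @SuppressWarnings("unchecked")
      void addChunk(long chunkLen) {
        if (chunkList == null) {
          chunkList = chunkLen;                 // first chunk: keep it inline
        } else if (chunkList instanceof Long) {
          List<Long> list = new ArrayList<>();
          list.add((Long) chunkList);
          list.add(chunkLen);                   // second chunk: switch to a list
          chunkList = list;
        } else {
          ((List<Long>) chunkList).add(chunkLen);
        }
        size += chunkLen;                       // no separate computeSize() pass needed
      }

      long getSize() {
        return size;
      }
    }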




[10/50] [abbrv] hadoop git commit: HADOOP-15607. AliyunOSS: fix duplicated partNumber issue in AliyunOSSBlockOutputStream. Contributed by Jinhu Wu.

Posted by bo...@apache.org.
HADOOP-15607. AliyunOSS: fix duplicated partNumber issue in AliyunOSSBlockOutputStream. Contributed by Jinhu Wu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0857f116
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0857f116
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0857f116

Branch: refs/heads/YARN-7402
Commit: 0857f116b754d83d3c540cd6f989087af24fef27
Parents: 007e6f5
Author: Sammi Chen <sa...@intel.com>
Authored: Mon Jul 30 10:53:44 2018 +0800
Committer: Sammi Chen <sa...@intel.com>
Committed: Mon Jul 30 10:53:44 2018 +0800

----------------------------------------------------------------------
 .../aliyun/oss/AliyunOSSBlockOutputStream.java  | 59 ++++++++++++--------
 .../fs/aliyun/oss/AliyunOSSFileSystemStore.java |  2 +
 .../oss/TestAliyunOSSBlockOutputStream.java     | 12 +++-
 3 files changed, 49 insertions(+), 24 deletions(-)
----------------------------------------------------------------------
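
The essence of the fix is that each staged block file is keyed by the part number it was (or will be) uploaded under, so a retried flush or close can never re-upload the same file as a new part. A minimal, hypothetical sketch of that bookkeeping (names are illustrative, not the actual AliyunOSSBlockOutputStream fields) might be:

    import java.io.File;
    import java.util.HashMap;
    import java.util.Map;

    // Sketch only: assigns each block file a stable part number exactly once
    // and cleans up the temporary file once that part has been uploaded.
    class PartTrackerSketch {
      private final Map<Integer, File> blockFiles = new HashMap<>();
      private int blockId = 0;

      /** Registers a block file once; repeated calls reuse the same part number. */
      int register(File blockFile) {
        for (Map.Entry<Integer, File> e : blockFiles.entrySet()) {
          if (e.getValue().equals(blockFile)) {
            return e.getKey();            // already registered: reuse its part number
          }
        }
        blockId++;
        blockFiles.put(blockId, blockFile);
        return blockId;
      }

      /** Deletes the temporary file backing a part that finished uploading. */
      void removeUploaded(int partNumber) {
        File f = blockFiles.get(partNumber);
        if (f != null && f.exists() && !f.delete()) {
          System.err.println("Failed to delete temporary file " + f);
        }
      }
    }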


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0857f116/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
index 12d551b..0a833b2 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
@@ -33,7 +33,9 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 
@@ -50,7 +52,7 @@ public class AliyunOSSBlockOutputStream extends OutputStream {
   private boolean closed;
   private String key;
   private File blockFile;
-  private List<File> blockFiles = new ArrayList<>();
+  private Map<Integer, File> blockFiles = new HashMap<>();
   private long blockSize;
   private int blockId = 0;
   private long blockWritten = 0L;
@@ -94,8 +96,9 @@ public class AliyunOSSBlockOutputStream extends OutputStream {
 
     blockStream.flush();
     blockStream.close();
-    if (!blockFiles.contains(blockFile)) {
-      blockFiles.add(blockFile);
+    if (!blockFiles.values().contains(blockFile)) {
+      blockId++;
+      blockFiles.put(blockId, blockFile);
     }
 
     try {
@@ -107,7 +110,7 @@ public class AliyunOSSBlockOutputStream extends OutputStream {
           ListenableFuture<PartETag> partETagFuture =
               executorService.submit(() -> {
                 PartETag partETag = store.uploadPart(blockFile, key, uploadId,
-                    blockId + 1);
+                    blockId);
                 return partETag;
               });
           partETagsFutures.add(partETagFuture);
@@ -120,11 +123,7 @@ public class AliyunOSSBlockOutputStream extends OutputStream {
         store.completeMultipartUpload(key, uploadId, partETags);
       }
     } finally {
-      for (File tFile: blockFiles) {
-        if (tFile.exists() && !tFile.delete()) {
-          LOG.warn("Failed to delete temporary file {}", tFile);
-        }
-      }
+      removePartFiles();
       closed = true;
     }
   }
@@ -141,38 +140,52 @@ public class AliyunOSSBlockOutputStream extends OutputStream {
     if (closed) {
       throw new IOException("Stream closed.");
     }
-    try {
-      blockStream.write(b, off, len);
-      blockWritten += len;
-      if (blockWritten >= blockSize) {
-        uploadCurrentPart();
-        blockWritten = 0L;
+    blockStream.write(b, off, len);
+    blockWritten += len;
+    if (blockWritten >= blockSize) {
+      uploadCurrentPart();
+      blockWritten = 0L;
+    }
+  }
+
+  private void removePartFiles() throws IOException {
+    for (ListenableFuture<PartETag> partETagFuture : partETagsFutures) {
+      if (!partETagFuture.isDone()) {
+        continue;
       }
-    } finally {
-      for (File tFile: blockFiles) {
-        if (tFile.exists() && !tFile.delete()) {
-          LOG.warn("Failed to delete temporary file {}", tFile);
+
+      try {
+        File blockFile = blockFiles.get(partETagFuture.get().getPartNumber());
+        if (blockFile != null && blockFile.exists() && !blockFile.delete()) {
+          LOG.warn("Failed to delete temporary file {}", blockFile);
         }
+      } catch (InterruptedException | ExecutionException e) {
+        throw new IOException(e);
       }
     }
   }
 
   private void uploadCurrentPart() throws IOException {
-    blockFiles.add(blockFile);
     blockStream.flush();
     blockStream.close();
     if (blockId == 0) {
       uploadId = store.getUploadId(key);
     }
+
+    blockId++;
+    blockFiles.put(blockId, blockFile);
+
+    File currentFile = blockFile;
+    int currentBlockId = blockId;
     ListenableFuture<PartETag> partETagFuture =
         executorService.submit(() -> {
-          PartETag partETag = store.uploadPart(blockFile, key, uploadId,
-              blockId + 1);
+          PartETag partETag = store.uploadPart(currentFile, key, uploadId,
+              currentBlockId);
           return partETag;
         });
     partETagsFutures.add(partETagFuture);
+    removePartFiles();
     blockFile = newBlockFile();
-    blockId++;
     blockStream = new BufferedOutputStream(new FileOutputStream(blockFile));
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0857f116/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
index 5e21759..dc5f99ee 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
@@ -450,6 +450,8 @@ public class AliyunOSSFileSystemStore {
       request.setRange(byteStart, byteEnd);
       return ossClient.getObject(request).getObjectContent();
     } catch (OSSException | ClientException e) {
+      LOG.error("Exception thrown when store retrieves key: "
+              + key + ", exception: " + e);
       return null;
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0857f116/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSBlockOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSBlockOutputStream.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSBlockOutputStream.java
index 365d931..6fe6f03 100644
--- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSBlockOutputStream.java
+++ b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSBlockOutputStream.java
@@ -31,6 +31,7 @@ import org.junit.rules.Timeout;
 import java.io.IOException;
 
 import static org.apache.hadoop.fs.aliyun.oss.Constants.MULTIPART_UPLOAD_PART_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.IO_CHUNK_BUFFER_SIZE;
 
 /**
  * Tests regular and multi-part upload functionality for
@@ -48,7 +49,10 @@ public class TestAliyunOSSBlockOutputStream {
   public void setUp() throws Exception {
     Configuration conf = new Configuration();
     conf.setLong(Constants.MIN_MULTIPART_UPLOAD_THRESHOLD_KEY, 5 * 1024 * 1024);
-    conf.setInt(Constants.MULTIPART_UPLOAD_PART_SIZE_KEY, 5 * 1024 * 1024);
+    conf.setInt(Constants.MULTIPART_UPLOAD_PART_SIZE_KEY, 1024 * 1024);
+    conf.setInt(IO_CHUNK_BUFFER_SIZE,
+        conf.getInt(Constants.MULTIPART_UPLOAD_PART_SIZE_KEY, 0));
+    conf.setInt(Constants.UPLOAD_ACTIVE_BLOCKS_KEY, 20);
     fs = AliyunOSSTestUtils.createTestFileSystem(conf);
   }
 
@@ -85,6 +89,12 @@ public class TestAliyunOSSBlockOutputStream {
   }
 
   @Test
+  public void testMultiPartUploadConcurrent() throws IOException {
+    ContractTestUtils.createAndVerifyFile(fs, getTestPath(),
+        50 * 1024 * 1024 - 1);
+  }
+
+  @Test
   public void testHugeUpload() throws IOException {
     ContractTestUtils.createAndVerifyFile(fs, getTestPath(),
         MULTIPART_UPLOAD_PART_SIZE_DEFAULT - 1);




[48/50] [abbrv] hadoop git commit: YARN-6648. [GPG] Add SubClusterCleaner in Global Policy Generator. (botong)

Posted by bo...@apache.org.
YARN-6648. [GPG] Add SubClusterCleaner in Global Policy Generator. (botong)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f833e1b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f833e1b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f833e1b3

Branch: refs/heads/YARN-7402
Commit: f833e1b300758e7c7622e2ca93c2dd164ec6d73d
Parents: 48a8379
Author: Botong Huang <bo...@apache.org>
Authored: Thu Feb 1 14:43:48 2018 -0800
Committer: Botong Huang <bo...@apache.org>
Committed: Thu Aug 2 09:59:48 2018 -0700

----------------------------------------------------------------------
 .../dev-support/findbugs-exclude.xml            |   5 +
 .../hadoop/yarn/conf/YarnConfiguration.java     |  18 +++
 .../src/main/resources/yarn-default.xml         |  24 ++++
 .../store/impl/MemoryFederationStateStore.java  |  13 ++
 .../utils/FederationStateStoreFacade.java       |  41 ++++++-
 .../GlobalPolicyGenerator.java                  |  92 ++++++++++-----
 .../subclustercleaner/SubClusterCleaner.java    | 109 +++++++++++++++++
 .../subclustercleaner/package-info.java         |  19 +++
 .../TestSubClusterCleaner.java                  | 118 +++++++++++++++++++
 9 files changed, 409 insertions(+), 30 deletions(-)
----------------------------------------------------------------------
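
In short, the GPG gains a ScheduledThreadPoolExecutor and a SubClusterCleaner runnable that it schedules at yarn.federation.gpg.subcluster.cleaner.interval-ms (disabled by default); on each run the cleaner deregisters any sub-cluster whose last heartbeat is older than yarn.federation.gpg.subcluster.heartbeat.expiration-ms. A rough, self-contained sketch of that scheduling pattern (an in-memory map stands in for the FederationStateStore):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    // Sketch only: periodically scans last-heartbeat timestamps and flags
    // entries older than the expiration window, mirroring the cleaner's loop.
    public class HeartbeatCleanerSketch implements Runnable {
      private final Map<String, Long> lastHeartbeat = new ConcurrentHashMap<>();
      private final long expirationMs;

      HeartbeatCleanerSketch(long expirationMs) {
        this.expirationMs = expirationMs;
      }

      void heartbeat(String subClusterId) {
        lastHeartbeat.put(subClusterId, System.currentTimeMillis());
      }

      @Override
      public void run() {
        long now = System.currentTimeMillis();
        lastHeartbeat.forEach((id, ts) -> {
          if (now - ts > expirationMs) {
            System.out.println("Would deregister " + id + " as SC_LOST");
          }
        });
      }

      public static void main(String[] args) {
        HeartbeatCleanerSketch cleaner = new HeartbeatCleanerSketch(30 * 60 * 1000L);
        cleaner.heartbeat("SUBCLUSTER-0");
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
        // Runs until the JVM is stopped; the real GPG shuts its executor down in serviceStop().
        scheduler.scheduleAtFixedRate(cleaner, 0, 60_000, TimeUnit.MILLISECONDS);
      }
    }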


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f833e1b3/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 216c3bd..9fcafad 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -387,6 +387,11 @@
     <Method name="initAndStartNodeManager" />
     <Bug pattern="DM_EXIT" />
   </Match>
+  <Match>
+    <Class name="org.apache.hadoop.yarn.server.globalpolicygenerator.GlobalPolicyGenerator" />
+    <Method name="startGPG" />
+    <Bug pattern="DM_EXIT" />
+  </Match>
  
   <!-- Ignore heartbeat exception when killing localizer -->
   <Match>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f833e1b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index bbf877f..ec88411 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3342,6 +3342,24 @@ public class YarnConfiguration extends Configuration {
   public static final boolean DEFAULT_ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED =
       false;
 
+  private static final String FEDERATION_GPG_PREFIX =
+      FEDERATION_PREFIX + "gpg.";
+
+  // The number of threads to use for the GPG scheduled executor service
+  public static final String GPG_SCHEDULED_EXECUTOR_THREADS =
+      FEDERATION_GPG_PREFIX + "scheduled.executor.threads";
+  public static final int DEFAULT_GPG_SCHEDULED_EXECUTOR_THREADS = 10;
+
+  // The interval at which the subcluster cleaner runs, -1 means disabled
+  public static final String GPG_SUBCLUSTER_CLEANER_INTERVAL_MS =
+      FEDERATION_GPG_PREFIX + "subcluster.cleaner.interval-ms";
+  public static final long DEFAULT_GPG_SUBCLUSTER_CLEANER_INTERVAL_MS = -1;
+
+  // The expiration time for a subcluster heartbeat, default is 30 minutes
+  public static final String GPG_SUBCLUSTER_EXPIRATION_MS =
+      FEDERATION_GPG_PREFIX + "subcluster.heartbeat.expiration-ms";
+  public static final long DEFAULT_GPG_SUBCLUSTER_EXPIRATION_MS = 1800000;
+
   ////////////////////////////////
   // Other Configs
   ////////////////////////////////

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f833e1b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 2cc842f..66493f3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3533,6 +3533,30 @@
 
   <property>
     <description>
+      The number of threads to use for the GPG scheduled executor service.
+    </description>
+    <name>yarn.federation.gpg.scheduled.executor.threads</name>
+    <value>10</value>
+  </property>
+
+  <property>
+    <description>
+      The interval at which the subcluster cleaner runs, -1 means disabled.
+    </description>
+    <name>yarn.federation.gpg.subcluster.cleaner.interval-ms</name>
+    <value>-1</value>
+  </property>
+
+  <property>
+    <description>
+      The expiration time for a subcluster heartbeat, default is 30 minutes.
+    </description>
+    <name>yarn.federation.gpg.subcluster.heartbeat.expiration-ms</name>
+    <value>1800000</value>
+  </property>
+
+  <property>
+    <description>
        It is TimelineClient 1.5 configuration whether to store active
        application’s timeline data with in user directory i.e
        ${yarn.timeline-service.entity-group-fs-store.active-dir}/${user.name}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f833e1b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
index 7c06256..b42fc79 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
@@ -68,6 +68,8 @@ import org.apache.hadoop.yarn.util.MonotonicClock;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * In-memory implementation of {@link FederationStateStore}.
  */
@@ -158,6 +160,17 @@ public class MemoryFederationStateStore implements FederationStateStore {
     return SubClusterHeartbeatResponse.newInstance();
   }
 
+  @VisibleForTesting
+  public void setSubClusterLastHeartbeat(SubClusterId subClusterId,
+      long lastHeartbeat) throws YarnException {
+    SubClusterInfo subClusterInfo = membership.get(subClusterId);
+    if (subClusterInfo == null) {
+      throw new YarnException(
+          "Subcluster " + subClusterId.toString() + " does not exist");
+    }
+    subClusterInfo.setLastHeartBeat(lastHeartbeat);
+  }
+
   @Override
   public GetSubClusterInfoResponse getSubCluster(
       GetSubClusterInfoRequest request) throws YarnException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f833e1b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
index 1bcb0f4..4c3bed0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
@@ -62,9 +62,11 @@ import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolic
 import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationResponse;
 import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoRequest;
 import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState;
 import org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterRequest;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -221,6 +223,22 @@ public final class FederationStateStoreFacade {
   }
 
   /**
+   * Deregister a <em>subcluster</em> identified by {@code SubClusterId} to
+   * change state in federation. This can be done to mark the sub cluster lost,
+   * deregistered, or decommissioned.
+   *
+   * @param subClusterId the target subclusterId
+   * @param subClusterState the state to update it to
+   * @throws YarnException if the request is invalid/fails
+   */
+  public void deregisterSubCluster(SubClusterId subClusterId,
+      SubClusterState subClusterState) throws YarnException {
+    stateStore.deregisterSubCluster(
+        SubClusterDeregisterRequest.newInstance(subClusterId, subClusterState));
+    return;
+  }
+
+  /**
    * Returns the {@link SubClusterInfo} for the specified {@link SubClusterId}.
    *
    * @param subClusterId the identifier of the sub-cluster
@@ -255,8 +273,7 @@ public final class FederationStateStoreFacade {
   public SubClusterInfo getSubCluster(final SubClusterId subClusterId,
       final boolean flushCache) throws YarnException {
     if (flushCache && isCachingEnabled()) {
-      LOG.info("Flushing subClusters from cache and rehydrating from store,"
-          + " most likely on account of RM failover.");
+      LOG.info("Flushing subClusters from cache and rehydrating from store.");
       cache.remove(buildGetSubClustersCacheRequest(false));
     }
     return getSubCluster(subClusterId);
@@ -287,6 +304,26 @@ public final class FederationStateStoreFacade {
   }
 
   /**
+   * Updates the cache with the central {@link FederationStateStore} and returns
+   * the {@link SubClusterInfo} of all active sub cluster(s).
+   *
+   * @param filterInactiveSubClusters whether to filter out inactive
+   *          sub-clusters
+   * @param flushCache flag to indicate if the cache should be flushed or not
+   * @return the sub cluster information
+   * @throws YarnException if the call to the state store is unsuccessful
+   */
+  public Map<SubClusterId, SubClusterInfo> getSubClusters(
+      final boolean filterInactiveSubClusters, final boolean flushCache)
+      throws YarnException {
+    if (flushCache && isCachingEnabled()) {
+      LOG.info("Flushing subClusters from cache and rehydrating from store.");
+      cache.remove(buildGetSubClustersCacheRequest(filterInactiveSubClusters));
+    }
+    return getSubClusters(filterInactiveSubClusters);
+  }
+
+  /**
    * Returns the {@link SubClusterPolicyConfiguration} for the specified queue.
    *
    * @param queue the queue whose policy is required

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f833e1b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
index c1f7460..f6cfba0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
@@ -18,8 +18,11 @@
 
 package org.apache.hadoop.yarn.server.globalpolicygenerator;
 
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
+import org.apache.commons.lang.time.DurationFormatUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.service.CompositeService;
@@ -28,6 +31,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+import org.apache.hadoop.yarn.server.globalpolicygenerator.subclustercleaner.SubClusterCleaner;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -55,36 +59,26 @@ public class GlobalPolicyGenerator extends CompositeService {
   // Federation Variables
   private GPGContext gpgContext;
 
+  // Scheduler service that runs tasks periodically
+  private ScheduledThreadPoolExecutor scheduledExecutorService;
+  private SubClusterCleaner subClusterCleaner;
+
   public GlobalPolicyGenerator() {
     super(GlobalPolicyGenerator.class.getName());
     this.gpgContext = new GPGContextImpl();
   }
 
-  protected void initAndStart(Configuration conf, boolean hasToReboot) {
-    try {
-      // Remove the old hook if we are rebooting.
-      if (hasToReboot && null != gpgShutdownHook) {
-        ShutdownHookManager.get().removeShutdownHook(gpgShutdownHook);
-      }
-
-      gpgShutdownHook = new CompositeServiceShutdownHook(this);
-      ShutdownHookManager.get().addShutdownHook(gpgShutdownHook,
-          SHUTDOWN_HOOK_PRIORITY);
-
-      this.init(conf);
-      this.start();
-    } catch (Throwable t) {
-      LOG.error("Error starting globalpolicygenerator", t);
-      System.exit(-1);
-    }
-  }
-
   @Override
   protected void serviceInit(Configuration conf) throws Exception {
     // Set up the context
     this.gpgContext
         .setStateStoreFacade(FederationStateStoreFacade.getInstance());
 
+    this.scheduledExecutorService = new ScheduledThreadPoolExecutor(
+        conf.getInt(YarnConfiguration.GPG_SCHEDULED_EXECUTOR_THREADS,
+            YarnConfiguration.DEFAULT_GPG_SCHEDULED_EXECUTOR_THREADS));
+    this.subClusterCleaner = new SubClusterCleaner(conf, this.gpgContext);
+
     DefaultMetricsSystem.initialize(METRICS_NAME);
 
     // super.serviceInit after all services are added
@@ -94,10 +88,32 @@ public class GlobalPolicyGenerator extends CompositeService {
   @Override
   protected void serviceStart() throws Exception {
     super.serviceStart();
+
+    // Scheduler SubClusterCleaner service
+    long scCleanerIntervalMs = getConfig().getLong(
+        YarnConfiguration.GPG_SUBCLUSTER_CLEANER_INTERVAL_MS,
+        YarnConfiguration.DEFAULT_GPG_SUBCLUSTER_CLEANER_INTERVAL_MS);
+    if (scCleanerIntervalMs > 0) {
+      this.scheduledExecutorService.scheduleAtFixedRate(this.subClusterCleaner,
+          0, scCleanerIntervalMs, TimeUnit.MILLISECONDS);
+      LOG.info("Scheduled sub-cluster cleaner with interval: {}",
+          DurationFormatUtils.formatDurationISO(scCleanerIntervalMs));
+    }
   }
 
   @Override
   protected void serviceStop() throws Exception {
+    try {
+      if (this.scheduledExecutorService != null
+          && !this.scheduledExecutorService.isShutdown()) {
+        this.scheduledExecutorService.shutdown();
+        LOG.info("Stopped ScheduledExecutorService");
+      }
+    } catch (Exception e) {
+      LOG.error("Failed to shutdown ScheduledExecutorService", e);
+      throw e;
+    }
+
     if (this.isStopping.getAndSet(true)) {
       return;
     }
@@ -113,20 +129,40 @@ public class GlobalPolicyGenerator extends CompositeService {
     return this.gpgContext;
   }
 
+  private void initAndStart(Configuration conf, boolean hasToReboot) {
+    // Remove the old hook if we are rebooting.
+    if (hasToReboot && null != gpgShutdownHook) {
+      ShutdownHookManager.get().removeShutdownHook(gpgShutdownHook);
+    }
+
+    gpgShutdownHook = new CompositeServiceShutdownHook(this);
+    ShutdownHookManager.get().addShutdownHook(gpgShutdownHook,
+        SHUTDOWN_HOOK_PRIORITY);
+
+    this.init(conf);
+    this.start();
+  }
+
   @SuppressWarnings("resource")
   public static void startGPG(String[] argv, Configuration conf) {
     boolean federationEnabled =
         conf.getBoolean(YarnConfiguration.FEDERATION_ENABLED,
             YarnConfiguration.DEFAULT_FEDERATION_ENABLED);
-    if (federationEnabled) {
-      Thread.setDefaultUncaughtExceptionHandler(
-          new YarnUncaughtExceptionHandler());
-      StringUtils.startupShutdownMessage(GlobalPolicyGenerator.class, argv,
-          LOG);
-      GlobalPolicyGenerator globalPolicyGenerator = new GlobalPolicyGenerator();
-      globalPolicyGenerator.initAndStart(conf, false);
-    } else {
-      LOG.warn("Federation is not enabled. The gpg cannot start.");
+    try {
+      if (federationEnabled) {
+        Thread.setDefaultUncaughtExceptionHandler(
+            new YarnUncaughtExceptionHandler());
+        StringUtils.startupShutdownMessage(GlobalPolicyGenerator.class, argv,
+            LOG);
+        GlobalPolicyGenerator globalPolicyGenerator =
+            new GlobalPolicyGenerator();
+        globalPolicyGenerator.initAndStart(conf, false);
+      } else {
+        LOG.warn("Federation is not enabled. The gpg cannot start.");
+      }
+    } catch (Throwable t) {
+      LOG.error("Error starting globalpolicygenerator", t);
+      System.exit(-1);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f833e1b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java
new file mode 100644
index 0000000..dad5121
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator.subclustercleaner;
+
+import java.util.Date;
+import java.util.Map;
+
+import org.apache.commons.lang.time.DurationFormatUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState;
+import org.apache.hadoop.yarn.server.globalpolicygenerator.GPGContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The sub-cluster cleaner is one of the GPG's services. It periodically checks
+ * the membership table in FederationStateStore and marks sub-clusters that have
+ * not sent a heartbeat in a certain amount of time as LOST.
+ */
+public class SubClusterCleaner implements Runnable {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SubClusterCleaner.class);
+
+  private GPGContext gpgContext;
+  private long heartbeatExpirationMillis;
+
+  /**
+   * The sub-cluster cleaner runnable is invoked by the sub-cluster cleaner
+   * service to check the membership table and deregister sub-clusters that have
+   * not sent a heartbeat within the expiration window.
+   */
+  public SubClusterCleaner(Configuration conf, GPGContext gpgContext) {
+    this.heartbeatExpirationMillis =
+        conf.getLong(YarnConfiguration.GPG_SUBCLUSTER_EXPIRATION_MS,
+            YarnConfiguration.DEFAULT_GPG_SUBCLUSTER_EXPIRATION_MS);
+    this.gpgContext = gpgContext;
+    LOG.info("Initialized SubClusterCleaner with heartbeat expiration of {}",
+        DurationFormatUtils.formatDurationISO(this.heartbeatExpirationMillis));
+  }
+
+  @Override
+  public void run() {
+    try {
+      Date now = new Date();
+      LOG.info("SubClusterCleaner at {}", now);
+
+      Map<SubClusterId, SubClusterInfo> infoMap =
+          this.gpgContext.getStateStoreFacade().getSubClusters(false, true);
+
+      // Iterate over each sub cluster and check last heartbeat
+      for (Map.Entry<SubClusterId, SubClusterInfo> entry : infoMap.entrySet()) {
+        SubClusterInfo subClusterInfo = entry.getValue();
+
+        Date lastHeartBeat = new Date(subClusterInfo.getLastHeartBeat());
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Checking subcluster {} in state {}, last heartbeat at {}",
+              subClusterInfo.getSubClusterId(), subClusterInfo.getState(),
+              lastHeartBeat);
+        }
+
+        if (!subClusterInfo.getState().isUnusable()) {
+          long timeUntilDeregister = this.heartbeatExpirationMillis
+              - (now.getTime() - lastHeartBeat.getTime());
+          // Deregister sub-cluster as SC_LOST if last heartbeat too old
+          if (timeUntilDeregister < 0) {
+            LOG.warn(
+                "Deregistering subcluster {} in state {} last heartbeat at {}",
+                subClusterInfo.getSubClusterId(), subClusterInfo.getState(),
+                new Date(subClusterInfo.getLastHeartBeat()));
+            try {
+              this.gpgContext.getStateStoreFacade().deregisterSubCluster(
+                  subClusterInfo.getSubClusterId(), SubClusterState.SC_LOST);
+            } catch (Exception e) {
+              LOG.error("deregisterSubCluster failed on subcluster "
+                  + subClusterInfo.getSubClusterId(), e);
+            }
+          } else if (LOG.isDebugEnabled()) {
+            LOG.debug("Time until deregister for subcluster {}: {}",
+                entry.getKey(),
+                DurationFormatUtils.formatDurationISO(timeUntilDeregister));
+          }
+        }
+      }
+    } catch (Throwable e) {
+      LOG.error("Subcluster cleaner fails: ", e);
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f833e1b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/package-info.java
new file mode 100644
index 0000000..f65444a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/package-info.java
@@ -0,0 +1,19 @@
+/**
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator.subclustercleaner;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f833e1b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/TestSubClusterCleaner.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/TestSubClusterCleaner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/TestSubClusterCleaner.java
new file mode 100644
index 0000000..19b8802
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/TestSubClusterCleaner.java
@@ -0,0 +1,118 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator.subclustercleaner;
+
+import java.util.ArrayList;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.federation.store.impl.MemoryFederationStateStore;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState;
+import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+import org.apache.hadoop.yarn.server.globalpolicygenerator.GPGContext;
+import org.apache.hadoop.yarn.server.globalpolicygenerator.GPGContextImpl;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Unit test for Sub-cluster Cleaner in GPG.
+ */
+public class TestSubClusterCleaner {
+
+  private Configuration conf;
+  private MemoryFederationStateStore stateStore;
+  private FederationStateStoreFacade facade;
+  private SubClusterCleaner cleaner;
+  private GPGContext gpgContext;
+
+  private ArrayList<SubClusterId> subClusterIds;
+
+  @Before
+  public void setup() throws YarnException {
+    conf = new YarnConfiguration();
+
+    // subcluster expires in one second
+    conf.setLong(YarnConfiguration.GPG_SUBCLUSTER_EXPIRATION_MS, 1000);
+
+    stateStore = new MemoryFederationStateStore();
+    stateStore.init(conf);
+
+    facade = FederationStateStoreFacade.getInstance();
+    facade.reinitialize(stateStore, conf);
+
+    gpgContext = new GPGContextImpl();
+    gpgContext.setStateStoreFacade(facade);
+
+    cleaner = new SubClusterCleaner(conf, gpgContext);
+
+    // Create and register three sub clusters
+    subClusterIds = new ArrayList<SubClusterId>();
+    for (int i = 0; i < 3; i++) {
+      // Create sub cluster id and info
+      SubClusterId subClusterId =
+          SubClusterId.newInstance("SUBCLUSTER-" + Integer.toString(i));
+
+      SubClusterInfo subClusterInfo = SubClusterInfo.newInstance(subClusterId,
+          "1.2.3.4:1", "1.2.3.4:2", "1.2.3.4:3", "1.2.3.4:4",
+          SubClusterState.SC_RUNNING, System.currentTimeMillis(), "");
+      // Register the sub cluster
+      stateStore.registerSubCluster(
+          SubClusterRegisterRequest.newInstance(subClusterInfo));
+      // Append the id to a local list
+      subClusterIds.add(subClusterId);
+    }
+  }
+
+  @After
+  public void breakDown() throws Exception {
+    stateStore.close();
+  }
+
+  @Test
+  public void testSubClusterRegisterHeartBeatTime() throws YarnException {
+    cleaner.run();
+    Assert.assertEquals(3, facade.getSubClusters(true, true).size());
+  }
+
+  /**
+   * Test the base use case.
+   */
+  @Test
+  public void testSubClusterHeartBeat() throws YarnException {
+    // The first subcluster reports as Unhealthy
+    SubClusterId subClusterId = subClusterIds.get(0);
+    stateStore.subClusterHeartbeat(SubClusterHeartbeatRequest
+        .newInstance(subClusterId, SubClusterState.SC_UNHEALTHY, "capacity"));
+
+    // The second subcluster didn't heartbeat for two seconds, should mark lost
+    subClusterId = subClusterIds.get(1);
+    stateStore.setSubClusterLastHeartbeat(subClusterId,
+        System.currentTimeMillis() - 2000);
+
+    cleaner.run();
+    Assert.assertEquals(1, facade.getSubClusters(true, true).size());
+  }
+}
\ No newline at end of file




[07/50] [abbrv] hadoop git commit: HDDS-246. Datanode should throw BlockNotCommittedException for uncommitted blocks to Ozone Client. Contributed by Shashikant Banerjee.

Posted by bo...@apache.org.
HDDS-246. Datanode should throw BlockNotCommittedException for uncommitted blocks to Ozone Client. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b038f82
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b038f82
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b038f82

Branch: refs/heads/YARN-7402
Commit: 6b038f82da8fa8c1c4f1e1bf448eacc6dd523044
Parents: 3d58684
Author: Mukul Kumar Singh <ms...@apache.org>
Authored: Sat Jul 28 22:04:11 2018 +0530
Committer: Mukul Kumar Singh <ms...@apache.org>
Committed: Sat Jul 28 22:04:11 2018 +0530

----------------------------------------------------------------------
 .../main/proto/DatanodeContainerProtocol.proto  |  1 +
 .../common/impl/OpenContainerBlockMap.java      | 12 ++++++
 .../container/keyvalue/KeyValueHandler.java     | 12 ++++--
 .../ozone/scm/TestCommittedBlockLengthAPI.java  | 45 +++++++++++++++-----
 4 files changed, 57 insertions(+), 13 deletions(-)
----------------------------------------------------------------------
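
The gist of the change is that GetCommittedBlockLength now fails fast with BLOCK_NOT_COMMITTED while a block is still tracked in the OpenContainerBlockMap, i.e. it has been written but no putKey has committed it yet. A simplified, hypothetical sketch of that guard (plain maps and exceptions stand in for the real HDDS types):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Sketch only: a committed-length lookup that rejects blocks still in the
    // open-block map instead of returning a stale or missing length.
    class CommittedLengthSketch {
      static class BlockNotCommittedException extends RuntimeException {
        BlockNotCommittedException(String msg) { super(msg); }
      }

      private final Map<Long, Long> openBlocks = new ConcurrentHashMap<>();      // localId -> bytes written so far
      private final Map<Long, Long> committedBlocks = new ConcurrentHashMap<>(); // localId -> committed length

      long getCommittedBlockLength(long localId) {
        if (openBlocks.containsKey(localId)) {
          // Written but putKey has not been called yet.
          throw new BlockNotCommittedException("Block " + localId + " is not committed yet.");
        }
        Long len = committedBlocks.get(localId);
        if (len == null) {
          throw new IllegalStateException("Unable to find the block " + localId);
        }
        return len;
      }
    }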


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b038f82/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index a3c4467..6969fa6 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -138,6 +138,7 @@ enum Result {
   CONTAINER_FILES_CREATE_ERROR = 32;
   CONTAINER_CHECKSUM_ERROR = 33;
   UNKNOWN_CONTAINER_TYPE = 34;
+  BLOCK_NOT_COMMITTED = 35;
 }
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b038f82/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
index 6a93c9d..8e2667d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
@@ -129,6 +129,18 @@ public class OpenContainerBlockMap {
         -> blocks.removeAndGetSize(blockID.getLocalID()) == 0? null: blocks);
   }
 
+  /**
+   * Returns true if the block exists in the open-block map, false otherwise.
+   *
+   * @param blockID the ID of the block to look up
+   * @return true if the block exists in the open-block map, false otherwise
+   */
+  public boolean checkIfBlockExists(BlockID blockID) {
+    KeyDataMap keyDataMap = containers.get(blockID.getContainerID());
+    return keyDataMap == null ? false :
+        keyDataMap.get(blockID.getLocalID()) != null;
+  }
+
   @VisibleForTesting
   KeyDataMap getKeyDataMap(long containerId) {
     return containers.get(containerId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b038f82/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index b08e128..0b26a14 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -91,6 +91,8 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .Result.GET_SMALL_FILE_ERROR;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .Result.PUT_SMALL_FILE_ERROR;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.BLOCK_NOT_COMMITTED;
 
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .Stage;
@@ -494,10 +496,14 @@ public class KeyValueHandler extends Handler {
 
     long blockLength;
     try {
-      BlockID blockID = BlockID.getFromProtobuf(
-          request.getGetCommittedBlockLength().getBlockID());
+      BlockID blockID = BlockID
+          .getFromProtobuf(request.getGetCommittedBlockLength().getBlockID());
+      // Check if it really exists in the openContainerBlockMap
+      if (openContainerBlockMap.checkIfBlockExists(blockID)) {
+        String msg = "Block " + blockID + " is not committed yet.";
+        throw new StorageContainerException(msg, BLOCK_NOT_COMMITTED);
+      }
       blockLength = keyManager.getCommittedBlockLength(kvContainer, blockID);
-
     } catch (StorageContainerException ex) {
       return ContainerUtils.logAndReturnError(LOG, ex, request);
     } catch (IOException ex) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b038f82/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCommittedBlockLengthAPI.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCommittedBlockLengthAPI.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCommittedBlockLengthAPI.java
index 7e8aa5f..3c6479f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCommittedBlockLengthAPI.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCommittedBlockLengthAPI.java
@@ -104,16 +104,6 @@ public class TestCommittedBlockLengthAPI {
             .getWriteChunkRequest(container.getPipeline(), blockID,
                 data.length);
     client.sendCommand(writeChunkRequest);
-    try {
-      // since there is neither explicit putKey request made for the block,
-      // nor the container is closed, GetCommittedBlockLength request
-      // should fail here.
-      response = ContainerProtocolCalls
-          .getCommittedBlockLength(client, blockID, traceID);
-      Assert.fail("Expected exception not thrown");
-    } catch (StorageContainerException sce) {
-      Assert.assertTrue(sce.getMessage().contains("Unable to find the key"));
-    }
     // Now, explicitly make a putKey request for the block.
     ContainerProtos.ContainerCommandRequestProto putKeyRequest =
         ContainerTestHelper
@@ -188,4 +178,39 @@ public class TestCommittedBlockLengthAPI {
     }
     xceiverClientManager.releaseClient(client);
   }
+
+  @Test
+  public void testGetCommittedBlockLengthForOpenBlock() throws Exception {
+    String traceID = UUID.randomUUID().toString();
+    ContainerWithPipeline container = storageContainerLocationClient
+        .allocateContainer(xceiverClientManager.getType(),
+            HddsProtos.ReplicationFactor.ONE, containerOwner);
+    long containerID = container.getContainerInfo().getContainerID();
+    XceiverClientSpi client = xceiverClientManager
+        .acquireClient(container.getPipeline(), containerID);
+    ContainerProtocolCalls
+        .createContainer(client, containerID, traceID);
+
+    BlockID blockID =
+        ContainerTestHelper.getTestBlockID(containerID);
+    ContainerProtos.ContainerCommandRequestProto requestProto =
+        ContainerTestHelper
+            .getWriteChunkRequest(container.getPipeline(), blockID, 1024);
+    client.sendCommand(requestProto);
+    try {
+      ContainerProtocolCalls.getCommittedBlockLength(client, blockID, traceID);
+      Assert.fail("Expected Exception not thrown");
+    } catch (StorageContainerException sce) {
+      Assert.assertEquals(ContainerProtos.Result.BLOCK_NOT_COMMITTED,
+          sce.getResult());
+    }
+    // now close the container, it should auto commit pending open blocks
+    ContainerProtocolCalls
+        .closeContainer(client, containerID, traceID);
+    ContainerProtos.GetCommittedBlockLengthResponseProto response =
+        ContainerProtocolCalls
+            .getCommittedBlockLength(client, blockID, traceID);
+    Assert.assertTrue(response.getBlockLength() == 1024);
+    xceiverClientManager.releaseClient(client);
+  }
 }
\ No newline at end of file




[36/50] [abbrv] hadoop git commit: YARN-8593. Add RM web service endpoint to get user information. Contributed by Akhil PB.

Posted by bo...@apache.org.
YARN-8593. Add RM web service endpoint to get user information. Contributed by Akhil PB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/735b4925
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/735b4925
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/735b4925

Branch: refs/heads/YARN-7402
Commit: 735b4925569541fb8e65dc0c668ccc2aa2ffb30b
Parents: 23f3942
Author: Sunil G <su...@apache.org>
Authored: Thu Aug 2 08:34:09 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Thu Aug 2 08:34:09 2018 +0530

----------------------------------------------------------------------
 .../server/resourcemanager/ResourceManager.java |  4 ++
 .../resourcemanager/webapp/RMWSConsts.java      |  3 +
 .../webapp/RMWebServiceProtocol.java            | 10 +++
 .../resourcemanager/webapp/RMWebServices.java   | 12 ++++
 .../webapp/dao/ClusterUserInfo.java             | 64 ++++++++++++++++++++
 .../webapp/TestRMWebServices.java               | 21 +++++++
 .../webapp/DefaultRequestInterceptorREST.java   |  8 +++
 .../webapp/FederationInterceptorREST.java       |  6 ++
 .../server/router/webapp/RouterWebServices.java | 12 ++++
 .../webapp/MockRESTRequestInterceptor.java      |  6 ++
 .../PassThroughRESTRequestInterceptor.java      |  6 ++
 11 files changed, 152 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/735b4925/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index f14d440..bb85b67 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -222,6 +222,10 @@ public class ResourceManager extends CompositeService implements Recoverable {
     return clusterTimeStamp;
   }
 
+  public String getRMLoginUser() {
+    return rmLoginUGI.getShortUserName();
+  }
+
   @VisibleForTesting
   protected static void setClusterTimeStamp(long timestamp) {
     clusterTimeStamp = timestamp;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/735b4925/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
index 9822878..a3fd2a9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
@@ -36,6 +36,9 @@ public final class RMWSConsts {
   /** Path for {@code RMWebServiceProtocol#getClusterInfo}. */
   public static final String INFO = "/info";
 
+  /** Path for {@code RMWebServiceProtocol#getClusterUserInfo}. */
+  public static final String CLUSTER_USER_INFO = "/userinfo";
+
   /** Path for {@code RMWebServiceProtocol#getClusterMetricsInfo}. */
   public static final String METRICS = "/metrics";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/735b4925/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java
index 85ea07d..a310853 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmi
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterUserInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.DelegationToken;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelsToNodesInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo;
@@ -88,6 +89,15 @@ public interface RMWebServiceProtocol {
    */
   ClusterInfo getClusterInfo();
 
+
+  /**
+   * This method retrieves the cluster user information, and it is reachable by using
+   * {@link RMWSConsts#CLUSTER_USER_INFO}.
+   *
+   * @return the cluster user information
+   */
+  ClusterUserInfo getClusterUserInfo(HttpServletRequest hsr);
+
   /**
    * This method retrieves the cluster metrics information, and it is reachable
    * by using {@link RMWSConsts#METRICS}.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/735b4925/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index 4527a02..7752fa2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -160,6 +160,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterUserInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.DelegationToken;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FairSchedulerInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FifoSchedulerInfo;
@@ -336,6 +337,17 @@ public class RMWebServices extends WebServices implements RMWebServiceProtocol {
   }
 
   @GET
+  @Path(RMWSConsts.CLUSTER_USER_INFO)
+  @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
+      MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
+  public ClusterUserInfo getClusterUserInfo(@Context HttpServletRequest hsr) {
+    initForReadableEndpoints();
+    UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
+    return new ClusterUserInfo(this.rm, callerUGI);
+  }
+
+  @GET
   @Path(RMWSConsts.METRICS)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })

http://git-wip-us.apache.org/repos/asf/hadoop/blob/735b4925/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterUserInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterUserInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterUserInfo.java
new file mode 100644
index 0000000..7a6bd40
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterUserInfo.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+
+/**
+ * The YARN UI has no centralized login mechanism. When UI2 is accessed from a kerberized shell, the user who
+ * placed the request to YARN needs to be displayed in the UI. Because requests from UI2 are routed via a proxy, only
+ * the RM can provide the user who placed the request. This DAO object exposes both the requested user and the RM
+ * login user; the response sent by the RM carries the authenticated user rather than the proxy user.
+ * Displaying the authenticated user in the browser avoids confusion for the end user.
+ */
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.FIELD)
+@InterfaceStability.Unstable
+public class ClusterUserInfo {
+
+    // User who has started the RM
+    protected String rmLoginUser;
+    // User who has placed the request
+    protected String requestedUser;
+
+    public ClusterUserInfo() {
+    }
+
+    public ClusterUserInfo(ResourceManager rm, UserGroupInformation ugi) {
+        this.rmLoginUser = rm.getRMLoginUser();
+        if (ugi != null) {
+            this.requestedUser = ugi.getShortUserName();
+        } else {
+            this.requestedUser = "UNKNOWN_USER";
+        }
+    }
+
+    public String getRmLoginUser() {
+        return rmLoginUser;
+    }
+
+    public String getRequestedUser() {
+        return requestedUser;
+    }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/735b4925/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
index 3902889..2f67c44 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Capacity
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterUserInfo;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.util.AdHocLogDumper;
 import org.apache.hadoop.yarn.util.YarnVersionInfo;
@@ -843,4 +844,24 @@ public class TestRMWebServices extends JerseyTestBase {
     Assert.assertFalse(webSvc.checkUserAccessToQueue("queue", "yarn",
         QueueACL.ADMINISTER_QUEUE.name(), mockHsr).isAllowed());
   }
+
+  @Test
+  public void testClusterUserInfo() throws JSONException, Exception {
+    ResourceManager mockRM = mock(ResourceManager.class);
+    Configuration conf = new YarnConfiguration();
+    HttpServletRequest mockHsr = mockHttpServletRequestByUserName("admin");
+    when(mockRM.getRMLoginUser()).thenReturn("yarn");
+    RMWebServices webSvc =
+            new RMWebServices(mockRM, conf, mock(HttpServletResponse.class));
+    ClusterUserInfo userInfo = webSvc.getClusterUserInfo(mockHsr);
+    verifyClusterUserInfo(userInfo, "yarn", "admin");
+  }
+
+  public void verifyClusterUserInfo(ClusterUserInfo userInfo,
+            String rmLoginUser, String requestedUser) {
+    assertEquals("rmLoginUser doesn't match: ",
+            rmLoginUser, userInfo.getRmLoginUser());
+    assertEquals("requestedUser doesn't match: ",
+            requestedUser, userInfo.getRequestedUser());
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/735b4925/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java
index 53e5def..4110c89 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmi
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterUserInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.DelegationToken;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelsToNodesInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo;
@@ -104,6 +105,13 @@ public class DefaultRequestInterceptorREST
   }
 
   @Override
+  public ClusterUserInfo getClusterUserInfo(HttpServletRequest hsr) {
+    return RouterWebServiceUtil.genericForward(webAppAddress, hsr,
+            ClusterUserInfo.class, HTTPMethods.GET,
+            RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.CLUSTER_USER_INFO, null, null);
+  }
+
+  @Override
   public ClusterMetricsInfo getClusterMetricsInfo() {
     return RouterWebServiceUtil.genericForward(webAppAddress, null,
         ClusterMetricsInfo.class, HTTPMethods.GET,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/735b4925/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
index 51dfb00..40addc9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
@@ -72,6 +72,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmi
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterUserInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.DelegationToken;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelsToNodesInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo;
@@ -1044,6 +1045,11 @@ public class FederationInterceptorREST extends AbstractRESTRequestInterceptor {
   }
 
   @Override
+  public ClusterUserInfo getClusterUserInfo(HttpServletRequest hsr) {
+    throw new NotImplementedException("Code is not implemented");
+  }
+
+  @Override
   public SchedulerTypeInfo getSchedulerInfo() {
     throw new NotImplementedException("Code is not implemented");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/735b4925/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java
index 49de588..28bf859 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java
@@ -68,6 +68,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmi
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterUserInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.DelegationToken;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelsToNodesInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo;
@@ -327,6 +328,17 @@ public class RouterWebServices implements RMWebServiceProtocol {
   }
 
   @GET
+  @Path(RMWSConsts.CLUSTER_USER_INFO)
+  @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
+          MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
+  public ClusterUserInfo getClusterUserInfo(@Context HttpServletRequest hsr) {
+    init();
+    RequestInterceptorChainWrapper pipeline = getInterceptorChain(hsr);
+    return pipeline.getRootInterceptor().getClusterUserInfo(hsr);
+  }
+
+  @GET
   @Path(RMWSConsts.METRICS)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })

http://git-wip-us.apache.org/repos/asf/hadoop/blob/735b4925/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockRESTRequestInterceptor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockRESTRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockRESTRequestInterceptor.java
index 0007843..f914d73 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockRESTRequestInterceptor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockRESTRequestInterceptor.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmi
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterUserInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.DelegationToken;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelsToNodesInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo;
@@ -83,6 +84,11 @@ public class MockRESTRequestInterceptor extends AbstractRESTRequestInterceptor {
   }
 
   @Override
+  public ClusterUserInfo getClusterUserInfo(HttpServletRequest hsr) {
+    return new ClusterUserInfo();
+  }
+
+  @Override
   public ClusterMetricsInfo getClusterMetricsInfo() {
     return new ClusterMetricsInfo();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/735b4925/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java
index 72fd442..7d05a5a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmi
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterUserInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.DelegationToken;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelsToNodesInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo;
@@ -108,6 +109,11 @@ public class PassThroughRESTRequestInterceptor
   }
 
   @Override
+  public ClusterUserInfo getClusterUserInfo(HttpServletRequest hsr) {
+    return getNextInterceptor().getClusterUserInfo(hsr);
+  }
+
+  @Override
   public ClusterMetricsInfo getClusterMetricsInfo() {
     return getNextInterceptor().getClusterMetricsInfo();
   }
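
For reference, a minimal sketch of calling the new endpoint from a Java client. The RM address, port and the JSON field names are assumptions drawn from the ClusterUserInfo DAO above and the usual RM web service root path (/ws/v1/cluster); it is illustrative, not part of the patch.

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    /** Minimal sketch: fetch /ws/v1/cluster/userinfo from the RM and print the raw JSON. */
    public class ClusterUserInfoClient {
      public static void main(String[] args) throws Exception {
        // "rm-http-address:8088" is a placeholder; use the real ResourceManager web address.
        URL url = new URL("http://rm-http-address:8088/ws/v1/cluster/userinfo");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        conn.setRequestProperty("Accept", "application/json");
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
          String line;
          while ((line = in.readLine()) != null) {
            // Expected fields (from the ClusterUserInfo DAO): rmLoginUser and requestedUser.
            System.out.println(line);
          }
        } finally {
          conn.disconnect();
        }
      }
    }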



[39/50] [abbrv] hadoop git commit: YARN-8592. [UI2] rmip:port/ui2 endpoint shows a blank page in windows OS and Chrome browser. Contributed by Akhil PB.

Posted by bo...@apache.org.
YARN-8592. [UI2] rmip:port/ui2 endpoint shows a blank page in windows OS and Chrome browser. Contributed by Akhil PB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/97870ec1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/97870ec1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/97870ec1

Branch: refs/heads/YARN-7402
Commit: 97870ec1f6e40ee863333f29411d71b6c687bbed
Parents: 1ea8116
Author: Sunil G <su...@apache.org>
Authored: Thu Aug 2 16:10:54 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Thu Aug 2 16:10:54 2018 +0530

----------------------------------------------------------------------
 .../src/main/webapp/app/utils/date-utils.js           | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/97870ec1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/date-utils.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/date-utils.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/date-utils.js
index 6a9780c..4abdc72 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/date-utils.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/date-utils.js
@@ -16,12 +16,18 @@
  * limitations under the License.
  */
 
-
 const defaultTz = "America/Los_Angeles";
 
 const getDefaultTimezone = () => {
-  return moment.tz.guess() || defaultTz;
+  let timezone = defaultTz;
+  try {
+    timezone = moment.tz.guess();
+  } catch (e) {
+    console.log(e);
+  }
+  return timezone || defaultTz;
 };
 
-export const convertTimestampWithTz = (timestamp, format = "YYYY/MM/DD") =>
-  moment.tz(parseInt(timestamp), getDefaultTimezone()).format(format);
+export const convertTimestampWithTz = (timestamp, format = "YYYY/MM/DD") => {
+  return moment.tz(parseInt(timestamp), getDefaultTimezone()).format(format);
+};



[34/50] [abbrv] hadoop git commit: HADOOP-15476. Fix logging for split-dns multihome. Contributed by Ajay Kumar.

Posted by bo...@apache.org.
HADOOP-15476. Fix logging for split-dns multihome. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2e29acb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2e29acb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2e29acb

Branch: refs/heads/YARN-7402
Commit: f2e29acbfa0b7e1fcecbdcf3e791c96114b456a5
Parents: 603a574
Author: Arpit Agarwal <ar...@apache.org>
Authored: Wed Aug 1 12:32:01 2018 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Wed Aug 1 12:32:01 2018 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/ipc/Client.java            | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2e29acb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 163e80d..e147048 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -677,7 +677,8 @@ public class Client implements AutoCloseable {
                 this.socket.setReuseAddress(true);
                 localAddr = NetUtils.bindToLocalAddress(localAddr,
                     bindToWildCardAddress);
-                LOG.debug("Binding {} to {}", principal, localAddr);
+                LOG.debug("Binding {} to {}", principal,
+                    (bindToWildCardAddress) ? "0.0.0.0" : localAddr);
                 this.socket.bind(new InetSocketAddress(localAddr, 0));
               }
             }
@@ -1281,9 +1282,6 @@ public class Client implements AutoCloseable {
     this.bindToWildCardAddress = conf
         .getBoolean(CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_KEY,
             CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_DEFAULT);
-    LOG.debug("{} set to true. Will bind client sockets to wildcard "
-            + "address.",
-        CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_KEY);
 
     this.clientId = ClientId.getClientId();
     this.sendParamsExecutor = clientExcecutorFactory.refAndGetInstance();
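
For context, a minimal sketch of turning on the wildcard-bind behaviour that the relocated debug statement now reports. It uses only the configuration key already referenced in the diff; the class name is a placeholder and the snippet is illustrative, not part of this change.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;

    public class WildcardBindExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Ask IPC clients to bind their sockets to the wildcard address (0.0.0.0);
        // with this patch the bind target is logged where the socket is actually bound.
        conf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_KEY, true);
        System.out.println(CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_KEY + " = "
            + conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_KEY,
                CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_DEFAULT));
      }
    }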



[29/50] [abbrv] hadoop git commit: YARN-8522. Application fails with InvalidResourceRequestException. (Zian Chen via wangda)

Posted by bo...@apache.org.
YARN-8522. Application fails with InvalidResourceRequestException. (Zian Chen via wangda)

Change-Id: I34dd7fa49bd4d10580c4a78051033b1068d28f1e


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5cc8e991
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5cc8e991
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5cc8e991

Branch: refs/heads/YARN-7402
Commit: 5cc8e99147276a059979813f7fd323dd7d77b248
Parents: f4db753
Author: Wangda Tan <wa...@apache.org>
Authored: Tue Jul 31 17:48:44 2018 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Tue Jul 31 22:34:53 2018 -0700

----------------------------------------------------------------------
 .../pb/ApplicationSubmissionContextPBImpl.java  | 87 +++++++++++---------
 1 file changed, 46 insertions(+), 41 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cc8e991/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java
index 0c91e18..b30224e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java
@@ -84,7 +84,7 @@ extends ApplicationSubmissionContext {
     viaProto = true;
   }
   
-  public ApplicationSubmissionContextProto getProto() {
+  public synchronized ApplicationSubmissionContextProto getProto() {
       mergeLocalToProto();
     proto = viaProto ? proto : builder.build();
     viaProto = true;
@@ -164,7 +164,7 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public Priority getPriority() {
+  public synchronized Priority getPriority() {
     ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
     if (this.priority != null) {
       return this.priority;
@@ -177,7 +177,7 @@ extends ApplicationSubmissionContext {
   }
   
   @Override
-  public void setPriority(Priority priority) {
+  public synchronized void setPriority(Priority priority) {
     maybeInitBuilder();
     if (priority == null)
       builder.clearPriority();
@@ -185,7 +185,7 @@ extends ApplicationSubmissionContext {
   }
   
   @Override
-  public ApplicationId getApplicationId() {
+  public synchronized ApplicationId getApplicationId() {
     ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
     if (this.applicationId != null) {
       return applicationId;
@@ -198,7 +198,7 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public void setApplicationId(ApplicationId applicationId) {
+  public synchronized void setApplicationId(ApplicationId applicationId) {
     maybeInitBuilder();
     if (applicationId == null)
       builder.clearApplicationId();
@@ -206,7 +206,7 @@ extends ApplicationSubmissionContext {
   }
   
   @Override
-  public String getApplicationName() {
+  public synchronized String getApplicationName() {
     ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
     if (!p.hasApplicationName()) {
       return null;
@@ -215,7 +215,7 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public void setApplicationName(String applicationName) {
+  public synchronized void setApplicationName(String applicationName) {
     maybeInitBuilder();
     if (applicationName == null) {
       builder.clearApplicationName();
@@ -225,7 +225,7 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public String getQueue() {
+  public synchronized String getQueue() {
     ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
     if (!p.hasQueue()) {
       return null;
@@ -234,7 +234,7 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public String getApplicationType() {
+  public synchronized String getApplicationType() {
     ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
     if (!p.hasApplicationType()) {
       return null;
@@ -252,13 +252,13 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public Set<String> getApplicationTags() {
+  public synchronized Set<String> getApplicationTags() {
     initApplicationTags();
     return this.applicationTags;
   }
 
   @Override
-  public void setQueue(String queue) {
+  public synchronized void setQueue(String queue) {
     maybeInitBuilder();
     if (queue == null) {
       builder.clearQueue();
@@ -268,7 +268,7 @@ extends ApplicationSubmissionContext {
   }
   
   @Override
-  public void setApplicationType(String applicationType) {
+  public synchronized void setApplicationType(String applicationType) {
     maybeInitBuilder();
     if (applicationType == null) {
       builder.clearApplicationType();
@@ -296,7 +296,7 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public void setApplicationTags(Set<String> tags) {
+  public synchronized void setApplicationTags(Set<String> tags) {
     maybeInitBuilder();
     if (tags == null || tags.isEmpty()) {
       builder.clearApplicationTags();
@@ -312,7 +312,7 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public ContainerLaunchContext getAMContainerSpec() {
+  public synchronized ContainerLaunchContext getAMContainerSpec() {
     ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
     if (this.amContainer != null) {
       return amContainer;
@@ -325,7 +325,8 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public void setAMContainerSpec(ContainerLaunchContext amContainer) {
+  public synchronized void
+      setAMContainerSpec(ContainerLaunchContext amContainer) {
     maybeInitBuilder();
     if (amContainer == null) {
       builder.clearAmContainerSpec();
@@ -334,44 +335,44 @@ extends ApplicationSubmissionContext {
   }
   
   @Override
-  public boolean getUnmanagedAM() {
+  public synchronized boolean getUnmanagedAM() {
     ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
     return p.getUnmanagedAm();
   }
   
   @Override
-  public void setUnmanagedAM(boolean value) {
+  public synchronized void setUnmanagedAM(boolean value) {
     maybeInitBuilder();
     builder.setUnmanagedAm(value);
   }
   
   @Override
-  public boolean getCancelTokensWhenComplete() {
+  public synchronized boolean getCancelTokensWhenComplete() {
     ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
     //There is a default so cancelTokens should never be null
     return p.getCancelTokensWhenComplete();
   }
   
   @Override
-  public void setCancelTokensWhenComplete(boolean cancel) {
+  public synchronized void setCancelTokensWhenComplete(boolean cancel) {
     maybeInitBuilder();
     builder.setCancelTokensWhenComplete(cancel);
   }
 
   @Override
-  public int getMaxAppAttempts() {
+  public synchronized int getMaxAppAttempts() {
     ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
     return p.getMaxAppAttempts();
   }
 
   @Override
-  public void setMaxAppAttempts(int maxAppAttempts) {
+  public synchronized void setMaxAppAttempts(int maxAppAttempts) {
     maybeInitBuilder();
     builder.setMaxAppAttempts(maxAppAttempts);
   }
 
   @Override
-  public Resource getResource() {
+  public synchronized Resource getResource() {
     ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
     if (this.resource != null) {
       return this.resource;
@@ -384,7 +385,7 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public void setResource(Resource resource) {
+  public synchronized void setResource(Resource resource) {
     maybeInitBuilder();
     if (resource == null) {
       builder.clearResource();
@@ -393,7 +394,7 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public ReservationId getReservationID() {
+  public synchronized ReservationId getReservationID() {
     ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
     if (reservationId != null) {
       return reservationId;
@@ -406,7 +407,7 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public void setReservationID(ReservationId reservationID) {
+  public synchronized void setReservationID(ReservationId reservationID) {
     maybeInitBuilder();
     if (reservationID == null) {
       builder.clearReservationId();
@@ -416,14 +417,14 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public void
+  public synchronized void
       setKeepContainersAcrossApplicationAttempts(boolean keepContainers) {
     maybeInitBuilder();
     builder.setKeepContainersAcrossApplicationAttempts(keepContainers);
   }
 
   @Override
-  public boolean getKeepContainersAcrossApplicationAttempts() {
+  public synchronized boolean getKeepContainersAcrossApplicationAttempts() {
     ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
     return p.getKeepContainersAcrossApplicationAttempts();
   }
@@ -481,7 +482,7 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public String getNodeLabelExpression() {
+  public synchronized String getNodeLabelExpression() {
     ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
     if (!p.hasNodeLabelExpression()) {
       return null;
@@ -490,7 +491,7 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public void setNodeLabelExpression(String labelExpression) {
+  public synchronized void setNodeLabelExpression(String labelExpression) {
     maybeInitBuilder();
     if (labelExpression == null) {
       builder.clearNodeLabelExpression();
@@ -501,7 +502,7 @@ extends ApplicationSubmissionContext {
   
   @Override
   @Deprecated
-  public ResourceRequest getAMContainerResourceRequest() {
+  public synchronized ResourceRequest getAMContainerResourceRequest() {
     List<ResourceRequest> reqs = getAMContainerResourceRequests();
     if (reqs == null || reqs.isEmpty()) {
       return null;
@@ -510,7 +511,7 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public List<ResourceRequest> getAMContainerResourceRequests() {
+  public synchronized List<ResourceRequest> getAMContainerResourceRequests() {
     ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
     if (this.amResourceRequests != null) {
       return amResourceRequests;
@@ -525,7 +526,8 @@ extends ApplicationSubmissionContext {
 
   @Override
   @Deprecated
-  public void setAMContainerResourceRequest(ResourceRequest request) {
+  public synchronized void setAMContainerResourceRequest(
+      ResourceRequest request) {
     maybeInitBuilder();
     if (request == null) {
       builder.clearAmContainerResourceRequest();
@@ -534,7 +536,8 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public void setAMContainerResourceRequests(List<ResourceRequest> requests) {
+  public synchronized void setAMContainerResourceRequests(
+      List<ResourceRequest> requests) {
     maybeInitBuilder();
     if (requests == null) {
       builder.clearAmContainerResourceRequest();
@@ -543,13 +546,13 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public long getAttemptFailuresValidityInterval() {
+  public synchronized long getAttemptFailuresValidityInterval() {
     ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
     return p.getAttemptFailuresValidityInterval();
   }
 
   @Override
-  public void setAttemptFailuresValidityInterval(
+  public synchronized void setAttemptFailuresValidityInterval(
       long attemptFailuresValidityInterval) {
     maybeInitBuilder();
     builder.setAttemptFailuresValidityInterval(attemptFailuresValidityInterval);
@@ -566,7 +569,7 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public LogAggregationContext getLogAggregationContext() {
+  public synchronized LogAggregationContext getLogAggregationContext() {
     ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
     if (this.logAggregationContext != null) {
       return this.logAggregationContext;
@@ -579,7 +582,7 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public void setLogAggregationContext(
+  public synchronized void setLogAggregationContext(
       LogAggregationContext logAggregationContext) {
     maybeInitBuilder();
     if (logAggregationContext == null)
@@ -596,7 +599,8 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public Map<ApplicationTimeoutType, Long> getApplicationTimeouts() {
+  public synchronized
+      Map<ApplicationTimeoutType, Long> getApplicationTimeouts() {
     initApplicationTimeout();
     return this.applicationTimeouts;
   }
@@ -618,7 +622,7 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public void setApplicationTimeouts(
+  public synchronized void setApplicationTimeouts(
       Map<ApplicationTimeoutType, Long> appTimeouts) {
     if (appTimeouts == null) {
       return;
@@ -719,13 +723,14 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public Map<String, String> getApplicationSchedulingPropertiesMap() {
+  public synchronized
+      Map<String, String> getApplicationSchedulingPropertiesMap() {
     initApplicationSchedulingProperties();
     return this.schedulingProperties;
   }
 
   @Override
-  public void setApplicationSchedulingPropertiesMap(
+  public synchronized void setApplicationSchedulingPropertiesMap(
       Map<String, String> schedulingPropertyMap) {
     if (schedulingPropertyMap == null) {
       return;
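
As a rough illustration of what the added synchronized modifiers serialize: without them, a thread calling a setter can interleave with another thread running getProto()'s builder-to-proto merge on the same record. The sketch below is hypothetical, only demonstrates the access pattern, and is not a reproduction of the original InvalidResourceRequestException.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
    import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl;
    import org.apache.hadoop.yarn.util.Records;

    public class ContextConcurrencySketch {
      public static void main(String[] args) throws Exception {
        // In a normal YARN runtime Records.newRecord returns the PB-backed implementation.
        ApplicationSubmissionContext ctx =
            Records.newRecord(ApplicationSubmissionContext.class);
        ExecutorService pool = Executors.newFixedThreadPool(2);
        // Writer: mutates the shared record through its setters.
        pool.execute(() -> {
          for (int i = 0; i < 10000; i++) {
            ctx.setApplicationName("app-" + i);
          }
        });
        // Reader: repeatedly forces builder-to-proto merges via getProto().
        pool.execute(() -> {
          for (int i = 0; i < 10000; i++) {
            ((ApplicationSubmissionContextPBImpl) ctx).getProto();
          }
        });
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.MINUTES);
      }
    }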



[45/50] [abbrv] hadoop git commit: YARN-3660. [GPG] Federation Global Policy Generator (service hook only). (Contributed by Botong Huang via curino)

Posted by bo...@apache.org.
YARN-3660. [GPG] Federation Global Policy Generator (service hook only). (Contributed by Botong Huang via curino)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/48a83794
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/48a83794
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/48a83794

Branch: refs/heads/YARN-7402
Commit: 48a83794a7f7b6edf815f09cce8cfc054aaaaa97
Parents: 7526815
Author: Carlo Curino <cu...@apache.org>
Authored: Thu Jan 18 17:21:06 2018 -0800
Committer: Botong Huang <bo...@apache.org>
Committed: Thu Aug 2 09:59:48 2018 -0700

----------------------------------------------------------------------
 hadoop-project/pom.xml                          |   6 +
 hadoop-yarn-project/hadoop-yarn/bin/yarn        |   5 +
 hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd    |  55 +++++---
 .../hadoop-yarn/conf/yarn-env.sh                |  12 ++
 .../pom.xml                                     |  98 +++++++++++++
 .../globalpolicygenerator/GPGContext.java       |  31 +++++
 .../globalpolicygenerator/GPGContextImpl.java   |  41 ++++++
 .../GlobalPolicyGenerator.java                  | 136 +++++++++++++++++++
 .../globalpolicygenerator/package-info.java     |  19 +++
 .../TestGlobalPolicyGenerator.java              |  38 ++++++
 .../hadoop-yarn/hadoop-yarn-server/pom.xml      |   1 +
 hadoop-yarn-project/pom.xml                     |   4 +
 12 files changed, 424 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/48a83794/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 387a3da..ede6af4 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -446,6 +446,12 @@
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-yarn-server-globalpolicygenerator</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-services-core</artifactId>
         <version>${hadoop.version}</version>
       </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48a83794/hadoop-yarn-project/hadoop-yarn/bin/yarn
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index 69afe6f..8061859 100755
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -39,6 +39,7 @@ function hadoop_usage
   hadoop_add_subcommand "container" client "prints container(s) report"
   hadoop_add_subcommand "daemonlog" admin "get/set the log level for each daemon"
   hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables"
+  hadoop_add_subcommand "globalpolicygenerator" daemon "run the Global Policy Generator"
   hadoop_add_subcommand "jar <jar>" client "run a jar file"
   hadoop_add_subcommand "logs" client "dump container logs"
   hadoop_add_subcommand "node" admin "prints node report(s)"
@@ -103,6 +104,10 @@ ${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"
       echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'"
       exit 0
     ;;
+    globalpolicygenerator)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.globalpolicygenerator.GlobalPolicyGenerator'
+    ;;
     jar)
       HADOOP_CLASSNAME=org.apache.hadoop.util.RunJar
     ;;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48a83794/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
index e1ac112..bebfd71 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
@@ -134,6 +134,10 @@ if "%1" == "--loglevel" (
     set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-router\target\classes
   )
 
+  if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-globalpolicygenerator\target\classes (
+    set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-globalpolicygenerator\target\classes
+  )
+
   if exist %HADOOP_YARN_HOME%\build\test\classes (
     set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\build\test\classes
   )
@@ -155,7 +159,7 @@ if "%1" == "--loglevel" (
 
   set yarncommands=resourcemanager nodemanager proxyserver rmadmin version jar ^
      application applicationattempt container node queue logs daemonlog historyserver ^
-     timelineserver timelinereader router classpath
+     timelineserver timelinereader router globalpolicygenerator classpath
   for %%i in ( %yarncommands% ) do (
     if %yarn-command% == %%i set yarncommand=true
   )
@@ -259,7 +263,13 @@ goto :eof
 :router
   set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\router-config\log4j.properties
   set CLASS=org.apache.hadoop.yarn.server.router.Router
-  set YARN_OPTS=%YARN_OPTS% %HADOOP_ROUTER_OPTS%
+  set YARN_OPTS=%YARN_OPTS% %YARN_ROUTER_OPTS%
+  goto :eof
+
+:globalpolicygenerator
+  set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\globalpolicygenerator-config\log4j.properties
+  set CLASS=org.apache.hadoop.yarn.server.globalpolicygenerator.GlobalPolicyGenerator
+  set YARN_OPTS=%YARN_OPTS% %YARN_GLOBALPOLICYGENERATOR_OPTS%
   goto :eof
 
 :nodemanager
@@ -336,27 +346,28 @@ goto :eof
 :print_usage
   @echo Usage: yarn [--config confdir] [--loglevel loglevel] COMMAND
   @echo        where COMMAND is one of:
-  @echo   resourcemanager      run the ResourceManager
-  @echo   nodemanager          run a nodemanager on each slave
-  @echo   router               run the Router daemon
-  @echo   timelineserver       run the timeline server
-  @echo   timelinereader       run the timeline reader server
-  @echo   rmadmin              admin tools
-  @echo   version              print the version
-  @echo   jar ^<jar^>          run a jar file
-  @echo   application          prints application(s) report/kill application
-  @echo   applicationattempt   prints applicationattempt(s) report
-  @echo   cluster              prints cluster information
-  @echo   container            prints container(s) report
-  @echo   node                 prints node report(s)
-  @echo   queue                prints queue information
-  @echo   logs                 dump container logs
-  @echo   schedulerconf        updates scheduler configuration
-  @echo   classpath            prints the class path needed to get the
-  @echo                        Hadoop jar and the required libraries
-  @echo   daemonlog            get/set the log level for each daemon
+  @echo   resourcemanager        run the ResourceManager
+  @echo   nodemanager            run a nodemanager on each slave
+  @echo   router                 run the Router daemon
+  @echo   globalpolicygenerator  run the Global Policy Generator
+  @echo   timelineserver         run the timeline server
+  @echo   timelinereader         run the timeline reader server
+  @echo   rmadmin                admin tools
+  @echo   version                print the version
+  @echo   jar ^<jar^>            run a jar file
+  @echo   application            prints application(s) report/kill application
+  @echo   applicationattempt     prints applicationattempt(s) report
+  @echo   cluster                prints cluster information
+  @echo   container              prints container(s) report
+  @echo   node                   prints node report(s)
+  @echo   queue                  prints queue information
+  @echo   logs                   dump container logs
+  @echo   schedulerconf          updates scheduler configuration
+  @echo   classpath              prints the class path needed to get the
+  @echo                          Hadoop jar and the required libraries
+  @echo   daemonlog              get/set the log level for each daemon
   @echo   or
-  @echo   CLASSNAME            run the class named CLASSNAME
+  @echo   CLASSNAME              run the class named CLASSNAME
   @echo Most commands print help when invoked w/o parameters.
 
 endlocal

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48a83794/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh b/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh
index 76d1d6b..ae5af49 100644
--- a/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh
+++ b/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh
@@ -150,6 +150,18 @@
 #export YARN_ROUTER_OPTS=
 
 ###
+# Global Policy Generator specific parameters
+###
+
+# Specify the JVM options to be used when starting the GPG.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# See ResourceManager for some examples
+#
+#export YARN_GLOBALPOLICYGENERATOR_OPTS=
+
+###
 # Registry DNS specific parameters
 ###
 # For privileged registry DNS, user to run as after dropping privileges

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48a83794/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
new file mode 100644
index 0000000..9bbb936
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
@@ -0,0 +1,98 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <artifactId>hadoop-yarn-server</artifactId>
+    <groupId>org.apache.hadoop</groupId>
+    <version>3.1.0-SNAPSHOT</version>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-yarn-server-globalpolicygenerator</artifactId>
+  <version>3.1.0-SNAPSHOT</version>
+  <name>hadoop-yarn-server-globalpolicygenerator</name>
+
+  <properties>
+    <!-- Needed for generating FindBugs warnings using parent pom -->
+    <yarn.basedir>${project.parent.parent.basedir}</yarn.basedir>
+  </properties>
+
+  <dependencies>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-common</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-api</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-common</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-common</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.hsqldb</groupId>
+      <artifactId>hsqldb</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+      </plugin>
+    </plugins>
+  </build>
+</project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48a83794/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContext.java
new file mode 100644
index 0000000..da8a383
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContext.java
@@ -0,0 +1,31 @@
+/**
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator;
+
+import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+
+/**
+ * Context for Global Policy Generator.
+ */
+public interface GPGContext {
+
+  FederationStateStoreFacade getStateStoreFacade();
+
+  void setStateStoreFacade(FederationStateStoreFacade facade);
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48a83794/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContextImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContextImpl.java
new file mode 100644
index 0000000..3884ace
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContextImpl.java
@@ -0,0 +1,41 @@
+/**
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator;
+
+import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+
+/**
+ * Context implementation for Global Policy Generator.
+ */
+public class GPGContextImpl implements GPGContext {
+
+  private FederationStateStoreFacade facade;
+
+  @Override
+  public FederationStateStoreFacade getStateStoreFacade() {
+    return facade;
+  }
+
+  @Override
+  public void setStateStoreFacade(
+      FederationStateStoreFacade federationStateStoreFacade) {
+    this.facade = federationStateStoreFacade;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48a83794/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
new file mode 100644
index 0000000..c1f7460
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.service.CompositeService;
+import org.apache.hadoop.util.ShutdownHookManager;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The Global Policy Generator (GPG) is a YARN Federation component. By tuning
+ * the Federation policies in the Federation State Store, the GPG oversees the
+ * entire federated cluster and ensures that the system stays tuned and
+ * balanced at all times.
+ *
+ * The GPG operates continuously but out-of-band from all cluster operations,
+ * which allows it to enforce global invariants, affect load balancing, and
+ * trigger the draining of sub-clusters that will undergo maintenance.
+ */
+public class GlobalPolicyGenerator extends CompositeService {
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(GlobalPolicyGenerator.class);
+
+  // YARN Variables
+  private static CompositeServiceShutdownHook gpgShutdownHook;
+  public static final int SHUTDOWN_HOOK_PRIORITY = 30;
+  private AtomicBoolean isStopping = new AtomicBoolean(false);
+  private static final String METRICS_NAME = "Global Policy Generator";
+
+  // Federation Variables
+  private GPGContext gpgContext;
+
+  public GlobalPolicyGenerator() {
+    super(GlobalPolicyGenerator.class.getName());
+    this.gpgContext = new GPGContextImpl();
+  }
+
+  protected void initAndStart(Configuration conf, boolean hasToReboot) {
+    try {
+      // Remove the old hook if we are rebooting.
+      if (hasToReboot && null != gpgShutdownHook) {
+        ShutdownHookManager.get().removeShutdownHook(gpgShutdownHook);
+      }
+
+      gpgShutdownHook = new CompositeServiceShutdownHook(this);
+      ShutdownHookManager.get().addShutdownHook(gpgShutdownHook,
+          SHUTDOWN_HOOK_PRIORITY);
+
+      this.init(conf);
+      this.start();
+    } catch (Throwable t) {
+      LOG.error("Error starting globalpolicygenerator", t);
+      System.exit(-1);
+    }
+  }
+
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    // Set up the context
+    this.gpgContext
+        .setStateStoreFacade(FederationStateStoreFacade.getInstance());
+
+    DefaultMetricsSystem.initialize(METRICS_NAME);
+
+    // super.serviceInit after all services are added
+    super.serviceInit(conf);
+  }
+
+  @Override
+  protected void serviceStart() throws Exception {
+    super.serviceStart();
+  }
+
+  @Override
+  protected void serviceStop() throws Exception {
+    if (this.isStopping.getAndSet(true)) {
+      return;
+    }
+    DefaultMetricsSystem.shutdown();
+    super.serviceStop();
+  }
+
+  public String getName() {
+    return "FederationGlobalPolicyGenerator";
+  }
+
+  public GPGContext getGPGContext() {
+    return this.gpgContext;
+  }
+
+  @SuppressWarnings("resource")
+  public static void startGPG(String[] argv, Configuration conf) {
+    boolean federationEnabled =
+        conf.getBoolean(YarnConfiguration.FEDERATION_ENABLED,
+            YarnConfiguration.DEFAULT_FEDERATION_ENABLED);
+    if (federationEnabled) {
+      Thread.setDefaultUncaughtExceptionHandler(
+          new YarnUncaughtExceptionHandler());
+      StringUtils.startupShutdownMessage(GlobalPolicyGenerator.class, argv,
+          LOG);
+      GlobalPolicyGenerator globalPolicyGenerator = new GlobalPolicyGenerator();
+      globalPolicyGenerator.initAndStart(conf, false);
+    } else {
+      LOG.warn("Federation is not enabled. The gpg cannot start.");
+    }
+  }
+
+  public static void main(String[] argv) {
+    startGPG(argv, new YarnConfiguration());
+  }
+}

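For orientation, here is a minimal sketch (not part of this patch) of launching the GPG programmatically, mirroring the main()/startGPG() contract shown above; the class name GpgLaunchSketch and the explicit federation flag are assumptions for the example only.

    // Hedged sketch: start the GPG from code. startGPG() logs a warning and
    // returns unless YARN federation is enabled in the configuration.
    package org.apache.hadoop.yarn.server.globalpolicygenerator;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class GpgLaunchSketch {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        conf.setBoolean(YarnConfiguration.FEDERATION_ENABLED, true);
        GlobalPolicyGenerator.startGPG(args, conf);
      }
    }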
http://git-wip-us.apache.org/repos/asf/hadoop/blob/48a83794/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/package-info.java
new file mode 100644
index 0000000..abaa57c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/package-info.java
@@ -0,0 +1,19 @@
+/**
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48a83794/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/TestGlobalPolicyGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/TestGlobalPolicyGenerator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/TestGlobalPolicyGenerator.java
new file mode 100644
index 0000000..f657b86
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/TestGlobalPolicyGenerator.java
@@ -0,0 +1,38 @@
+/**
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.junit.Test;
+
+/**
+ * Unit test for GlobalPolicyGenerator.
+ */
+public class TestGlobalPolicyGenerator {
+
+  @Test(timeout = 1000)
+  public void testNonFederation() {
+    Configuration conf = new YarnConfiguration();
+    conf.setBoolean(YarnConfiguration.FEDERATION_ENABLED, false);
+
+    // If GPG starts running, this call will not return
+    GlobalPolicyGenerator.startGPG(new String[0], conf);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48a83794/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
index de4484c..226407b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
@@ -46,5 +46,6 @@
     <module>hadoop-yarn-server-timelineservice-hbase</module>
     <module>hadoop-yarn-server-timelineservice-hbase-tests</module>
     <module>hadoop-yarn-server-router</module>
+    <module>hadoop-yarn-server-globalpolicygenerator</module>
   </modules>
 </project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48a83794/hadoop-yarn-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/pom.xml b/hadoop-yarn-project/pom.xml
index 4593441..311b26e 100644
--- a/hadoop-yarn-project/pom.xml
+++ b/hadoop-yarn-project/pom.xml
@@ -80,6 +80,10 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-globalpolicygenerator</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-yarn-services-core</artifactId>
     </dependency>
   </dependencies>




[06/50] [abbrv] hadoop git commit: YARN-8558. NM recovery level db not cleaned up properly on container finish. Contributed by Bibin A Chundatt.

Posted by bo...@apache.org.
YARN-8558. NM recovery level db not cleaned up properly on container finish. Contributed by Bibin A Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d586841
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d586841
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d586841

Branch: refs/heads/YARN-7402
Commit: 3d586841aba99c7df98b2b4d3e48ec0144bad086
Parents: 59adeb8
Author: bibinchundatt <bi...@apache.org>
Authored: Sat Jul 28 20:52:39 2018 +0530
Committer: bibinchundatt <bi...@apache.org>
Committed: Sat Jul 28 20:52:39 2018 +0530

----------------------------------------------------------------------
 .../recovery/NMLeveldbStateStoreService.java          | 14 ++++++++++----
 .../recovery/TestNMLeveldbStateStoreService.java      |  7 +++++++
 2 files changed, 17 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d586841/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
index 44f5e18..67f642d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
@@ -143,9 +143,9 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
       NM_TOKENS_KEY_PREFIX + PREV_MASTER_KEY_SUFFIX;
   private static final String CONTAINER_TOKENS_KEY_PREFIX =
       "ContainerTokens/";
-  private static final String CONTAINER_TOKENS_CURRENT_MASTER_KEY =
+  private static final String CONTAINER_TOKEN_SECRETMANAGER_CURRENT_MASTER_KEY =
       CONTAINER_TOKENS_KEY_PREFIX + CURRENT_MASTER_KEY_SUFFIX;
-  private static final String CONTAINER_TOKENS_PREV_MASTER_KEY =
+  private static final String CONTAINER_TOKEN_SECRETMANAGER_PREV_MASTER_KEY =
       CONTAINER_TOKENS_KEY_PREFIX + PREV_MASTER_KEY_SUFFIX;
 
   private static final String LOG_DELETER_KEY_PREFIX = "LogDeleters/";
@@ -658,6 +658,12 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
         batch.delete(bytes(keyPrefix + CONTAINER_KILLED_KEY_SUFFIX));
         batch.delete(bytes(keyPrefix + CONTAINER_EXIT_CODE_KEY_SUFFIX));
         batch.delete(bytes(keyPrefix + CONTAINER_UPDATE_TOKEN_SUFFIX));
+        batch.delete(bytes(keyPrefix + CONTAINER_START_TIME_KEY_SUFFIX));
+        batch.delete(bytes(keyPrefix + CONTAINER_LOG_DIR_KEY_SUFFIX));
+        batch.delete(bytes(keyPrefix + CONTAINER_VERSION_KEY_SUFFIX));
+        batch.delete(bytes(keyPrefix + CONTAINER_REMAIN_RETRIES_KEY_SUFFIX));
+        batch.delete(bytes(keyPrefix + CONTAINER_RESTART_TIMES_SUFFIX));
+        batch.delete(bytes(keyPrefix + CONTAINER_WORK_DIR_KEY_SUFFIX));
         List<String> unknownKeysForContainer = containerUnknownKeySuffixes
             .removeAll(containerId);
         for (String unknownKeySuffix : unknownKeysForContainer) {
@@ -1169,13 +1175,13 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   @Override
   public void storeContainerTokenCurrentMasterKey(MasterKey key)
       throws IOException {
-    storeMasterKey(CONTAINER_TOKENS_CURRENT_MASTER_KEY, key);
+    storeMasterKey(CONTAINER_TOKEN_SECRETMANAGER_CURRENT_MASTER_KEY, key);
   }
 
   @Override
   public void storeContainerTokenPreviousMasterKey(MasterKey key)
       throws IOException {
-    storeMasterKey(CONTAINER_TOKENS_PREV_MASTER_KEY, key);
+    storeMasterKey(CONTAINER_TOKEN_SECRETMANAGER_PREV_MASTER_KEY, key);
   }
 
   @Override

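As a design note, the fix above enumerates every per-container key suffix and deletes it inside removeContainer(). The following is a hedged sketch (not the committed code) of the same cleanup expressed as a loop over those suffixes, using only names visible in the hunk above:

    // Hedged refactoring sketch: delete all per-container entries in one loop.
    String[] suffixes = {
        CONTAINER_KILLED_KEY_SUFFIX, CONTAINER_EXIT_CODE_KEY_SUFFIX,
        CONTAINER_UPDATE_TOKEN_SUFFIX, CONTAINER_START_TIME_KEY_SUFFIX,
        CONTAINER_LOG_DIR_KEY_SUFFIX, CONTAINER_VERSION_KEY_SUFFIX,
        CONTAINER_REMAIN_RETRIES_KEY_SUFFIX, CONTAINER_RESTART_TIMES_SUFFIX,
        CONTAINER_WORK_DIR_KEY_SUFFIX};
    for (String suffix : suffixes) {
      batch.delete(bytes(keyPrefix + suffix));
    }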
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d586841/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
index c8c07d1..8a8cfa2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
@@ -28,7 +28,9 @@ import static org.junit.Assert.assertTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.isNull;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.timeout;
+import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
@@ -382,6 +384,11 @@ public class TestNMLeveldbStateStoreService {
     restartStateStore();
     recoveredContainers = stateStore.loadContainersState();
     assertTrue(recoveredContainers.isEmpty());
+    // recover again to check remove clears all containers
+    restartStateStore();
+    NMStateStoreService nmStoreSpy = spy(stateStore);
+    nmStoreSpy.loadContainersState();
+    verify(nmStoreSpy,times(0)).removeContainer(any(ContainerId.class));
   }
 
   private void validateRetryAttempts(ContainerId containerId)




[47/50] [abbrv] hadoop git commit: Updating GPG module pom version post rebase.

Posted by bo...@apache.org.
Updating GPG module pom version post rebase.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3213acd0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3213acd0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3213acd0

Branch: refs/heads/YARN-7402
Commit: 3213acd04980520f37dd1fa47c18c4fd7a3ca339
Parents: f83fc85
Author: Subru Krishnan <su...@apache.org>
Authored: Wed May 30 12:59:22 2018 -0700
Committer: Botong Huang <bo...@apache.org>
Committed: Thu Aug 2 09:59:48 2018 -0700

----------------------------------------------------------------------
 .../hadoop-yarn-server-globalpolicygenerator/pom.xml             | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3213acd0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
index 9398b0b..c137c9e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
@@ -19,12 +19,12 @@
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-yarn-server-globalpolicygenerator</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>hadoop-yarn-server-globalpolicygenerator</name>
 
   <properties>




[35/50] [abbrv] hadoop git commit: YARN-8610. Fixed initiate upgrade error message. Contributed by Chandni Singh

Posted by bo...@apache.org.
YARN-8610.  Fixed initiate upgrade error message.
            Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23f39424
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23f39424
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23f39424

Branch: refs/heads/YARN-7402
Commit: 23f394240e1568a38025e63e9dc0842e8c5235f7
Parents: f2e29ac
Author: Eric Yang <ey...@apache.org>
Authored: Wed Aug 1 20:41:43 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Wed Aug 1 20:41:43 2018 -0400

----------------------------------------------------------------------
 .../java/org/apache/hadoop/yarn/service/client/ServiceClient.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23f39424/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 4b67998..5668d9f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -257,7 +257,7 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
     if (!liveService.getState().equals(ServiceState.STABLE)) {
       String message = service.getName() + " is at " +
           liveService.getState()
-          + " state, upgrade can not be invoked when service is STABLE.";
+          + " state and upgrade can only be initiated when service is STABLE.";
       LOG.error(message);
       throw new YarnException(message);
     }




[02/50] [abbrv] hadoop git commit: YARN-8517. getContainer and getContainers ResourceManager REST API methods are not documented (snemeth via rkanter)

Posted by bo...@apache.org.
YARN-8517. getContainer and getContainers ResourceManager REST API methods are not documented (snemeth via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2cccf406
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2cccf406
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2cccf406

Branch: refs/heads/YARN-7402
Commit: 2cccf4061cc4021c48e29879700dbc94f832b7d1
Parents: fecbac4
Author: Robert Kanter <rk...@apache.org>
Authored: Fri Jul 27 14:35:03 2018 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Fri Jul 27 14:35:03 2018 -0700

----------------------------------------------------------------------
 .../InvalidResourceRequestException.java        |  36 ++
 .../resourcemanager/DefaultAMSProcessor.java    |  23 +-
 .../scheduler/SchedulerUtils.java               |  55 +-
 .../scheduler/TestSchedulerUtils.java           | 630 ++++++++++---------
 4 files changed, 430 insertions(+), 314 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cccf406/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidResourceRequestException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidResourceRequestException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidResourceRequestException.java
index f4fd2fa..1ea9eef 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidResourceRequestException.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidResourceRequestException.java
@@ -30,19 +30,55 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest;
  * 
  */
 public class InvalidResourceRequestException extends YarnException {
+  public static final String LESS_THAN_ZERO_RESOURCE_MESSAGE_TEMPLATE =
+          "Invalid resource request! Cannot allocate containers as "
+                  + "requested resource is less than 0! "
+                  + "Requested resource type=[%s], " + "Requested resource=%s";
+
+  public static final String GREATER_THAN_MAX_RESOURCE_MESSAGE_TEMPLATE =
+          "Invalid resource request! Cannot allocate containers as "
+                  + "requested resource is greater than " +
+                  "maximum allowed allocation. "
+                  + "Requested resource type=[%s], "
+                  + "Requested resource=%s, maximum allowed allocation=%s, "
+                  + "please note that maximum allowed allocation is calculated "
+                  + "by scheduler based on maximum resource of registered "
+                  + "NodeManagers, which might be less than configured "
+                  + "maximum allocation=%s";
+
+  public static final String UNKNOWN_REASON_MESSAGE_TEMPLATE =
+          "Invalid resource request! "
+                  + "Cannot allocate containers for an unknown reason! "
+                  + "Requested resource type=[%s], Requested resource=%s";
+
+  public enum InvalidResourceType {
+    LESS_THAN_ZERO, GREATER_THEN_MAX_ALLOCATION, UNKNOWN;
+  }
 
   private static final long serialVersionUID = 13498237L;
+  private final InvalidResourceType invalidResourceType;
 
   public InvalidResourceRequestException(Throwable cause) {
     super(cause);
+    this.invalidResourceType = InvalidResourceType.UNKNOWN;
   }
 
   public InvalidResourceRequestException(String message) {
+    this(message, InvalidResourceType.UNKNOWN);
+  }
+
+  public InvalidResourceRequestException(String message,
+      InvalidResourceType invalidResourceType) {
     super(message);
+    this.invalidResourceType = invalidResourceType;
   }
 
   public InvalidResourceRequestException(String message, Throwable cause) {
     super(message, cause);
+    this.invalidResourceType = InvalidResourceType.UNKNOWN;
   }
 
+  public InvalidResourceType getInvalidResourceType() {
+    return invalidResourceType;
+  }
 }

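For orientation, a hedged usage sketch (not from the patch) of the typed exception added above; the resource name and the resource string literal are illustrative placeholders only:

    // Hedged sketch: build the typed exception and read the type back.
    InvalidResourceRequestException ex = new InvalidResourceRequestException(
        String.format(
            InvalidResourceRequestException.LESS_THAN_ZERO_RESOURCE_MESSAGE_TEMPLATE,
            "memory-mb", "<memory:-1, vCores:1>"),
        InvalidResourceRequestException.InvalidResourceType.LESS_THAN_ZERO);
    assert ex.getInvalidResourceType()
        == InvalidResourceRequestException.InvalidResourceType.LESS_THAN_ZERO;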
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cccf406/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
index 71558a7..43f73e4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
@@ -53,6 +53,8 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.InvalidContainerReleaseException;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceBlacklistRequestException;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
+import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException
+        .InvalidResourceType;
 import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResoureRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -89,6 +91,12 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
 
+import static org.apache.hadoop.yarn.exceptions
+        .InvalidResourceRequestException.InvalidResourceType
+        .GREATER_THEN_MAX_ALLOCATION;
+import static org.apache.hadoop.yarn.exceptions
+        .InvalidResourceRequestException.InvalidResourceType.LESS_THAN_ZERO;
+
 /**
  * This is the default Application Master Service processor. It has be the
  * last processor in the @{@link AMSProcessingChain}.
@@ -231,8 +239,8 @@ final class DefaultAMSProcessor implements ApplicationMasterServiceProcessor {
           maximumCapacity, app.getQueue(),
           getScheduler(), getRmContext());
     } catch (InvalidResourceRequestException e) {
-      LOG.warn("Invalid resource ask by application " + appAttemptId, e);
-      throw e;
+      RMAppAttempt rmAppAttempt = app.getRMAppAttempt(appAttemptId);
+      handleInvalidResourceException(e, rmAppAttempt);
     }
 
     try {
@@ -336,6 +344,17 @@ final class DefaultAMSProcessor implements ApplicationMasterServiceProcessor {
         allocation.getPreviousAttemptContainers());
   }
 
+  private void handleInvalidResourceException(InvalidResourceRequestException e,
+          RMAppAttempt rmAppAttempt) throws InvalidResourceRequestException {
+    if (e.getInvalidResourceType() == LESS_THAN_ZERO ||
+            e.getInvalidResourceType() == GREATER_THEN_MAX_ALLOCATION) {
+      rmAppAttempt.updateAMLaunchDiagnostics(e.getMessage());
+    }
+    LOG.warn("Invalid resource ask by application " +
+            rmAppAttempt.getAppAttemptId(), e);
+    throw e;
+  }
+
   private void handleNodeUpdates(RMApp app, AllocateResponse allocateResponse) {
     Map<RMNode, NodeUpdateType> updatedNodes = new HashMap<>();
     if(app.pullRMNodeUpdates(updatedNodes) > 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cccf406/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
index 844057e..9b07d37 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
@@ -45,6 +45,8 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.InvalidLabelResourceRequestException;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
+import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException
+        .InvalidResourceType;
 import org.apache.hadoop.yarn.exceptions
         .SchedulerInvalidResoureRequestException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -61,6 +63,15 @@ import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
+import static org.apache.hadoop.yarn.exceptions
+        .InvalidResourceRequestException
+        .GREATER_THAN_MAX_RESOURCE_MESSAGE_TEMPLATE;
+import static org.apache.hadoop.yarn.exceptions
+        .InvalidResourceRequestException
+        .LESS_THAN_ZERO_RESOURCE_MESSAGE_TEMPLATE;
+import static org.apache.hadoop.yarn.exceptions
+        .InvalidResourceRequestException.UNKNOWN_REASON_MESSAGE_TEMPLATE;
+
 /**
  * Utilities shared by schedulers. 
  */
@@ -257,9 +268,9 @@ public class SchedulerUtils {
   }
 
 
-  public static void normalizeAndValidateRequest(ResourceRequest resReq,
-      Resource maximumResource, String queueName, YarnScheduler scheduler,
-      boolean isRecovery, RMContext rmContext, QueueInfo queueInfo)
+  private static void normalizeAndValidateRequest(ResourceRequest resReq,
+          Resource maximumResource, String queueName, YarnScheduler scheduler,
+          boolean isRecovery, RMContext rmContext, QueueInfo queueInfo)
       throws InvalidResourceRequestException {
     Configuration conf = rmContext.getYarnConfiguration();
     // If Node label is not enabled throw exception
@@ -384,13 +395,13 @@ public class SchedulerUtils {
 
       if (requestedRI.getValue() < 0) {
         throwInvalidResourceException(reqResource, availableResource,
-            reqResourceName);
+            reqResourceName, InvalidResourceType.LESS_THAN_ZERO);
       }
 
       boolean valid = checkResource(requestedRI, availableResource);
       if (!valid) {
         throwInvalidResourceException(reqResource, availableResource,
-            reqResourceName);
+            reqResourceName, InvalidResourceType.GREATER_THEN_MAX_ALLOCATION);
       }
     }
   }
@@ -470,18 +481,30 @@ public class SchedulerUtils {
   }
 
   private static void throwInvalidResourceException(Resource reqResource,
-      Resource availableResource, String reqResourceName)
+          Resource maxAllowedAllocation, String reqResourceName,
+          InvalidResourceType invalidResourceType)
       throws InvalidResourceRequestException {
-    throw new InvalidResourceRequestException(
-        "Invalid resource request, requested resource type=[" + reqResourceName
-            + "] < 0 or greater than maximum allowed allocation. Requested "
-            + "resource=" + reqResource + ", maximum allowed allocation="
-            + availableResource
-            + ", please note that maximum allowed allocation is calculated "
-            + "by scheduler based on maximum resource of registered "
-            + "NodeManagers, which might be less than configured "
-            + "maximum allocation="
-            + ResourceUtils.getResourceTypesMaximumAllocation());
+    final String message;
+
+    if (invalidResourceType == InvalidResourceType.LESS_THAN_ZERO) {
+      message = String.format(LESS_THAN_ZERO_RESOURCE_MESSAGE_TEMPLATE,
+          reqResourceName, reqResource);
+    } else if (invalidResourceType ==
+            InvalidResourceType.GREATER_THEN_MAX_ALLOCATION) {
+      message = String.format(GREATER_THAN_MAX_RESOURCE_MESSAGE_TEMPLATE,
+          reqResourceName, reqResource, maxAllowedAllocation,
+          ResourceUtils.getResourceTypesMaximumAllocation());
+    } else if (invalidResourceType == InvalidResourceType.UNKNOWN) {
+      message = String.format(UNKNOWN_REASON_MESSAGE_TEMPLATE, reqResourceName,
+          reqResource);
+    } else {
+      throw new IllegalArgumentException(String.format(
+          "InvalidResourceType argument should be either " + "%s, %s or %s",
+          InvalidResourceType.LESS_THAN_ZERO,
+          InvalidResourceType.GREATER_THEN_MAX_ALLOCATION,
+          InvalidResourceType.UNKNOWN));
+    }
+    throw new InvalidResourceRequestException(message, invalidResourceType);
   }
 
   private static void checkQueueLabelInLabelManager(String labelExpression,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cccf406/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
index 15cfdb0..2ec2de2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
@@ -18,6 +18,11 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
+import static org.apache.hadoop.yarn.exceptions
+        .InvalidResourceRequestException.InvalidResourceType
+        .GREATER_THEN_MAX_ALLOCATION;
+import static org.apache.hadoop.yarn.exceptions
+        .InvalidResourceRequestException.InvalidResourceType.LESS_THAN_ZERO;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -67,6 +72,8 @@ import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.InvalidLabelResourceRequestException;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceBlacklistRequestException;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
+import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException
+        .InvalidResourceType;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
@@ -145,7 +152,7 @@ public class TestSchedulerUtils {
   private void initResourceTypes() {
     Configuration yarnConf = new Configuration();
     yarnConf.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
-        CustomResourceTypesConfigurationProvider.class.getName());
+            CustomResourceTypesConfigurationProvider.class.getName());
     ResourceUtils.resetResourceTypes(yarnConf);
   }
 
@@ -162,51 +169,51 @@ public class TestSchedulerUtils {
                     .build());
   }
 
-  @Test (timeout = 30000)
+  @Test(timeout = 30000)
   public void testNormalizeRequest() {
     ResourceCalculator resourceCalculator = new DefaultResourceCalculator();
-    
+
     final int minMemory = 1024;
     final int maxMemory = 8192;
     Resource minResource = Resources.createResource(minMemory, 0);
     Resource maxResource = Resources.createResource(maxMemory, 0);
-    
+
     ResourceRequest ask = new ResourceRequestPBImpl();
 
     // case negative memory
     ask.setCapability(Resources.createResource(-1024));
     SchedulerUtils.normalizeRequest(ask, resourceCalculator, minResource,
-        maxResource);
+            maxResource);
     assertEquals(minMemory, ask.getCapability().getMemorySize());
 
     // case zero memory
     ask.setCapability(Resources.createResource(0));
     SchedulerUtils.normalizeRequest(ask, resourceCalculator, minResource,
-        maxResource);
+            maxResource);
     assertEquals(minMemory, ask.getCapability().getMemorySize());
 
     // case memory is a multiple of minMemory
     ask.setCapability(Resources.createResource(2 * minMemory));
     SchedulerUtils.normalizeRequest(ask, resourceCalculator, minResource,
-        maxResource);
+            maxResource);
     assertEquals(2 * minMemory, ask.getCapability().getMemorySize());
 
     // case memory is not a multiple of minMemory
     ask.setCapability(Resources.createResource(minMemory + 10));
     SchedulerUtils.normalizeRequest(ask, resourceCalculator, minResource,
-        maxResource);
+            maxResource);
     assertEquals(2 * minMemory, ask.getCapability().getMemorySize());
 
     // case memory is equal to max allowed
     ask.setCapability(Resources.createResource(maxMemory));
     SchedulerUtils.normalizeRequest(ask, resourceCalculator, minResource,
-        maxResource);
+            maxResource);
     assertEquals(maxMemory, ask.getCapability().getMemorySize());
 
     // case memory is just less than max
     ask.setCapability(Resources.createResource(maxMemory - 10));
     SchedulerUtils.normalizeRequest(ask, resourceCalculator, minResource,
-        maxResource);
+            maxResource);
     assertEquals(maxMemory, ask.getCapability().getMemorySize());
 
     // max is not a multiple of min
@@ -214,39 +221,39 @@ public class TestSchedulerUtils {
     ask.setCapability(Resources.createResource(maxMemory - 100));
     // multiple of minMemory > maxMemory, then reduce to maxMemory
     SchedulerUtils.normalizeRequest(ask, resourceCalculator, minResource,
-        maxResource);
+            maxResource);
     assertEquals(maxResource.getMemorySize(),
-        ask.getCapability().getMemorySize());
+            ask.getCapability().getMemorySize());
 
     // ask is more than max
     maxResource = Resources.createResource(maxMemory, 0);
     ask.setCapability(Resources.createResource(maxMemory + 100));
     SchedulerUtils.normalizeRequest(ask, resourceCalculator, minResource,
-        maxResource);
+            maxResource);
     assertEquals(maxResource.getMemorySize(),
-        ask.getCapability().getMemorySize());
+            ask.getCapability().getMemorySize());
   }
 
-  @Test (timeout = 30000)
+  @Test(timeout = 30000)
   public void testNormalizeRequestWithDominantResourceCalculator() {
     ResourceCalculator resourceCalculator = new DominantResourceCalculator();
-    
+
     Resource minResource = Resources.createResource(1024, 1);
     Resource maxResource = Resources.createResource(10240, 10);
     Resource clusterResource = Resources.createResource(10 * 1024, 10);
-    
+
     ResourceRequest ask = new ResourceRequestPBImpl();
 
     // case negative memory/vcores
     ask.setCapability(Resources.createResource(-1024, -1));
     SchedulerUtils.normalizeRequest(
-        ask, resourceCalculator, minResource, maxResource);
+            ask, resourceCalculator, minResource, maxResource);
     assertEquals(minResource, ask.getCapability());
 
     // case zero memory/vcores
     ask.setCapability(Resources.createResource(0, 0));
     SchedulerUtils.normalizeRequest(
-        ask, resourceCalculator, minResource, maxResource);
+            ask, resourceCalculator, minResource, maxResource);
     assertEquals(minResource, ask.getCapability());
     assertEquals(1, ask.getCapability().getVirtualCores());
     assertEquals(1024, ask.getCapability().getMemorySize());
@@ -254,28 +261,28 @@ public class TestSchedulerUtils {
     // case non-zero memory & zero cores
     ask.setCapability(Resources.createResource(1536, 0));
     SchedulerUtils.normalizeRequest(
-        ask, resourceCalculator, minResource, maxResource);
+            ask, resourceCalculator, minResource, maxResource);
     assertEquals(Resources.createResource(2048, 1), ask.getCapability());
     assertEquals(1, ask.getCapability().getVirtualCores());
     assertEquals(2048, ask.getCapability().getMemorySize());
   }
-  
+
   @Test(timeout = 30000)
   public void testValidateResourceRequestWithErrorLabelsPermission()
-      throws IOException {
+          throws IOException {
     // mock queue and scheduler
     YarnScheduler scheduler = mock(YarnScheduler.class);
     Set<String> queueAccessibleNodeLabels = Sets.newHashSet();
     QueueInfo queueInfo = mock(QueueInfo.class);
     when(queueInfo.getQueueName()).thenReturn("queue");
     when(queueInfo.getAccessibleNodeLabels())
-        .thenReturn(queueAccessibleNodeLabels);
+            .thenReturn(queueAccessibleNodeLabels);
     when(scheduler.getQueueInfo(any(String.class), anyBoolean(), anyBoolean()))
-        .thenReturn(queueInfo);
+            .thenReturn(queueInfo);
 
     Resource maxResource = Resources.createResource(
-        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
-        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
+            YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
+            YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
 
     // queue has labels, success cases
     try {
@@ -283,36 +290,36 @@ public class TestSchedulerUtils {
       queueAccessibleNodeLabels.clear();
       queueAccessibleNodeLabels.addAll(Arrays.asList("x", "y"));
       rmContext.getNodeLabelManager().addToCluserNodeLabels(
-          ImmutableSet.of(NodeLabel.newInstance("x"),
-              NodeLabel.newInstance("y")));
+              ImmutableSet.of(NodeLabel.newInstance("x"),
+                      NodeLabel.newInstance("y")));
       Resource resource = Resources.createResource(
-          0,
-          YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
+              0,
+              YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
       ResourceRequest resReq = BuilderUtils.newResourceRequest(
-          mock(Priority.class), ResourceRequest.ANY, resource, 1);
+              mock(Priority.class), ResourceRequest.ANY, resource, 1);
       resReq.setNodeLabelExpression("x");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler, rmContext);
+              scheduler, rmContext);
 
       resReq.setNodeLabelExpression("y");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler, rmContext);
-      
+              scheduler, rmContext);
+
       resReq.setNodeLabelExpression("");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler, rmContext);
-      
+              scheduler, rmContext);
+
       resReq.setNodeLabelExpression(" ");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler, rmContext);
+              scheduler, rmContext);
     } catch (InvalidResourceRequestException e) {
       e.printStackTrace();
       fail("Should be valid when request labels is a subset of queue labels");
     } finally {
       rmContext.getNodeLabelManager().removeFromClusterNodeLabels(
-          Arrays.asList("x", "y"));
+              Arrays.asList("x", "y"));
     }
-    
+
     // same as above, but cluster node labels don't contains label being
     // requested. should fail
     try {
@@ -320,42 +327,42 @@ public class TestSchedulerUtils {
       queueAccessibleNodeLabels.clear();
       queueAccessibleNodeLabels.addAll(Arrays.asList("x", "y"));
       Resource resource = Resources.createResource(
-          0,
-          YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
+              0,
+              YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
       ResourceRequest resReq = BuilderUtils.newResourceRequest(
-          mock(Priority.class), ResourceRequest.ANY, resource, 1);
+              mock(Priority.class), ResourceRequest.ANY, resource, 1);
       resReq.setNodeLabelExpression("x");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler, rmContext);
-      
+              scheduler, rmContext);
+
       fail("Should fail");
     } catch (InvalidResourceRequestException e) {
     }
-    
+
     // queue has labels, failed cases (when ask a label not included by queue)
     try {
       // set queue accessible node labesl to [x, y]
       queueAccessibleNodeLabels.clear();
       queueAccessibleNodeLabels.addAll(Arrays.asList("x", "y"));
       rmContext.getNodeLabelManager().addToCluserNodeLabels(
-          ImmutableSet.of(NodeLabel.newInstance("x"),
-              NodeLabel.newInstance("y")));
-      
+              ImmutableSet.of(NodeLabel.newInstance("x"),
+                      NodeLabel.newInstance("y")));
+
       Resource resource = Resources.createResource(
-          0,
-          YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
+              0,
+              YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
       ResourceRequest resReq = BuilderUtils.newResourceRequest(
-          mock(Priority.class), ResourceRequest.ANY, resource, 1);
+              mock(Priority.class), ResourceRequest.ANY, resource, 1);
       resReq.setNodeLabelExpression("z");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler, rmContext);
+              scheduler, rmContext);
       fail("Should fail");
     } catch (InvalidResourceRequestException e) {
     } finally {
       rmContext.getNodeLabelManager().removeFromClusterNodeLabels(
-          Arrays.asList("x", "y"));
+              Arrays.asList("x", "y"));
     }
-    
+
     // we don't allow specifying more than one node label in a single
     // expression for now
     try {
@@ -363,225 +370,225 @@ public class TestSchedulerUtils {
       queueAccessibleNodeLabels.clear();
       queueAccessibleNodeLabels.addAll(Arrays.asList("x", "y"));
       rmContext.getNodeLabelManager().addToCluserNodeLabels(
-          ImmutableSet.of(NodeLabel.newInstance("x"),
-              NodeLabel.newInstance("y")));
-      
+              ImmutableSet.of(NodeLabel.newInstance("x"),
+                      NodeLabel.newInstance("y")));
+
       Resource resource = Resources.createResource(
-          0,
-          YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
+              0,
+              YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
       ResourceRequest resReq = BuilderUtils.newResourceRequest(
-          mock(Priority.class), ResourceRequest.ANY, resource, 1);
+              mock(Priority.class), ResourceRequest.ANY, resource, 1);
       resReq.setNodeLabelExpression("x && y");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler, rmContext);
+              scheduler, rmContext);
       fail("Should fail");
     } catch (InvalidResourceRequestException e) {
     } finally {
       rmContext.getNodeLabelManager().removeFromClusterNodeLabels(
-          Arrays.asList("x", "y"));
+              Arrays.asList("x", "y"));
     }
-    
+
     // queue doesn't have labels, should succeed (when requesting no label)
     queueAccessibleNodeLabels.clear();
     try {
       // set queue accessible node labels to empty
       queueAccessibleNodeLabels.clear();
-      
+
       Resource resource = Resources.createResource(
-          0,
-          YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
+              0,
+              YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
       ResourceRequest resReq = BuilderUtils.newResourceRequest(
-          mock(Priority.class), ResourceRequest.ANY, resource, 1);
+              mock(Priority.class), ResourceRequest.ANY, resource, 1);
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler, rmContext);
-      
+              scheduler, rmContext);
+
       resReq.setNodeLabelExpression("");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler, rmContext);
-      
+              scheduler, rmContext);
+
       resReq.setNodeLabelExpression("  ");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler, rmContext);
+              scheduler, rmContext);
     } catch (InvalidResourceRequestException e) {
       e.printStackTrace();
       fail("Should be valid when request labels is empty");
     }
-    boolean invalidlabelexception=false;
+    boolean invalidlabelexception = false;
     // queue doesn't have labels, should fail (when requesting any label)
     try {
       // set queue accessible node labels to empty
       queueAccessibleNodeLabels.clear();
-      
+
       rmContext.getNodeLabelManager().addToCluserNodeLabels(
-          ImmutableSet.of(NodeLabel.newInstance("x")));
-      
+              ImmutableSet.of(NodeLabel.newInstance("x")));
+
       Resource resource = Resources.createResource(
-          0,
-          YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
+              0,
+              YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
       ResourceRequest resReq = BuilderUtils.newResourceRequest(
-          mock(Priority.class), ResourceRequest.ANY, resource, 1);
+              mock(Priority.class), ResourceRequest.ANY, resource, 1);
       resReq.setNodeLabelExpression("x");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler, rmContext);
+              scheduler, rmContext);
       fail("Should fail");
     } catch (InvalidLabelResourceRequestException e) {
-      invalidlabelexception=true;
+      invalidlabelexception = true;
     } catch (InvalidResourceRequestException e) {
     } finally {
       rmContext.getNodeLabelManager().removeFromClusterNodeLabels(
-          Arrays.asList("x"));
+              Arrays.asList("x"));
     }
     Assert.assertTrue("InvalidLabelResourceRequestException expected",
-        invalidlabelexception);
+            invalidlabelexception);
     // queue is "*", always succeeds
     try {
       // set queue accessible node labels to empty
       queueAccessibleNodeLabels.clear();
       queueAccessibleNodeLabels.add(RMNodeLabelsManager.ANY);
-      
+
       rmContext.getNodeLabelManager().addToCluserNodeLabels(
-          ImmutableSet.of(NodeLabel.newInstance("x"),
-              NodeLabel.newInstance("y"), NodeLabel.newInstance("z")));
-      
+              ImmutableSet.of(NodeLabel.newInstance("x"),
+                      NodeLabel.newInstance("y"), NodeLabel.newInstance("z")));
+
       Resource resource = Resources.createResource(
-          0,
-          YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
+              0,
+              YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
       ResourceRequest resReq = BuilderUtils.newResourceRequest(
-          mock(Priority.class), ResourceRequest.ANY, resource, 1);
+              mock(Priority.class), ResourceRequest.ANY, resource, 1);
       resReq.setNodeLabelExpression("x");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler, rmContext);
-      
+              scheduler, rmContext);
+
       resReq.setNodeLabelExpression("y");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler, rmContext);
-      
+              scheduler, rmContext);
+
       resReq.setNodeLabelExpression("z");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler, rmContext);
+              scheduler, rmContext);
     } catch (InvalidResourceRequestException e) {
       e.printStackTrace();
       fail("Should be valid when queue can access any labels");
     } finally {
       rmContext.getNodeLabelManager().removeFromClusterNodeLabels(
-          Arrays.asList("x", "y", "z"));
+              Arrays.asList("x", "y", "z"));
     }
-    
+
     // same as above, but cluster node labels don't contain the label, should fail
     try {
       // set queue accessible node labels to empty
       queueAccessibleNodeLabels.clear();
       queueAccessibleNodeLabels.add(RMNodeLabelsManager.ANY);
-      
+
       Resource resource = Resources.createResource(
-          0,
-          YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
+              0,
+              YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
       ResourceRequest resReq = BuilderUtils.newResourceRequest(
-          mock(Priority.class), ResourceRequest.ANY, resource, 1);
+              mock(Priority.class), ResourceRequest.ANY, resource, 1);
       resReq.setNodeLabelExpression("x");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler, rmContext);
+              scheduler, rmContext);
       fail("Should fail");
     } catch (InvalidResourceRequestException e) {
     }
-    
+
     // we don't allow specifying a label for a resource name other than ANY
     try {
       // set queue accessible node labels to [x, y]
       queueAccessibleNodeLabels.clear();
       queueAccessibleNodeLabels.addAll(Arrays.asList("x", "y"));
       rmContext.getNodeLabelManager().addToCluserNodeLabels(
-          ImmutableSet.of(NodeLabel.newInstance("x"),
-              NodeLabel.newInstance("y")));
-      
+              ImmutableSet.of(NodeLabel.newInstance("x"),
+                      NodeLabel.newInstance("y")));
+
       Resource resource = Resources.createResource(
-          0,
-          YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
+              0,
+              YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
       ResourceRequest resReq = BuilderUtils.newResourceRequest(
-          mock(Priority.class), "rack", resource, 1);
+              mock(Priority.class), "rack", resource, 1);
       resReq.setNodeLabelExpression("x");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler, rmContext);
+              scheduler, rmContext);
       fail("Should fail");
     } catch (InvalidResourceRequestException e) {
     } finally {
       rmContext.getNodeLabelManager().removeFromClusterNodeLabels(
-          Arrays.asList("x", "y"));
+              Arrays.asList("x", "y"));
     }
-    
+
     // we don't allow specifying a label for a resource name other than ANY,
     // even if the queue has accessible label = *
     try {
       // set queue accessible node labels to *
       queueAccessibleNodeLabels.clear();
       queueAccessibleNodeLabels.addAll(Arrays
-          .asList(CommonNodeLabelsManager.ANY));
+              .asList(CommonNodeLabelsManager.ANY));
       rmContext.getNodeLabelManager().addToCluserNodeLabels(
-          ImmutableSet.of(NodeLabel.newInstance("x")));
-      
+              ImmutableSet.of(NodeLabel.newInstance("x")));
+
       Resource resource = Resources.createResource(
-          0,
-          YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
+              0,
+              YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
       ResourceRequest resReq = BuilderUtils.newResourceRequest(
-          mock(Priority.class), "rack", resource, 1);
+              mock(Priority.class), "rack", resource, 1);
       resReq.setNodeLabelExpression("x");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler, rmContext);
+              scheduler, rmContext);
       fail("Should fail");
     } catch (InvalidResourceRequestException e) {
     } finally {
       rmContext.getNodeLabelManager().removeFromClusterNodeLabels(
-          Arrays.asList("x"));
+              Arrays.asList("x"));
     }
     try {
       Resource resource = Resources.createResource(0,
-          YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
+              YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
       ResourceRequest resReq1 = BuilderUtils
-          .newResourceRequest(mock(Priority.class), "*", resource, 1, "x");
+              .newResourceRequest(mock(Priority.class), "*", resource, 1, "x");
       SchedulerUtils.normalizeAndvalidateRequest(resReq1, maxResource, "queue",
-          scheduler, rmContext);
+              scheduler, rmContext);
       fail("Should fail");
     } catch (InvalidResourceRequestException e) {
       assertEquals("Invalid label resource request, cluster do not contain , "
-          + "label= x", e.getMessage());
+              + "label= x", e.getMessage());
     }
 
     try {
       rmContext.getYarnConfiguration()
-          .set(YarnConfiguration.NODE_LABELS_ENABLED, "false");
+              .set(YarnConfiguration.NODE_LABELS_ENABLED, "false");
       Resource resource = Resources.createResource(0,
-          YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
+              YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
       ResourceRequest resReq1 = BuilderUtils
-          .newResourceRequest(mock(Priority.class), "*", resource, 1, "x");
+              .newResourceRequest(mock(Priority.class), "*", resource, 1, "x");
       SchedulerUtils.normalizeAndvalidateRequest(resReq1, maxResource, "queue",
-          scheduler, rmContext);
+              scheduler, rmContext);
       Assert.assertEquals(RMNodeLabelsManager.NO_LABEL,
-          resReq1.getNodeLabelExpression());
+              resReq1.getNodeLabelExpression());
     } catch (InvalidResourceRequestException e) {
       assertEquals("Invalid resource request, node label not enabled but "
-          + "request contains label expression", e.getMessage());
+              + "request contains label expression", e.getMessage());
     }
   }
 
-  @Test (timeout = 30000)
+  @Test(timeout = 30000)
   public void testValidateResourceRequest() {
     YarnScheduler mockScheduler = mock(YarnScheduler.class);
 
     Resource maxResource =
-        Resources.createResource(
-            YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
-            YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
+            Resources.createResource(
+                    YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
+                    YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
 
     // zero memory
     try {
       Resource resource =
-          Resources.createResource(0,
-              YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
+              Resources.createResource(0,
+                      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
       ResourceRequest resReq =
-          BuilderUtils.newResourceRequest(mock(Priority.class),
-              ResourceRequest.ANY, resource, 1);
+              BuilderUtils.newResourceRequest(mock(Priority.class),
+                      ResourceRequest.ANY, resource, 1);
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, null,
-          mockScheduler, rmContext);
+              mockScheduler, rmContext);
     } catch (InvalidResourceRequestException e) {
       fail("Zero memory should be accepted");
     }
@@ -589,13 +596,13 @@ public class TestSchedulerUtils {
     // zero vcores
     try {
       Resource resource =
-          Resources.createResource(
-              YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0);
+              Resources.createResource(
+                      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0);
       ResourceRequest resReq =
-          BuilderUtils.newResourceRequest(mock(Priority.class),
-              ResourceRequest.ANY, resource, 1);
+              BuilderUtils.newResourceRequest(mock(Priority.class),
+                      ResourceRequest.ANY, resource, 1);
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, null,
-          mockScheduler, rmContext);
+              mockScheduler, rmContext);
     } catch (InvalidResourceRequestException e) {
       fail("Zero vcores should be accepted");
     }
@@ -603,14 +610,14 @@ public class TestSchedulerUtils {
     // max memory
     try {
       Resource resource =
-          Resources.createResource(
-              YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
-              YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
+              Resources.createResource(
+                      YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
+                      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
       ResourceRequest resReq =
-          BuilderUtils.newResourceRequest(mock(Priority.class),
-              ResourceRequest.ANY, resource, 1);
+              BuilderUtils.newResourceRequest(mock(Priority.class),
+                      ResourceRequest.ANY, resource, 1);
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, null,
-          mockScheduler, rmContext);
+              mockScheduler, rmContext);
     } catch (InvalidResourceRequestException e) {
       fail("Max memory should be accepted");
     }
@@ -618,14 +625,14 @@ public class TestSchedulerUtils {
     // max vcores
     try {
       Resource resource =
-          Resources.createResource(
-              YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
-              YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
+              Resources.createResource(
+                      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
+                      YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
       ResourceRequest resReq =
-          BuilderUtils.newResourceRequest(mock(Priority.class),
-              ResourceRequest.ANY, resource, 1);
+              BuilderUtils.newResourceRequest(mock(Priority.class),
+                      ResourceRequest.ANY, resource, 1);
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, null,
-          mockScheduler, rmContext);
+              mockScheduler, rmContext);
     } catch (InvalidResourceRequestException e) {
       fail("Max vcores should not be accepted");
     }
@@ -633,77 +640,77 @@ public class TestSchedulerUtils {
     // negative memory
     try {
       Resource resource =
-          Resources.createResource(-1,
-              YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
+              Resources.createResource(-1,
+                      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
       ResourceRequest resReq =
-          BuilderUtils.newResourceRequest(mock(Priority.class),
-              ResourceRequest.ANY, resource, 1);
+              BuilderUtils.newResourceRequest(mock(Priority.class),
+                      ResourceRequest.ANY, resource, 1);
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, null,
-          mockScheduler, rmContext);
+              mockScheduler, rmContext);
       fail("Negative memory should not be accepted");
     } catch (InvalidResourceRequestException e) {
-      // expected
+      assertEquals(LESS_THAN_ZERO, e.getInvalidResourceType());
     }
 
     // negative vcores
     try {
       Resource resource =
-          Resources.createResource(
-              YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, -1);
+              Resources.createResource(
+                      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, -1);
       ResourceRequest resReq =
-          BuilderUtils.newResourceRequest(mock(Priority.class),
-              ResourceRequest.ANY, resource, 1);
+              BuilderUtils.newResourceRequest(mock(Priority.class),
+                      ResourceRequest.ANY, resource, 1);
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, null,
-          mockScheduler, rmContext);
+              mockScheduler, rmContext);
       fail("Negative vcores should not be accepted");
     } catch (InvalidResourceRequestException e) {
-      // expected
+      assertEquals(LESS_THAN_ZERO, e.getInvalidResourceType());
     }
 
     // more than max memory
     try {
       Resource resource =
-          Resources.createResource(
-              YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB + 1,
-              YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
+              Resources.createResource(
+                      YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB + 1,
+                      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
       ResourceRequest resReq =
-          BuilderUtils.newResourceRequest(mock(Priority.class),
-              ResourceRequest.ANY, resource, 1);
+              BuilderUtils.newResourceRequest(mock(Priority.class),
+                      ResourceRequest.ANY, resource, 1);
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, null,
-          mockScheduler, rmContext);
+              mockScheduler, rmContext);
       fail("More than max memory should not be accepted");
     } catch (InvalidResourceRequestException e) {
-      // expected
+      assertEquals(GREATER_THEN_MAX_ALLOCATION, e.getInvalidResourceType());
     }
 
     // more than max vcores
     try {
       Resource resource = Resources.createResource(
-          YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
-          YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES + 1);
+              YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
+              YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES + 1);
       ResourceRequest resReq =
-          BuilderUtils.newResourceRequest(mock(Priority.class),
-              ResourceRequest.ANY, resource, 1);
+              BuilderUtils.newResourceRequest(mock(Priority.class),
+                      ResourceRequest.ANY, resource, 1);
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, null,
-          mockScheduler, rmContext);
+              mockScheduler, rmContext);
       fail("More than max vcores should not be accepted");
     } catch (InvalidResourceRequestException e) {
-      // expected
+      assertEquals(GREATER_THEN_MAX_ALLOCATION, e.getInvalidResourceType());
     }
   }
-  
+
   @Test
   public void testValidateResourceBlacklistRequest() throws Exception {
 
     MyContainerManager containerManager = new MyContainerManager();
     final MockRMWithAMS rm =
-        new MockRMWithAMS(new YarnConfiguration(), containerManager);
+            new MockRMWithAMS(new YarnConfiguration(), containerManager);
     rm.start();
 
     MockNM nm1 = rm.registerNode("localhost:1234", 5120);
 
     Map<ApplicationAccessType, String> acls =
-        new HashMap<ApplicationAccessType, String>(2);
+            new HashMap<ApplicationAccessType, String>(2);
     acls.put(ApplicationAccessType.VIEW_APP, "*");
     RMApp app = rm.submitApp(1024, "appname", "appuser", acls);
 
@@ -718,33 +725,33 @@ public class TestSchedulerUtils {
     final YarnRPC rpc = YarnRPC.create(yarnConf);
 
     UserGroupInformation currentUser =
-        UserGroupInformation.createRemoteUser(applicationAttemptId.toString());
+            UserGroupInformation.createRemoteUser(applicationAttemptId.toString());
     Credentials credentials = containerManager.getContainerCredentials();
     final InetSocketAddress rmBindAddress =
-        rm.getApplicationMasterService().getBindAddress();
+            rm.getApplicationMasterService().getBindAddress();
     Token<? extends TokenIdentifier> amRMToken =
-        MockRMWithAMS.setupAndReturnAMRMToken(rmBindAddress,
-          credentials.getAllTokens());
+            MockRMWithAMS.setupAndReturnAMRMToken(rmBindAddress,
+                    credentials.getAllTokens());
     currentUser.addToken(amRMToken);
     ApplicationMasterProtocol client =
-        currentUser.doAs(new PrivilegedAction<ApplicationMasterProtocol>() {
-          @Override
-          public ApplicationMasterProtocol run() {
-            return (ApplicationMasterProtocol) rpc.getProxy(
-              ApplicationMasterProtocol.class, rmBindAddress, yarnConf);
-          }
-        });
+            currentUser.doAs(new PrivilegedAction<ApplicationMasterProtocol>() {
+              @Override
+              public ApplicationMasterProtocol run() {
+                return (ApplicationMasterProtocol) rpc.getProxy(
+                        ApplicationMasterProtocol.class, rmBindAddress, yarnConf);
+              }
+            });
 
     RegisterApplicationMasterRequest request = Records
-        .newRecord(RegisterApplicationMasterRequest.class);
+            .newRecord(RegisterApplicationMasterRequest.class);
     client.registerApplicationMaster(request);
 
     ResourceBlacklistRequest blacklistRequest =
-        ResourceBlacklistRequest.newInstance(
-            Collections.singletonList(ResourceRequest.ANY), null);
+            ResourceBlacklistRequest.newInstance(
+                    Collections.singletonList(ResourceRequest.ANY), null);
 
     AllocateRequest allocateRequest =
-        AllocateRequest.newInstance(0, 0.0f, null, null, blacklistRequest);
+            AllocateRequest.newInstance(0, 0.0f, null, null, blacklistRequest);
     boolean error = false;
     try {
       client.allocate(allocateRequest);
@@ -753,26 +760,26 @@ public class TestSchedulerUtils {
     }
 
     rm.stop();
-    
+
     Assert.assertTrue(
-        "Didn't not catch InvalidResourceBlacklistRequestException", error);
+            "Didn't not catch InvalidResourceBlacklistRequestException", error);
   }
 
   private void waitForLaunchedState(RMAppAttempt attempt)
-      throws InterruptedException {
+          throws InterruptedException {
     int waitCount = 0;
     while (attempt.getAppAttemptState() != RMAppAttemptState.LAUNCHED
-        && waitCount++ < 20) {
+            && waitCount++ < 20) {
       LOG.info("Waiting for AppAttempt to reach LAUNCHED state. "
-          + "Current state is " + attempt.getAppAttemptState());
+              + "Current state is " + attempt.getAppAttemptState());
       Thread.sleep(1000);
     }
     Assert.assertEquals(attempt.getAppAttemptState(),
-        RMAppAttemptState.LAUNCHED);
+            RMAppAttemptState.LAUNCHED);
   }
 
   @Test
-  public void testComparePriorities(){
+  public void testComparePriorities() {
     Priority high = Priority.newInstance(1);
     Priority low = Priority.newInstance(2);
     assertTrue(high.compareTo(low) > 0);
@@ -781,22 +788,22 @@ public class TestSchedulerUtils {
   @Test
   public void testCreateAbnormalContainerStatus() {
     ContainerStatus cd = SchedulerUtils.createAbnormalContainerStatus(
-        ContainerId.newContainerId(ApplicationAttemptId.newInstance(
-          ApplicationId.newInstance(System.currentTimeMillis(), 1), 1), 1), "x");
+            ContainerId.newContainerId(ApplicationAttemptId.newInstance(
+                    ApplicationId.newInstance(System.currentTimeMillis(), 1), 1), 1), "x");
     Assert.assertEquals(ContainerExitStatus.ABORTED, cd.getExitStatus());
   }
 
   @Test
   public void testCreatePreemptedContainerStatus() {
     ContainerStatus cd = SchedulerUtils.createPreemptedContainerStatus(
-        ContainerId.newContainerId(ApplicationAttemptId.newInstance(
-          ApplicationId.newInstance(System.currentTimeMillis(), 1), 1), 1), "x");
+            ContainerId.newContainerId(ApplicationAttemptId.newInstance(
+                    ApplicationId.newInstance(System.currentTimeMillis(), 1), 1), 1), "x");
     Assert.assertEquals(ContainerExitStatus.PREEMPTED, cd.getExitStatus());
   }
-  
-  @Test (timeout = 30000)
+
+  @Test(timeout = 30000)
   public void testNormalizeNodeLabelExpression()
-      throws IOException {
+          throws IOException {
     // mock queue and scheduler
     YarnScheduler scheduler = mock(YarnScheduler.class);
     Set<String> queueAccessibleNodeLabels = Sets.newHashSet();
@@ -805,11 +812,11 @@ public class TestSchedulerUtils {
     when(queueInfo.getAccessibleNodeLabels()).thenReturn(queueAccessibleNodeLabels);
     when(queueInfo.getDefaultNodeLabelExpression()).thenReturn(" x ");
     when(scheduler.getQueueInfo(any(String.class), anyBoolean(), anyBoolean()))
-        .thenReturn(queueInfo);
-    
+            .thenReturn(queueInfo);
+
     Resource maxResource = Resources.createResource(
-        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
-        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
+            YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
+            YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
 
     // queue has labels, success cases
     try {
@@ -817,156 +824,163 @@ public class TestSchedulerUtils {
       queueAccessibleNodeLabels.clear();
       queueAccessibleNodeLabels.addAll(Arrays.asList("x", "y"));
       rmContext.getNodeLabelManager().addToCluserNodeLabels(
-          ImmutableSet.of(NodeLabel.newInstance("x"),
-              NodeLabel.newInstance("y")));
+              ImmutableSet.of(NodeLabel.newInstance("x"),
+                      NodeLabel.newInstance("y")));
       Resource resource = Resources.createResource(
-          0,
-          YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
+              0,
+              YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
       ResourceRequest resReq = BuilderUtils.newResourceRequest(
-          mock(Priority.class), ResourceRequest.ANY, resource, 1);
+              mock(Priority.class), ResourceRequest.ANY, resource, 1);
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler, rmContext);
+              scheduler, rmContext);
       Assert.assertEquals("x", resReq.getNodeLabelExpression());
-      
+
       resReq.setNodeLabelExpression(" y ");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler, rmContext);
+              scheduler, rmContext);
       Assert.assertEquals("y", resReq.getNodeLabelExpression());
     } catch (InvalidResourceRequestException e) {
       e.printStackTrace();
       fail("Should be valid when request labels is a subset of queue labels");
     } finally {
       rmContext.getNodeLabelManager().removeFromClusterNodeLabels(
-          Arrays.asList("x", "y"));
+              Arrays.asList("x", "y"));
     }
   }
 
   @Test
   public void testCustomResourceRequestedUnitIsSmallerThanAvailableUnit()
-      throws InvalidResourceRequestException {
+          throws InvalidResourceRequestException {
     Resource requestedResource =
-        ResourceTypesTestHelper.newResource(1, 1,
-                ImmutableMap.of("custom-resource-1", "11"));
+            ResourceTypesTestHelper.newResource(1, 1,
+                    ImmutableMap.of("custom-resource-1", "11"));
 
     Resource availableResource =
-        ResourceTypesTestHelper.newResource(1, 1,
-                ImmutableMap.of("custom-resource-1", "0G"));
+            ResourceTypesTestHelper.newResource(1, 1,
+                    ImmutableMap.of("custom-resource-1", "0G"));
 
     exception.expect(InvalidResourceRequestException.class);
     exception.expectMessage(InvalidResourceRequestExceptionMessageGenerator
-        .create().withRequestedResourceType("custom-resource-1")
-        .withRequestedResource(requestedResource)
-        .withAvailableAllocation(availableResource)
-        .withMaxAllocation(configuredMaxAllocation).build());
+            .create().withRequestedResourceType("custom-resource-1")
+            .withRequestedResource(requestedResource)
+            .withAvailableAllocation(availableResource)
+            .withMaxAllocation(configuredMaxAllocation)
+            .withInvalidResourceType(GREATER_THEN_MAX_ALLOCATION)
+            .build());
 
     SchedulerUtils.checkResourceRequestAgainstAvailableResource(
-        requestedResource, availableResource);
+            requestedResource, availableResource);
   }
 
   @Test
   public void testCustomResourceRequestedUnitIsSmallerThanAvailableUnit2() {
     Resource requestedResource =
-        ResourceTypesTestHelper.newResource(1, 1,
-                ImmutableMap.of("custom-resource-1", "11"));
+            ResourceTypesTestHelper.newResource(1, 1,
+                    ImmutableMap.of("custom-resource-1", "11"));
 
     Resource availableResource =
-        ResourceTypesTestHelper.newResource(1, 1,
-                ImmutableMap.of("custom-resource-1", "1G"));
+            ResourceTypesTestHelper.newResource(1, 1,
+                    ImmutableMap.of("custom-resource-1", "1G"));
 
     try {
       SchedulerUtils.checkResourceRequestAgainstAvailableResource(
-          requestedResource, availableResource);
+              requestedResource, availableResource);
     } catch (InvalidResourceRequestException e) {
       fail(String.format(
-          "Resource request should be accepted. Requested: %s, available: %s",
-          requestedResource, availableResource));
+              "Resource request should be accepted. Requested: %s, available: %s",
+              requestedResource, availableResource));
     }
   }
 
   @Test
   public void testCustomResourceRequestedUnitIsGreaterThanAvailableUnit()
-      throws InvalidResourceRequestException {
+          throws InvalidResourceRequestException {
     Resource requestedResource =
-        ResourceTypesTestHelper.newResource(1, 1,
-                ImmutableMap.of("custom-resource-1", "1M"));
+            ResourceTypesTestHelper.newResource(1, 1,
+                    ImmutableMap.of("custom-resource-1", "1M"));
 
     Resource availableResource = ResourceTypesTestHelper.newResource(1, 1,
-        ImmutableMap.<String, String> builder().put("custom-resource-1", "120k")
-            .build());
+            ImmutableMap.<String, String>builder().put("custom-resource-1",
+                    "120k")
+                    .build());
 
     exception.expect(InvalidResourceRequestException.class);
     exception.expectMessage(InvalidResourceRequestExceptionMessageGenerator
-        .create().withRequestedResourceType("custom-resource-1")
-        .withRequestedResource(requestedResource)
-        .withAvailableAllocation(availableResource)
-        .withMaxAllocation(configuredMaxAllocation).build());
+            .create().withRequestedResourceType("custom-resource-1")
+            .withRequestedResource(requestedResource)
+            .withAvailableAllocation(availableResource)
+            .withMaxAllocation(configuredMaxAllocation)
+            .withInvalidResourceType(GREATER_THEN_MAX_ALLOCATION)
+            .build());
     SchedulerUtils.checkResourceRequestAgainstAvailableResource(
-        requestedResource, availableResource);
+            requestedResource, availableResource);
   }
 
   @Test
   public void testCustomResourceRequestedUnitIsGreaterThanAvailableUnit2() {
     Resource requestedResource = ResourceTypesTestHelper.newResource(1, 1,
-        ImmutableMap.<String, String> builder().put("custom-resource-1", "11M")
-            .build());
+            ImmutableMap.<String, String>builder().put("custom-resource-1", "11M")
+                    .build());
 
     Resource availableResource =
-        ResourceTypesTestHelper.newResource(1, 1,
-                ImmutableMap.of("custom-resource-1", "1G"));
+            ResourceTypesTestHelper.newResource(1, 1,
+                    ImmutableMap.of("custom-resource-1", "1G"));
 
     try {
       SchedulerUtils.checkResourceRequestAgainstAvailableResource(
-          requestedResource, availableResource);
+              requestedResource, availableResource);
     } catch (InvalidResourceRequestException e) {
       fail(String.format(
-          "Resource request should be accepted. Requested: %s, available: %s",
-          requestedResource, availableResource));
+              "Resource request should be accepted. Requested: %s, available: %s",
+              requestedResource, availableResource));
     }
   }
 
   @Test
   public void testCustomResourceRequestedUnitIsSameAsAvailableUnit() {
     Resource requestedResource = ResourceTypesTestHelper.newResource(1, 1,
-        ImmutableMap.of("custom-resource-1", "11M"));
+            ImmutableMap.of("custom-resource-1", "11M"));
 
     Resource availableResource = ResourceTypesTestHelper.newResource(1, 1,
-        ImmutableMap.of("custom-resource-1", "100M"));
+            ImmutableMap.of("custom-resource-1", "100M"));
 
     try {
       SchedulerUtils.checkResourceRequestAgainstAvailableResource(
-          requestedResource, availableResource);
+              requestedResource, availableResource);
     } catch (InvalidResourceRequestException e) {
       fail(String.format(
-          "Resource request should be accepted. Requested: %s, available: %s",
-          requestedResource, availableResource));
+              "Resource request should be accepted. Requested: %s, available: %s",
+              requestedResource, availableResource));
     }
   }
 
   @Test
   public void testCustomResourceRequestedUnitIsSameAsAvailableUnit2()
-      throws InvalidResourceRequestException {
+          throws InvalidResourceRequestException {
     Resource requestedResource = ResourceTypesTestHelper.newResource(1, 1,
-        ImmutableMap.of("custom-resource-1", "110M"));
+            ImmutableMap.of("custom-resource-1", "110M"));
 
     Resource availableResource = ResourceTypesTestHelper.newResource(1, 1,
-        ImmutableMap.of("custom-resource-1", "100M"));
+            ImmutableMap.of("custom-resource-1", "100M"));
 
     exception.expect(InvalidResourceRequestException.class);
     exception.expectMessage(InvalidResourceRequestExceptionMessageGenerator
-        .create().withRequestedResourceType("custom-resource-1")
-        .withRequestedResource(requestedResource)
-        .withAvailableAllocation(availableResource)
-        .withMaxAllocation(configuredMaxAllocation).build());
+            .create().withRequestedResourceType("custom-resource-1")
+            .withRequestedResource(requestedResource)
+            .withAvailableAllocation(availableResource)
+            .withInvalidResourceType(GREATER_THEN_MAX_ALLOCATION)
+            .withMaxAllocation(configuredMaxAllocation)
+            .build());
 
     SchedulerUtils.checkResourceRequestAgainstAvailableResource(
-        requestedResource, availableResource);
+            requestedResource, availableResource);
   }
 
   public static void waitSchedulerApplicationAttemptStopped(
-      AbstractYarnScheduler ys,
-      ApplicationAttemptId attemptId) throws InterruptedException {
+          AbstractYarnScheduler ys,
+          ApplicationAttemptId attemptId) throws InterruptedException {
     SchedulerApplicationAttempt schedulerApp =
-        ys.getApplicationAttempt(attemptId);
+            ys.getApplicationAttempt(attemptId);
     if (null == schedulerApp) {
       return;
     }
@@ -986,35 +1000,35 @@ public class TestSchedulerUtils {
   }
 
   public static SchedulerApplication<SchedulerApplicationAttempt>
-      verifyAppAddedAndRemovedFromScheduler(
+  verifyAppAddedAndRemovedFromScheduler(
           Map<ApplicationId, SchedulerApplication<SchedulerApplicationAttempt>> applications,
           EventHandler<SchedulerEvent> handler, String queueName) {
 
     ApplicationId appId =
-        ApplicationId.newInstance(System.currentTimeMillis(), 1);
+            ApplicationId.newInstance(System.currentTimeMillis(), 1);
     AppAddedSchedulerEvent appAddedEvent =
-        new AppAddedSchedulerEvent(appId, queueName, "user");
+            new AppAddedSchedulerEvent(appId, queueName, "user");
     handler.handle(appAddedEvent);
     SchedulerApplication<SchedulerApplicationAttempt> app =
-        applications.get(appId);
+            applications.get(appId);
     // verify application is added.
     Assert.assertNotNull(app);
     Assert.assertEquals("user", app.getUser());
 
     AppRemovedSchedulerEvent appRemoveEvent =
-        new AppRemovedSchedulerEvent(appId, RMAppState.FINISHED);
+            new AppRemovedSchedulerEvent(appId, RMAppState.FINISHED);
     handler.handle(appRemoveEvent);
     Assert.assertNull(applications.get(appId));
     return app;
   }
-  
+
   private static RMContext getMockRMContext() {
     RMContext rmContext = mock(RMContext.class);
     RMNodeLabelsManager nlm = new NullRMNodeLabelsManager();
     nlm.init(new Configuration(false));
     when(rmContext.getYarnConfiguration()).thenReturn(conf);
     rmContext.getYarnConfiguration().set(YarnConfiguration.NODE_LABELS_ENABLED,
-        "true");
+            "true");
     when(rmContext.getNodeLabelManager()).thenReturn(nlm);
     return rmContext;
   }
@@ -1026,6 +1040,7 @@ public class TestSchedulerUtils {
     private Resource availableAllocation;
     private Resource configuredMaxAllowedAllocation;
     private String resourceType;
+    private InvalidResourceType invalidResourceType;
 
     InvalidResourceRequestExceptionMessageGenerator(StringBuilder sb) {
       this.sb = sb;
@@ -1033,7 +1048,7 @@ public class TestSchedulerUtils {
 
     public static InvalidResourceRequestExceptionMessageGenerator create() {
       return new InvalidResourceRequestExceptionMessageGenerator(
-          new StringBuilder());
+              new StringBuilder());
     }
 
     InvalidResourceRequestExceptionMessageGenerator withRequestedResource(
@@ -1055,23 +1070,46 @@ public class TestSchedulerUtils {
     }
 
     InvalidResourceRequestExceptionMessageGenerator withMaxAllocation(
-        Resource r) {
+            Resource r) {
       this.configuredMaxAllowedAllocation = r;
       return this;
     }
 
+    InvalidResourceRequestExceptionMessageGenerator
+    withInvalidResourceType(InvalidResourceType invalidResourceType) {
+      this.invalidResourceType = invalidResourceType;
+      return this;
+    }
+
     public String build() {
-      return sb
-          .append("Invalid resource request, requested resource type=[")
-          .append(resourceType).append("]")
-          .append(" < 0 or greater than maximum allowed allocation. ")
-          .append("Requested resource=").append(requestedResource).append(", ")
-          .append("maximum allowed allocation=").append(availableAllocation)
-          .append(", please note that maximum allowed allocation is calculated "
-              + "by scheduler based on maximum resource of " +
-                  "registered NodeManagers, which might be less than " +
-                  "configured maximum allocation=")
-          .append(configuredMaxAllowedAllocation).toString();
+      if (invalidResourceType == LESS_THAN_ZERO) {
+        return sb.append("Invalid resource request! " +
+                "Cannot allocate containers as " +
+                "requested resource is less than 0! ")
+                .append("Requested resource type=[")
+                .append(resourceType).append("]")
+                .append(", Requested resource=")
+                .append(requestedResource).toString();
+
+      } else if (invalidResourceType == GREATER_THEN_MAX_ALLOCATION) {
+        return sb.append("Invalid resource request! " +
+                "Cannot allocate containers as "
+                + "requested resource is greater than " +
+                "maximum allowed allocation. ")
+                .append("Requested resource type=[").append(resourceType)
+                .append("], ")
+                .append("Requested resource=").append(requestedResource)
+                .append(", maximum allowed allocation=")
+                .append(availableAllocation)
+                .append(", please note that maximum allowed allocation is " +
+                        "calculated by scheduler based on maximum resource " +
+                        "of registered NodeManagers, which might be less " +
+                        "than configured maximum allocation=")
+                .append(configuredMaxAllowedAllocation)
+                .toString();
+      }
+      throw new IllegalStateException("Wrong type of InvalidResourceType is " +
+              "detected!");
     }
   }
 }




[03/50] [abbrv] hadoop git commit: YARN-8596. Allow SQLFederationStateStore to submit the same app in the same subcluster. Contributed by Giovanni Matteo Fumarola.

Posted by bo...@apache.org.
YARN-8596. Allow SQLFederationStateStore to submit the same app in the same subcluster. Contributed by Giovanni Matteo Fumarola.
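
In effect, adding an application home that already exists for the same
subcluster is now treated as a no-op instead of an error: the stored procedure
reports ROWCOUNT == 0, the store logs it, and the response simply carries the
already-registered home subcluster. A minimal sketch of the expected behaviour
(hedged; it mirrors the new test added below and assumes a FederationStateStore
instance named stateStore):

    // Sketch only: inserting the same (application, subcluster) pair twice no
    // longer fails; the second call returns the already-stored home subcluster.
    ApplicationId appId = ApplicationId.newInstance(1, 1);
    SubClusterId sc1 = SubClusterId.newInstance("SC1");
    ApplicationHomeSubCluster ahsc =
        ApplicationHomeSubCluster.newInstance(appId, sc1);

    stateStore.addApplicationHomeSubCluster(
        AddApplicationHomeSubClusterRequest.newInstance(ahsc));     // first add
    AddApplicationHomeSubClusterResponse response =
        stateStore.addApplicationHomeSubCluster(
            AddApplicationHomeSubClusterRequest.newInstance(ahsc)); // same pair again

    Assert.assertEquals(sc1, response.getHomeSubCluster());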


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/79091cf7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/79091cf7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/79091cf7

Branch: refs/heads/YARN-7402
Commit: 79091cf76f6e966f64ac1d65e43e95782695e678
Parents: 2cccf40
Author: Inigo Goiri <in...@apache.org>
Authored: Fri Jul 27 15:23:57 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Fri Jul 27 15:23:57 2018 -0700

----------------------------------------------------------------------
 .../store/impl/SQLFederationStateStore.java      | 14 +++++++-------
 .../store/impl/FederationStateStoreBaseTest.java | 19 +++++++++++++++++++
 2 files changed, 26 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/79091cf7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
index e62dcaf..273118a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
@@ -564,13 +564,13 @@ public class SQLFederationStateStore implements FederationStateStore {
         // Check the ROWCOUNT value, if it is equal to 0 it means the call
         // did not add a new application into FederationStateStore
         if (cstmt.getInt(4) == 0) {
-          String errMsg = "The application " + appId
-              + " was not insert into the StateStore";
-          FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
-        }
-        // Check the ROWCOUNT value, if it is different from 1 it means the call
-        // had a wrong behavior. Maybe the database is not set correctly.
-        if (cstmt.getInt(4) != 1) {
+          LOG.info(
+              "The application {} was not inserted in the StateStore because it"
+                  + " was already present in SubCluster {}",
+              appId, subClusterHome);
+        } else if (cstmt.getInt(4) != 1) {
+          // Check the ROWCOUNT value, if it is different from 1 it means the
+          // call had a wrong behavior. Maybe the database is not set correctly.
           String errMsg = "Wrong behavior during the insertion of SubCluster "
               + subClusterId;
           FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79091cf7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java
index 15cc0f0..b17f870 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java
@@ -282,6 +282,25 @@ public abstract class FederationStateStoreBaseTest {
   }
 
   @Test
+  public void testAddApplicationHomeSubClusterAppAlreadyExistsInTheSameSC()
+      throws Exception {
+    ApplicationId appId = ApplicationId.newInstance(1, 1);
+    SubClusterId subClusterId1 = SubClusterId.newInstance("SC1");
+    addApplicationHomeSC(appId, subClusterId1);
+
+    ApplicationHomeSubCluster ahsc2 =
+        ApplicationHomeSubCluster.newInstance(appId, subClusterId1);
+
+    AddApplicationHomeSubClusterResponse response =
+        stateStore.addApplicationHomeSubCluster(
+            AddApplicationHomeSubClusterRequest.newInstance(ahsc2));
+
+    Assert.assertEquals(subClusterId1, response.getHomeSubCluster());
+    Assert.assertEquals(subClusterId1, queryApplicationHomeSC(appId));
+
+  }
+
+  @Test
   public void testDeleteApplicationHomeSubCluster() throws Exception {
     ApplicationId appId = ApplicationId.newInstance(1, 1);
     SubClusterId subClusterId = SubClusterId.newInstance("SC");




[25/50] [abbrv] hadoop git commit: HDDS-271. Create a block iterator to iterate blocks in a container. Contributed by Bharat Viswanadham.

Posted by bo...@apache.org.
HDDS-271. Create a block iterator to iterate blocks in a container. Contributed by Bharat Viswanadham.
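
The patch introduces a generic BlockIterator interface and a
KeyValueBlockIterator implementation that walks a container's metadata store
and returns entries matching a key prefix filter (the normal-key filter by
default). A minimal usage sketch (hedged: the container id and base path are
made up for illustration, error handling is omitted, and only the constructor
and methods added by this patch are used):

    // Sketch only: iterate all blocks of one KeyValue container.
    long containerId = 105L;                                     // hypothetical id
    File containerBase = new File("/data/hdds/containers/105");  // hypothetical path

    KeyValueBlockIterator blockIter =
        new KeyValueBlockIterator(containerId, containerBase);
    blockIter.seekToFirst();
    while (blockIter.hasNext()) {
      KeyData block = blockIter.nextBlock();
      // entries passing the default normal-key filter come back as KeyData
      System.out.println("block localID = " + block.getLocalID());
    }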


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c835fc08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c835fc08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c835fc08

Branch: refs/heads/YARN-7402
Commit: c835fc08adf556d2f848f2f241155cbfe3375695
Parents: c7ebcd7
Author: Bharat Viswanadham <bh...@apache.org>
Authored: Tue Jul 31 16:26:09 2018 -0700
Committer: Bharat Viswanadham <bh...@apache.org>
Committed: Tue Jul 31 16:26:09 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/utils/MetaStoreIterator.java  |   2 +-
 .../common/interfaces/BlockIterator.java        |  57 ++++
 .../keyvalue/KeyValueBlockIterator.java         | 148 ++++++++++
 .../keyvalue/TestKeyValueBlockIterator.java     | 275 +++++++++++++++++++
 4 files changed, 481 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c835fc08/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java
index 758d194..52d0a3e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java
@@ -24,7 +24,7 @@ import java.util.Iterator;
  * Iterator for MetaDataStore DB.
  * @param <T>
  */
-interface MetaStoreIterator<T> extends Iterator<T> {
+public interface MetaStoreIterator<T> extends Iterator<T> {
 
   /**
    * seek to first entry.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c835fc08/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BlockIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BlockIterator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BlockIterator.java
new file mode 100644
index 0000000..f6931e3
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BlockIterator.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.interfaces;
+
+
+import java.io.IOException;
+import java.util.NoSuchElementException;
+
+/**
+ * Block Iterator for container. Each container type needs to implement this
+ * interface.
+ * @param <T>
+ */
+public interface BlockIterator<T> {
+
+  /**
+   * Checks if the iterator has a next element. Returns true if it does,
+   * otherwise false.
+   * @return boolean
+   */
+  boolean hasNext() throws IOException;
+
+  /**
+   * Seek to first entry.
+   */
+  void seekToFirst();
+
+  /**
+   * Seek to last entry.
+   */
+  void seekToLast();
+
+  /**
+   * Get next block in the container.
+   * @return next block or null if there are no blocks
+   * @throws IOException
+   */
+  T nextBlock() throws IOException, NoSuchElementException;
+
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c835fc08/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java
new file mode 100644
index 0000000..f800223
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.keyvalue;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
+import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
+import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
+import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil;
+import org.apache.hadoop.utils.MetaStoreIterator;
+import org.apache.hadoop.utils.MetadataKeyFilters;
+import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
+import org.apache.hadoop.utils.MetadataStore;
+import org.apache.hadoop.utils.MetadataStore.KeyValue;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.NoSuchElementException;
+
+
+/**
+ * Block Iterator for KeyValue Container. This block iterator returns blocks
+ * which match the {@link MetadataKeyFilters.KeyPrefixFilter}. If no
+ * filter is specified, the default filter used is
+ * {@link MetadataKeyFilters#getNormalKeyFilter()}
+ */
+@InterfaceAudience.Public
+public class KeyValueBlockIterator implements BlockIterator<KeyData> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(
+      KeyValueBlockIterator.class);
+
+  private MetaStoreIterator<KeyValue> blockIterator;
+  private static KeyPrefixFilter defaultBlockFilter = MetadataKeyFilters
+      .getNormalKeyFilter();
+  private KeyPrefixFilter blockFilter;
+  private KeyData nextBlock;
+  private long containerId;
+
+  /**
+   * KeyValueBlockIterator to iterate blocks in a container, using the
+   * default normal-key filter.
+   * @param id - container id
+   * @param path - container base path
+   * @throws IOException
+   */
+  public KeyValueBlockIterator(long id, File path)
+      throws IOException {
+    this(id, path, defaultBlockFilter);
+  }
+
+  /**
+   * KeyValueBlockIterator to iterate blocks in a container matching a filter.
+   * @param id - container id
+   * @param path - container base path
+   * @param filter - filter to apply to block keys
+   * @throws IOException
+   */
+  public KeyValueBlockIterator(long id, File path, KeyPrefixFilter filter)
+      throws IOException {
+    containerId = id;
+    File metadataPath = new File(path, OzoneConsts.METADATA);
+    File containerFile = ContainerUtils.getContainerFile(metadataPath
+        .getParentFile());
+    ContainerData containerData = ContainerDataYaml.readContainerFile(
+        containerFile);
+    KeyValueContainerData keyValueContainerData = (KeyValueContainerData)
+        containerData;
+    keyValueContainerData.setDbFile(KeyValueContainerLocationUtil
+        .getContainerDBFile(metadataPath, containerId));
+    MetadataStore metadataStore = KeyUtils.getDB(keyValueContainerData, new
+        OzoneConfiguration());
+    blockIterator = metadataStore.iterator();
+    blockFilter = filter;
+  }
+
+  /**
+   * Returns the next block matching the configured filter.
+   * @return the next matching block
+   * @throws NoSuchElementException if no more matching blocks remain
+   */
+  @Override
+  public KeyData nextBlock() throws IOException, NoSuchElementException {
+    if (nextBlock != null) {
+      KeyData currentBlock = nextBlock;
+      nextBlock = null;
+      return currentBlock;
+    }
+    if(hasNext()) {
+      return nextBlock();
+    }
+    throw new NoSuchElementException("Block Iterator reached end for " +
+        "ContainerID " + containerId);
+  }
+
+  @Override
+  public boolean hasNext() throws IOException {
+    if (nextBlock != null) {
+      return true;
+    }
+    if (blockIterator.hasNext()) {
+      KeyValue block = blockIterator.next();
+      if (blockFilter.filterKey(null, block.getKey(), null)) {
+        nextBlock = KeyUtils.getKeyData(block.getValue());
+        LOG.trace("Block matching with filter found: blockID is : {} for " +
+            "containerID {}", nextBlock.getLocalID(), containerId);
+        return true;
+      }
+      return hasNext();
+    }
+    return false;
+  }
+
+  @Override
+  public void seekToFirst() {
+    nextBlock = null;
+    blockIterator.seekToFirst();
+  }
+
+  @Override
+  public void seekToLast() {
+    nextBlock = null;
+    blockIterator.seekToLast();
+  }
+}
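
A minimal usage sketch of the new iterator (not part of the patch; the
container id and base path below are placeholders, and the container is
assumed to have been created already):

    import java.io.File;
    import java.io.IOException;

    import org.apache.hadoop.ozone.container.common.helpers.KeyData;
    import org.apache.hadoop.ozone.container.keyvalue.KeyValueBlockIterator;
    import org.apache.hadoop.utils.MetadataKeyFilters;

    public class BlockIteratorSketch {
      public static void main(String[] args) throws IOException {
        long containerId = 100L;                        // placeholder id
        File containerBasePath = new File("/tmp/c100"); // placeholder path

        // The two-argument constructor applies the normal-key filter, so
        // blocks marked for deletion are skipped.
        KeyValueBlockIterator blocks =
            new KeyValueBlockIterator(containerId, containerBasePath);
        while (blocks.hasNext()) {
          KeyData block = blocks.nextBlock();
          System.out.println("block localID = " + block.getLocalID());
        }

        // An explicit filter walks only blocks marked for deletion.
        KeyValueBlockIterator deletedBlocks = new KeyValueBlockIterator(
            containerId, containerBasePath,
            MetadataKeyFilters.getDeletingKeyFilter());
        while (deletedBlocks.hasNext()) {
          System.out.println("deleted block localID = "
              + deletedBlocks.nextBlock().getLocalID());
        }
      }
    }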

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c835fc08/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
new file mode 100644
index 0000000..ba57c3f
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
@@ -0,0 +1,275 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.keyvalue;
+
+import com.google.common.primitives.Longs;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
+import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.utils.MetadataKeyFilters;
+import org.apache.hadoop.utils.MetadataStore;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.File;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.UUID;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .OZONE_METADATA_STORE_IMPL_LEVELDB;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .OZONE_METADATA_STORE_IMPL_ROCKSDB;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * This class is used to test KeyValue container block iterator.
+ */
+@RunWith(Parameterized.class)
+public class TestKeyValueBlockIterator {
+
+  private KeyValueContainer container;
+  private KeyValueContainerData containerData;
+  private VolumeSet volumeSet;
+  private Configuration conf;
+  private File testRoot;
+
+  private final String storeImpl;
+
+  public TestKeyValueBlockIterator(String metadataImpl) {
+    this.storeImpl = metadataImpl;
+  }
+
+  @Parameterized.Parameters
+  public static Collection<Object[]> data() {
+    return Arrays.asList(new Object[][] {
+        {OZONE_METADATA_STORE_IMPL_LEVELDB},
+        {OZONE_METADATA_STORE_IMPL_ROCKSDB}});
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    testRoot = GenericTestUtils.getRandomizedTestDir();
+    conf = new OzoneConfiguration();
+    conf.set(HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
+    conf.set(OZONE_METADATA_STORE_IMPL, storeImpl);
+    volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
+  }
+
+
+  @After
+  public void tearDown() {
+    volumeSet.shutdown();
+    FileUtil.fullyDelete(testRoot);
+  }
+
+  @Test
+  public void testKeyValueBlockIteratorWithMixedBlocks() throws Exception {
+
+    long containerID = 100L;
+    int deletedBlocks = 5;
+    int normalBlocks = 5;
+    createContainerWithBlocks(containerID, normalBlocks, deletedBlocks);
+    String containerPath = new File(containerData.getMetadataPath())
+        .getParent();
+    KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
+        containerID, new File(containerPath));
+
+    int counter = 0;
+    while(keyValueBlockIterator.hasNext()) {
+      KeyData keyData = keyValueBlockIterator.nextBlock();
+      assertEquals(keyData.getLocalID(), counter++);
+    }
+
+    assertFalse(keyValueBlockIterator.hasNext());
+
+    keyValueBlockIterator.seekToFirst();
+    counter = 0;
+    while(keyValueBlockIterator.hasNext()) {
+      KeyData keyData = keyValueBlockIterator.nextBlock();
+      assertEquals(keyData.getLocalID(), counter++);
+    }
+    assertFalse(keyValueBlockIterator.hasNext());
+
+    try {
+      keyValueBlockIterator.nextBlock();
+    } catch (NoSuchElementException ex) {
+      GenericTestUtils.assertExceptionContains("Block Iterator reached end " +
+          "for ContainerID " + containerID, ex);
+    }
+  }
+
+  @Test
+  public void testKeyValueBlockIteratorWithNextBlock() throws Exception {
+    long containerID = 101L;
+    createContainerWithBlocks(containerID, 2, 0);
+    String containerPath = new File(containerData.getMetadataPath())
+        .getParent();
+    KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
+        containerID, new File(containerPath));
+    long blockID = 0L;
+    assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID());
+    assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID());
+
+    try {
+      keyValueBlockIterator.nextBlock();
+    } catch (NoSuchElementException ex) {
+      GenericTestUtils.assertExceptionContains("Block Iterator reached end " +
+          "for ContainerID " + containerID, ex);
+    }
+  }
+
+  @Test
+  public void testKeyValueBlockIteratorWithHasNext() throws Exception {
+    long containerID = 102L;
+    createContainerWithBlocks(containerID, 2, 0);
+    String containerPath = new File(containerData.getMetadataPath())
+        .getParent();
+    KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
+        containerID, new File(containerPath));
+    long blockID = 0L;
+
+    // Calling hasNext() multiple times should not advance the iterator.
+    assertTrue(keyValueBlockIterator.hasNext());
+    assertTrue(keyValueBlockIterator.hasNext());
+    assertTrue(keyValueBlockIterator.hasNext());
+    assertTrue(keyValueBlockIterator.hasNext());
+    assertTrue(keyValueBlockIterator.hasNext());
+    assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID());
+
+    assertTrue(keyValueBlockIterator.hasNext());
+    assertTrue(keyValueBlockIterator.hasNext());
+    assertTrue(keyValueBlockIterator.hasNext());
+    assertTrue(keyValueBlockIterator.hasNext());
+    assertTrue(keyValueBlockIterator.hasNext());
+    assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID());
+
+    keyValueBlockIterator.seekToLast();
+    assertTrue(keyValueBlockIterator.hasNext());
+    assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID());
+
+    keyValueBlockIterator.seekToFirst();
+    blockID = 0L;
+    assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID());
+    assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID());
+
+    try {
+      keyValueBlockIterator.nextBlock();
+    } catch (NoSuchElementException ex) {
+      GenericTestUtils.assertExceptionContains("Block Iterator reached end " +
+          "for ContainerID " + containerID, ex);
+    }
+
+
+  }
+
+  @Test
+  public void testKeyValueBlockIteratorWithFilter() throws Exception {
+    long containerId = 103L;
+    int deletedBlocks = 5;
+    int normalBlocks = 5;
+    createContainerWithBlocks(containerId, normalBlocks, deletedBlocks);
+    String containerPath = new File(containerData.getMetadataPath())
+        .getParent();
+    KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
+        containerId, new File(containerPath), MetadataKeyFilters
+        .getDeletingKeyFilter());
+
+    int counter = 5;
+    while(keyValueBlockIterator.hasNext()) {
+      KeyData keyData = keyValueBlockIterator.nextBlock();
+      assertEquals(keyData.getLocalID(), counter++);
+    }
+  }
+
+  @Test
+  public void testKeyValueBlockIteratorWithOnlyDeletedBlocks() throws
+      Exception {
+    long containerId = 104L;
+    createContainerWithBlocks(containerId, 0, 5);
+    String containerPath = new File(containerData.getMetadataPath())
+        .getParent();
+    KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
+        containerId, new File(containerPath));
+    // As all blocks are deleted blocks, none of them match the default
+    // normal-key filter.
+    assertFalse(keyValueBlockIterator.hasNext());
+  }
+
+  /**
+   * Creates a container with the specified number of normal blocks and
+   * deleted blocks. Normal blocks are inserted first, followed by deleted
+   * blocks (keys carrying the deleting prefix).
+   * @param containerId - ID of the container to create
+   * @param normalBlocks - number of normal (readable) blocks to insert
+   * @param deletedBlocks - number of blocks marked for deletion to insert
+   * @throws Exception
+   */
+  private void createContainerWithBlocks(long containerId, int
+      normalBlocks, int deletedBlocks) throws
+      Exception {
+    containerData = new KeyValueContainerData(containerId, 1);
+    container = new KeyValueContainer(containerData, conf);
+    container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID
+        .randomUUID().toString());
+    MetadataStore metadataStore = KeyUtils.getDB(containerData, conf);
+
+    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
+    ChunkInfo info = new ChunkInfo("chunkfile", 0, 1024);
+    chunkList.add(info.getProtoBufMessage());
+
+    for (int i=0; i<normalBlocks; i++) {
+      BlockID blockID = new BlockID(containerId, i);
+      KeyData keyData = new KeyData(blockID);
+      keyData.setChunks(chunkList);
+      metadataStore.put(Longs.toByteArray(blockID.getLocalID()), keyData
+          .getProtoBufMessage().toByteArray());
+    }
+
+    for (int i = normalBlocks; i < normalBlocks + deletedBlocks; i++) {
+      BlockID blockID = new BlockID(containerId, i);
+      KeyData keyData = new KeyData(blockID);
+      keyData.setChunks(chunkList);
+      metadataStore.put(DFSUtil.string2Bytes(OzoneConsts
+          .DELETING_KEY_PREFIX + blockID.getLocalID()), keyData
+          .getProtoBufMessage().toByteArray());
+    }
+  }
+
+}




[09/50] [abbrv] hadoop git commit: HDDS-248. Refactor DatanodeContainerProtocol.proto. Contributed by Hanisha Koneru.

Posted by bo...@apache.org.
HDDS-248. Refactor DatanodeContainerProtocol.proto. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/007e6f51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/007e6f51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/007e6f51

Branch: refs/heads/YARN-7402
Commit: 007e6f51135adb5864f6bfc258010fd09576387b
Parents: feb795b
Author: Bharat Viswanadham <bh...@apache.org>
Authored: Sat Jul 28 14:50:43 2018 -0700
Committer: Bharat Viswanadham <bh...@apache.org>
Committed: Sat Jul 28 14:57:11 2018 -0700

----------------------------------------------------------------------
 .../scm/storage/ContainerProtocolCalls.java     | 37 +++++---
 .../main/proto/DatanodeContainerProtocol.proto  | 96 +++++++++-----------
 .../container/common/impl/HddsDispatcher.java   | 51 +----------
 .../CloseContainerCommandHandler.java           |  8 +-
 .../server/ratis/ContainerStateMachine.java     |  6 +-
 .../keyvalue/KeyValueContainerData.java         |  9 --
 .../container/keyvalue/KeyValueHandler.java     | 16 +---
 .../container/ozoneimpl/OzoneContainer.java     |  2 +-
 .../container/keyvalue/TestKeyValueHandler.java | 12 ++-
 .../scm/cli/container/InfoContainerHandler.java |  1 -
 .../ozone/container/ContainerTestHelper.java    | 59 ++++++------
 .../common/impl/TestCloseContainerHandler.java  | 18 ++--
 .../genesis/BenchMarkDatanodeDispatcher.java    | 19 ++--
 13 files changed, 148 insertions(+), 186 deletions(-)
----------------------------------------------------------------------
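
The essential change in this patch is that containerID (and datanodeUuid)
become required top-level fields of ContainerCommandRequestProto, so the
dispatcher reads the container id directly from the request header instead
of digging it out of each per-command message. As a quick illustration (not
part of the patch; the values are placeholders), a close-container request
is now built like this:

    import java.util.UUID;

    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
        .ContainerCommandRequestProto;

    public class CloseContainerRequestSketch {
      public static ContainerCommandRequestProto build(long containerID,
          String datanodeUuid) {
        return ContainerCommandRequestProto.newBuilder()
            .setCmdType(ContainerProtos.Type.CloseContainer)
            .setContainerID(containerID)   // now a top-level required field
            .setCloseContainer(
                ContainerProtos.CloseContainerRequestProto.getDefaultInstance())
            .setTraceID(UUID.randomUUID().toString())
            .setDatanodeUuid(datanodeUuid)
            .build();
      }
    }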


http://git-wip-us.apache.org/repos/asf/hadoop/blob/007e6f51/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index 36cdfc9..abad9e3 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -29,6 +29,8 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .CloseContainerRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .DatanodeBlockID;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .GetKeyRequestProto;
@@ -86,15 +88,18 @@ public final class ContainerProtocolCalls  {
         .newBuilder()
         .setBlockID(datanodeBlockID);
     String id = xceiverClient.getPipeline().getLeader().getUuidString();
+
     ContainerCommandRequestProto request = ContainerCommandRequestProto
         .newBuilder()
         .setCmdType(Type.GetKey)
+        .setContainerID(datanodeBlockID.getContainerID())
         .setTraceID(traceID)
         .setDatanodeUuid(id)
         .setGetKey(readKeyRequest)
         .build();
     ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
     validateContainerResponse(response);
+
     return response.getGetKey();
   }
 
@@ -118,7 +123,9 @@ public final class ContainerProtocolCalls  {
     String id = xceiverClient.getPipeline().getLeader().getUuidString();
     ContainerCommandRequestProto request =
         ContainerCommandRequestProto.newBuilder()
-            .setCmdType(Type.GetCommittedBlockLength).setTraceID(traceID)
+            .setCmdType(Type.GetCommittedBlockLength)
+            .setContainerID(blockID.getContainerID())
+            .setTraceID(traceID)
             .setDatanodeUuid(id)
             .setGetCommittedBlockLength(getBlockLengthRequestBuilder).build();
     ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
@@ -143,6 +150,7 @@ public final class ContainerProtocolCalls  {
     ContainerCommandRequestProto request = ContainerCommandRequestProto
         .newBuilder()
         .setCmdType(Type.PutKey)
+        .setContainerID(containerKeyData.getBlockID().getContainerID())
         .setTraceID(traceID)
         .setDatanodeUuid(id)
         .setPutKey(createKeyRequest)
@@ -171,6 +179,7 @@ public final class ContainerProtocolCalls  {
     ContainerCommandRequestProto request = ContainerCommandRequestProto
         .newBuilder()
         .setCmdType(Type.ReadChunk)
+        .setContainerID(blockID.getContainerID())
         .setTraceID(traceID)
         .setDatanodeUuid(id)
         .setReadChunk(readChunkRequest)
@@ -202,6 +211,7 @@ public final class ContainerProtocolCalls  {
     ContainerCommandRequestProto request = ContainerCommandRequestProto
         .newBuilder()
         .setCmdType(Type.WriteChunk)
+        .setContainerID(blockID.getContainerID())
         .setTraceID(traceID)
         .setDatanodeUuid(id)
         .setWriteChunk(writeChunkRequest)
@@ -250,6 +260,7 @@ public final class ContainerProtocolCalls  {
     ContainerCommandRequestProto request =
         ContainerCommandRequestProto.newBuilder()
             .setCmdType(Type.PutSmallFile)
+            .setContainerID(blockID.getContainerID())
             .setTraceID(traceID)
             .setDatanodeUuid(id)
             .setPutSmallFile(putSmallFileRequest)
@@ -270,7 +281,6 @@ public final class ContainerProtocolCalls  {
     ContainerProtos.CreateContainerRequestProto.Builder createRequest =
         ContainerProtos.CreateContainerRequestProto
             .newBuilder();
-    createRequest.setContainerID(containerID);
     createRequest.setContainerType(ContainerProtos.ContainerType
         .KeyValueContainer);
 
@@ -278,6 +288,7 @@ public final class ContainerProtocolCalls  {
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
     request.setCmdType(ContainerProtos.Type.CreateContainer);
+    request.setContainerID(containerID);
     request.setCreateContainer(createRequest.build());
     request.setDatanodeUuid(id);
     request.setTraceID(traceID);
@@ -298,12 +309,13 @@ public final class ContainerProtocolCalls  {
       boolean force, String traceID) throws IOException {
     ContainerProtos.DeleteContainerRequestProto.Builder deleteRequest =
         ContainerProtos.DeleteContainerRequestProto.newBuilder();
-    deleteRequest.setContainerID(containerID);
     deleteRequest.setForceDelete(force);
     String id = client.getPipeline().getLeader().getUuidString();
+
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
     request.setCmdType(ContainerProtos.Type.DeleteContainer);
+    request.setContainerID(containerID);
     request.setDeleteContainer(deleteRequest);
     request.setTraceID(traceID);
     request.setDatanodeUuid(id);
@@ -322,15 +334,13 @@ public final class ContainerProtocolCalls  {
    */
   public static void closeContainer(XceiverClientSpi client,
       long containerID, String traceID) throws IOException {
-    ContainerProtos.CloseContainerRequestProto.Builder closeRequest =
-        ContainerProtos.CloseContainerRequestProto.newBuilder();
-    closeRequest.setContainerID(containerID);
-
     String id = client.getPipeline().getLeader().getUuidString();
+
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
     request.setCmdType(Type.CloseContainer);
-    request.setCloseContainer(closeRequest);
+    request.setContainerID(containerID);
+    request.setCloseContainer(CloseContainerRequestProto.getDefaultInstance());
     request.setTraceID(traceID);
     request.setDatanodeUuid(id);
     ContainerCommandResponseProto response =
@@ -348,19 +358,19 @@ public final class ContainerProtocolCalls  {
   public static ReadContainerResponseProto readContainer(
       XceiverClientSpi client, long containerID,
       String traceID) throws IOException {
-    ReadContainerRequestProto.Builder readRequest =
-        ReadContainerRequestProto.newBuilder();
-    readRequest.setContainerID(containerID);
     String id = client.getPipeline().getLeader().getUuidString();
+
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
     request.setCmdType(Type.ReadContainer);
-    request.setReadContainer(readRequest);
+    request.setContainerID(containerID);
+    request.setReadContainer(ReadContainerRequestProto.getDefaultInstance());
     request.setDatanodeUuid(id);
     request.setTraceID(traceID);
     ContainerCommandResponseProto response =
         client.sendCommand(request.build());
     validateContainerResponse(response);
+
     return response.getReadContainer();
   }
 
@@ -383,15 +393,18 @@ public final class ContainerProtocolCalls  {
             .newBuilder().setKey(getKey)
             .build();
     String id = client.getPipeline().getLeader().getUuidString();
+
     ContainerCommandRequestProto request = ContainerCommandRequestProto
         .newBuilder()
         .setCmdType(Type.GetSmallFile)
+        .setContainerID(blockID.getContainerID())
         .setTraceID(traceID)
         .setDatanodeUuid(id)
         .setGetSmallFile(getSmallFileRequest)
         .build();
     ContainerCommandResponseProto response = client.sendCommand(request);
     validateContainerResponse(response);
+
     return response.getGetSmallFile();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/007e6f51/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 6969fa6..af06346 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -165,80 +165,81 @@ enum ContainerLifeCycleState {
 }
 
 message ContainerCommandRequestProto {
-  required Type cmdType = 1; // Type of the command
+  required   Type cmdType = 1; // Type of the command
 
   // A string that identifies this command, we generate  Trace ID in Ozone
   // frontend and this allows us to trace that command all over ozone.
-  optional string traceID = 2;
+  optional   string traceID = 2;
+
+  required   int64 containerID = 3;
+  required   string datanodeUuid = 4;
 
   // One of the following command is available when the corresponding
   // cmdType is set. At the protocol level we allow only
   // one command in each packet.
   // TODO : Upgrade to Protobuf 2.6 or later.
-  optional   CreateContainerRequestProto createContainer = 3;
-  optional   ReadContainerRequestProto readContainer = 4;
-  optional   UpdateContainerRequestProto updateContainer = 5;
-  optional   DeleteContainerRequestProto deleteContainer = 6;
-  optional   ListContainerRequestProto listContainer = 7;
+  optional   CreateContainerRequestProto createContainer = 5;
+  optional   ReadContainerRequestProto readContainer = 6;
+  optional   UpdateContainerRequestProto updateContainer = 7;
+  optional   DeleteContainerRequestProto deleteContainer = 8;
+  optional   ListContainerRequestProto listContainer = 9;
+  optional   CloseContainerRequestProto closeContainer = 10;
+
+  optional   PutKeyRequestProto putKey = 11;
+  optional   GetKeyRequestProto getKey = 12;
+  optional   DeleteKeyRequestProto deleteKey = 13;
+  optional   ListKeyRequestProto listKey = 14;
 
-  optional   PutKeyRequestProto putKey = 8;
-  optional   GetKeyRequestProto getKey = 9;
-  optional   DeleteKeyRequestProto deleteKey = 10;
-  optional   ListKeyRequestProto listKey = 11;
+  optional   ReadChunkRequestProto readChunk = 15;
+  optional   WriteChunkRequestProto writeChunk = 16;
+  optional   DeleteChunkRequestProto deleteChunk = 17;
+  optional   ListChunkRequestProto listChunk = 18;
 
-  optional   ReadChunkRequestProto readChunk = 12;
-  optional   WriteChunkRequestProto writeChunk = 13;
-  optional   DeleteChunkRequestProto deleteChunk = 14;
-  optional   ListChunkRequestProto listChunk = 15;
+  optional   PutSmallFileRequestProto putSmallFile = 19;
+  optional   GetSmallFileRequestProto getSmallFile = 20;
 
-  optional   PutSmallFileRequestProto putSmallFile = 16;
-  optional   GetSmallFileRequestProto getSmallFile = 17;
-  optional   CloseContainerRequestProto closeContainer = 18;
-  optional   GetCommittedBlockLengthRequestProto getCommittedBlockLength = 19;
-  required   string datanodeUuid = 20;
+  optional   GetCommittedBlockLengthRequestProto getCommittedBlockLength = 21;
 }
 
 message ContainerCommandResponseProto {
-  required Type cmdType = 1;
-  optional string traceID = 2;
+  required   Type cmdType = 1;
+  optional   string traceID = 2;
 
-  optional   CreateContainerResponseProto createContainer = 3;
-  optional   ReadContainerResponseProto readContainer = 4;
-  optional   UpdateContainerResponseProto updateContainer = 5;
-  optional   DeleteContainerResponseProto deleteContainer = 6;
-  optional   ListContainerResponseProto listContainer = 7;
+  required   Result result = 3;
+  optional   string message = 4;
 
-  optional   PutKeyResponseProto putKey = 8;
-  optional   GetKeyResponseProto getKey = 9;
-  optional   DeleteKeyResponseProto deleteKey = 10;
-  optional   ListKeyResponseProto listKey = 11;
+  optional   CreateContainerResponseProto createContainer = 5;
+  optional   ReadContainerResponseProto readContainer = 6;
+  optional   UpdateContainerResponseProto updateContainer = 7;
+  optional   DeleteContainerResponseProto deleteContainer = 8;
+  optional   ListContainerResponseProto listContainer = 9;
+  optional   CloseContainerResponseProto closeContainer = 10;
 
-  optional  WriteChunkResponseProto writeChunk = 12;
-  optional  ReadChunkResponseProto readChunk = 13;
-  optional  DeleteChunkResponseProto deleteChunk = 14;
-  optional  ListChunkResponseProto listChunk = 15;
+  optional   PutKeyResponseProto putKey = 11;
+  optional   GetKeyResponseProto getKey = 12;
+  optional   DeleteKeyResponseProto deleteKey = 13;
+  optional   ListKeyResponseProto listKey = 14;
 
-  required Result result = 17;
-  optional string message = 18;
+  optional   WriteChunkResponseProto writeChunk = 15;
+  optional   ReadChunkResponseProto readChunk = 16;
+  optional   DeleteChunkResponseProto deleteChunk = 17;
+  optional   ListChunkResponseProto listChunk = 18;
 
-  optional PutSmallFileResponseProto putSmallFile = 19;
-  optional GetSmallFileResponseProto getSmallFile = 20;
-  optional CloseContainerResponseProto closeContainer = 21;
-  optional GetCommittedBlockLengthResponseProto getCommittedBlockLength = 22;
+  optional   PutSmallFileResponseProto putSmallFile = 19;
+  optional   GetSmallFileResponseProto getSmallFile = 20;
 
+  optional GetCommittedBlockLengthResponseProto getCommittedBlockLength = 21;
 }
 
 message ContainerData {
   required int64 containerID = 1;
   repeated KeyValue metadata = 2;
-  optional string dbPath = 3;
   optional string containerPath = 4;
   optional int64 bytesUsed = 6;
   optional int64 size = 7;
-  optional int64 keyCount = 8;
+  optional int64 blockCount = 8;
   optional ContainerLifeCycleState state = 9 [default = OPEN];
   optional ContainerType containerType = 10 [default = KeyValueContainer];
-  optional string containerDBType = 11;
 }
 
 enum ContainerType {
@@ -248,7 +249,6 @@ enum ContainerType {
 
 // Container Messages.
 message  CreateContainerRequestProto {
-  required int64 containerID = 1;
   repeated KeyValue metadata = 2;
   optional ContainerType containerType = 3 [default = KeyValueContainer];
 }
@@ -257,7 +257,6 @@ message  CreateContainerResponseProto {
 }
 
 message  ReadContainerRequestProto {
-  required int64 containerID = 1;
 }
 
 message  ReadContainerResponseProto {
@@ -265,7 +264,6 @@ message  ReadContainerResponseProto {
 }
 
 message  UpdateContainerRequestProto {
-  required int64 containerID = 1;
   repeated KeyValue metadata = 2;
   optional bool forceUpdate = 3 [default = false];
 }
@@ -274,7 +272,6 @@ message  UpdateContainerResponseProto {
 }
 
 message  DeleteContainerRequestProto {
-  required int64 containerID = 1;
   optional bool forceDelete = 2 [default = false];
 }
 
@@ -282,7 +279,6 @@ message  DeleteContainerResponseProto {
 }
 
 message  ListContainerRequestProto {
-  required int64 startContainerID = 1;
   optional uint32 count = 2; // Max Results to return
 }
 
@@ -291,7 +287,6 @@ message  ListContainerResponseProto {
 }
 
 message CloseContainerRequestProto {
-  required int64 containerID = 1;
 }
 
 message CloseContainerResponseProto {
@@ -341,7 +336,6 @@ message   DeleteKeyResponseProto {
 }
 
 message  ListKeyRequestProto {
-  required int64 containerID = 1;
   optional int64 startLocalID = 2;
   required uint32 count = 3;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/007e6f51/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index 6d11abb..3d418e5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -98,13 +98,16 @@ public class HddsDispatcher implements ContainerDispatcher {
     long startTime = System.nanoTime();
     ContainerProtos.Type cmdType = msg.getCmdType();
     try {
-      long containerID = getContainerID(msg);
+      long containerID = msg.getContainerID();
 
       metrics.incContainerOpsMetrics(cmdType);
       if (cmdType != ContainerProtos.Type.CreateContainer) {
         container = getContainer(containerID);
         containerType = getContainerType(container);
       } else {
+        if (!msg.hasCreateContainer()) {
+          return ContainerUtils.malformedRequest(msg);
+        }
         containerType = msg.getCreateContainer().getContainerType();
       }
     } catch (StorageContainerException ex) {
@@ -143,52 +146,6 @@ public class HddsDispatcher implements ContainerDispatcher {
     }
   }
 
-  private long getContainerID(ContainerCommandRequestProto request)
-      throws StorageContainerException {
-    ContainerProtos.Type cmdType = request.getCmdType();
-
-    switch(cmdType) {
-    case CreateContainer:
-      return request.getCreateContainer().getContainerID();
-    case ReadContainer:
-      return request.getReadContainer().getContainerID();
-    case UpdateContainer:
-      return request.getUpdateContainer().getContainerID();
-    case DeleteContainer:
-      return request.getDeleteContainer().getContainerID();
-    case ListContainer:
-      return request.getListContainer().getStartContainerID();
-    case CloseContainer:
-      return request.getCloseContainer().getContainerID();
-    case PutKey:
-      return request.getPutKey().getKeyData().getBlockID().getContainerID();
-    case GetKey:
-      return request.getGetKey().getBlockID().getContainerID();
-    case DeleteKey:
-      return request.getDeleteKey().getBlockID().getContainerID();
-    case ListKey:
-      return request.getListKey().getContainerID();
-    case ReadChunk:
-      return request.getReadChunk().getBlockID().getContainerID();
-    case DeleteChunk:
-      return request.getDeleteChunk().getBlockID().getContainerID();
-    case WriteChunk:
-      return request.getWriteChunk().getBlockID().getContainerID();
-    case ListChunk:
-      return request.getListChunk().getBlockID().getContainerID();
-    case PutSmallFile:
-      return request.getPutSmallFile().getKey().getKeyData().getBlockID()
-          .getContainerID();
-    case GetSmallFile:
-      return request.getGetSmallFile().getKey().getBlockID().getContainerID();
-    case GetCommittedBlockLength:
-      return request.getGetCommittedBlockLength().getBlockID().getContainerID();
-    }
-
-    throw new StorageContainerException(
-        ContainerProtos.Result.UNSUPPORTED_REQUEST);
-  }
-
   @VisibleForTesting
   public Container getContainer(long containerID)
       throws StorageContainerException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/007e6f51/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
index f58cbae..a3bddfc 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
@@ -76,14 +76,12 @@ public class CloseContainerCommandHandler implements CommandHandler {
       HddsProtos.ReplicationType replicationType =
           closeContainerProto.getReplicationType();
 
-      ContainerProtos.CloseContainerRequestProto.Builder closeRequest =
-          ContainerProtos.CloseContainerRequestProto.newBuilder();
-      closeRequest.setContainerID(containerID);
-
       ContainerProtos.ContainerCommandRequestProto.Builder request =
           ContainerProtos.ContainerCommandRequestProto.newBuilder();
       request.setCmdType(ContainerProtos.Type.CloseContainer);
-      request.setCloseContainer(closeRequest);
+      request.setContainerID(containerID);
+      request.setCloseContainer(
+          ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
       request.setTraceID(UUID.randomUUID().toString());
       request.setDatanodeUuid(
           context.getParent().getDatanodeDetails().getUuidString());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/007e6f51/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index fc7635e..ac7aa57 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -207,8 +207,7 @@ public class ContainerStateMachine extends BaseStateMachine {
 
   private CompletableFuture<Message> handleCreateContainer(
       ContainerCommandRequestProto requestProto) {
-    long containerID =
-        requestProto.getCreateContainer().getContainerID();
+    long containerID = requestProto.getContainerID();
     createContainerFutureMap.
         computeIfAbsent(containerID, k -> new CompletableFuture<>());
     return CompletableFuture.completedFuture(() -> ByteString.EMPTY);
@@ -264,8 +263,7 @@ public class ContainerStateMachine extends BaseStateMachine {
       } else {
         Message message = runCommand(requestProto);
         if (cmdType == ContainerProtos.Type.CreateContainer) {
-          long containerID =
-              requestProto.getCreateContainer().getContainerID();
+          long containerID = requestProto.getContainerID();
           createContainerFutureMap.remove(containerID).complete(message);
         }
         return CompletableFuture.completedFuture(message);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/007e6f51/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
index 34035c8..0705cf4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
@@ -177,7 +177,6 @@ public class KeyValueContainerData extends ContainerData {
     ContainerProtos.ContainerData.Builder builder = ContainerProtos
         .ContainerData.newBuilder();
     builder.setContainerID(this.getContainerID());
-    builder.setDbPath(this.getDbFile().getPath());
     builder.setContainerPath(this.getMetadataPath());
     builder.setState(this.getState());
 
@@ -196,10 +195,6 @@ public class KeyValueContainerData extends ContainerData {
       builder.setContainerType(ContainerProtos.ContainerType.KeyValueContainer);
     }
 
-    if(this.getContainerDBType() != null) {
-      builder.setContainerDBType(containerDBType);
-    }
-
     return builder.build();
   }
 
@@ -239,10 +234,6 @@ public class KeyValueContainerData extends ContainerData {
       data.setBytesUsed(protoData.getBytesUsed());
     }
 
-    if(protoData.hasContainerDBType()) {
-      data.setContainerDBType(protoData.getContainerDBType());
-    }
-
     return data;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/007e6f51/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 0b26a14..a4e124b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -231,13 +231,7 @@ public class KeyValueHandler extends Handler {
     // container would be created here.
     Preconditions.checkArgument(kvContainer == null);
 
-    CreateContainerRequestProto createContainerReq =
-        request.getCreateContainer();
-    long containerID = createContainerReq.getContainerID();
-    if (createContainerReq.hasContainerType()) {
-      Preconditions.checkArgument(createContainerReq.getContainerType()
-          .equals(ContainerType.KeyValueContainer));
-    }
+    long containerID = request.getContainerID();
 
     KeyValueContainerData newContainerData = new KeyValueContainerData(
         containerID, maxContainerSizeGB);
@@ -381,15 +375,15 @@ public class KeyValueHandler extends Handler {
     try {
       checkContainerOpen(kvContainer);
 
+      KeyValueContainerData kvData = kvContainer.getContainerData();
+
       // remove the container from open block map once, all the blocks
       // have been committed and the container is closed
-      kvContainer.getContainerData()
-          .setState(ContainerProtos.ContainerLifeCycleState.CLOSING);
+      kvData.setState(ContainerProtos.ContainerLifeCycleState.CLOSING);
       commitPendingKeys(kvContainer);
       kvContainer.close();
       // make sure the the container open keys from BlockMap gets removed
-      openContainerBlockMap.removeContainer(
-          request.getCloseContainer().getContainerID());
+      openContainerBlockMap.removeContainer(kvData.getContainerID());
     } catch (StorageContainerException ex) {
       return ContainerUtils.logAndReturnError(LOG, ex, request);
     } catch (IOException ex) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/007e6f51/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index 8f067d9..30fe113 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -196,7 +196,7 @@ public class OzoneContainer {
     ContainerProtos.Type type = request.getCmdType();
     switch (type) {
     case CloseContainer:
-      return request.getCloseContainer().getContainerID();
+      return request.getContainerID();
       // Right now, we handle only closeContainer via queuing it over the
       // over the XceiVerServer. For all other commands we throw Illegal
       // argument exception here. Will need to extend the switch cases

http://git-wip-us.apache.org/repos/asf/hadoop/blob/007e6f51/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index 42ec54f..8e8a1be 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
+import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TestRule;
@@ -55,6 +56,7 @@ public class TestKeyValueHandler {
   private final String baseDir = MiniDFSCluster.getBaseDirectory();
   private final String volume = baseDir + "disk1";
 
+  private static final long DUMMY_CONTAINER_ID = 9999;
 
   @Test
   /**
@@ -74,8 +76,13 @@ public class TestKeyValueHandler {
 
     // Test Create Container Request handling
     ContainerCommandRequestProto createContainerRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.CreateContainer);
-
+        ContainerProtos.ContainerCommandRequestProto.newBuilder()
+            .setCmdType(ContainerProtos.Type.CreateContainer)
+            .setContainerID(DUMMY_CONTAINER_ID)
+            .setDatanodeUuid(DATANODE_UUID)
+            .setCreateContainer(ContainerProtos.CreateContainerRequestProto
+                .getDefaultInstance())
+            .build();
     dispatcher.dispatch(createContainerRequest);
     Mockito.verify(handler, times(1)).handleCreateContainer(
         any(ContainerCommandRequestProto.class), any());
@@ -191,6 +198,7 @@ public class TestKeyValueHandler {
     ContainerCommandRequestProto request =
         ContainerProtos.ContainerCommandRequestProto.newBuilder()
             .setCmdType(cmdType)
+            .setContainerID(DUMMY_CONTAINER_ID)
             .setDatanodeUuid(DATANODE_UUID)
             .build();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/007e6f51/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
index 3716ace..89215fa 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
@@ -81,7 +81,6 @@ public class InfoContainerHandler extends OzoneCommandHandler {
         containerData.getState() == ContainerLifeCycleState.OPEN ? "OPEN" :
             "CLOSED";
     logOut("Container State: %s", openStatus);
-    logOut("Container DB Path: %s", containerData.getDbPath());
     logOut("Container Path: %s", containerData.getContainerPath());
 
     // Output meta data.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/007e6f51/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
index d25b73e..f3980a5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
@@ -220,6 +220,7 @@ public final class ContainerTestHelper {
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
     request.setCmdType(ContainerProtos.Type.WriteChunk);
+    request.setContainerID(blockID.getContainerID());
     request.setWriteChunk(writeRequest);
     request.setTraceID(UUID.randomUUID().toString());
     request.setDatanodeUuid(pipeline.getLeader().getUuidString());
@@ -261,6 +262,7 @@ public final class ContainerTestHelper {
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
     request.setCmdType(ContainerProtos.Type.PutSmallFile);
+    request.setContainerID(blockID.getContainerID());
     request.setPutSmallFile(smallFileRequest);
     request.setTraceID(UUID.randomUUID().toString());
     request.setDatanodeUuid(pipeline.getLeader().getUuidString());
@@ -279,6 +281,7 @@ public final class ContainerTestHelper {
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
     request.setCmdType(ContainerProtos.Type.GetSmallFile);
+    request.setContainerID(getKey.getGetKey().getBlockID().getContainerID());
     request.setGetSmallFile(smallFileRequest);
     request.setTraceID(UUID.randomUUID().toString());
     request.setDatanodeUuid(pipeline.getLeader().getUuidString());
@@ -308,6 +311,7 @@ public final class ContainerTestHelper {
     ContainerCommandRequestProto.Builder newRequest =
         ContainerCommandRequestProto.newBuilder();
     newRequest.setCmdType(ContainerProtos.Type.ReadChunk);
+    newRequest.setContainerID(readRequest.getBlockID().getContainerID());
     newRequest.setReadChunk(readRequest);
     newRequest.setTraceID(UUID.randomUUID().toString());
     newRequest.setDatanodeUuid(pipeline.getLeader().getUuidString());
@@ -340,6 +344,7 @@ public final class ContainerTestHelper {
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
     request.setCmdType(ContainerProtos.Type.DeleteChunk);
+    request.setContainerID(writeRequest.getBlockID().getContainerID());
     request.setDeleteChunk(deleteRequest);
     request.setTraceID(UUID.randomUUID().toString());
     request.setDatanodeUuid(pipeline.getLeader().getUuidString());
@@ -356,15 +361,12 @@ public final class ContainerTestHelper {
       long containerID, Pipeline pipeline) throws IOException {
     LOG.trace("addContainer: {}", containerID);
 
-    ContainerProtos.CreateContainerRequestProto.Builder createRequest =
-        ContainerProtos.CreateContainerRequestProto
-            .newBuilder();
-    createRequest.setContainerID(containerID);
-
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
     request.setCmdType(ContainerProtos.Type.CreateContainer);
-    request.setCreateContainer(createRequest);
+    request.setContainerID(containerID);
+    request.setCreateContainer(
+        ContainerProtos.CreateContainerRequestProto.getDefaultInstance());
     request.setTraceID(UUID.randomUUID().toString());
     request.setDatanodeUuid(pipeline.getLeader().getUuidString());
 
@@ -385,7 +387,6 @@ public final class ContainerTestHelper {
       long containerID, Map<String, String> metaData) throws IOException {
     ContainerProtos.UpdateContainerRequestProto.Builder updateRequestBuilder =
         ContainerProtos.UpdateContainerRequestProto.newBuilder();
-    updateRequestBuilder.setContainerID(containerID);
     String[] keys = metaData.keySet().toArray(new String[]{});
     for(int i=0; i<keys.length; i++) {
       KeyValue.Builder kvBuilder = KeyValue.newBuilder();
@@ -399,6 +400,7 @@ public final class ContainerTestHelper {
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
     request.setCmdType(ContainerProtos.Type.UpdateContainer);
+    request.setContainerID(containerID);
     request.setUpdateContainer(updateRequestBuilder.build());
     request.setTraceID(UUID.randomUUID().toString());
     request.setDatanodeUuid(pipeline.getLeader().getUuidString());
@@ -412,14 +414,13 @@ public final class ContainerTestHelper {
    */
   public static ContainerCommandResponseProto
       getCreateContainerResponse(ContainerCommandRequestProto request) {
-    ContainerProtos.CreateContainerResponseProto.Builder createResponse =
-        ContainerProtos.CreateContainerResponseProto.newBuilder();
 
     ContainerCommandResponseProto.Builder response =
         ContainerCommandResponseProto.newBuilder();
     response.setCmdType(ContainerProtos.Type.CreateContainer);
     response.setTraceID(request.getTraceID());
-    response.setCreateContainer(createResponse.build());
+    response.setCreateContainer(
+        ContainerProtos.CreateContainerResponseProto.getDefaultInstance());
     response.setResult(ContainerProtos.Result.SUCCESS);
     return response.build();
   }
@@ -448,6 +449,7 @@ public final class ContainerTestHelper {
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
     request.setCmdType(ContainerProtos.Type.PutKey);
+    request.setContainerID(keyData.getContainerID());
     request.setPutKey(putRequest);
     request.setTraceID(UUID.randomUUID().toString());
     request.setDatanodeUuid(pipeline.getLeader().getUuidString());
@@ -474,6 +476,7 @@ public final class ContainerTestHelper {
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
     request.setCmdType(ContainerProtos.Type.GetKey);
+    request.setContainerID(blockID.getContainerID());
     request.setGetKey(getRequest);
     request.setTraceID(UUID.randomUUID().toString());
     request.setDatanodeUuid(pipeline.getLeader().getUuidString());
@@ -501,14 +504,16 @@ public final class ContainerTestHelper {
    */
   public static ContainerCommandRequestProto getDeleteKeyRequest(
       Pipeline pipeline, ContainerProtos.PutKeyRequestProto putKeyRequest) {
-    LOG.trace("deleteKey: name={}",
-        putKeyRequest.getKeyData().getBlockID());
+    ContainerProtos.DatanodeBlockID blockID = putKeyRequest.getKeyData()
+        .getBlockID();
+    LOG.trace("deleteKey: name={}", blockID);
     ContainerProtos.DeleteKeyRequestProto.Builder delRequest =
         ContainerProtos.DeleteKeyRequestProto.newBuilder();
-    delRequest.setBlockID(putKeyRequest.getKeyData().getBlockID());
+    delRequest.setBlockID(blockID);
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
     request.setCmdType(ContainerProtos.Type.DeleteKey);
+    request.setContainerID(blockID.getContainerID());
     request.setDeleteKey(delRequest);
     request.setTraceID(UUID.randomUUID().toString());
     request.setDatanodeUuid(pipeline.getLeader().getUuidString());
@@ -523,12 +528,12 @@ public final class ContainerTestHelper {
    */
   public static ContainerCommandRequestProto getCloseContainer(
       Pipeline pipeline, long containerID) {
-    ContainerProtos.CloseContainerRequestProto closeRequest =
-        ContainerProtos.CloseContainerRequestProto.newBuilder().
-            setContainerID(containerID).build();
     ContainerProtos.ContainerCommandRequestProto cmd =
-        ContainerCommandRequestProto.newBuilder().setCmdType(ContainerProtos
-            .Type.CloseContainer).setCloseContainer(closeRequest)
+        ContainerCommandRequestProto.newBuilder()
+            .setCmdType(ContainerProtos.Type.CloseContainer)
+            .setContainerID(containerID)
+            .setCloseContainer(
+                ContainerProtos.CloseContainerRequestProto.getDefaultInstance())
             .setTraceID(UUID.randomUUID().toString())
             .setDatanodeUuid(pipeline.getLeader().getUuidString())
             .build();
@@ -545,14 +550,14 @@ public final class ContainerTestHelper {
   public static ContainerCommandRequestProto getRequestWithoutTraceId(
       Pipeline pipeline, long containerID) {
     Preconditions.checkNotNull(pipeline);
-    ContainerProtos.CloseContainerRequestProto closeRequest =
-            ContainerProtos.CloseContainerRequestProto.newBuilder().
-                setContainerID(containerID).build();
     ContainerProtos.ContainerCommandRequestProto cmd =
-            ContainerCommandRequestProto.newBuilder().setCmdType(ContainerProtos
-                    .Type.CloseContainer).setCloseContainer(closeRequest)
-                    .setDatanodeUuid(pipeline.getLeader().getUuidString())
-                    .build();
+        ContainerCommandRequestProto.newBuilder()
+            .setCmdType(ContainerProtos.Type.CloseContainer)
+            .setContainerID(containerID)
+            .setCloseContainer(
+                ContainerProtos.CloseContainerRequestProto.getDefaultInstance())
+            .setDatanodeUuid(pipeline.getLeader().getUuidString())
+            .build();
     return cmd;
   }
 
@@ -566,10 +571,12 @@ public final class ContainerTestHelper {
     Preconditions.checkNotNull(pipeline);
     ContainerProtos.DeleteContainerRequestProto deleteRequest =
         ContainerProtos.DeleteContainerRequestProto.newBuilder().
-            setContainerID(containerID).
             setForceDelete(forceDelete).build();
     return ContainerCommandRequestProto.newBuilder()
         .setCmdType(ContainerProtos.Type.DeleteContainer)
+        .setContainerID(containerID)
+        .setDeleteContainer(
+            ContainerProtos.DeleteContainerRequestProto.getDefaultInstance())
         .setDeleteContainer(deleteRequest)
         .setTraceID(UUID.randomUUID().toString())
         .setDatanodeUuid(pipeline.getLeader().getUuidString())

http://git-wip-us.apache.org/repos/asf/hadoop/blob/007e6f51/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
index 6d1c086..d67cf88 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
@@ -108,16 +108,14 @@ public class TestCloseContainerHandler {
 
   private long createContainer() {
     long testContainerId = ContainerTestHelper.getTestContainerID();
-    ContainerProtos.CreateContainerRequestProto createReq =
-        ContainerProtos.CreateContainerRequestProto.newBuilder()
-            .setContainerID(testContainerId)
-            .build();
 
     ContainerProtos.ContainerCommandRequestProto request =
         ContainerProtos.ContainerCommandRequestProto.newBuilder()
             .setCmdType(ContainerProtos.Type.CreateContainer)
+            .setContainerID(testContainerId)
             .setDatanodeUuid(DATANODE_UUID)
-            .setCreateContainer(createReq)
+            .setCreateContainer(ContainerProtos.CreateContainerRequestProto
+                .getDefaultInstance())
             .build();
 
     dispatcher.dispatch(request);
@@ -143,6 +141,7 @@ public class TestCloseContainerHandler {
       ContainerProtos.ContainerCommandRequestProto.Builder request =
           ContainerProtos.ContainerCommandRequestProto.newBuilder();
       request.setCmdType(ContainerProtos.Type.WriteChunk);
+      request.setContainerID(blockID.getContainerID());
       request.setWriteChunk(writeRequest);
       request.setTraceID(UUID.randomUUID().toString());
       request.setDatanodeUuid(pipeline.getLeader().getUuidString());
@@ -177,6 +176,7 @@ public class TestCloseContainerHandler {
     ContainerProtos.ContainerCommandRequestProto.Builder request =
         ContainerProtos.ContainerCommandRequestProto.newBuilder();
     request.setCmdType(ContainerProtos.Type.PutKey);
+    request.setContainerID(blockID.getContainerID());
     request.setPutKey(putKeyRequestProto);
     request.setTraceID(UUID.randomUUID().toString());
     request.setDatanodeUuid(pipeline.getLeader().getUuidString());
@@ -213,6 +213,7 @@ public class TestCloseContainerHandler {
     ContainerProtos.ContainerCommandRequestProto.Builder request =
         ContainerProtos.ContainerCommandRequestProto.newBuilder();
     request.setCmdType(ContainerProtos.Type.DeleteChunk);
+    request.setContainerID(blockID.getContainerID());
     request.setDeleteChunk(deleteChunkProto);
     request.setWriteChunk(writeRequest);
     request.setTraceID(UUID.randomUUID().toString());
@@ -242,13 +243,12 @@ public class TestCloseContainerHandler {
             .get(blockID.getLocalID()));
     Assert.assertTrue(
         keyData.getChunks().size() == chunkList.size());
-    ContainerProtos.CloseContainerRequestProto.Builder closeContainerProto =
-        ContainerProtos.CloseContainerRequestProto.newBuilder();
-    closeContainerProto.setContainerID(blockID.getContainerID());
     ContainerProtos.ContainerCommandRequestProto.Builder request =
         ContainerProtos.ContainerCommandRequestProto.newBuilder();
     request.setCmdType(ContainerProtos.Type.CloseContainer);
-    request.setCloseContainer(closeContainerProto);
+    request.setContainerID(blockID.getContainerID());
+    request.setCloseContainer(
+        ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
     request.setTraceID(UUID.randomUUID().toString());
     request.setDatanodeUuid(pipeline.getLeader().getUuidString());
     dispatcher.dispatch(request.build());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/007e6f51/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
index 93e7ef1..e757a7f 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
@@ -51,8 +51,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .CreateContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ReadChunkRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .WriteChunkRequestProto;
@@ -156,15 +154,14 @@ public class BenchMarkDatanodeDispatcher {
     FileUtils.deleteDirectory(new File(baseDir));
   }
 
-  private ContainerCommandRequestProto getCreateContainerCommand(long containerID) {
-    CreateContainerRequestProto.Builder createRequest =
-        CreateContainerRequestProto.newBuilder();
-    createRequest.setContainerID(containerID).build();
-
+  private ContainerCommandRequestProto getCreateContainerCommand(
+      long containerID) {
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
     request.setCmdType(ContainerProtos.Type.CreateContainer);
-    request.setCreateContainer(createRequest);
+    request.setContainerID(containerID);
+    request.setCreateContainer(
+        ContainerProtos.CreateContainerRequestProto.getDefaultInstance());
     request.setDatanodeUuid(datanodeUuid);
     request.setTraceID(containerID + "-trace");
     return request.build();
@@ -181,6 +178,7 @@ public class BenchMarkDatanodeDispatcher {
     ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto
         .newBuilder();
     request.setCmdType(ContainerProtos.Type.WriteChunk)
+        .setContainerID(blockID.getContainerID())
         .setTraceID(getBlockTraceID(blockID))
         .setDatanodeUuid(datanodeUuid)
         .setWriteChunk(writeChunkRequest);
@@ -193,9 +191,11 @@ public class BenchMarkDatanodeDispatcher {
         .newBuilder()
         .setBlockID(blockID.getDatanodeBlockIDProtobuf())
         .setChunkData(getChunkInfo(blockID, chunkName));
+
     ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto
         .newBuilder();
     request.setCmdType(ContainerProtos.Type.ReadChunk)
+        .setContainerID(blockID.getContainerID())
         .setTraceID(getBlockTraceID(blockID))
         .setDatanodeUuid(datanodeUuid)
         .setReadChunk(readChunkRequest);
@@ -219,9 +219,11 @@ public class BenchMarkDatanodeDispatcher {
     PutKeyRequestProto.Builder putKeyRequest = PutKeyRequestProto
         .newBuilder()
         .setKeyData(getKeyData(blockID, chunkKey));
+
     ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto
         .newBuilder();
     request.setCmdType(ContainerProtos.Type.PutKey)
+        .setContainerID(blockID.getContainerID())
         .setTraceID(getBlockTraceID(blockID))
         .setDatanodeUuid(datanodeUuid)
         .setPutKey(putKeyRequest);
@@ -234,6 +236,7 @@ public class BenchMarkDatanodeDispatcher {
     ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto
         .newBuilder()
         .setCmdType(ContainerProtos.Type.GetKey)
+        .setContainerID(blockID.getContainerID())
         .setTraceID(getBlockTraceID(blockID))
         .setDatanodeUuid(datanodeUuid)
         .setGetKey(readKeyRequest);
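
The hunks above all apply the same mechanical change: the container ID now travels on the top-level ContainerCommandRequestProto envelope instead of inside each command-specific sub-message, and sub-messages that carried nothing but the ID are replaced with their protobuf default instance. A minimal sketch of the new construction style, using the types and field names from the diff (containerID, datanodeUuid and the surrounding pipeline setup are assumed):

    ContainerCommandRequestProto request =
        ContainerCommandRequestProto.newBuilder()
            .setCmdType(ContainerProtos.Type.CloseContainer)
            // the ID is set once, on the envelope, for every command type
            .setContainerID(containerID)
            // the per-command payload no longer repeats it
            .setCloseContainer(
                ContainerProtos.CloseContainerRequestProto.getDefaultInstance())
            .setTraceID(UUID.randomUUID().toString())
            .setDatanodeUuid(datanodeUuid)
            .build();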



[11/50] [abbrv] hadoop git commit: YARN-8591. [ATSv2] NPE while checking for entity acl in non-secure cluster. Contributed by Rohith Sharma K S.

Posted by bo...@apache.org.
YARN-8591. [ATSv2] NPE while checking for entity acl in non-secure cluster. Contributed by Rohith Sharma K S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63e08ec0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63e08ec0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63e08ec0

Branch: refs/heads/YARN-7402
Commit: 63e08ec071852640babea9e39780327a0907712a
Parents: 0857f11
Author: Sunil G <su...@apache.org>
Authored: Mon Jul 30 14:48:04 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Mon Jul 30 14:48:04 2018 +0530

----------------------------------------------------------------------
 .../server/timelineservice/reader/TimelineReaderWebServices.java | 3 ++-
 .../reader/TestTimelineReaderWebServicesBasicAcl.java            | 4 ++++
 2 files changed, 6 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/63e08ec0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
index 7f96bfb..b10b705 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
@@ -3532,7 +3532,8 @@ public class TimelineReaderWebServices {
   static boolean checkAccess(TimelineReaderManager readerManager,
       UserGroupInformation ugi, String entityUser) {
     if (isDisplayEntityPerUserFilterEnabled(readerManager.getConfig())) {
-      if (!validateAuthUserWithEntityUser(readerManager, ugi, entityUser)) {
+      if (ugi != null && !validateAuthUserWithEntityUser(readerManager, ugi,
+          entityUser)) {
         String userName = ugi.getShortUserName();
         String msg = "User " + userName
             + " is not allowed to read TimelineService V2 data.";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/63e08ec0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java
index 4239bf0..6651457 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java
@@ -88,6 +88,10 @@ public class TestTimelineReaderWebServicesBasicAcl {
     Assert.assertFalse(TimelineReaderWebServices
         .validateAuthUserWithEntityUser(manager, null, user1));
 
+    // true because ugi is null
+    Assert.assertTrue(
+        TimelineReaderWebServices.checkAccess(manager, null, user1));
+
     // incoming ugi is admin asking for entity owner user1
     Assert.assertTrue(
         TimelineReaderWebServices.checkAccess(manager, adminUgi, user1));
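
The one-line guard matters because in a non-secure cluster the caller's UserGroupInformation is null; validateAuthUserWithEntityUser returns false for a null ugi, so the old code fell into the rejection branch and dereferenced ugi.getShortUserName(), which is the NPE in the title. With the patch, a null ugi bypasses the per-user filter entirely. A rough illustration using the package-private methods the new test exercises (the manager setup is assumed):

    // non-secure cluster: no authenticated caller
    UserGroupInformation ugi = null;
    // before the fix this threw NullPointerException; now it simply allows access
    boolean allowed = TimelineReaderWebServices.checkAccess(manager, ugi, "user1");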



[28/50] [abbrv] hadoop git commit: HDDS-226. Client should update block length in OM while committing the key. Contributed by Shashikant Banerjee.

Posted by bo...@apache.org.
HDDS-226. Client should update block length in OM while committing the key. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4db753b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4db753b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4db753b

Branch: refs/heads/YARN-7402
Commit: f4db753bb6b4648c583722dbe8108973c23ba06f
Parents: 6310c0d
Author: Mukul Kumar Singh <ms...@apache.org>
Authored: Wed Aug 1 09:02:43 2018 +0530
Committer: Mukul Kumar Singh <ms...@apache.org>
Committed: Wed Aug 1 09:03:00 2018 +0530

----------------------------------------------------------------------
 .../ozone/client/io/ChunkGroupOutputStream.java | 22 +++++++++++-
 .../hadoop/ozone/om/helpers/OmKeyArgs.java      | 26 ++++++++++++---
 .../hadoop/ozone/om/helpers/OmKeyInfo.java      | 29 ++++++++++++++--
 .../ozone/om/helpers/OmKeyLocationInfo.java     |  6 +++-
 ...neManagerProtocolClientSideTranslatorPB.java |  8 ++++-
 .../src/main/proto/OzoneManagerProtocol.proto   |  1 +
 .../ozone/client/rpc/TestOzoneRpcClient.java    | 35 ++++++++++++++++++++
 .../hadoop/ozone/om/TestOmBlockVersioning.java  | 13 +++++++-
 .../apache/hadoop/ozone/om/KeyManagerImpl.java  |  4 +++
 ...neManagerProtocolServerSideTranslatorPB.java |  5 ++-
 10 files changed, 138 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4db753b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
index 9443317..83b4dfd 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
@@ -76,7 +76,7 @@ public class ChunkGroupOutputStream extends OutputStream {
   private final int chunkSize;
   private final String requestID;
   private boolean closed;
-
+  private List<OmKeyLocationInfo> locationInfoList;
   /**
    * A constructor for testing purpose only.
    */
@@ -91,6 +91,7 @@ public class ChunkGroupOutputStream extends OutputStream {
     chunkSize = 0;
     requestID = null;
     closed = false;
+    locationInfoList = null;
   }
 
   /**
@@ -133,6 +134,7 @@ public class ChunkGroupOutputStream extends OutputStream {
     this.xceiverClientManager = xceiverClientManager;
     this.chunkSize = chunkSize;
     this.requestID = requestId;
+    this.locationInfoList = new ArrayList<>();
     LOG.debug("Expecting open key with one block, but got" +
         info.getKeyLocationVersions().size());
   }
@@ -196,8 +198,19 @@ public class ChunkGroupOutputStream extends OutputStream {
     streamEntries.add(new ChunkOutputStreamEntry(subKeyInfo.getBlockID(),
         keyArgs.getKeyName(), xceiverClientManager, xceiverClient, requestID,
         chunkSize, subKeyInfo.getLength()));
+    // reset the original length to zero here. It will be updated as and when
+    // the data gets written.
+    subKeyInfo.setLength(0);
+    locationInfoList.add(subKeyInfo);
   }
 
+  private void incrementBlockLength(int index, long length) {
+    if (locationInfoList != null) {
+      OmKeyLocationInfo locationInfo = locationInfoList.get(index);
+      long originalLength = locationInfo.getLength();
+      locationInfo.setLength(originalLength + length);
+    }
+  }
 
   @VisibleForTesting
   public long getByteOffset() {
@@ -222,6 +235,7 @@ public class ChunkGroupOutputStream extends OutputStream {
     }
     ChunkOutputStreamEntry entry = streamEntries.get(currentStreamIndex);
     entry.write(b);
+    incrementBlockLength(currentStreamIndex, 1);
     if (entry.getRemaining() <= 0) {
       currentStreamIndex += 1;
     }
@@ -276,6 +290,7 @@ public class ChunkGroupOutputStream extends OutputStream {
       ChunkOutputStreamEntry current = streamEntries.get(currentStreamIndex);
       int writeLen = Math.min(len, (int)current.getRemaining());
       current.write(b, off, writeLen);
+      incrementBlockLength(currentStreamIndex, writeLen);
       if (current.getRemaining() <= 0) {
         currentStreamIndex += 1;
       }
@@ -328,8 +343,13 @@ public class ChunkGroupOutputStream extends OutputStream {
     }
     if (keyArgs != null) {
       // in test, this could be null
+      long length =
+          locationInfoList.parallelStream().mapToLong(e -> e.getLength()).sum();
+      Preconditions.checkState(byteOffset == length);
       keyArgs.setDataSize(byteOffset);
+      keyArgs.setLocationInfoList(locationInfoList);
       omClient.commitKey(keyArgs, openID);
+      locationInfoList = null;
     } else {
       LOG.warn("Closing ChunkGroupOutputStream, but key args is null");
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4db753b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
index 1f8ed5f..aab35c5 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
@@ -19,6 +19,8 @@ package org.apache.hadoop.ozone.om.helpers;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 
+import java.util.List;
+
 /**
  * Args for key. Client use this to specify key's attributes on  key creation
  * (putKey()).
@@ -30,15 +32,18 @@ public final class OmKeyArgs {
   private long dataSize;
   private final ReplicationType type;
   private final ReplicationFactor factor;
+  private List<OmKeyLocationInfo> locationInfoList;
 
   private OmKeyArgs(String volumeName, String bucketName, String keyName,
-                    long dataSize, ReplicationType type, ReplicationFactor factor) {
+      long dataSize, ReplicationType type, ReplicationFactor factor,
+      List<OmKeyLocationInfo> locationInfoList) {
     this.volumeName = volumeName;
     this.bucketName = bucketName;
     this.keyName = keyName;
     this.dataSize = dataSize;
     this.type = type;
     this.factor = factor;
+    this.locationInfoList = locationInfoList;
   }
 
   public ReplicationType getType() {
@@ -69,6 +74,14 @@ public final class OmKeyArgs {
     dataSize = size;
   }
 
+  public void setLocationInfoList(List<OmKeyLocationInfo> locationInfoList) {
+    this.locationInfoList = locationInfoList;
+  }
+
+  public List<OmKeyLocationInfo> getLocationInfoList() {
+    return locationInfoList;
+  }
+
   /**
    * Builder class of OmKeyArgs.
    */
@@ -79,7 +92,7 @@ public final class OmKeyArgs {
     private long dataSize;
     private ReplicationType type;
     private ReplicationFactor factor;
-
+    private List<OmKeyLocationInfo> locationInfoList;
 
     public Builder setVolumeName(String volume) {
       this.volumeName = volume;
@@ -111,9 +124,14 @@ public final class OmKeyArgs {
       return this;
     }
 
+    public Builder setLocationInfoList(List<OmKeyLocationInfo> locationInfos) {
+      this.locationInfoList = locationInfos;
+      return this;
+    }
+
     public OmKeyArgs build() {
-      return new OmKeyArgs(volumeName, bucketName, keyName, dataSize,
-          type, factor);
+      return new OmKeyArgs(volumeName, bucketName, keyName, dataSize, type,
+          factor, locationInfoList);
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4db753b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index 05c8d45..3603964 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -101,8 +101,7 @@ public final class OmKeyInfo {
     this.dataSize = size;
   }
 
-  public synchronized OmKeyLocationInfoGroup getLatestVersionLocations()
-      throws IOException {
+  public synchronized OmKeyLocationInfoGroup getLatestVersionLocations() {
     return keyLocationVersions.size() == 0? null :
         keyLocationVersions.get(keyLocationVersions.size() - 1);
   }
@@ -116,6 +115,32 @@ public final class OmKeyInfo {
   }
 
   /**
+   * updates the length of the each block in the list given.
+   * This will be called when the key is being committed to OzoneManager.
+   *
+   * @param locationInfoList list of locationInfo
+   * @throws IOException
+   */
+  public void updateLocationInfoList(List<OmKeyLocationInfo> locationInfoList) {
+    OmKeyLocationInfoGroup keyLocationInfoGroup = getLatestVersionLocations();
+    List<OmKeyLocationInfo> currentList =
+        keyLocationInfoGroup.getLocationList();
+    Preconditions.checkNotNull(keyLocationInfoGroup);
+    Preconditions.checkState(locationInfoList.size() <= currentList.size());
+    for (OmKeyLocationInfo current : currentList) {
+      // For Versioning, while committing the key for the newer version,
+      // we just need to update the lengths for new blocks. Need to iterate over
+      // and find the new blocks added in the latest version.
+      for (OmKeyLocationInfo info : locationInfoList) {
+        if (info.getBlockID().equals(current.getBlockID())) {
+          current.setLength(info.getLength());
+          break;
+        }
+      }
+    }
+  }
+
+  /**
    * Append a set of blocks to the latest version. Note that these blocks are
    * part of the latest version, not a new version.
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4db753b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
index 3f6666d..fae92f8 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
@@ -27,7 +27,7 @@ public final class OmKeyLocationInfo {
   private final BlockID blockID;
   private final boolean shouldCreateContainer;
   // the id of this subkey in all the subkeys.
-  private final long length;
+  private long length;
   private final long offset;
   // the version number indicating when this block was added
   private long createVersion;
@@ -68,6 +68,10 @@ public final class OmKeyLocationInfo {
     return length;
   }
 
+  public void setLength(long length) {
+    this.length = length;
+  }
+
   public long getOffset() {
     return offset;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4db753b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index 37151fb..e557ac5 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ozone.om.protocolPB;
 
+import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 import com.google.common.collect.Lists;
 import com.google.protobuf.RpcController;
@@ -581,11 +582,16 @@ public final class OzoneManagerProtocolClientSideTranslatorPB
   public void commitKey(OmKeyArgs args, int clientID)
       throws IOException {
     CommitKeyRequest.Builder req = CommitKeyRequest.newBuilder();
+    List<OmKeyLocationInfo> locationInfoList = args.getLocationInfoList();
+    Preconditions.checkNotNull(locationInfoList);
     KeyArgs keyArgs = KeyArgs.newBuilder()
         .setVolumeName(args.getVolumeName())
         .setBucketName(args.getBucketName())
         .setKeyName(args.getKeyName())
-        .setDataSize(args.getDataSize()).build();
+        .setDataSize(args.getDataSize())
+        .addAllKeyLocations(
+            locationInfoList.stream().map(OmKeyLocationInfo::getProtobuf)
+                .collect(Collectors.toList())).build();
     req.setKeyArgs(keyArgs);
     req.setClientID(clientID);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4db753b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
index 36b1c83..51a0a7f 100644
--- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
+++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
@@ -234,6 +234,7 @@ message KeyArgs {
     optional uint64 dataSize = 4;
     optional hadoop.hdds.ReplicationType type = 5;
     optional hadoop.hdds.ReplicationFactor factor = 6;
+    repeated KeyLocation keyLocations = 7;
 }
 
 message KeyLocation {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4db753b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
index 2fbab36..e31b528 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.ozone.client.rpc;
 
 import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -434,6 +435,40 @@ public class TestOzoneRpcClient {
   }
 
   @Test
+  public void testValidateBlockLengthWithCommitKey() throws IOException {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+
+    String value = RandomStringUtils.random(RandomUtils.nextInt(0,1024));
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    String keyName = UUID.randomUUID().toString();
+
+    // create the initial key with size 0, write will allocate the first block.
+    OzoneOutputStream out = bucket.createKey(keyName, 0,
+        ReplicationType.STAND_ALONE, ReplicationFactor.ONE);
+    out.write(value.getBytes());
+    out.close();
+    OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
+    builder.setVolumeName(volumeName).setBucketName(bucketName)
+        .setKeyName(keyName);
+    OmKeyInfo keyInfo = ozoneManager.lookupKey(builder.build());
+
+    List<OmKeyLocationInfo> locationInfoList =
+        keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
+    // LocationList should have only 1 block
+    Assert.assertEquals(1, locationInfoList.size());
+    // make sure the data block size is updated
+    Assert.assertEquals(value.getBytes().length,
+        locationInfoList.get(0).getLength());
+    // make sure the total data size is set correctly
+    Assert.assertEquals(value.getBytes().length, keyInfo.getDataSize());
+  }
+
+
+  @Test
   public void testPutKeyRatisOneNode()
       throws IOException, OzoneException {
     String volumeName = UUID.randomUUID().toString();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4db753b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java
index 15122b9..f5dddee 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java
@@ -44,6 +44,7 @@ import org.junit.rules.ExpectedException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.util.ArrayList;
 import java.util.LinkedList;
 import java.util.List;
 
@@ -122,6 +123,9 @@ public class TestOmBlockVersioning {
 
     // 1st update, version 0
     OpenKeySession openKey = ozoneManager.openKey(keyArgs);
+    // explicitly set the keyLocation list before committing the key.
+    keyArgs.setLocationInfoList(
+        openKey.getKeyInfo().getLatestVersionLocations().getLocationList());
     ozoneManager.commitKey(keyArgs, openKey.getId());
 
     OmKeyInfo keyInfo = ozoneManager.lookupKey(keyArgs);
@@ -134,6 +138,9 @@ public class TestOmBlockVersioning {
     openKey = ozoneManager.openKey(keyArgs);
     //OmKeyLocationInfo locationInfo =
     //    ozoneManager.allocateBlock(keyArgs, openKey.getId());
+    // explicitly set the keyLocation list before committing the key.
+    keyArgs.setLocationInfoList(
+        openKey.getKeyInfo().getLatestVersionLocations().getLocationList());
     ozoneManager.commitKey(keyArgs, openKey.getId());
 
     keyInfo = ozoneManager.lookupKey(keyArgs);
@@ -144,7 +151,11 @@ public class TestOmBlockVersioning {
     // 3rd update, version 2
     openKey = ozoneManager.openKey(keyArgs);
     // this block will be appended to the latest version of version 2.
-    ozoneManager.allocateBlock(keyArgs, openKey.getId());
+    OmKeyLocationInfo locationInfo =
+        ozoneManager.allocateBlock(keyArgs, openKey.getId());
+    List<OmKeyLocationInfo> locationInfoList = new ArrayList<>();
+    locationInfoList.add(locationInfo);
+    keyArgs.setLocationInfoList(locationInfoList);
     ozoneManager.commitKey(keyArgs, openKey.getId());
 
     keyInfo = ozoneManager.lookupKey(keyArgs);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4db753b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index ba92a29..75342c6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -342,6 +342,10 @@ public class KeyManagerImpl implements KeyManager {
           OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(openKeyData));
       keyInfo.setDataSize(args.getDataSize());
       keyInfo.setModificationTime(Time.now());
+      List<OmKeyLocationInfo> locationInfoList = args.getLocationInfoList();
+      Preconditions.checkNotNull(locationInfoList);
+      //update the block length for each block
+      keyInfo.updateLocationInfoList(locationInfoList);
       BatchOperation batch = new BatchOperation();
       batch.delete(openKey);
       batch.put(objectKeyBytes, keyInfo.getProtobuf().toByteArray());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4db753b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
index 40a88b6..45ec2d0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
@@ -519,9 +519,12 @@ public class OzoneManagerProtocolServerSideTranslatorPB implements
           .setVolumeName(keyArgs.getVolumeName())
           .setBucketName(keyArgs.getBucketName())
           .setKeyName(keyArgs.getKeyName())
-          .setDataSize(keyArgs.getDataSize())
+          .setLocationInfoList(keyArgs.getKeyLocationsList().stream()
+              .map(OmKeyLocationInfo::getFromProtobuf)
+              .collect(Collectors.toList()))
           .setType(type)
           .setFactor(factor)
+          .setDataSize(keyArgs.getDataSize())
           .build();
       int id = request.getClientID();
       impl.commitKey(omKeyArgs, id);
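
Taken together, the change moves the source of truth for block lengths from allocation time to commit time: ChunkGroupOutputStream zeroes each allocated block's length, grows it as bytes are written, and ships the resulting OmKeyLocationInfo list to the OzoneManager in commitKey, where updateLocationInfoList copies the lengths into the stored key. Callers that drive the OM protocol directly (as the updated tests do) therefore have to attach a location list before committing. A condensed sketch of that flow, with keyArgs and ozoneManager assumed to be set up as in TestOmBlockVersioning:

    OpenKeySession openKey = ozoneManager.openKey(keyArgs);
    // the lengths in this list are what the OM will record for the key's blocks
    keyArgs.setLocationInfoList(
        openKey.getKeyInfo().getLatestVersionLocations().getLocationList());
    ozoneManager.commitKey(keyArgs, openKey.getId());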



[04/50] [abbrv] hadoop git commit: YARN-8508. Release GPU resource for killed container. Contributed by Chandni Singh

Posted by bo...@apache.org.
YARN-8508. Release GPU resource for killed container. Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed9d60e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed9d60e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed9d60e8

Branch: refs/heads/YARN-7402
Commit: ed9d60e888d0acfd748fda7f66249f5b79a3ed6d
Parents: 79091cf
Author: Eric Yang <ey...@apache.org>
Authored: Fri Jul 27 19:33:58 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Fri Jul 27 19:33:58 2018 -0400

----------------------------------------------------------------------
 .../nodemanager/LinuxContainerExecutor.java     | 34 ++++++++++----------
 .../nodemanager/TestLinuxContainerExecutor.java |  9 +++++-
 2 files changed, 25 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed9d60e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 03b88a4..4253f2f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -573,15 +573,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
       return handleExitCode(e, container, containerId);
     } finally {
       resourcesHandler.postExecute(containerId);
-
-      try {
-        if (resourceHandlerChain != null) {
-          resourceHandlerChain.postComplete(containerId);
-        }
-      } catch (ResourceHandlerException e) {
-        LOG.warn("ResourceHandlerChain.postComplete failed for " +
-            "containerId: " + containerId + ". Exception: " + e);
-      }
+      postComplete(containerId);
     }
 
     return 0;
@@ -721,14 +713,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
       return super.reacquireContainer(ctx);
     } finally {
       resourcesHandler.postExecute(containerId);
-      if (resourceHandlerChain != null) {
-        try {
-          resourceHandlerChain.postComplete(containerId);
-        } catch (ResourceHandlerException e) {
-          LOG.warn("ResourceHandlerChain.postComplete failed for " +
-              "containerId: " + containerId + " Exception: " + e);
-        }
-      }
+      postComplete(containerId);
     }
   }
 
@@ -798,6 +783,8 @@ public class LinuxContainerExecutor extends ContainerExecutor {
       logOutput(e.getOutput());
       throw new IOException("Error in reaping container "
           + container.getContainerId().toString() + " exit = " + retCode, e);
+    } finally {
+      postComplete(container.getContainerId());
     }
     return true;
   }
@@ -968,4 +955,17 @@ public class LinuxContainerExecutor extends ContainerExecutor {
       LOG.warn("Unable to remove docker container: " + containerId);
     }
   }
+
+  @VisibleForTesting
+  void postComplete(final ContainerId containerId) {
+    try {
+      if (resourceHandlerChain != null) {
+        LOG.debug("{} post complete", containerId);
+        resourceHandlerChain.postComplete(containerId);
+      }
+    } catch (ResourceHandlerException e) {
+      LOG.warn("ResourceHandlerChain.postComplete failed for " +
+          "containerId: {}. Exception: ", containerId, e);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed9d60e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
index ddbf3b9..6d77fc4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
@@ -25,11 +25,14 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.mockito.Matchers.anyObject;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntime;
 import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReapContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -40,6 +43,7 @@ import java.io.IOException;
 import java.io.PrintWriter;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -667,12 +671,15 @@ public class TestLinuxContainerExecutor {
   @Test
   public void testReapContainer() throws Exception {
     Container container = mock(Container.class);
-    LinuxContainerExecutor lce = mock(LinuxContainerExecutor.class);
+    LinuxContainerRuntime containerRuntime = mock(LinuxContainerRuntime.class);
+    LinuxContainerExecutor lce = spy(new LinuxContainerExecutor(
+        containerRuntime));
     ContainerReapContext.Builder builder =  new ContainerReapContext.Builder();
     builder.setContainer(container).setUser("foo");
     ContainerReapContext ctx = builder.build();
     lce.reapContainer(ctx);
     verify(lce, times(1)).reapContainer(ctx);
+    verify(lce, times(1)).postComplete(anyObject());
   }
 
   @Test
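
The refactor collapses three copies of the ResourceHandlerChain cleanup into a single postComplete(containerId) helper and, decisively for this bug, also invokes it from the finally block of reapContainer, so a container that is killed rather than exiting on its own still returns its GPUs. The shape of the change, reduced to a sketch (containerId and postComplete are the names from the patch; the reaping logic itself is elided):

    try {
      // signal and reap the container process; this can throw
    } finally {
      // runs on every exit path, releasing cgroup/GPU assignments
      postComplete(containerId);
    }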



[15/50] [abbrv] hadoop git commit: HADOOP-15637. LocalFs#listLocatedStatus does not filter out hidden .crc files. Contributed by Erik Krogen.

Posted by bo...@apache.org.
HADOOP-15637. LocalFs#listLocatedStatus does not filter out hidden .crc files. Contributed by Erik Krogen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8f952ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8f952ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8f952ef

Branch: refs/heads/YARN-7402
Commit: e8f952ef06ae05d2b504300d6f19beb8a052b6f1
Parents: 3517a47
Author: Chen Liang <cl...@apache.org>
Authored: Mon Jul 30 10:25:07 2018 -0700
Committer: Chen Liang <cl...@apache.org>
Committed: Mon Jul 30 10:25:07 2018 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/ChecksumFs.java   | 37 +++++++++++++++++++
 .../fs/FileContextMainOperationsBaseTest.java   | 38 ++++++++++++++++++++
 2 files changed, 75 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8f952ef/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
index 75622ad..c56f6e0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
@@ -27,10 +27,12 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
 
+import java.util.NoSuchElementException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
 import org.slf4j.Logger;
@@ -527,4 +529,39 @@ public abstract class ChecksumFs extends FilterFs {
     }
     return results.toArray(new FileStatus[results.size()]);
   }
+
+  @Override
+  public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f)
+      throws AccessControlException, FileNotFoundException,
+             UnresolvedLinkException, IOException {
+    final RemoteIterator<LocatedFileStatus> iter =
+        getMyFs().listLocatedStatus(f);
+    return new RemoteIterator<LocatedFileStatus>() {
+
+      private LocatedFileStatus next = null;
+
+      @Override
+      public boolean hasNext() throws IOException {
+        while (next == null && iter.hasNext()) {
+          LocatedFileStatus unfilteredNext = iter.next();
+          if (!isChecksumFile(unfilteredNext.getPath())) {
+            next = unfilteredNext;
+          }
+        }
+        return next != null;
+      }
+
+      @Override
+      public LocatedFileStatus next() throws IOException {
+        if (!hasNext()) {
+          throw new NoSuchElementException();
+        }
+        LocatedFileStatus tmp = next;
+        next = null;
+        return tmp;
+      }
+
+    };
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8f952ef/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
index 62ecd9f..c07a6ff 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
@@ -369,6 +369,44 @@ public abstract class FileContextMainOperationsBaseTest  {
     pathsIterator = fc.listStatus(getTestRootPath(fc, "test/hadoop/a"));
     Assert.assertFalse(pathsIterator.hasNext());
   }
+
+  @Test
+  public void testListFiles() throws Exception {
+    Path[] testDirs = {
+        getTestRootPath(fc, "test/dir1"),
+        getTestRootPath(fc, "test/dir1/dir1"),
+        getTestRootPath(fc, "test/dir2")
+    };
+    Path[] testFiles = {
+        new Path(testDirs[0], "file1"),
+        new Path(testDirs[0], "file2"),
+        new Path(testDirs[1], "file2"),
+        new Path(testDirs[2], "file1")
+    };
+
+    for (Path path : testDirs) {
+      fc.mkdir(path, FsPermission.getDefault(), true);
+    }
+    for (Path p : testFiles) {
+      FSDataOutputStream out = fc.create(p).build();
+      out.writeByte(0);
+      out.close();
+    }
+
+    RemoteIterator<LocatedFileStatus> filesIterator =
+        fc.util().listFiles(getTestRootPath(fc, "test"), true);
+    LocatedFileStatus[] fileStats =
+        new LocatedFileStatus[testFiles.length];
+    for (int i = 0; i < fileStats.length; i++) {
+      assertTrue(filesIterator.hasNext());
+      fileStats[i] = filesIterator.next();
+    }
+    assertFalse(filesIterator.hasNext());
+
+    for (Path p : testFiles) {
+      assertTrue(containsPath(p, fileStats));
+    }
+  }
   
   @Test
   public void testListStatusFilterWithNoMatches() throws Exception {
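
ChecksumFs (the base class of LocalFs) previously delegated listLocatedStatus straight to the underlying file system, so the hidden .name.crc siblings leaked into located-status listings. The new RemoteIterator wrapper skips any entry for which isChecksumFile is true. A rough usage sketch against the local file system (imports and exception handling are omitted, and the directory path is only an example):

    FileContext fc = FileContext.getLocalFSFileContext();
    Path dir = new Path("/tmp/checksumfs-demo");
    fc.mkdir(dir, FsPermission.getDefault(), true);
    FSDataOutputStream out = fc.create(new Path(dir, "f")).build();
    out.writeByte(0);
    out.close();
    RemoteIterator<LocatedFileStatus> it = fc.listLocatedStatus(dir);
    while (it.hasNext()) {
      // expected to print only ".../f"; the ".f.crc" sibling stays hidden
      System.out.println(it.next().getPath());
    }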



[49/50] [abbrv] hadoop git commit: YARN-7707. [GPG] Policy generator framework. Contributed by Young Chen

Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6800cf70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo2.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo2.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo2.json
new file mode 100644
index 0000000..2ff879e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo2.json
@@ -0,0 +1,196 @@
+ {
+      "type": "capacityScheduler",
+      "capacity": 100.0,
+      "usedCapacity": 0.0,
+      "maxCapacity": 100.0,
+      "queueName": "root",
+      "queues": {
+        "queue": [
+          {
+            "type": "capacitySchedulerLeafQueueInfo",
+            "capacity": 100.0,
+            "usedCapacity": 0.0,
+            "maxCapacity": 100.0,
+            "absoluteCapacity": 100.0,
+            "absoluteMaxCapacity": 100.0,
+            "absoluteUsedCapacity": 0.0,
+            "numApplications": 484,
+            "queueName": "default",
+            "state": "RUNNING",
+            "resourcesUsed": {
+              "memory": 0,
+              "vCores": 0
+            },
+            "hideReservationQueues": false,
+            "nodeLabels": [
+              "*"
+            ],
+            "numActiveApplications": 484,
+            "numPendingApplications": 0,
+            "numContainers": 0,
+            "maxApplications": 10000,
+            "maxApplicationsPerUser": 10000,
+            "userLimit": 100,
+            "users": {
+              "user": [
+                {
+                  "username": "Default",
+                  "resourcesUsed": {
+                    "memory": 0,
+                    "vCores": 0
+                  },
+                  "numPendingApplications": 0,
+                  "numActiveApplications": 468,
+                  "AMResourceUsed": {
+                    "memory": 30191616,
+                    "vCores": 468
+                  },
+                  "userResourceLimit": {
+                    "memory": 31490048,
+                    "vCores": 7612
+                  }
+                }
+              ]
+            },
+            "userLimitFactor": 1.0,
+            "AMResourceLimit": {
+              "memory": 31490048,
+              "vCores": 7612
+            },
+            "usedAMResource": {
+              "memory": 30388224,
+              "vCores": 532
+            },
+            "userAMResourceLimit": {
+              "memory": 31490048,
+              "vCores": 7612
+            },
+            "preemptionDisabled": true
+          },
+          {
+            "type": "capacitySchedulerLeafQueueInfo",
+            "capacity": 100.0,
+            "usedCapacity": 0.0,
+            "maxCapacity": 100.0,
+            "absoluteCapacity": 100.0,
+            "absoluteMaxCapacity": 100.0,
+            "absoluteUsedCapacity": 0.0,
+            "numApplications": 484,
+            "queueName": "default2",
+            "state": "RUNNING",
+            "resourcesUsed": {
+              "memory": 0,
+              "vCores": 0
+            },
+            "hideReservationQueues": false,
+            "nodeLabels": [
+              "*"
+            ],
+            "numActiveApplications": 484,
+            "numPendingApplications": 0,
+            "numContainers": 0,
+            "maxApplications": 10000,
+            "maxApplicationsPerUser": 10000,
+            "userLimit": 100,
+            "users": {
+              "user": [
+                {
+                  "username": "Default",
+                  "resourcesUsed": {
+                    "memory": 0,
+                    "vCores": 0
+                  },
+                  "numPendingApplications": 0,
+                  "numActiveApplications": 468,
+                  "AMResourceUsed": {
+                    "memory": 30191616,
+                    "vCores": 468
+                  },
+                  "userResourceLimit": {
+                    "memory": 31490048,
+                    "vCores": 7612
+                  }
+                }
+              ]
+            },
+            "userLimitFactor": 1.0,
+            "AMResourceLimit": {
+              "memory": 31490048,
+              "vCores": 7612
+            },
+            "usedAMResource": {
+              "memory": 30388224,
+              "vCores": 532
+            },
+            "userAMResourceLimit": {
+              "memory": 31490048,
+              "vCores": 7612
+            },
+            "preemptionDisabled": true
+          }
+        ]
+      },
+      "health": {
+        "lastrun": 1517951638085,
+        "operationsInfo": {
+          "entry": {
+            "key": "last-allocation",
+            "value": {
+              "nodeId": "node0:0",
+              "containerId": "container_e61477_1517922128312_0340_01_000001",
+              "queue": "root.default"
+            }
+          },
+          "entry": {
+            "key": "last-reservation",
+            "value": {
+              "nodeId": "node0:1",
+              "containerId": "container_e61477_1517879828320_0249_01_000001",
+              "queue": "root.default"
+            }
+          },
+          "entry": {
+            "key": "last-release",
+            "value": {
+              "nodeId": "node0:2",
+              "containerId": "container_e61477_1517922128312_0340_01_000001",
+              "queue": "root.default"
+            }
+          },
+          "entry": {
+            "key": "last-preemption",
+            "value": {
+              "nodeId": "N/A",
+              "containerId": "N/A",
+              "queue": "N/A"
+            }
+          }
+        },
+        "lastRunDetails": [
+          {
+            "operation": "releases",
+            "count": 0,
+            "resources": {
+              "memory": 0,
+              "vCores": 0
+            }
+          },
+          {
+            "operation": "allocations",
+            "count": 0,
+            "resources": {
+              "memory": 0,
+              "vCores": 0
+            }
+          },
+          {
+            "operation": "reservations",
+            "count": 0,
+            "resources": {
+              "memory": 0,
+              "vCores": 0
+            }
+          }
+        ]
+      }
+    }



[21/50] [abbrv] hadoop git commit: YARN-8175. Add support for Node Labels in SLS. Contributed by Abhishek Modi.

Posted by bo...@apache.org.
YARN-8175. Add support for Node Labels in SLS. Contributed by Abhishek Modi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9fea5c9e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9fea5c9e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9fea5c9e

Branch: refs/heads/YARN-7402
Commit: 9fea5c9ee76bd36f273ae93afef5f3ef3c477a53
Parents: b28bdc7
Author: Inigo Goiri <in...@apache.org>
Authored: Tue Jul 31 09:36:34 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Tue Jul 31 09:36:34 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   | 93 +++++++++++++++-----
 .../hadoop/yarn/sls/appmaster/AMSimulator.java  |  9 +-
 .../yarn/sls/appmaster/MRAMSimulator.java       |  5 +-
 .../yarn/sls/appmaster/StreamAMSimulator.java   |  5 +-
 .../hadoop/yarn/sls/conf/SLSConfiguration.java  |  1 +
 .../yarn/sls/nodemanager/NMSimulator.java       | 13 ++-
 .../apache/hadoop/yarn/sls/utils/SLSUtils.java  | 58 ++++++++----
 .../yarn/sls/appmaster/TestAMSimulator.java     | 35 +++++++-
 .../hadoop/yarn/sls/utils/TestSLSUtils.java     | 64 ++++++++++----
 .../test/resources/nodes-with-resources.json    |  8 +-
 .../hadoop/yarn/client/cli/RMAdminCLI.java      | 71 +--------------
 .../yarn/client/util/YarnClientUtils.java       | 77 ++++++++++++++++
 12 files changed, 301 insertions(+), 138 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fea5c9e/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
index e859732..1e83e40 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -298,30 +299,20 @@ public class SLSRunner extends Configured implements Tool {
         SLSConfiguration.NM_RESOURCE_UTILIZATION_RATIO,
         SLSConfiguration.NM_RESOURCE_UTILIZATION_RATIO_DEFAULT);
     // nm information (fetch from topology file, or from sls/rumen json file)
-    Map<String, Resource> nodeResourceMap = new HashMap<>();
-    Set<? extends  String> nodeSet;
+    Set<NodeDetails> nodeSet = null;
     if (nodeFile.isEmpty()) {
       for (String inputTrace : inputTraces) {
         switch (inputType) {
         case SLS:
           nodeSet = SLSUtils.parseNodesFromSLSTrace(inputTrace);
-          for (String node : nodeSet) {
-            nodeResourceMap.put(node, null);
-          }
           break;
         case RUMEN:
           nodeSet = SLSUtils.parseNodesFromRumenTrace(inputTrace);
-          for (String node : nodeSet) {
-            nodeResourceMap.put(node, null);
-          }
           break;
         case SYNTH:
           stjp = new SynthTraceJobProducer(getConf(), new Path(inputTraces[0]));
           nodeSet = SLSUtils.generateNodes(stjp.getNumNodes(),
               stjp.getNumNodes()/stjp.getNodesPerRack());
-          for (String node : nodeSet) {
-            nodeResourceMap.put(node, null);
-          }
           break;
         default:
           throw new YarnException("Input configuration not recognized, "
@@ -329,11 +320,11 @@ public class SLSRunner extends Configured implements Tool {
         }
       }
     } else {
-      nodeResourceMap = SLSUtils.parseNodesFromNodeFile(nodeFile,
+      nodeSet = SLSUtils.parseNodesFromNodeFile(nodeFile,
           nodeManagerResource);
     }
 
-    if (nodeResourceMap.size() == 0) {
+    if (nodeSet == null || nodeSet.isEmpty()) {
       throw new YarnException("No node! Please configure nodes.");
     }
 
@@ -344,20 +335,21 @@ public class SLSRunner extends Configured implements Tool {
         SLSConfiguration.RUNNER_POOL_SIZE_DEFAULT);
     ExecutorService executorService = Executors.
         newFixedThreadPool(threadPoolSize);
-    for (Map.Entry<String, Resource> entry : nodeResourceMap.entrySet()) {
+    for (NodeDetails nodeDetails : nodeSet) {
       executorService.submit(new Runnable() {
         @Override public void run() {
           try {
             // we randomize the heartbeat start time from zero to 1 interval
             NMSimulator nm = new NMSimulator();
             Resource nmResource = nodeManagerResource;
-            String hostName = entry.getKey();
-            if (entry.getValue() != null) {
-              nmResource = entry.getValue();
+            String hostName = nodeDetails.getHostname();
+            if (nodeDetails.getNodeResource() != null) {
+              nmResource = nodeDetails.getNodeResource();
             }
+            Set<NodeLabel> nodeLabels = nodeDetails.getLabels();
             nm.init(hostName, nmResource,
                 random.nextInt(heartbeatInterval),
-                heartbeatInterval, rm, resourceUtilizationRatio);
+                heartbeatInterval, rm, resourceUtilizationRatio, nodeLabels);
             nmMap.put(nm.getNode().getNodeID(), nm);
             runner.schedule(nm);
             rackSet.add(nm.getNode().getRackName());
@@ -452,6 +444,11 @@ public class SLSRunner extends Configured implements Tool {
           jsonJob.get(SLSConfiguration.JOB_END_MS).toString());
     }
 
+    String jobLabelExpr = null;
+    if (jsonJob.containsKey(SLSConfiguration.JOB_LABEL_EXPR)) {
+      jobLabelExpr = jsonJob.get(SLSConfiguration.JOB_LABEL_EXPR).toString();
+    }
+
     String user = (String) jsonJob.get(SLSConfiguration.JOB_USER);
     if (user == null) {
       user = "default";
@@ -481,7 +478,8 @@ public class SLSRunner extends Configured implements Tool {
 
     for (int i = 0; i < jobCount; i++) {
       runNewAM(amType, user, queue, oldAppId, jobStartTime, jobFinishTime,
-          getTaskContainers(jsonJob), getAMContainerResource(jsonJob));
+          getTaskContainers(jsonJob), getAMContainerResource(jsonJob),
+          jobLabelExpr);
     }
   }
 
@@ -730,7 +728,7 @@ public class SLSRunner extends Configured implements Tool {
 
       runNewAM(job.getType(), user, jobQueue, oldJobId,
           jobStartTimeMS, jobFinishTimeMS, containerList, reservationId,
-          job.getDeadline(), getAMContainerResource(null),
+          job.getDeadline(), getAMContainerResource(null), null,
           job.getParams());
     }
   }
@@ -775,15 +773,24 @@ public class SLSRunner extends Configured implements Tool {
       Resource amContainerResource) {
     runNewAM(jobType, user, jobQueue, oldJobId, jobStartTimeMS,
         jobFinishTimeMS, containerList, null,  -1,
-        amContainerResource, null);
+        amContainerResource, null, null);
   }
 
   private void runNewAM(String jobType, String user,
       String jobQueue, String oldJobId, long jobStartTimeMS,
       long jobFinishTimeMS, List<ContainerSimulator> containerList,
-      ReservationId reservationId, long deadline, Resource amContainerResource,
-      Map<String, String> params) {
+      Resource amContainerResource, String labelExpr) {
+    runNewAM(jobType, user, jobQueue, oldJobId, jobStartTimeMS,
+        jobFinishTimeMS, containerList, null,  -1,
+        amContainerResource, labelExpr, null);
+  }
 
+  @SuppressWarnings("checkstyle:parameternumber")
+  private void runNewAM(String jobType, String user,
+      String jobQueue, String oldJobId, long jobStartTimeMS,
+      long jobFinishTimeMS, List<ContainerSimulator> containerList,
+      ReservationId reservationId, long deadline, Resource amContainerResource,
+      String labelExpr, Map<String, String> params) {
     AMSimulator amSim = (AMSimulator) ReflectionUtils.newInstance(
         amClassMap.get(jobType), new Configuration());
 
@@ -799,7 +806,7 @@ public class SLSRunner extends Configured implements Tool {
       AM_ID++;
       amSim.init(heartbeatInterval, containerList, rm, this, jobStartTimeMS,
           jobFinishTimeMS, user, jobQueue, isTracked, oldJobId,
-          runner.getStartTimeMS(), amContainerResource, params);
+          runner.getStartTimeMS(), amContainerResource, labelExpr, params);
       if(reservationId != null) {
         // if we have a ReservationId, delegate reservation creation to
         // AMSim (reservation shape is impl specific)
@@ -985,4 +992,42 @@ public class SLSRunner extends Configured implements Tool {
     System.err.println();
   }
 
+  /**
+   * Class to encapsulate all details about the node.
+   */
+  @Private
+  @Unstable
+  public static class NodeDetails {
+    private String hostname;
+    private Resource nodeResource;
+    private Set<NodeLabel> labels;
+
+    public NodeDetails(String nodeHostname) {
+      this.hostname = nodeHostname;
+    }
+
+    public String getHostname() {
+      return hostname;
+    }
+
+    public void setHostname(String hostname) {
+      this.hostname = hostname;
+    }
+
+    public Resource getNodeResource() {
+      return nodeResource;
+    }
+
+    public void setNodeResource(Resource nodeResource) {
+      this.nodeResource = nodeResource;
+    }
+
+    public Set<NodeLabel> getLabels() {
+      return labels;
+    }
+
+    public void setLabels(Set<NodeLabel> labels) {
+      this.labels = labels;
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fea5c9e/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
index 8e1c256..5f34cfc 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
@@ -88,6 +88,8 @@ public abstract class AMSimulator extends TaskRunner.Task {
   private int responseId = 0;
   // user name
   private String user;
+  // nodelabel expression
+  private String nodeLabelExpression;
   // queue name
   protected String queue;
   // am type
@@ -123,7 +125,8 @@ public abstract class AMSimulator extends TaskRunner.Task {
       List<ContainerSimulator> containerList, ResourceManager resourceManager,
       SLSRunner slsRunnner, long startTime, long finishTime, String simUser,
       String simQueue, boolean tracked, String oldApp, long baseTimeMS,
-      Resource amResource, Map<String, String> params) {
+      Resource amResource, String nodeLabelExpr,
+      Map<String, String> params) {
     super.init(startTime, startTime + 1000000L * heartbeatInterval,
         heartbeatInterval);
     this.user = simUser;
@@ -136,6 +139,7 @@ public abstract class AMSimulator extends TaskRunner.Task {
     this.traceStartTimeMS = startTime;
     this.traceFinishTimeMS = finishTime;
     this.amContainerResource = amResource;
+    this.nodeLabelExpression = nodeLabelExpr;
   }
 
   /**
@@ -327,6 +331,9 @@ public abstract class AMSimulator extends TaskRunner.Task {
     conLauContext.setServiceData(new HashMap<>());
     appSubContext.setAMContainerSpec(conLauContext);
     appSubContext.setResource(amContainerResource);
+    if (nodeLabelExpression != null) {
+      appSubContext.setNodeLabelExpression(nodeLabelExpression);
+    }
 
     if(reservationId != null) {
       appSubContext.setReservationID(reservationId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fea5c9e/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
index 6f0f85f..71fc5b2 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
@@ -126,10 +126,11 @@ public class MRAMSimulator extends AMSimulator {
       List<ContainerSimulator> containerList, ResourceManager rm, SLSRunner se,
       long traceStartTime, long traceFinishTime, String user, String queue,
       boolean isTracked, String oldAppId, long baselineStartTimeMS,
-      Resource amContainerResource, Map<String, String> params) {
+      Resource amContainerResource, String nodeLabelExpr,
+      Map<String, String> params) {
     super.init(heartbeatInterval, containerList, rm, se,
         traceStartTime, traceFinishTime, user, queue, isTracked, oldAppId,
-        baselineStartTimeMS, amContainerResource, params);
+        baselineStartTimeMS, amContainerResource, nodeLabelExpr, params);
     amtype = "mapreduce";
 
     // get map/reduce tasks

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fea5c9e/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/StreamAMSimulator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/StreamAMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/StreamAMSimulator.java
index b41f5f2..862e5ec 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/StreamAMSimulator.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/StreamAMSimulator.java
@@ -96,10 +96,11 @@ public class StreamAMSimulator extends AMSimulator {
       List<ContainerSimulator> containerList, ResourceManager rm, SLSRunner se,
       long traceStartTime, long traceFinishTime, String user, String queue,
       boolean isTracked, String oldAppId, long baselineStartTimeMS,
-      Resource amContainerResource, Map<String, String> params) {
+      Resource amContainerResource, String nodeLabelExpr,
+      Map<String, String> params) {
     super.init(heartbeatInterval, containerList, rm, se, traceStartTime,
         traceFinishTime, user, queue, isTracked, oldAppId, baselineStartTimeMS,
-        amContainerResource, params);
+        amContainerResource, nodeLabelExpr, params);
     amtype = "stream";
 
     allStreams.addAll(containerList);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fea5c9e/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/conf/SLSConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/conf/SLSConfiguration.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/conf/SLSConfiguration.java
index ea73bef..09f653f 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/conf/SLSConfiguration.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/conf/SLSConfiguration.java
@@ -104,6 +104,7 @@ public class SLSConfiguration {
   public static final String JOB_START_MS = JOB_PREFIX + "start.ms";
   public static final String JOB_END_MS = JOB_PREFIX + "end.ms";
   public static final String JOB_QUEUE_NAME = JOB_PREFIX + "queue.name";
+  public static final String JOB_LABEL_EXPR = JOB_PREFIX + "label.expression";
   public static final String JOB_USER = JOB_PREFIX + "user";
   public static final String JOB_COUNT = JOB_PREFIX + "count";
   public static final String JOB_TASKS = JOB_PREFIX + "tasks";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fea5c9e/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java
index 428a839..6a8430e 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.DelayQueue;
 
@@ -35,6 +36,7 @@ import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceUtilization;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -78,7 +80,7 @@ public class NMSimulator extends TaskRunner.Task {
   
   public void init(String nodeIdStr, Resource nodeResource, int dispatchTime,
       int heartBeatInterval, ResourceManager pRm,
-      float pResourceUtilizationRatio)
+      float pResourceUtilizationRatio, Set<NodeLabel> labels)
       throws IOException, YarnException {
     super.init(dispatchTime, dispatchTime + 1000000L * heartBeatInterval,
         heartBeatInterval);
@@ -102,6 +104,7 @@ public class NMSimulator extends TaskRunner.Task {
             Records.newRecord(RegisterNodeManagerRequest.class);
     req.setNodeId(node.getNodeID());
     req.setResource(node.getTotalCapability());
+    req.setNodeLabels(labels);
     req.setHttpPort(80);
     RegisterNodeManagerResponse response = this.rm.getResourceTrackerService()
             .registerNodeManager(req);
@@ -109,6 +112,14 @@ public class NMSimulator extends TaskRunner.Task {
     this.resourceUtilizationRatio = pResourceUtilizationRatio;
   }
 
+  public void init(String nodeIdStr, Resource nodeResource, int dispatchTime,
+      int heartBeatInterval, ResourceManager pRm,
+      float pResourceUtilizationRatio)
+      throws IOException, YarnException {
+    init(nodeIdStr, nodeResource, dispatchTime, heartBeatInterval, pRm,
+        pResourceUtilizationRatio, null);
+  }
+
   @Override
   public void firstStep() {
     // do nothing

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fea5c9e/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java
index f2129d0..8bb4871 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java
@@ -23,7 +23,6 @@ import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.Reader;
 
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
@@ -41,8 +40,11 @@ import org.apache.hadoop.tools.rumen.JobTraceReader;
 import org.apache.hadoop.tools.rumen.LoggedJob;
 import org.apache.hadoop.tools.rumen.LoggedTask;
 import org.apache.hadoop.tools.rumen.LoggedTaskAttempt;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.client.util.YarnClientUtils;
+import org.apache.hadoop.yarn.sls.SLSRunner.NodeDetails;
 import org.apache.hadoop.yarn.sls.conf.SLSConfiguration;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -52,6 +54,10 @@ import org.apache.hadoop.yarn.util.resource.Resources;
 public class SLSUtils {
   public final static String DEFAULT_JOB_TYPE = "mapreduce";
 
+  private static final String LABEL_FORMAT_ERR_MSG =
+      "Input format for adding node-labels is not correct, it should be "
+          + "labelName1[(exclusive=true/false)],labelName2[] ..";
+
   // hostname includes the network path and the host name. for example
   // "/default-rack/hostFoo" or "/coreSwitchA/TORSwitchB/hostBar".
   // the function returns two Strings, the first element is the network
@@ -66,9 +72,9 @@ public class SLSUtils {
   /**
    * parse the rumen trace file, return each host name
    */
-  public static Set<String> parseNodesFromRumenTrace(String jobTrace)
-          throws IOException {
-    Set<String> nodeSet = new HashSet<String>();
+  public static Set<NodeDetails> parseNodesFromRumenTrace(
+      String jobTrace) throws IOException {
+    Set<NodeDetails> nodeSet = new HashSet<>();
 
     File fin = new File(jobTrace);
     Configuration conf = new Configuration();
@@ -85,7 +91,8 @@ public class SLSUtils {
           }
           LoggedTaskAttempt taskAttempt = mapTask.getAttempts()
                   .get(mapTask.getAttempts().size() - 1);
-          nodeSet.add(taskAttempt.getHostName().getValue());
+          nodeSet.add(new NodeDetails(
+              taskAttempt.getHostName().getValue()));
         }
         for(LoggedTask reduceTask : job.getReduceTasks()) {
           if (reduceTask.getAttempts().size() == 0) {
@@ -93,7 +100,8 @@ public class SLSUtils {
           }
           LoggedTaskAttempt taskAttempt = reduceTask.getAttempts()
                   .get(reduceTask.getAttempts().size() - 1);
-          nodeSet.add(taskAttempt.getHostName().getValue());
+          nodeSet.add(new NodeDetails(
+              taskAttempt.getHostName().getValue()));
         }
       }
     } finally {
@@ -106,9 +114,9 @@ public class SLSUtils {
   /**
    * parse the sls trace file, return each host name
    */
-  public static Set<String> parseNodesFromSLSTrace(String jobTrace)
-          throws IOException {
-    Set<String> nodeSet = new HashSet<>();
+  public static Set<NodeDetails> parseNodesFromSLSTrace(
+      String jobTrace) throws IOException {
+    Set<NodeDetails> nodeSet = new HashSet<>();
     JsonFactory jsonF = new JsonFactory();
     ObjectMapper mapper = new ObjectMapper();
     Reader input =
@@ -124,7 +132,8 @@ public class SLSUtils {
     return nodeSet;
   }
 
-  private static void addNodes(Set<String> nodeSet, Map jsonEntry) {
+  private static void addNodes(Set<NodeDetails> nodeSet,
+      Map jsonEntry) {
     if (jsonEntry.containsKey(SLSConfiguration.NUM_NODES)) {
       int numNodes = Integer.parseInt(
           jsonEntry.get(SLSConfiguration.NUM_NODES).toString());
@@ -142,7 +151,7 @@ public class SLSUtils {
         Map jsonTask = (Map) o;
         String hostname = (String) jsonTask.get(SLSConfiguration.TASK_HOST);
         if (hostname != null) {
-          nodeSet.add(hostname);
+          nodeSet.add(new NodeDetails(hostname));
         }
       }
     }
@@ -150,10 +159,11 @@ public class SLSUtils {
 
   /**
    * parse the input node file, return each host name
+   * sample input: label1(exclusive=true),label2(exclusive=false),label3
    */
-  public static Map<String, Resource> parseNodesFromNodeFile(String nodeFile,
-      Resource nmDefaultResource) throws IOException {
-    Map<String, Resource> nodeResourceMap = new HashMap<>();
+  public static Set<NodeDetails> parseNodesFromNodeFile(
+      String nodeFile, Resource nmDefaultResource) throws IOException {
+    Set<NodeDetails> nodeSet = new HashSet<>();
     JsonFactory jsonF = new JsonFactory();
     ObjectMapper mapper = new ObjectMapper();
     Reader input =
@@ -166,6 +176,8 @@ public class SLSUtils {
         List tasks = (List) jsonE.get("nodes");
         for (Object o : tasks) {
           Map jsonNode = (Map) o;
+          NodeDetails nodeDetails = new NodeDetails(
+              rack + "/" + jsonNode.get("node"));
           Resource nodeResource = Resources.clone(nmDefaultResource);
           ResourceInformation[] infors = ResourceUtils.getResourceTypesArray();
           for (ResourceInformation info : infors) {
@@ -174,18 +186,25 @@ public class SLSUtils {
                   Integer.parseInt(jsonNode.get(info.getName()).toString()));
             }
           }
-          nodeResourceMap.put(rack + "/" + jsonNode.get("node"), nodeResource);
+          nodeDetails.setNodeResource(nodeResource);
+          if (jsonNode.get("labels") != null) {
+            Set<NodeLabel> nodeLabels =  new HashSet<>(
+                YarnClientUtils.buildNodeLabelsFromStr(
+                    jsonNode.get("labels").toString()));
+            nodeDetails.setLabels(nodeLabels);
+          }
+          nodeSet.add(nodeDetails);
         }
       }
     } finally {
       input.close();
     }
-    return nodeResourceMap;
+    return nodeSet;
   }
 
-  public static Set<? extends String> generateNodes(int numNodes,
+  public static Set<NodeDetails> generateNodes(int numNodes,
       int numRacks){
-    Set<String> nodeSet = new HashSet<>();
+    Set<NodeDetails> nodeSet = new HashSet<>();
     if (numRacks < 1) {
       numRacks = 1;
     }
@@ -195,7 +214,8 @@ public class SLSUtils {
     }
 
     for (int i = 0; i < numNodes; i++) {
-      nodeSet.add("/rack" + i % numRacks + "/node" + i);
+      nodeSet.add(new NodeDetails(
+          "/rack" + i % numRacks + "/node" + i));
     }
     return nodeSet;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fea5c9e/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java
index bc8ea70..2efa846 100644
--- a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java
+++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java
@@ -19,10 +19,13 @@ package org.apache.hadoop.yarn.sls.appmaster;
 
 import com.codahale.metrics.MetricRegistry;
 import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ReservationId;
+import org.apache.hadoop.yarn.client.cli.RMAdminCLI;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.apache.hadoop.yarn.sls.conf.SLSConfiguration;
@@ -42,6 +45,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
+import java.util.concurrent.ConcurrentMap;
 
 @RunWith(Parameterized.class)
 public class TestAMSimulator {
@@ -73,6 +77,7 @@ public class TestAMSimulator {
     conf.set(SLSConfiguration.METRICS_OUTPUT_DIR, metricOutputDir.toString());
     conf.set(YarnConfiguration.RM_SCHEDULER, slsScheduler.getName());
     conf.set(SLSConfiguration.RM_SCHEDULER, scheduler.getName());
+    conf.set(YarnConfiguration.NODE_LABELS_ENABLED, "true");
     conf.setBoolean(SLSConfiguration.METRICS_SWITCH, true);
     rm = new ResourceManager();
     rm.init(conf);
@@ -140,7 +145,7 @@ public class TestAMSimulator {
     String queue = "default";
     List<ContainerSimulator> containers = new ArrayList<>();
     app.init(1000, containers, rm, null, 0, 1000000L, "user1", queue, true,
-        appId, 0, SLSConfiguration.getAMContainerResource(conf), null);
+        appId, 0, SLSConfiguration.getAMContainerResource(conf), null, null);
     app.firstStep();
 
     verifySchedulerMetrics(appId);
@@ -152,6 +157,34 @@ public class TestAMSimulator {
     app.lastStep();
   }
 
+  @Test
+  public void testAMSimulatorWithNodeLabels() throws Exception {
+    if (scheduler.equals(CapacityScheduler.class)) {
+      // add label to the cluster
+      RMAdminCLI rmAdminCLI = new RMAdminCLI(conf);
+      String[] args = {"-addToClusterNodeLabels", "label1"};
+      rmAdminCLI.run(args);
+
+      MockAMSimulator app = new MockAMSimulator();
+      String appId = "app1";
+      String queue = "default";
+      List<ContainerSimulator> containers = new ArrayList<>();
+      app.init(1000, containers, rm, null, 0, 1000000L, "user1", queue, true,
+          appId, 0, SLSConfiguration.getAMContainerResource(conf),
+          "label1", null);
+      app.firstStep();
+
+      verifySchedulerMetrics(appId);
+
+      ConcurrentMap<ApplicationId, RMApp> rmApps =
+          rm.getRMContext().getRMApps();
+      Assert.assertEquals(1, rmApps.size());
+      RMApp rmApp = rmApps.get(app.appId);
+      Assert.assertNotNull(rmApp);
+      Assert.assertEquals("label1", rmApp.getAmNodeLabelExpression());
+    }
+  }
+
   @After
   public void tearDown() {
     if (rm != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fea5c9e/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/utils/TestSLSUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/utils/TestSLSUtils.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/utils/TestSLSUtils.java
index 5e586b13..c59c2af 100644
--- a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/utils/TestSLSUtils.java
+++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/utils/TestSLSUtils.java
@@ -18,13 +18,13 @@
 
 package org.apache.hadoop.yarn.sls.utils;
 
-import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
+import org.apache.hadoop.yarn.sls.SLSRunner.NodeDetails;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.Assert;
 import org.junit.Test;
 
 import java.util.HashSet;
-import java.util.Map;
 import java.util.Set;
 
 public class TestSLSUtils {
@@ -45,28 +45,54 @@ public class TestSLSUtils {
   @Test
   public void testParseNodesFromNodeFile() throws Exception {
     String nodeFile = "src/test/resources/nodes.json";
-    Map<String, Resource> nodeResourceMap = SLSUtils.parseNodesFromNodeFile(
+    Set<NodeDetails> nodeDetails = SLSUtils.parseNodesFromNodeFile(
         nodeFile, Resources.createResource(1024, 2));
-    Assert.assertEquals(20, nodeResourceMap.size());
+    Assert.assertEquals(20, nodeDetails.size());
 
     nodeFile = "src/test/resources/nodes-with-resources.json";
-    nodeResourceMap = SLSUtils.parseNodesFromNodeFile(
+    nodeDetails = SLSUtils.parseNodesFromNodeFile(
         nodeFile, Resources.createResource(1024, 2));
-    Assert.assertEquals(4,
-        nodeResourceMap.size());
-    Assert.assertEquals(2048,
-        nodeResourceMap.get("/rack1/node1").getMemorySize());
-    Assert.assertEquals(6,
-        nodeResourceMap.get("/rack1/node1").getVirtualCores());
-    Assert.assertEquals(1024,
-        nodeResourceMap.get("/rack1/node2").getMemorySize());
-    Assert.assertEquals(2,
-        nodeResourceMap.get("/rack1/node2").getVirtualCores());
+    Assert.assertEquals(4, nodeDetails.size());
+    for (NodeDetails nodeDetail : nodeDetails) {
+      if (nodeDetail.getHostname().equals("/rack1/node1")) {
+        Assert.assertEquals(2048,
+            nodeDetail.getNodeResource().getMemorySize());
+        Assert.assertEquals(6,
+            nodeDetail.getNodeResource().getVirtualCores());
+      } else if (nodeDetail.getHostname().equals("/rack1/node2")) {
+        Assert.assertEquals(1024,
+            nodeDetail.getNodeResource().getMemorySize());
+        Assert.assertEquals(2,
+            nodeDetail.getNodeResource().getVirtualCores());
+        Assert.assertNull(nodeDetail.getLabels());
+      } else if (nodeDetail.getHostname().equals("/rack1/node3")) {
+        Assert.assertEquals(1024,
+            nodeDetail.getNodeResource().getMemorySize());
+        Assert.assertEquals(2,
+            nodeDetail.getNodeResource().getVirtualCores());
+        Assert.assertEquals(2, nodeDetail.getLabels().size());
+        for (NodeLabel nodeLabel : nodeDetail.getLabels()) {
+          if (nodeLabel.getName().equals("label1")) {
+            Assert.assertTrue(nodeLabel.isExclusive());
+          } else if(nodeLabel.getName().equals("label2")) {
+            Assert.assertFalse(nodeLabel.isExclusive());
+          } else {
+            Assert.assertTrue("Unexepected label", false);
+          }
+        }
+      } else if (nodeDetail.getHostname().equals("/rack1/node4")) {
+        Assert.assertEquals(6144,
+            nodeDetail.getNodeResource().getMemorySize());
+        Assert.assertEquals(12,
+            nodeDetail.getNodeResource().getVirtualCores());
+        Assert.assertEquals(2, nodeDetail.getLabels().size());
+      }
+    }
   }
 
   @Test
   public void testGenerateNodes() {
-    Set<? extends String> nodes = SLSUtils.generateNodes(3, 3);
+    Set<NodeDetails> nodes = SLSUtils.generateNodes(3, 3);
     Assert.assertEquals("Number of nodes is wrong.", 3, nodes.size());
     Assert.assertEquals("Number of racks is wrong.", 3, getNumRack(nodes));
 
@@ -83,10 +109,10 @@ public class TestSLSUtils {
     Assert.assertEquals("Number of racks is wrong.", 1, getNumRack(nodes));
   }
 
-  private int getNumRack(Set<? extends String> nodes) {
+  private int getNumRack(Set<NodeDetails> nodes) {
     Set<String> racks = new HashSet<>();
-    for (String node : nodes) {
-      String[] rackHostname = SLSUtils.getRackHostName(node);
+    for (NodeDetails node : nodes) {
+      String[] rackHostname = SLSUtils.getRackHostName(node.getHostname());
       racks.add(rackHostname[0]);
     }
     return racks.size();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fea5c9e/hadoop-tools/hadoop-sls/src/test/resources/nodes-with-resources.json
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/resources/nodes-with-resources.json b/hadoop-tools/hadoop-sls/src/test/resources/nodes-with-resources.json
index 0039181..dc5f020 100644
--- a/hadoop-tools/hadoop-sls/src/test/resources/nodes-with-resources.json
+++ b/hadoop-tools/hadoop-sls/src/test/resources/nodes-with-resources.json
@@ -10,10 +10,14 @@
       "node": "node2"
     },
     {
-      "node": "node3"
+      "node": "node3",
+      "labels": "label1, label2(exclusive=false)"
     },
     {
-      "node": "node4"
+      "node": "node4",
+      "labels": "label1, label2(exclusive=false)",
+      "memory-mb" : 6144,
+      "vcores" : 12
     }
   ]
 }
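
For readers following the node-file changes, here is a sketch of what a complete nodes-with-resources.json could look like after this patch; the outer "rack" key and the node1/node2 entries are assumptions inferred from the unchanged part of the file and the assertions in TestSLSUtils above, not something this diff shows verbatim:

    {
      "rack": "rack1",
      "nodes": [
        { "node": "node1", "memory-mb": 2048, "vcores": 6 },
        { "node": "node2" },
        { "node": "node3", "labels": "label1, label2(exclusive=false)" },
        { "node": "node4", "labels": "label1, label2(exclusive=false)",
          "memory-mb": 6144, "vcores": 12 }
      ]
    }

Nodes without explicit resource keys fall back to the default NM resource passed to SLSUtils.parseNodesFromNodeFile, and the "labels" string is parsed by YarnClientUtils.buildNodeLabelsFromStr (see the diff below), so a label without "(exclusive=...)" defaults to exclusive=true.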

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fea5c9e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
index 8d1d56b..a24c398 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.yarn.client.cli;
 
 import java.io.IOException;
 import java.io.PrintStream;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
@@ -54,6 +53,7 @@ import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.client.ClientRMProxy;
 import org.apache.hadoop.yarn.client.RMHAServiceTarget;
+import org.apache.hadoop.yarn.client.util.YarnClientUtils;
 import org.apache.hadoop.yarn.conf.HAUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -82,7 +82,8 @@ import org.apache.hadoop.yarn.util.resource.Resources;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
+
+import static org.apache.hadoop.yarn.client.util.YarnClientUtils.NO_LABEL_ERR_MSG;
 
 @Private
 @Unstable
@@ -91,15 +92,10 @@ public class RMAdminCLI extends HAAdmin {
   private final RecordFactory recordFactory = 
     RecordFactoryProvider.getRecordFactory(null);
   static CommonNodeLabelsManager localNodeLabelsManager = null;
-  private static final String NO_LABEL_ERR_MSG =
-      "No cluster node-labels are specified";
   private static final String NO_MAPPING_ERR_MSG =
       "No node-to-labels mappings are specified";
   private static final String INVALID_TIMEOUT_ERR_MSG =
       "Invalid timeout specified : ";
-  private static final String ADD_LABEL_FORMAT_ERR_MSG =
-      "Input format for adding node-labels is not correct, it should be "
-          + "labelName1[(exclusive=true/false)],LabelName2[] ..";
   private static final Pattern RESOURCE_TYPES_ARGS_PATTERN =
       Pattern.compile("^[0-9]*$");
 
@@ -533,65 +529,6 @@ public class RMAdminCLI extends HAAdmin {
     }
     return localNodeLabelsManager;
   }
-  
-  private List<NodeLabel> buildNodeLabelsFromStr(String args) {
-    List<NodeLabel> nodeLabels = new ArrayList<>();
-    for (String p : args.split(",")) {
-      if (!p.trim().isEmpty()) {
-        String labelName = p;
-
-        // Try to parse exclusive
-        boolean exclusive = NodeLabel.DEFAULT_NODE_LABEL_EXCLUSIVITY;
-        int leftParenthesisIdx = p.indexOf("(");
-        int rightParenthesisIdx = p.indexOf(")");
-
-        if ((leftParenthesisIdx == -1 && rightParenthesisIdx != -1)
-            || (leftParenthesisIdx != -1 && rightParenthesisIdx == -1)) {
-          // Parenthese not match
-          throw new IllegalArgumentException(ADD_LABEL_FORMAT_ERR_MSG);
-        }
-
-        if (leftParenthesisIdx > 0 && rightParenthesisIdx > 0) {
-          if (leftParenthesisIdx > rightParenthesisIdx) {
-            // Parentese not match
-            throw new IllegalArgumentException(ADD_LABEL_FORMAT_ERR_MSG);
-          }
-
-          String property = p.substring(p.indexOf("(") + 1, p.indexOf(")"));
-          if (property.contains("=")) {
-            String key = property.substring(0, property.indexOf("=")).trim();
-            String value =
-                property
-                    .substring(property.indexOf("=") + 1, property.length())
-                    .trim();
-
-            // Now we only support one property, which is exclusive, so check if
-            // key = exclusive and value = {true/false}
-            if (key.equals("exclusive")
-                && ImmutableSet.of("true", "false").contains(value)) {
-              exclusive = Boolean.parseBoolean(value);
-            } else {
-              throw new IllegalArgumentException(ADD_LABEL_FORMAT_ERR_MSG);
-            }
-          } else if (!property.trim().isEmpty()) {
-            throw new IllegalArgumentException(ADD_LABEL_FORMAT_ERR_MSG);
-          }
-        }
-
-        // Try to get labelName if there's "(..)"
-        if (labelName.contains("(")) {
-          labelName = labelName.substring(0, labelName.indexOf("(")).trim();
-        }
-
-        nodeLabels.add(NodeLabel.newInstance(labelName, exclusive));
-      }
-    }
-
-    if (nodeLabels.isEmpty()) {
-      throw new IllegalArgumentException(NO_LABEL_ERR_MSG);
-    }
-    return nodeLabels;
-  }
 
   private Set<String> buildNodeLabelNamesFromStr(String args) {
     Set<String> labels = new HashSet<String>();
@@ -624,7 +561,7 @@ public class RMAdminCLI extends HAAdmin {
       return exitCode;
     }
 
-    List<NodeLabel> labels = buildNodeLabelsFromStr(
+    List<NodeLabel> labels = YarnClientUtils.buildNodeLabelsFromStr(
         cliParser.getOptionValue("addToClusterNodeLabels"));
     if (cliParser.hasOption("directlyAccessNodeLabelStore")) {
       getNodeLabelManagerInstance(getConf()).addToCluserNodeLabels(labels);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fea5c9e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/util/YarnClientUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/util/YarnClientUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/util/YarnClientUtils.java
index 1e3112a..1717675 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/util/YarnClientUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/util/YarnClientUtils.java
@@ -19,8 +19,13 @@ package org.apache.hadoop.yarn.client.util;
 
 import com.google.common.annotations.VisibleForTesting;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import com.google.common.collect.ImmutableSet;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.conf.HAUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 
@@ -29,6 +34,14 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
  * YARN clients.
  */
 public abstract class YarnClientUtils {
+
+  private static final String ADD_LABEL_FORMAT_ERR_MSG =
+      "Input format for adding node-labels is not correct, it should be "
+          + "labelName1[(exclusive=true/false)],LabelName2[] ..";
+
+  public static final String NO_LABEL_ERR_MSG =
+      "No cluster node-labels are specified";
+
   /**
    * Look up and return the resource manager's principal. This method
    * automatically does the <code>_HOST</code> replacement in the principal and
@@ -80,6 +93,70 @@ public abstract class YarnClientUtils {
   }
 
   /**
+   * Creates node labels from string
+   * @param args nodelabels string to be parsed
+   * @return list of node labels
+   */
+  public static List<NodeLabel> buildNodeLabelsFromStr(String args) {
+    List<NodeLabel> nodeLabels = new ArrayList<>();
+    for (String p : args.split(",")) {
+      if (!p.trim().isEmpty()) {
+        String labelName = p;
+
+        // Try to parse exclusive
+        boolean exclusive = NodeLabel.DEFAULT_NODE_LABEL_EXCLUSIVITY;
+        int leftParenthesisIdx = p.indexOf("(");
+        int rightParenthesisIdx = p.indexOf(")");
+
+        if ((leftParenthesisIdx == -1 && rightParenthesisIdx != -1)
+            || (leftParenthesisIdx != -1 && rightParenthesisIdx == -1)) {
+          // Parentheses not match
+          throw new IllegalArgumentException(ADD_LABEL_FORMAT_ERR_MSG);
+        }
+
+        if (leftParenthesisIdx > 0 && rightParenthesisIdx > 0) {
+          if (leftParenthesisIdx > rightParenthesisIdx) {
+            // Parentheses not match
+            throw new IllegalArgumentException(ADD_LABEL_FORMAT_ERR_MSG);
+          }
+
+          String property = p.substring(p.indexOf("(") + 1, p.indexOf(")"));
+          if (property.contains("=")) {
+            String key = property.substring(0, property.indexOf("=")).trim();
+            String value =
+                property
+                    .substring(property.indexOf("=") + 1, property.length())
+                    .trim();
+
+            // Now we only support one property, which is exclusive, so check if
+            // key = exclusive and value = {true/false}
+            if (key.equals("exclusive")
+                && ImmutableSet.of("true", "false").contains(value)) {
+              exclusive = Boolean.parseBoolean(value);
+            } else {
+              throw new IllegalArgumentException(ADD_LABEL_FORMAT_ERR_MSG);
+            }
+          } else if (!property.trim().isEmpty()) {
+            throw new IllegalArgumentException(ADD_LABEL_FORMAT_ERR_MSG);
+          }
+        }
+
+        // Try to get labelName if there's "(..)"
+        if (labelName.contains("(")) {
+          labelName = labelName.substring(0, labelName.indexOf("(")).trim();
+        }
+
+        nodeLabels.add(NodeLabel.newInstance(labelName, exclusive));
+      }
+    }
+
+    if (nodeLabels.isEmpty()) {
+      throw new IllegalArgumentException(NO_LABEL_ERR_MSG);
+    }
+    return nodeLabels;
+  }
+
+  /**
    * Returns a {@link YarnConfiguration} built from the {@code conf} parameter
    * that is guaranteed to have the {@link YarnConfiguration#RM_HA_ID}
    * property set.
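
As a quick illustration of the helper relocated into YarnClientUtils above, a minimal usage sketch (not part of the patch; the spec string is just an example):

    import java.util.List;
    import org.apache.hadoop.yarn.api.records.NodeLabel;
    import org.apache.hadoop.yarn.client.util.YarnClientUtils;

    // Parses a comma-separated label spec into NodeLabel records; exclusivity
    // defaults to NodeLabel.DEFAULT_NODE_LABEL_EXCLUSIVITY when no
    // "(exclusive=...)" property is given.
    List<NodeLabel> labels = YarnClientUtils.buildNodeLabelsFromStr(
        "label1(exclusive=true),label2(exclusive=false),label3");

    // An empty spec throws IllegalArgumentException(NO_LABEL_ERR_MSG); a
    // malformed one throws IllegalArgumentException with the add-label
    // format message shown above.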




[40/50] [abbrv] hadoop git commit: HDDS-304. Process ContainerAction from datanode heartbeat in SCM. Contributed by Nanda Kumar.

Posted by bo...@apache.org.
HDDS-304. Process ContainerAction from datanode heartbeat in SCM. Contributed by Nanda Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c368575
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c368575
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c368575

Branch: refs/heads/YARN-7402
Commit: 7c368575a319f5ba98019418166524bac982086f
Parents: 97870ec
Author: Mukul Kumar Singh <ms...@apache.org>
Authored: Thu Aug 2 17:34:17 2018 +0530
Committer: Mukul Kumar Singh <ms...@apache.org>
Committed: Thu Aug 2 17:34:17 2018 +0530

----------------------------------------------------------------------
 .../scm/container/ContainerActionsHandler.java  | 60 +++++++++++++++++
 .../hadoop/hdds/scm/events/SCMEvents.java       | 16 ++++-
 .../server/SCMDatanodeHeartbeatDispatcher.java  | 22 +++++++
 .../scm/server/StorageContainerManager.java     |  3 +
 .../container/TestContainerActionsHandler.java  | 68 ++++++++++++++++++++
 5 files changed, 168 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
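
Before the individual diffs, a minimal sketch of the flow this patch adds, using only names introduced below (essentially what TestContainerActionsHandler exercises; eventQueue and datanodeDetails are assumed to be in scope, with handlers registered as StorageContainerManager now does):

    // A datanode heartbeat carrying a CLOSE container action...
    ContainerAction action = ContainerAction.newBuilder()
        .setContainerID(1L)
        .setAction(ContainerAction.Action.CLOSE)
        .setReason(ContainerAction.Reason.CONTAINER_FULL)
        .build();
    ContainerActionsProto actions = ContainerActionsProto.newBuilder()
        .addContainerActions(action)
        .build();

    // ...is wrapped with its origin by SCMDatanodeHeartbeatDispatcher and
    // fired as CONTAINER_ACTIONS; ContainerActionsHandler then translates a
    // CLOSE action into a CLOSE_CONTAINER event for that container id.
    eventQueue.fireEvent(SCMEvents.CONTAINER_ACTIONS,
        new ContainerActionsFromDatanode(datanodeDetails, actions));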


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c368575/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java
new file mode 100644
index 0000000..ce399eb
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.container;
+
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerAction;
+import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
+    .ContainerActionsFromDatanode;
+import org.apache.hadoop.hdds.server.events.EventHandler;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Handles container reports from datanode.
+ */
+public class ContainerActionsHandler implements
+    EventHandler<ContainerActionsFromDatanode> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(
+      ContainerActionsHandler.class);
+
+  @Override
+  public void onMessage(
+      ContainerActionsFromDatanode containerReportFromDatanode,
+      EventPublisher publisher) {
+    DatanodeDetails dd = containerReportFromDatanode.getDatanodeDetails();
+    for (ContainerAction action : containerReportFromDatanode.getReport()
+        .getContainerActionsList()) {
+      ContainerID containerId = ContainerID.valueof(action.getContainerID());
+      switch (action.getAction()) {
+      case CLOSE:
+        LOG.debug("Closing container {} in datanode {} because the" +
+            " container is {}.", containerId, dd, action.getReason());
+        publisher.fireEvent(SCMEvents.CLOSE_CONTAINER, containerId);
+        break;
+      default:
+        LOG.warn("Invalid action {} with reason {}, from datanode {}. ",
+            action.getAction(), action.getReason(), dd); }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c368575/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
index ad1702b..d49dd4f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
@@ -20,9 +20,16 @@
 package org.apache.hadoop.hdds.scm.events;
 
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler.*;
+import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler
+    .CloseContainerStatus;
+import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler
+    .DeleteBlockCommandStatus;
+import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler
+    .ReplicationStatus;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
+    .ContainerActionsFromDatanode;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
     .CommandStatusReportFromDatanode;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
     .ContainerReportFromDatanode;
@@ -57,6 +64,13 @@ public final class SCMEvents {
       new TypedEvent<>(ContainerReportFromDatanode.class, "Container_Report");
 
   /**
+   * ContainerActions are sent by Datanode. This event is received by
+   * SCMDatanodeHeartbeatDispatcher and CONTAINER_ACTIONS event is generated.
+   */
+  public static final TypedEvent<ContainerActionsFromDatanode>
+      CONTAINER_ACTIONS = new TypedEvent<>(ContainerActionsFromDatanode.class,
+      "Container_Actions");
+  /**
    * A Command status report will be sent by datanodes. This report is received
    * by SCMDatanodeHeartbeatDispatcher and CommandReport event is generated.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c368575/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
index 2461d37..c259141 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
@@ -19,6 +19,8 @@ package org.apache.hadoop.hdds.scm.server;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerActionsProto;
 import org.apache.hadoop.hdds.protocol.proto.
     StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
 import org.apache.hadoop.hdds.protocol.proto
@@ -37,6 +39,7 @@ import org.slf4j.LoggerFactory;
 
 import java.util.List;
 
+import static org.apache.hadoop.hdds.scm.events.SCMEvents.CONTAINER_ACTIONS;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.CONTAINER_REPORT;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.NODE_REPORT;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.CMD_STATUS_REPORT;
@@ -89,6 +92,13 @@ public final class SCMDatanodeHeartbeatDispatcher {
 
     }
 
+    if (heartbeat.hasContainerActions()) {
+      LOG.debug("Dispatching Container Actions.");
+      eventPublisher.fireEvent(CONTAINER_ACTIONS,
+          new ContainerActionsFromDatanode(datanodeDetails,
+              heartbeat.getContainerActions()));
+    }
+
     if (heartbeat.hasCommandStatusReport()) {
       eventPublisher.fireEvent(CMD_STATUS_REPORT,
           new CommandStatusReportFromDatanode(datanodeDetails,
@@ -146,6 +156,18 @@ public final class SCMDatanodeHeartbeatDispatcher {
   }
 
   /**
+   * Container action event payload with origin.
+   */
+  public static class ContainerActionsFromDatanode
+      extends ReportFromDatanode<ContainerActionsProto> {
+
+    public ContainerActionsFromDatanode(DatanodeDetails datanodeDetails,
+                                       ContainerActionsProto actions) {
+      super(datanodeDetails, actions);
+    }
+  }
+
+  /**
    * Container report event payload with origin.
    */
   public static class CommandStatusReportFromDatanode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c368575/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index be8fb43..9cb1318 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdds.scm.block.BlockManager;
 import org.apache.hadoop.hdds.scm.block.BlockManagerImpl;
 import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler;
 import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler;
+import org.apache.hadoop.hdds.scm.container.ContainerActionsHandler;
 import org.apache.hadoop.hdds.scm.container.ContainerMapping;
 import org.apache.hadoop.hdds.scm.container.ContainerReportHandler;
 import org.apache.hadoop.hdds.scm.container.Mapping;
@@ -209,10 +210,12 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     NewNodeHandler newNodeHandler = new NewNodeHandler(node2ContainerMap);
     StaleNodeHandler staleNodeHandler = new StaleNodeHandler(node2ContainerMap);
     DeadNodeHandler deadNodeHandler = new DeadNodeHandler(node2ContainerMap);
+    ContainerActionsHandler actionsHandler = new ContainerActionsHandler();
 
     eventQueue.addHandler(SCMEvents.DATANODE_COMMAND, scmNodeManager);
     eventQueue.addHandler(SCMEvents.NODE_REPORT, nodeReportHandler);
     eventQueue.addHandler(SCMEvents.CONTAINER_REPORT, containerReportHandler);
+    eventQueue.addHandler(SCMEvents.CONTAINER_ACTIONS, actionsHandler);
     eventQueue.addHandler(SCMEvents.CLOSE_CONTAINER, closeContainerHandler);
     eventQueue.addHandler(SCMEvents.NEW_NODE, newNodeHandler);
     eventQueue.addHandler(SCMEvents.STALE_NODE, staleNodeHandler);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c368575/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
new file mode 100644
index 0000000..0997e1f
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.container;
+
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerActionsProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerAction;
+import org.apache.hadoop.hdds.scm.TestUtils;
+import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerActionsFromDatanode;
+import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+/**
+ * Tests ContainerActionsHandler.
+ */
+public class TestContainerActionsHandler {
+
+  @Test
+  public void testCloseContainerAction() {
+    EventQueue queue = new EventQueue();
+    ContainerActionsHandler actionsHandler = new ContainerActionsHandler();
+    CloseContainerEventHandler closeContainerEventHandler = Mockito.mock(
+        CloseContainerEventHandler.class);
+    queue.addHandler(SCMEvents.CLOSE_CONTAINER, closeContainerEventHandler);
+    queue.addHandler(SCMEvents.CONTAINER_ACTIONS, actionsHandler);
+
+    ContainerAction action = ContainerAction.newBuilder()
+        .setContainerID(1L)
+        .setAction(ContainerAction.Action.CLOSE)
+        .setReason(ContainerAction.Reason.CONTAINER_FULL)
+        .build();
+
+    ContainerActionsProto cap = ContainerActionsProto.newBuilder()
+        .addContainerActions(action)
+        .build();
+
+    ContainerActionsFromDatanode containerActions =
+        new ContainerActionsFromDatanode(
+            TestUtils.randomDatanodeDetails(), cap);
+
+    queue.fireEvent(SCMEvents.CONTAINER_ACTIONS, containerActions);
+
+    verify(closeContainerEventHandler, times(1))
+        .onMessage(ContainerID.valueof(1L), queue);
+
+  }
+
+}
\ No newline at end of file
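
The ContainerActionsHandler class added by this commit is not reproduced in this message; going by the handler registration in StorageContainerManager and the expectations in the test above, a handler of roughly the following shape would satisfy them. This is a minimal sketch only: the accessor and method names are assumptions, not the committed file.

    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerAction;
    import org.apache.hadoop.hdds.scm.container.ContainerID;
    import org.apache.hadoop.hdds.scm.events.SCMEvents;
    import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerActionsFromDatanode;
    import org.apache.hadoop.hdds.server.events.EventHandler;
    import org.apache.hadoop.hdds.server.events.EventPublisher;

    /** Sketch of a CONTAINER_ACTIONS handler (illustrative, not the committed code). */
    public class ContainerActionsHandlerSketch
        implements EventHandler<ContainerActionsFromDatanode> {

      @Override
      public void onMessage(ContainerActionsFromDatanode containerActions,
          EventPublisher publisher) {
        // A heartbeat payload may carry several actions; CLOSE actions are
        // re-published as CLOSE_CONTAINER events so the existing
        // CloseContainerEventHandler processes them, matching the test above.
        for (ContainerAction action :
            containerActions.getReport().getContainerActionsList()) {
          if (action.getAction() == ContainerAction.Action.CLOSE) {
            publisher.fireEvent(SCMEvents.CLOSE_CONTAINER,
                ContainerID.valueof(action.getContainerID()));
          }
        }
      }
    }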




[27/50] [abbrv] hadoop git commit: YARN-8397. Potential thread leak in ActivitiesManager. Contributed by Rohith Sharma K S.

Posted by bo...@apache.org.
YARN-8397. Potential thread leak in ActivitiesManager. Contributed by Rohith Sharma K S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6310c0d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6310c0d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6310c0d1

Branch: refs/heads/YARN-7402
Commit: 6310c0d17d6422a595f856a55b4f1fb82be43739
Parents: 40f9b0c
Author: Sunil G <su...@apache.org>
Authored: Wed Aug 1 08:33:01 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Wed Aug 1 08:33:30 2018 +0530

----------------------------------------------------------------------
 .../scheduler/activities/ActivitiesManager.java | 20 +++++++++++++++-----
 .../scheduler/capacity/CapacityScheduler.java   |  1 +
 2 files changed, 16 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6310c0d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java
index af73ae3..8498c40 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java
@@ -57,6 +57,7 @@ public class ActivitiesManager extends AbstractService {
   private Thread cleanUpThread;
   private int timeThreshold = 600 * 1000;
   private final RMContext rmContext;
+  private volatile boolean stopped;
 
   public ActivitiesManager(RMContext rmContext) {
     super(ActivitiesManager.class.getName());
@@ -113,7 +114,7 @@ public class ActivitiesManager extends AbstractService {
     cleanUpThread = new Thread(new Runnable() {
       @Override
       public void run() {
-        while (true) {
+        while (!stopped && !Thread.currentThread().isInterrupted()) {
           Iterator<Map.Entry<NodeId, List<NodeAllocation>>> ite =
               completedNodeAllocations.entrySet().iterator();
           while (ite.hasNext()) {
@@ -140,20 +141,29 @@ public class ActivitiesManager extends AbstractService {
 
           try {
             Thread.sleep(5000);
-          } catch (Exception e) {
-            // ignore
+          } catch (InterruptedException e) {
+            LOG.info(getName() + " thread interrupted");
+            break;
           }
         }
       }
     });
-
+    cleanUpThread.setName("ActivitiesManager thread.");
     cleanUpThread.start();
     super.serviceStart();
   }
 
   @Override
   protected void serviceStop() throws Exception {
-    cleanUpThread.interrupt();
+    stopped = true;
+    if (cleanUpThread != null) {
+      cleanUpThread.interrupt();
+      try {
+        cleanUpThread.join();
+      } catch (InterruptedException ie) {
+        LOG.warn("Interrupted Exception while stopping", ie);
+      }
+    }
     super.serviceStop();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6310c0d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 37f56de..0b7fe92 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -439,6 +439,7 @@ public class CapacityScheduler extends
   public void serviceStop() throws Exception {
     try {
       writeLock.lock();
+      this.activitiesManager.stop();
       if (scheduleAsynchronously && asyncSchedulerThreads != null) {
         for (Thread t : asyncSchedulerThreads) {
           t.interrupt();
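
The fix is the standard stoppable-worker pattern: a volatile flag consulted in the loop, an interrupt to break out of sleep, and a join from serviceStop so the service does not return while the thread is still alive. A self-contained sketch of that pattern, with illustrative class and field names rather than the ones in the patch:

    import java.util.concurrent.TimeUnit;

    /** Illustrative stoppable cleanup worker mirroring the pattern used above. */
    public class StoppableCleanupWorker {
      private volatile boolean stopped = false;
      private Thread cleanUpThread;

      public void start() {
        cleanUpThread = new Thread(() -> {
          while (!stopped && !Thread.currentThread().isInterrupted()) {
            // ... one round of cleanup work goes here ...
            try {
              TimeUnit.SECONDS.sleep(5);
            } catch (InterruptedException e) {
              // stop() interrupts the thread; leaving the loop ends it promptly.
              break;
            }
          }
        }, "cleanup-worker");
        cleanUpThread.start();
      }

      public void stop() {
        stopped = true;
        if (cleanUpThread != null) {
          cleanUpThread.interrupt();
          try {
            cleanUpThread.join();
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          }
        }
      }
    }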




[08/50] [abbrv] hadoop git commit: HDDS-273. DeleteLog entries should be purged only after corresponding DNs commit the transaction. Contributed by Lokesh Jain.

Posted by bo...@apache.org.
HDDS-273. DeleteLog entries should be purged only after corresponding DNs commit the transaction. Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/feb795b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/feb795b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/feb795b5

Branch: refs/heads/YARN-7402
Commit: feb795b58d2a3c20bdbddea1638a83f6637d3fc9
Parents: 6b038f8
Author: Mukul Kumar Singh <ms...@apache.org>
Authored: Sun Jul 29 01:02:24 2018 +0530
Committer: Mukul Kumar Singh <ms...@apache.org>
Committed: Sun Jul 29 01:02:24 2018 +0530

----------------------------------------------------------------------
 .../DeleteBlocksCommandHandler.java             |  12 +-
 .../StorageContainerDatanodeProtocol.proto      |   4 +-
 .../hadoop/hdds/scm/block/BlockManagerImpl.java |   2 +-
 .../block/DatanodeDeletedBlockTransactions.java |  47 ++--
 .../hadoop/hdds/scm/block/DeletedBlockLog.java  |  23 +-
 .../hdds/scm/block/DeletedBlockLogImpl.java     | 123 ++++++----
 .../scm/server/SCMDatanodeProtocolServer.java   |  19 +-
 .../hdds/scm/block/TestDeletedBlockLog.java     | 232 ++++++++++---------
 8 files changed, 256 insertions(+), 206 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/feb795b5/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
index 9640f93..b0d4cbc 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
@@ -113,8 +113,8 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
         DeleteBlockTransactionResult.Builder txResultBuilder =
             DeleteBlockTransactionResult.newBuilder();
         txResultBuilder.setTxID(entry.getTxID());
+        long containerId = entry.getContainerID();
         try {
-          long containerId = entry.getContainerID();
           Container cont = containerSet.getContainer(containerId);
           if (cont == null) {
             throw new StorageContainerException("Unable to find the container "
@@ -126,7 +126,8 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
             KeyValueContainerData containerData = (KeyValueContainerData)
                 cont.getContainerData();
             deleteKeyValueContainerBlocks(containerData, entry);
-            txResultBuilder.setSuccess(true);
+            txResultBuilder.setContainerID(containerId)
+                .setSuccess(true);
             break;
           default:
             LOG.error(
@@ -136,9 +137,12 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
         } catch (IOException e) {
           LOG.warn("Failed to delete blocks for container={}, TXID={}",
               entry.getContainerID(), entry.getTxID(), e);
-          txResultBuilder.setSuccess(false);
+          txResultBuilder.setContainerID(containerId)
+              .setSuccess(false);
         }
-        resultBuilder.addResults(txResultBuilder.build());
+        resultBuilder.addResults(txResultBuilder.build())
+            .setDnId(context.getParent().getDatanodeDetails()
+                .getUuid().toString());
       });
       ContainerBlocksDeletionACKProto blockDeletionACK = resultBuilder.build();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/feb795b5/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
index d89567b..0c52efb 100644
--- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
+++ b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
@@ -229,9 +229,11 @@ message DeletedBlocksTransaction {
 message ContainerBlocksDeletionACKProto {
   message DeleteBlockTransactionResult {
     required int64 txID = 1;
-    required bool success = 2;
+    required int64 containerID = 2;
+    required bool success = 3;
   }
   repeated DeleteBlockTransactionResult results = 1;
+  required string dnId = 2;
 }
 
 // SendACK response returned by datanode to SCM, currently empty.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/feb795b5/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index 6825ca4..8e1c2cc 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -112,7 +112,7 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean {
     mxBean = MBeans.register("BlockManager", "BlockManagerImpl", this);
 
     // SCM block deleting transaction log and deleting service.
-    deletedBlockLog = new DeletedBlockLogImpl(conf);
+    deletedBlockLog = new DeletedBlockLogImpl(conf, containerManager);
     long svcInterval =
         conf.getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
             OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/feb795b5/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
index d71e7b0..e33a700 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
@@ -53,7 +53,8 @@ public class DatanodeDeletedBlockTransactions {
     this.nodeNum = nodeNum;
   }
 
-  public void addTransaction(DeletedBlocksTransaction tx) throws IOException {
+  public void addTransaction(DeletedBlocksTransaction tx,
+      Set<UUID> dnsWithTransactionCommitted) throws IOException {
     Pipeline pipeline = null;
     try {
       pipeline = mappingService.getContainerWithPipeline(tx.getContainerID())
@@ -71,29 +72,37 @@ public class DatanodeDeletedBlockTransactions {
 
     for (DatanodeDetails dd : pipeline.getMachines()) {
       UUID dnID = dd.getUuid();
-      if (transactions.containsKey(dnID)) {
-        List<DeletedBlocksTransaction> txs = transactions.get(dnID);
-        if (txs != null && txs.size() < maximumAllowedTXNum) {
-          boolean hasContained = false;
-          for (DeletedBlocksTransaction t : txs) {
-            if (t.getContainerID() == tx.getContainerID()) {
-              hasContained = true;
-              break;
-            }
-          }
+      if (dnsWithTransactionCommitted == null ||
+          !dnsWithTransactionCommitted.contains(dnID)) {
+        // Transaction need not be sent to dns which have already committed it
+        addTransactionToDN(dnID, tx);
+      }
+    }
+  }
 
-          if (!hasContained) {
-            txs.add(tx);
-            currentTXNum++;
+  private void addTransactionToDN(UUID dnID, DeletedBlocksTransaction tx) {
+    if (transactions.containsKey(dnID)) {
+      List<DeletedBlocksTransaction> txs = transactions.get(dnID);
+      if (txs != null && txs.size() < maximumAllowedTXNum) {
+        boolean hasContained = false;
+        for (DeletedBlocksTransaction t : txs) {
+          if (t.getContainerID() == tx.getContainerID()) {
+            hasContained = true;
+            break;
           }
         }
-      } else {
-        currentTXNum++;
-        transactions.put(dnID, tx);
+
+        if (!hasContained) {
+          txs.add(tx);
+          currentTXNum++;
+        }
       }
-      SCMBlockDeletingService.LOG.debug("Transaction added: {} <- TX({})", dnID,
-          tx.getTxID());
+    } else {
+      currentTXNum++;
+      transactions.put(dnID, tx);
     }
+    SCMBlockDeletingService.LOG
+        .debug("Transaction added: {} <- TX({})", dnID, tx.getTxID());
   }
 
   Set<UUID> getDatanodeIDs() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/feb795b5/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
index 28103be..2bb5686 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
@@ -18,12 +18,16 @@
 package org.apache.hadoop.hdds.scm.block;
 
 import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto
+    .DeleteBlockTransactionResult;
+import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
 
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
+import java.util.UUID;
 
 /**
  * The DeletedBlockLog is a persisted log in SCM to keep tracking
@@ -34,18 +38,6 @@ import java.util.Map;
 public interface DeletedBlockLog extends Closeable {
 
   /**
-   *  A limit size list of transactions. Note count is the max number
-   *  of TXs to return, we might not be able to always return this
-   *  number. and the processCount of those transactions
-   *  should be [0, MAX_RETRY).
-   *
-   * @param count - number of transactions.
-   * @return a list of BlockDeletionTransaction.
-   */
-  List<DeletedBlocksTransaction> getTransactions(int count)
-      throws IOException;
-
-  /**
    * Scan entire log once and returns TXs to DatanodeDeletedBlockTransactions.
    * Once DatanodeDeletedBlockTransactions is full, the scan behavior will
    * stop.
@@ -81,10 +73,11 @@ public interface DeletedBlockLog extends Closeable {
    * Commits a transaction means to delete all footprints of a transaction
    * from the log. This method doesn't guarantee all transactions can be
    * successfully deleted, it tolerate failures and tries best efforts to.
-   *
-   * @param txIDs - transaction IDs.
+   *  @param transactionResults - delete block transaction results.
+   * @param dnID - ID of datanode which acknowledges the delete block command.
    */
-  void commitTransactions(List<Long> txIDs) throws IOException;
+  void commitTransactions(List<DeleteBlockTransactionResult> transactionResults,
+      UUID dnID);
 
   /**
    * Creates a block deletion transaction and adds that into the log.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/feb795b5/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
index 48fa2eb..752c9c7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
@@ -21,27 +21,36 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 import com.google.common.primitives.Longs;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto
+    .DeleteBlockTransactionResult;
+import org.apache.hadoop.hdds.scm.container.Mapping;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.utils.BatchOperation;
-import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter;
 import org.apache.hadoop.utils.MetadataStore;
 import org.apache.hadoop.utils.MetadataStoreBuilder;
+import org.eclipse.jetty.util.ConcurrentHashSet;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.UUID;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
+import java.util.stream.Collectors;
 
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_BLOCK_DELETION_MAX_RETRY;
@@ -74,12 +83,15 @@ public class DeletedBlockLogImpl implements DeletedBlockLog {
 
   private final int maxRetry;
   private final MetadataStore deletedStore;
+  private final Mapping containerManager;
   private final Lock lock;
   // The latest id of deleted blocks in the db.
   private long lastTxID;
-  private long lastReadTxID;
+  // Maps txId to set of DNs which are successful in committing the transaction
+  private Map<Long, Set<UUID>> transactionToDNsCommitMap;
 
-  public DeletedBlockLogImpl(Configuration conf) throws IOException {
+  public DeletedBlockLogImpl(Configuration conf, Mapping containerManager)
+      throws IOException {
     maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY,
         OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT);
 
@@ -95,11 +107,17 @@ public class DeletedBlockLogImpl implements DeletedBlockLog {
         .setDbFile(deletedLogDbPath)
         .setCacheSize(cacheSize * OzoneConsts.MB)
         .build();
+    this.containerManager = containerManager;
 
     this.lock = new ReentrantLock();
     // start from the head of deleted store.
-    lastReadTxID = 0;
     lastTxID = findLatestTxIDInStore();
+
+    // transactionToDNsCommitMap is updated only when
+    // transaction is added to the log and when it is removed.
+
+    // maps transaction to dns which have committed it.
+    transactionToDNsCommitMap = new ConcurrentHashMap<>();
   }
 
   @VisibleForTesting
@@ -124,39 +142,6 @@ public class DeletedBlockLogImpl implements DeletedBlockLog {
   }
 
   @Override
-  public List<DeletedBlocksTransaction> getTransactions(
-      int count) throws IOException {
-    List<DeletedBlocksTransaction> result = new ArrayList<>();
-    MetadataKeyFilter getNextTxID = (preKey, currentKey, nextKey)
-        -> Longs.fromByteArray(currentKey) > lastReadTxID;
-    MetadataKeyFilter avoidInvalidTxid = (preKey, currentKey, nextKey)
-        -> !Arrays.equals(LATEST_TXID, currentKey);
-    lock.lock();
-    try {
-      deletedStore.iterate(null, (key, value) -> {
-        if (getNextTxID.filterKey(null, key, null) &&
-            avoidInvalidTxid.filterKey(null, key, null)) {
-          DeletedBlocksTransaction block = DeletedBlocksTransaction
-              .parseFrom(value);
-          if (block.getCount() > -1 && block.getCount() <= maxRetry) {
-            result.add(block);
-          }
-        }
-        return result.size() < count;
-      });
-      // Scan the metadata from the beginning.
-      if (result.size() < count || result.size() < 1) {
-        lastReadTxID = 0;
-      } else {
-        lastReadTxID = result.get(result.size() - 1).getTxID();
-      }
-    } finally {
-      lock.unlock();
-    }
-    return result;
-  }
-
-  @Override
   public List<DeletedBlocksTransaction> getFailedTransactions()
       throws IOException {
     lock.lock();
@@ -235,18 +220,50 @@ public class DeletedBlockLogImpl implements DeletedBlockLog {
   /**
    * {@inheritDoc}
    *
-   * @param txIDs - transaction IDs.
+   * @param transactionResults - transaction IDs.
+   * @param dnID - Id of Datanode which has acknowledged a delete block command.
    * @throws IOException
    */
   @Override
-  public void commitTransactions(List<Long> txIDs) throws IOException {
+  public void commitTransactions(
+      List<DeleteBlockTransactionResult> transactionResults, UUID dnID) {
     lock.lock();
     try {
-      for (Long txID : txIDs) {
+      Set<UUID> dnsWithCommittedTxn;
+      for (DeleteBlockTransactionResult transactionResult : transactionResults) {
+        if (isTransactionFailed(transactionResult)) {
+          continue;
+        }
         try {
-          deletedStore.delete(Longs.toByteArray(txID));
-        } catch (IOException ex) {
-          LOG.warn("Cannot commit txID " + txID, ex);
+          long txID = transactionResult.getTxID();
+          // set of dns which have successfully committed transaction txId.
+          dnsWithCommittedTxn = transactionToDNsCommitMap.get(txID);
+          Long containerId = transactionResult.getContainerID();
+          if (dnsWithCommittedTxn == null || containerId == null) {
+            LOG.warn("Transaction txId={} commit by dnId={} failed."
+                + " Corresponding entry not found.", txID, dnID);
+            return;
+          }
+
+          dnsWithCommittedTxn.add(dnID);
+          Collection<DatanodeDetails> containerDnsDetails =
+              containerManager.getContainerWithPipeline(containerId)
+                  .getPipeline().getDatanodes().values();
+          // The delete entry can be safely removed from the log if all the
+          // corresponding nodes commit the txn.
+          if (dnsWithCommittedTxn.size() >= containerDnsDetails.size()) {
+            List<UUID> containerDns = containerDnsDetails.stream()
+                .map(dnDetails -> dnDetails.getUuid())
+                .collect(Collectors.toList());
+            if (dnsWithCommittedTxn.containsAll(containerDns)) {
+              transactionToDNsCommitMap.remove(txID);
+              LOG.debug("Purging txId={} from block deletion log", txID);
+              deletedStore.delete(Longs.toByteArray(txID));
+            }
+          }
+        } catch (IOException e) {
+          LOG.warn("Could not commit delete block transaction: " +
+              transactionResult.getTxID(), e);
         }
       }
     } finally {
@@ -254,6 +271,20 @@ public class DeletedBlockLogImpl implements DeletedBlockLog {
     }
   }
 
+  private boolean isTransactionFailed(DeleteBlockTransactionResult result) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(
+          "Got block deletion ACK from datanode, TXIDs={}, " + "success={}",
+          result.getTxID(), result.getSuccess());
+    }
+    if (!result.getSuccess()) {
+      LOG.warn("Got failed ACK for TXID={}, prepare to resend the "
+          + "TX in next interval", result.getTxID());
+      return true;
+    }
+    return false;
+  }
+
   /**
    * {@inheritDoc}
    *
@@ -355,7 +386,9 @@ public class DeletedBlockLogImpl implements DeletedBlockLog {
               .parseFrom(value);
 
           if (block.getCount() > -1 && block.getCount() <= maxRetry) {
-            transactions.addTransaction(block);
+            Set<UUID> dnsWithTransactionCommitted = transactionToDNsCommitMap
+                .putIfAbsent(block.getTxID(), new ConcurrentHashSet<>());
+            transactions.addTransaction(block, dnsWithTransactionCommitted);
           }
           return !transactions.isFull();
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/feb795b5/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index aee64b9..0d34787 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -91,9 +91,9 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.util.Collections;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.UUID;
 import java.util.stream.Collectors;
 
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY;
@@ -230,21 +230,8 @@ public class SCMDatanodeProtocolServer implements
       ContainerBlocksDeletionACKProto acks) throws IOException {
     if (acks.getResultsCount() > 0) {
       List<DeleteBlockTransactionResult> resultList = acks.getResultsList();
-      for (DeleteBlockTransactionResult result : resultList) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Got block deletion ACK from datanode, TXIDs={}, "
-              + "success={}", result.getTxID(), result.getSuccess());
-        }
-        if (result.getSuccess()) {
-          LOG.debug("Purging TXID={} from block deletion log",
-              result.getTxID());
-          scm.getScmBlockManager().getDeletedBlockLog()
-              .commitTransactions(Collections.singletonList(result.getTxID()));
-        } else {
-          LOG.warn("Got failed ACK for TXID={}, prepare to resend the "
-              + "TX in next interval", result.getTxID());
-        }
-      }
+      scm.getScmBlockManager().getDeletedBlockLog()
+          .commitTransactions(resultList, UUID.fromString(acks.getDnId()));
     }
     return ContainerBlocksDeletionACKResponseProto.newBuilder()
         .getDefaultInstanceForType();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/feb795b5/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
index 9255ec7..e86717b 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
@@ -32,6 +32,9 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto
+    .DeleteBlockTransactionResult;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.utils.MetadataKeyFilters;
 import org.apache.hadoop.utils.MetadataStore;
@@ -45,6 +48,7 @@ import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -56,7 +60,8 @@ import java.util.stream.Collectors;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_BLOCK_DELETION_MAX_RETRY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS;
-import static org.mockito.Mockito.mock;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Mockito.when;
 
 /**
  * Tests for DeletedBlockLog.
@@ -66,6 +71,8 @@ public class TestDeletedBlockLog {
   private static DeletedBlockLogImpl deletedBlockLog;
   private OzoneConfiguration conf;
   private File testDir;
+  private Mapping containerManager;
+  private List<DatanodeDetails> dnList;
 
   @Before
   public void setup() throws Exception {
@@ -74,7 +81,36 @@ public class TestDeletedBlockLog {
     conf = new OzoneConfiguration();
     conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20);
     conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath());
-    deletedBlockLog = new DeletedBlockLogImpl(conf);
+    containerManager = Mockito.mock(ContainerMapping.class);
+    deletedBlockLog = new DeletedBlockLogImpl(conf, containerManager);
+    dnList = new ArrayList<>(3);
+    setupContainerManager();
+  }
+
+  private void setupContainerManager() throws IOException {
+    dnList.add(
+        DatanodeDetails.newBuilder().setUuid(UUID.randomUUID().toString())
+            .build());
+    dnList.add(
+        DatanodeDetails.newBuilder().setUuid(UUID.randomUUID().toString())
+            .build());
+    dnList.add(
+        DatanodeDetails.newBuilder().setUuid(UUID.randomUUID().toString())
+            .build());
+
+    ContainerInfo containerInfo =
+        new ContainerInfo.Builder().setContainerID(1).build();
+    Pipeline pipeline =
+        new Pipeline(null, LifeCycleState.CLOSED, ReplicationType.RATIS,
+            ReplicationFactor.THREE, null);
+    pipeline.addMember(dnList.get(0));
+    pipeline.addMember(dnList.get(1));
+    pipeline.addMember(dnList.get(2));
+    ContainerWithPipeline containerWithPipeline =
+        new ContainerWithPipeline(containerInfo, pipeline);
+    when(containerManager.getContainerWithPipeline(anyLong()))
+        .thenReturn(containerWithPipeline);
+    when(containerManager.getContainer(anyLong())).thenReturn(containerInfo);
   }
 
   @After
@@ -101,45 +137,50 @@ public class TestDeletedBlockLog {
     return blockMap;
   }
 
-  @Test
-  public void testGetTransactions() throws Exception {
-    List<DeletedBlocksTransaction> blocks =
-        deletedBlockLog.getTransactions(30);
-    Assert.assertEquals(0, blocks.size());
-
-    // Creates 40 TX in the log.
-    for (Map.Entry<Long, List<Long>> entry : generateData(40).entrySet()){
-      deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
+  private void commitTransactions(
+      List<DeleteBlockTransactionResult> transactionResults,
+      DatanodeDetails... dns) {
+    for (DatanodeDetails dnDetails : dns) {
+      deletedBlockLog
+          .commitTransactions(transactionResults, dnDetails.getUuid());
     }
+  }
 
-    // Get first 30 TXs.
-    blocks = deletedBlockLog.getTransactions(30);
-    Assert.assertEquals(30, blocks.size());
-    for (int i = 0; i < 30; i++) {
-      Assert.assertEquals(i + 1, blocks.get(i).getTxID());
-    }
+  private void commitTransactions(
+      List<DeleteBlockTransactionResult> transactionResults) {
+    commitTransactions(transactionResults,
+        dnList.toArray(new DatanodeDetails[3]));
+  }
 
-    // Get another 30 TXs.
-    // The log only 10 left, so this time it will only return 10 TXs.
-    blocks = deletedBlockLog.getTransactions(30);
-    Assert.assertEquals(10, blocks.size());
-    for (int i = 30; i < 40; i++) {
-      Assert.assertEquals(i + 1, blocks.get(i - 30).getTxID());
-    }
+  private void commitTransactions(
+      Collection<DeletedBlocksTransaction> deletedBlocksTransactions,
+      DatanodeDetails... dns) {
+    commitTransactions(deletedBlocksTransactions.stream()
+        .map(this::createDeleteBlockTransactionResult)
+        .collect(Collectors.toList()), dns);
+  }
 
-    // Get another 50 TXs.
-    // By now the position should have moved to the beginning,
-    // this call will return all 40 TXs.
-    blocks = deletedBlockLog.getTransactions(50);
-    Assert.assertEquals(40, blocks.size());
-    for (int i = 0; i < 40; i++) {
-      Assert.assertEquals(i + 1, blocks.get(i).getTxID());
-    }
-    List<Long> txIDs = new ArrayList<>();
-    for (DeletedBlocksTransaction block : blocks) {
-      txIDs.add(block.getTxID());
-    }
-    deletedBlockLog.commitTransactions(txIDs);
+  private void commitTransactions(
+      Collection<DeletedBlocksTransaction> deletedBlocksTransactions) {
+    commitTransactions(deletedBlocksTransactions.stream()
+        .map(this::createDeleteBlockTransactionResult)
+        .collect(Collectors.toList()));
+  }
+
+  private DeleteBlockTransactionResult createDeleteBlockTransactionResult(
+      DeletedBlocksTransaction transaction) {
+    return DeleteBlockTransactionResult.newBuilder()
+        .setContainerID(transaction.getContainerID()).setSuccess(true)
+        .setTxID(transaction.getTxID()).build();
+  }
+
+  private List<DeletedBlocksTransaction> getTransactions(
+      int maximumAllowedTXNum) throws IOException {
+    DatanodeDeletedBlockTransactions transactions =
+        new DatanodeDeletedBlockTransactions(containerManager,
+            maximumAllowedTXNum, 3);
+    deletedBlockLog.getTransactions(transactions);
+    return transactions.getDatanodeTransactions(dnList.get(0).getUuid());
   }
 
   @Test
@@ -153,7 +194,7 @@ public class TestDeletedBlockLog {
 
     // This will return all TXs, total num 30.
     List<DeletedBlocksTransaction> blocks =
-        deletedBlockLog.getTransactions(40);
+        getTransactions(40);
     List<Long> txIDs = blocks.stream().map(DeletedBlocksTransaction::getTxID)
         .collect(Collectors.toList());
 
@@ -164,13 +205,13 @@ public class TestDeletedBlockLog {
     // Increment another time so it exceed the maxRetry.
     // On this call, count will be set to -1 which means TX eventually fails.
     deletedBlockLog.incrementCount(txIDs);
-    blocks = deletedBlockLog.getTransactions(40);
+    blocks = getTransactions(40);
     for (DeletedBlocksTransaction block : blocks) {
       Assert.assertEquals(-1, block.getCount());
     }
 
     // If all TXs are failed, getTransactions call will always return nothing.
-    blocks = deletedBlockLog.getTransactions(40);
+    blocks = getTransactions(40);
     Assert.assertEquals(blocks.size(), 0);
   }
 
@@ -180,16 +221,26 @@ public class TestDeletedBlockLog {
       deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
     }
     List<DeletedBlocksTransaction> blocks =
-        deletedBlockLog.getTransactions(20);
-    List<Long> txIDs = new ArrayList<>();
-    for (DeletedBlocksTransaction block : blocks) {
-      txIDs.add(block.getTxID());
-    }
-    // Add an invalid txID.
-    txIDs.add(70L);
-    deletedBlockLog.commitTransactions(txIDs);
-    blocks = deletedBlockLog.getTransactions(50);
+        getTransactions(20);
+    // Add an invalid txn.
+    blocks.add(
+        DeletedBlocksTransaction.newBuilder().setContainerID(1).setTxID(70)
+            .setCount(0).addLocalID(0).build());
+    commitTransactions(blocks);
+    blocks.remove(blocks.size() - 1);
+
+    blocks = getTransactions(50);
+    Assert.assertEquals(30, blocks.size());
+    commitTransactions(blocks, dnList.get(1), dnList.get(2),
+        DatanodeDetails.newBuilder().setUuid(UUID.randomUUID().toString())
+            .build());
+
+    blocks = getTransactions(50);
     Assert.assertEquals(30, blocks.size());
+    commitTransactions(blocks, dnList.get(0));
+
+    blocks = getTransactions(50);
+    Assert.assertEquals(0, blocks.size());
   }
 
   @Test
@@ -213,20 +264,16 @@ public class TestDeletedBlockLog {
         }
         added += 10;
       } else if (state == 1) {
-        blocks = deletedBlockLog.getTransactions(20);
+        blocks = getTransactions(20);
         txIDs = new ArrayList<>();
         for (DeletedBlocksTransaction block : blocks) {
           txIDs.add(block.getTxID());
         }
         deletedBlockLog.incrementCount(txIDs);
       } else if (state == 2) {
-        txIDs = new ArrayList<>();
-        for (DeletedBlocksTransaction block : blocks) {
-          txIDs.add(block.getTxID());
-        }
+        commitTransactions(blocks);
+        committed += blocks.size();
         blocks = new ArrayList<>();
-        committed += txIDs.size();
-        deletedBlockLog.commitTransactions(txIDs);
       } else {
         // verify the number of added and committed.
         List<Map.Entry<byte[], byte[]>> result =
@@ -234,6 +281,8 @@ public class TestDeletedBlockLog {
         Assert.assertEquals(added, result.size() + committed);
       }
     }
+    blocks = getTransactions(1000);
+    commitTransactions(blocks);
   }
 
   @Test
@@ -244,16 +293,13 @@ public class TestDeletedBlockLog {
     // close db and reopen it again to make sure
     // transactions are stored persistently.
     deletedBlockLog.close();
-    deletedBlockLog = new DeletedBlockLogImpl(conf);
+    deletedBlockLog = new DeletedBlockLogImpl(conf, containerManager);
     List<DeletedBlocksTransaction> blocks =
-        deletedBlockLog.getTransactions(10);
-    List<Long> txIDs = new ArrayList<>();
-    for (DeletedBlocksTransaction block : blocks) {
-      txIDs.add(block.getTxID());
-    }
-    deletedBlockLog.commitTransactions(txIDs);
-    blocks = deletedBlockLog.getTransactions(10);
-    Assert.assertEquals(10, blocks.size());
+        getTransactions(10);
+    commitTransactions(blocks);
+    blocks = getTransactions(100);
+    Assert.assertEquals(40, blocks.size());
+    commitTransactions(blocks);
   }
 
   @Test
@@ -262,32 +308,11 @@ public class TestDeletedBlockLog {
     int maximumAllowedTXNum = 5;
     List<DeletedBlocksTransaction> blocks = null;
     List<Long> containerIDs = new LinkedList<>();
+    DatanodeDetails dnId1 = dnList.get(0), dnId2 = dnList.get(1);
 
     int count = 0;
     long containerID = 0L;
-    DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.STANDALONE, 0);
-    DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.RATIS, 0);
-    DatanodeDetails.Port restPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.REST, 0);
-    DatanodeDetails dnId1 = DatanodeDetails.newBuilder()
-        .setUuid(UUID.randomUUID().toString())
-        .setIpAddress("127.0.0.1")
-        .setHostName("localhost")
-        .addPort(containerPort)
-        .addPort(ratisPort)
-        .addPort(restPort)
-        .build();
-    DatanodeDetails dnId2 = DatanodeDetails.newBuilder()
-        .setUuid(UUID.randomUUID().toString())
-        .setIpAddress("127.0.0.1")
-        .setHostName("localhost")
-        .addPort(containerPort)
-        .addPort(ratisPort)
-        .addPort(restPort)
-        .build();
-    Mapping mappingService = mock(ContainerMapping.class);
+
     // Creates {TXNum} TX in the log.
     for (Map.Entry<Long, List<Long>> entry : generateData(txNum)
         .entrySet()) {
@@ -298,29 +323,25 @@ public class TestDeletedBlockLog {
 
       // make TX[1-6] for datanode1; TX[7-10] for datanode2
       if (count <= (maximumAllowedTXNum + 1)) {
-        mockContainerInfo(mappingService, containerID, dnId1);
+        mockContainerInfo(containerID, dnId1);
       } else {
-        mockContainerInfo(mappingService, containerID, dnId2);
+        mockContainerInfo(containerID, dnId2);
       }
     }
 
     DatanodeDeletedBlockTransactions transactions =
-        new DatanodeDeletedBlockTransactions(mappingService,
+        new DatanodeDeletedBlockTransactions(containerManager,
             maximumAllowedTXNum, 2);
     deletedBlockLog.getTransactions(transactions);
 
-    List<Long> txIDs = new LinkedList<>();
     for (UUID id : transactions.getDatanodeIDs()) {
       List<DeletedBlocksTransaction> txs = transactions
           .getDatanodeTransactions(id);
-      for (DeletedBlocksTransaction tx : txs) {
-        txIDs.add(tx.getTxID());
-      }
+      // delete TX ID
+      commitTransactions(txs);
     }
 
-    // delete TX ID
-    deletedBlockLog.commitTransactions(txIDs);
-    blocks = deletedBlockLog.getTransactions(txNum);
+    blocks = getTransactions(txNum);
     // There should be one block remained since dnID1 reaches
     // the maximum value (5).
     Assert.assertEquals(1, blocks.size());
@@ -337,7 +358,8 @@ public class TestDeletedBlockLog {
     builder.setTxID(11);
     builder.setContainerID(containerID);
     builder.setCount(0);
-    transactions.addTransaction(builder.build());
+    transactions.addTransaction(builder.build(),
+        null);
 
     // The number of TX in dnID2 should not be changed.
     Assert.assertEquals(size,
@@ -349,14 +371,14 @@ public class TestDeletedBlockLog {
     builder.setTxID(12);
     builder.setContainerID(containerID);
     builder.setCount(0);
-    mockContainerInfo(mappingService, containerID, dnId2);
-    transactions.addTransaction(builder.build());
+    mockContainerInfo(containerID, dnId2);
+    transactions.addTransaction(builder.build(),
+        null);
     // Since all node are full, then transactions is full.
     Assert.assertTrue(transactions.isFull());
   }
 
-  private void mockContainerInfo(Mapping mappingService, long containerID,
-      DatanodeDetails dd) throws IOException {
+  private void mockContainerInfo(long containerID, DatanodeDetails dd) throws IOException {
     Pipeline pipeline =
         new Pipeline("fake", LifeCycleState.OPEN,
             ReplicationType.STAND_ALONE, ReplicationFactor.ONE, "fake");
@@ -370,9 +392,9 @@ public class TestDeletedBlockLog {
     ContainerInfo containerInfo = builder.build();
     ContainerWithPipeline containerWithPipeline = new ContainerWithPipeline(
         containerInfo, pipeline);
-    Mockito.doReturn(containerInfo).when(mappingService)
+    Mockito.doReturn(containerInfo).when(containerManager)
         .getContainer(containerID);
-    Mockito.doReturn(containerWithPipeline).when(mappingService)
+    Mockito.doReturn(containerWithPipeline).when(containerManager)
         .getContainerWithPipeline(containerID);
   }
 }
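
The essence of the change is that a delete transaction is purged from the log only once every datanode in the container's pipeline has acknowledged it, tracked in an in-memory map keyed by transaction ID (transactionToDNsCommitMap above). A simplified, standalone sketch of that bookkeeping, with illustrative class and method names:

    import java.util.Map;
    import java.util.Set;
    import java.util.UUID;
    import java.util.concurrent.ConcurrentHashMap;

    /** Illustrative "purge only after all replicas commit" bookkeeping. */
    public class TxnCommitTracker {
      // txID -> datanodes that have acknowledged the delete transaction so far.
      private final Map<Long, Set<UUID>> committed = new ConcurrentHashMap<>();

      /** Start tracking a transaction when it is handed out to datanodes. */
      public void track(long txID) {
        committed.putIfAbsent(txID, ConcurrentHashMap.newKeySet());
      }

      /**
       * Record an ACK from one datanode; returns true once every datanode in
       * the container's pipeline has committed, i.e. the log entry may now be
       * deleted safely.
       */
      public boolean ackAndCheckPurgeable(long txID, UUID dnId,
          Set<UUID> pipelineDns) {
        Set<UUID> dns = committed.get(txID);
        if (dns == null) {
          return false; // unknown or already purged transaction
        }
        dns.add(dnId);
        if (dns.containsAll(pipelineDns)) {
          committed.remove(txID);
          return true;
        }
        return false;
      }
    }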




[43/50] [abbrv] hadoop git commit: YARN-8263. DockerClient still touches hadoop.tmp.dir. Contributed by Craig Condit

Posted by bo...@apache.org.
YARN-8263. DockerClient still touches hadoop.tmp.dir. Contributed by Craig Condit


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7526815e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7526815e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7526815e

Branch: refs/heads/YARN-7402
Commit: 7526815e3234ca352854ecfb142a13f1a188d5bd
Parents: 5033d7d
Author: Jason Lowe <jl...@apache.org>
Authored: Thu Aug 2 10:43:48 2018 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Thu Aug 2 10:43:48 2018 -0500

----------------------------------------------------------------------
 .../nodemanager/LinuxContainerExecutor.java     |  6 +--
 .../runtime/DockerLinuxContainerRuntime.java    | 17 +++----
 .../linux/runtime/docker/DockerClient.java      | 53 --------------------
 .../linux/runtime/docker/DockerCommand.java     |  6 +--
 .../runtime/docker/DockerCommandExecutor.java   | 15 ++----
 .../runtime/docker/DockerInspectCommand.java    |  3 +-
 .../linux/runtime/docker/DockerRmCommand.java   |  3 +-
 .../linux/runtime/docker/TestDockerClient.java  |  2 +-
 .../docker/TestDockerCommandExecutor.java       | 20 ++++----
 9 files changed, 30 insertions(+), 95 deletions(-)
----------------------------------------------------------------------
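
The net effect visible in the hunks below is that the Configuration object, previously needed only to resolve hadoop.tmp.dir, disappears from the DockerClient constructor and from the DockerCommandExecutor call chain, so the command-file location is no longer derived from that global setting. A minimal sketch of writing such a command file into an explicitly provided directory; how the committed patch actually chooses the directory is not asserted here, and the class and method names are illustrative only.

    import java.io.File;
    import java.io.IOException;
    import java.io.PrintWriter;
    import java.nio.charset.StandardCharsets;
    import java.util.List;
    import java.util.Map;

    /** Illustrative: write a docker command file under a caller-chosen directory. */
    public final class DockerCommandFileWriter {
      private DockerCommandFileWriter() {
      }

      public static String writeCommandFile(Map<String, List<String>> command,
          File targetDir) throws IOException {
        // The caller decides where the file lives (for example a
        // per-container directory), so no hadoop.tmp.dir lookup is needed.
        File cmdFile = File.createTempFile("docker.", ".cmd", targetDir);
        try (PrintWriter out =
            new PrintWriter(cmdFile, StandardCharsets.UTF_8.name())) {
          out.println("[docker-command-execution]");
          for (Map.Entry<String, List<String>> e : command.entrySet()) {
            out.println("  " + e.getKey() + "=" + String.join(",", e.getValue()));
          }
        }
        return cmdFile.getAbsolutePath();
      }
    }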


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7526815e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 4253f2f..f75ead2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -944,12 +944,12 @@ public class LinuxContainerExecutor extends ContainerExecutor {
       PrivilegedOperationExecutor privOpExecutor =
           PrivilegedOperationExecutor.getInstance(super.getConf());
       if (DockerCommandExecutor.isRemovable(
-          DockerCommandExecutor.getContainerStatus(containerId,
-              super.getConf(), privOpExecutor, nmContext))) {
+          DockerCommandExecutor.getContainerStatus(containerId, privOpExecutor,
+              nmContext))) {
         LOG.info("Removing Docker container : " + containerId);
         DockerRmCommand dockerRmCommand = new DockerRmCommand(containerId);
         DockerCommandExecutor.executeDockerCommand(dockerRmCommand, containerId,
-            null, super.getConf(), privOpExecutor, false, nmContext);
+            null, privOpExecutor, false, nmContext);
       }
     } catch (ContainerExecutionException e) {
       LOG.warn("Unable to remove docker container: " + containerId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7526815e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 88e6c91..5d6f61e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -298,7 +298,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
       throws ContainerExecutionException {
     this.nmContext = nmContext;
     this.conf = conf;
-    dockerClient = new DockerClient(conf);
+    dockerClient = new DockerClient();
     allowedNetworks.clear();
     defaultROMounts.clear();
     defaultRWMounts.clear();
@@ -973,7 +973,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
     String containerIdStr = containerId.toString();
     // Check to see if the container already exists for relaunch
     DockerCommandExecutor.DockerContainerStatus containerStatus =
-        DockerCommandExecutor.getContainerStatus(containerIdStr, conf,
+        DockerCommandExecutor.getContainerStatus(containerIdStr,
             privilegedOperationExecutor, nmContext);
     if (containerStatus != null &&
         DockerCommandExecutor.isStartable(containerStatus)) {
@@ -1219,13 +1219,13 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
   private void handleContainerStop(String containerId, Map<String, String> env)
       throws ContainerExecutionException {
     DockerCommandExecutor.DockerContainerStatus containerStatus =
-        DockerCommandExecutor.getContainerStatus(containerId, conf,
+        DockerCommandExecutor.getContainerStatus(containerId,
             privilegedOperationExecutor, nmContext);
     if (DockerCommandExecutor.isStoppable(containerStatus)) {
       DockerStopCommand dockerStopCommand = new DockerStopCommand(
           containerId).setGracePeriod(dockerStopGracePeriod);
       DockerCommandExecutor.executeDockerCommand(dockerStopCommand, containerId,
-          env, conf, privilegedOperationExecutor, false, nmContext);
+          env, privilegedOperationExecutor, false, nmContext);
     } else {
       if (LOG.isDebugEnabled()) {
         LOG.debug(
@@ -1247,14 +1247,13 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
     if (isContainerRequestedAsPrivileged(container)) {
       String containerId = container.getContainerId().toString();
       DockerCommandExecutor.DockerContainerStatus containerStatus =
-          DockerCommandExecutor.getContainerStatus(containerId, conf,
+          DockerCommandExecutor.getContainerStatus(containerId,
           privilegedOperationExecutor, nmContext);
       if (DockerCommandExecutor.isKillable(containerStatus)) {
         DockerKillCommand dockerKillCommand =
             new DockerKillCommand(containerId).setSignal(signal.name());
         DockerCommandExecutor.executeDockerCommand(dockerKillCommand,
-            containerId, env, conf, privilegedOperationExecutor, false,
-            nmContext);
+            containerId, env, privilegedOperationExecutor, false, nmContext);
       } else {
         LOG.debug(
             "Container status is {}, skipping kill - {}",
@@ -1292,12 +1291,12 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
           + containerId);
     } else {
       DockerCommandExecutor.DockerContainerStatus containerStatus =
-          DockerCommandExecutor.getContainerStatus(containerId, conf,
+          DockerCommandExecutor.getContainerStatus(containerId,
               privilegedOperationExecutor, nmContext);
       if (DockerCommandExecutor.isRemovable(containerStatus)) {
         DockerRmCommand dockerRmCommand = new DockerRmCommand(containerId);
         DockerCommandExecutor.executeDockerCommand(dockerRmCommand, containerId,
-            env, conf, privilegedOperationExecutor, false, nmContext);
+            env, privilegedOperationExecutor, false, nmContext);
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7526815e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java
index fca707c..3a516c4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerClient.java
@@ -22,7 +22,6 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -50,58 +49,6 @@ public final class DockerClient {
   private static final String TMP_FILE_PREFIX = "docker.";
   private static final String TMP_FILE_SUFFIX = ".cmd";
   private static final String TMP_ENV_FILE_SUFFIX = ".env";
-  private final String tmpDirPath;
-
-  public DockerClient(Configuration conf) throws ContainerExecutionException {
-
-    String tmpDirBase = conf.get("hadoop.tmp.dir");
-    if (tmpDirBase == null) {
-      throw new ContainerExecutionException("hadoop.tmp.dir not set!");
-    }
-    tmpDirPath = tmpDirBase + "/nm-docker-cmds";
-
-    File tmpDir = new File(tmpDirPath);
-    if (!(tmpDir.exists() || tmpDir.mkdirs())) {
-      LOG.warn("Unable to create directory: " + tmpDirPath);
-      throw new ContainerExecutionException("Unable to create directory: " +
-          tmpDirPath);
-    }
-  }
-
-  public String writeCommandToTempFile(DockerCommand cmd, String filePrefix)
-      throws ContainerExecutionException {
-    try {
-      File dockerCommandFile = File.createTempFile(TMP_FILE_PREFIX + filePrefix,
-        TMP_FILE_SUFFIX, new
-        File(tmpDirPath));
-      try (
-        Writer writer = new OutputStreamWriter(
-            new FileOutputStream(dockerCommandFile), "UTF-8");
-        PrintWriter printWriter = new PrintWriter(writer);
-      ) {
-        printWriter.println("[docker-command-execution]");
-        for (Map.Entry<String, List<String>> entry :
-            cmd.getDockerCommandWithArguments().entrySet()) {
-          if (entry.getKey().contains("=")) {
-            throw new ContainerExecutionException(
-                "'=' found in entry for docker command file, key = " + entry
-                    .getKey() + "; value = " + entry.getValue());
-          }
-          if (entry.getValue().contains("\n")) {
-            throw new ContainerExecutionException(
-                "'\\n' found in entry for docker command file, key = " + entry
-                    .getKey() + "; value = " + entry.getValue());
-          }
-          printWriter.println("  " + entry.getKey() + "=" + StringUtils
-              .join(",", entry.getValue()));
-        }
-        return dockerCommandFile.getAbsolutePath();
-      }
-    } catch (IOException e) {
-      LOG.warn("Unable to write docker command to temporary file!");
-      throw new ContainerExecutionException(e);
-    }
-  }
 
   private String writeEnvFile(DockerRunCommand cmd, String filePrefix,
       File cmdDir) throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7526815e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommand.java
index 366457d..260c5b5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommand.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommand.java
@@ -22,7 +22,6 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
@@ -117,16 +116,15 @@ public abstract class DockerCommand {
    * @param dockerCommand Specific command to be run by docker.
    * @param containerName
    * @param env
-   * @param conf
    * @param nmContext
    * @return Returns the PrivilegedOperation object to be used.
    * @throws ContainerExecutionException
    */
   public PrivilegedOperation preparePrivilegedOperation(
       DockerCommand dockerCommand, String containerName, Map<String,
-      String> env, Configuration conf, Context nmContext)
+      String> env, Context nmContext)
       throws ContainerExecutionException {
-    DockerClient dockerClient = new DockerClient(conf);
+    DockerClient dockerClient = new DockerClient();
     String commandFile =
         dockerClient.writeCommandToTempFile(dockerCommand,
         ContainerId.fromString(containerName),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7526815e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommandExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommandExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommandExecutor.java
index 8a4888c..7b6497c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommandExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerCommandExecutor.java
@@ -16,7 +16,6 @@
  */
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException;
@@ -68,19 +67,18 @@ public final class DockerCommandExecutor {
    * @param dockerCommand               the docker command to run.
    * @param containerId                 the id of the container.
    * @param env                         environment for the container.
-   * @param conf                        the hadoop configuration.
    * @param privilegedOperationExecutor the privileged operations executor.
    * @param disableFailureLogging       disable logging for known rc failures.
    * @return the output of the operation.
    * @throws ContainerExecutionException if the operation fails.
    */
   public static String executeDockerCommand(DockerCommand dockerCommand,
-      String containerId, Map<String, String> env, Configuration conf,
+      String containerId, Map<String, String> env,
       PrivilegedOperationExecutor privilegedOperationExecutor,
       boolean disableFailureLogging, Context nmContext)
       throws ContainerExecutionException {
     PrivilegedOperation dockerOp = dockerCommand.preparePrivilegedOperation(
-        dockerCommand, containerId, env, conf, nmContext);
+        dockerCommand, containerId, env, nmContext);
 
     if (disableFailureLogging) {
       dockerOp.disableFailureLogging();
@@ -108,18 +106,16 @@ public final class DockerCommandExecutor {
    * an exception and the nonexistent status is returned.
    *
    * @param containerId                 the id of the container.
-   * @param conf                        the hadoop configuration.
    * @param privilegedOperationExecutor the privileged operations executor.
    * @return a {@link DockerContainerStatus} representing the current status.
    */
   public static DockerContainerStatus getContainerStatus(String containerId,
-      Configuration conf,
       PrivilegedOperationExecutor privilegedOperationExecutor,
       Context nmContext) {
     try {
       DockerContainerStatus dockerContainerStatus;
       String currentContainerStatus =
-          executeStatusCommand(containerId, conf,
+          executeStatusCommand(containerId,
           privilegedOperationExecutor, nmContext);
       if (currentContainerStatus == null) {
         dockerContainerStatus = DockerContainerStatus.UNKNOWN;
@@ -170,13 +166,11 @@ public final class DockerCommandExecutor {
    * status.
    *
    * @param containerId                 the id of the container.
-   * @param conf                        the hadoop configuration.
    * @param privilegedOperationExecutor the privileged operations executor.
    * @return the current container status.
    * @throws ContainerExecutionException if the docker operation fails to run.
    */
   private static String executeStatusCommand(String containerId,
-      Configuration conf,
       PrivilegedOperationExecutor privilegedOperationExecutor,
       Context nmContext)
       throws ContainerExecutionException {
@@ -184,8 +178,7 @@ public final class DockerCommandExecutor {
         new DockerInspectCommand(containerId).getContainerStatus();
     try {
       return DockerCommandExecutor.executeDockerCommand(dockerInspectCommand,
-          containerId, null, conf, privilegedOperationExecutor, true,
-          nmContext);
+          containerId, null, privilegedOperationExecutor, true, nmContext);
     } catch (ContainerExecutionException e) {
       throw new ContainerExecutionException(e);
     }
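
For reference, a minimal caller-side sketch of the trimmed signatures above (no Configuration argument). The helper class name and the way the dependencies are passed in are illustrative only, not code from this patch:

    import java.util.Map;

    import org.apache.hadoop.yarn.server.nodemanager.Context;
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerCommandExecutor;
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker.DockerStopCommand;
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;

    /** Illustrative helper: stop a running container with the conf-free API. */
    final class DockerStopSketch {
      static void stopIfRunning(String containerId, Map<String, String> env,
          PrivilegedOperationExecutor executor, Context nmContext)
          throws ContainerExecutionException {
        // The status lookup no longer takes a Configuration.
        DockerCommandExecutor.DockerContainerStatus status =
            DockerCommandExecutor.getContainerStatus(containerId, executor,
                nmContext);
        if (DockerCommandExecutor.isStoppable(status)) {
          // executeDockerCommand likewise dropped the Configuration parameter.
          DockerCommandExecutor.executeDockerCommand(
              new DockerStopCommand(containerId), containerId, env, executor,
              false, nmContext);
        }
      }
    }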

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7526815e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerInspectCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerInspectCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerInspectCommand.java
index 3ed9c18..e946161 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerInspectCommand.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerInspectCommand.java
@@ -20,7 +20,6 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
 
@@ -58,7 +57,7 @@ public class DockerInspectCommand extends DockerCommand {
   @Override
   public PrivilegedOperation preparePrivilegedOperation(
       DockerCommand dockerCommand, String containerName, Map<String,
-      String> env, Configuration conf, Context nmContext) {
+      String> env, Context nmContext) {
     PrivilegedOperation dockerOp = new PrivilegedOperation(
         PrivilegedOperation.OperationType.INSPECT_DOCKER_CONTAINER);
     dockerOp.appendArgs(commandArguments, containerName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7526815e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRmCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRmCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRmCommand.java
index 3a02982..490cf9e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRmCommand.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRmCommand.java
@@ -16,7 +16,6 @@
  */
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
 
@@ -37,7 +36,7 @@ public class DockerRmCommand extends DockerCommand {
   @Override
   public PrivilegedOperation preparePrivilegedOperation(
       DockerCommand dockerCommand, String containerName, Map<String,
-      String> env, Configuration conf, Context nmContext) {
+      String> env, Context nmContext) {
     PrivilegedOperation dockerOp = new PrivilegedOperation(
         PrivilegedOperation.OperationType.REMOVE_DOCKER_CONTAINER);
     dockerOp.appendArgs(containerName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7526815e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerClient.java
index efd7db5..31645bc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerClient.java
@@ -68,7 +68,7 @@ public class TestDockerClient {
     doReturn(conf).when(mockContext).getConf();
     doReturn(dirsHandler).when(mockContext).getLocalDirsHandler();
 
-    DockerClient dockerClient = new DockerClient(conf);
+    DockerClient dockerClient = new DockerClient();
     dirsHandler.init(conf);
     dirsHandler.start();
     String tmpPath = dockerClient.writeCommandToTempFile(dockerCmd, cid,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7526815e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
index 50d00bb..46415c1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java
@@ -138,7 +138,7 @@ public class TestDockerCommandExecutor {
     DockerStopCommand dockerStopCommand =
         new DockerStopCommand(MOCK_CONTAINER_ID);
     DockerCommandExecutor.executeDockerCommand(dockerStopCommand,
-        cId.toString(), env, configuration, mockExecutor, false, nmContext);
+        cId.toString(), env, mockExecutor, false, nmContext);
     List<PrivilegedOperation> ops = MockPrivilegedOperationCaptor
         .capturePrivilegedOperations(mockExecutor, 1, true);
     assertEquals(1, ops.size());
@@ -150,7 +150,7 @@ public class TestDockerCommandExecutor {
   public void testExecuteDockerRm() throws Exception {
     DockerRmCommand dockerCommand = new DockerRmCommand(MOCK_CONTAINER_ID);
     DockerCommandExecutor.executeDockerCommand(dockerCommand, MOCK_CONTAINER_ID,
-        env, configuration, mockExecutor, false, nmContext);
+        env, mockExecutor, false, nmContext);
     List<PrivilegedOperation> ops = MockPrivilegedOperationCaptor
         .capturePrivilegedOperations(mockExecutor, 1, true);
     PrivilegedOperation privOp = ops.get(0);
@@ -167,7 +167,7 @@ public class TestDockerCommandExecutor {
   public void testExecuteDockerStop() throws Exception {
     DockerStopCommand dockerCommand = new DockerStopCommand(MOCK_CONTAINER_ID);
     DockerCommandExecutor.executeDockerCommand(dockerCommand, MOCK_CONTAINER_ID,
-        env, configuration, mockExecutor, false, nmContext);
+        env, mockExecutor, false, nmContext);
     List<PrivilegedOperation> ops = MockPrivilegedOperationCaptor
         .capturePrivilegedOperations(mockExecutor, 1, true);
     List<String> dockerCommands = getValidatedDockerCommands(ops);
@@ -185,7 +185,7 @@ public class TestDockerCommandExecutor {
     DockerInspectCommand dockerCommand =
         new DockerInspectCommand(MOCK_CONTAINER_ID).getContainerStatus();
     DockerCommandExecutor.executeDockerCommand(dockerCommand, MOCK_CONTAINER_ID,
-        env, configuration, mockExecutor, false, nmContext);
+        env, mockExecutor, false, nmContext);
     List<PrivilegedOperation> ops = MockPrivilegedOperationCaptor
         .capturePrivilegedOperations(mockExecutor, 1, true);
     PrivilegedOperation privOp = ops.get(0);
@@ -204,7 +204,7 @@ public class TestDockerCommandExecutor {
     DockerPullCommand dockerCommand =
         new DockerPullCommand(MOCK_IMAGE_NAME);
     DockerCommandExecutor.executeDockerCommand(dockerCommand, MOCK_CONTAINER_ID,
-        env, configuration, mockExecutor, false, nmContext);
+        env, mockExecutor, false, nmContext);
     List<PrivilegedOperation> ops = MockPrivilegedOperationCaptor
         .capturePrivilegedOperations(mockExecutor, 1, true);
     List<String> dockerCommands = getValidatedDockerCommands(ops);
@@ -222,7 +222,7 @@ public class TestDockerCommandExecutor {
     DockerLoadCommand dockerCommand =
         new DockerLoadCommand(MOCK_LOCAL_IMAGE_NAME);
     DockerCommandExecutor.executeDockerCommand(dockerCommand, MOCK_CONTAINER_ID,
-        env, configuration, mockExecutor, false, nmContext);
+        env, mockExecutor, false, nmContext);
     List<PrivilegedOperation> ops = MockPrivilegedOperationCaptor
         .capturePrivilegedOperations(mockExecutor, 1, true);
     List<String> dockerCommands = getValidatedDockerCommands(ops);
@@ -244,7 +244,7 @@ public class TestDockerCommandExecutor {
           any(PrivilegedOperation.class), eq(null), any(), eq(true), eq(false)))
           .thenReturn(status.getName());
       assertEquals(status, DockerCommandExecutor.getContainerStatus(
-          MOCK_CONTAINER_ID, configuration, mockExecutor, nmContext));
+          MOCK_CONTAINER_ID, mockExecutor, nmContext));
     }
   }
 
@@ -254,7 +254,7 @@ public class TestDockerCommandExecutor {
         new DockerKillCommand(MOCK_CONTAINER_ID)
             .setSignal(ContainerExecutor.Signal.QUIT.name());
     DockerCommandExecutor.executeDockerCommand(dockerKillCommand,
-        MOCK_CONTAINER_ID, env, configuration, mockExecutor, false, nmContext);
+        MOCK_CONTAINER_ID, env, mockExecutor, false, nmContext);
     List<PrivilegedOperation> ops = MockPrivilegedOperationCaptor
         .capturePrivilegedOperations(mockExecutor, 1, true);
     List<String> dockerCommands = getValidatedDockerCommands(ops);
@@ -275,7 +275,7 @@ public class TestDockerCommandExecutor {
         new DockerKillCommand(MOCK_CONTAINER_ID)
             .setSignal(ContainerExecutor.Signal.KILL.name());
     DockerCommandExecutor.executeDockerCommand(dockerKillCommand,
-        MOCK_CONTAINER_ID, env, configuration, mockExecutor, false, nmContext);
+        MOCK_CONTAINER_ID, env, mockExecutor, false, nmContext);
     List<PrivilegedOperation> ops = MockPrivilegedOperationCaptor
         .capturePrivilegedOperations(mockExecutor, 1, true);
     List<String> dockerCommands = getValidatedDockerCommands(ops);
@@ -296,7 +296,7 @@ public class TestDockerCommandExecutor {
         new DockerKillCommand(MOCK_CONTAINER_ID)
             .setSignal(ContainerExecutor.Signal.TERM.name());
     DockerCommandExecutor.executeDockerCommand(dockerKillCommand,
-        MOCK_CONTAINER_ID, env, configuration, mockExecutor, false, nmContext);
+        MOCK_CONTAINER_ID, env, mockExecutor, false, nmContext);
     List<PrivilegedOperation> ops = MockPrivilegedOperationCaptor
         .capturePrivilegedOperations(mockExecutor, 1, true);
     List<String> dockerCommands = getValidatedDockerCommands(ops);




[50/50] [abbrv] hadoop git commit: YARN-7707. [GPG] Policy generator framework. Contributed by Young Chen

Posted by bo...@apache.org.
YARN-7707. [GPG] Policy generator framework. Contributed by Young Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6800cf70
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6800cf70
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6800cf70

Branch: refs/heads/YARN-7402
Commit: 6800cf7015d81cc0085ad0f9159e246842e72187
Parents: f833e1b
Author: Botong Huang <bo...@apache.org>
Authored: Fri Mar 23 17:07:10 2018 -0700
Committer: Botong Huang <bo...@apache.org>
Committed: Thu Aug 2 09:59:48 2018 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     |  36 +-
 .../src/main/resources/yarn-default.xml         |  40 +++
 .../utils/FederationStateStoreFacade.java       |  13 +
 .../pom.xml                                     |  18 +
 .../globalpolicygenerator/GPGContext.java       |   4 +
 .../globalpolicygenerator/GPGContextImpl.java   |  10 +
 .../globalpolicygenerator/GPGPolicyFacade.java  | 220 ++++++++++++
 .../server/globalpolicygenerator/GPGUtils.java  |  80 +++++
 .../GlobalPolicyGenerator.java                  |  17 +
 .../policygenerator/GlobalPolicy.java           |  76 +++++
 .../policygenerator/NoOpGlobalPolicy.java       |  36 ++
 .../policygenerator/PolicyGenerator.java        | 261 ++++++++++++++
 .../UniformWeightedLocalityGlobalPolicy.java    |  71 ++++
 .../policygenerator/package-info.java           |  24 ++
 .../TestGPGPolicyFacade.java                    | 202 +++++++++++
 .../policygenerator/TestPolicyGenerator.java    | 338 +++++++++++++++++++
 .../src/test/resources/schedulerInfo1.json      | 134 ++++++++
 .../src/test/resources/schedulerInfo2.json      | 196 +++++++++++
 18 files changed, 1775 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6800cf70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index ec88411..61535fc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3342,7 +3342,7 @@ public class YarnConfiguration extends Configuration {
   public static final boolean DEFAULT_ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED =
       false;
 
-  private static final String FEDERATION_GPG_PREFIX =
+  public static final String FEDERATION_GPG_PREFIX =
       FEDERATION_PREFIX + "gpg.";
 
   // The number of threads to use for the GPG scheduled executor service
@@ -3360,6 +3360,40 @@ public class YarnConfiguration extends Configuration {
       FEDERATION_GPG_PREFIX + "subcluster.heartbeat.expiration-ms";
   public static final long DEFAULT_GPG_SUBCLUSTER_EXPIRATION_MS = 1800000;
 
+  public static final String FEDERATION_GPG_POLICY_PREFIX =
+      FEDERATION_GPG_PREFIX + "policy.generator.";
+
+  /** The interval at which the policy generator runs; the code default of -1
+   *  disables it, while yarn-default.xml sets one hour. */
+  public static final String GPG_POLICY_GENERATOR_INTERVAL_MS =
+      FEDERATION_GPG_POLICY_PREFIX + "interval-ms";
+  public static final long DEFAULT_GPG_POLICY_GENERATOR_INTERVAL_MS = -1;
+
+  /**
+   * The configured policy generator class, runs NoOpGlobalPolicy by
+   * default.
+   */
+  public static final String GPG_GLOBAL_POLICY_CLASS =
+      FEDERATION_GPG_POLICY_PREFIX + "class";
+  public static final String DEFAULT_GPG_GLOBAL_POLICY_CLASS =
+      "org.apache.hadoop.yarn.server.globalpolicygenerator.policygenerator."
+          + "NoOpGlobalPolicy";
+
+  /**
+   * Whether or not the policy generator is running in read only (won't modify
+   * policies), default is false.
+   */
+  public static final String GPG_POLICY_GENERATOR_READONLY =
+      FEDERATION_GPG_POLICY_PREFIX + "readonly";
+  public static final boolean DEFAULT_GPG_POLICY_GENERATOR_READONLY =
+      false;
+
+  /**
+   * Which sub-clusters the policy generator should blacklist.
+   */
+  public static final String GPG_POLICY_GENERATOR_BLACKLIST =
+      FEDERATION_GPG_POLICY_PREFIX + "blacklist";
+
+
   ////////////////////////////////
   // Other Configs
   ////////////////////////////////

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6800cf70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 66493f3..755f3e5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3557,6 +3557,46 @@
 
   <property>
     <description>
+      The interval at which the policy generator runs, default is one hour
+    </description>
+    <name>yarn.federation.gpg.policy.generator.interval-ms</name>
+    <value>3600000</value>
+  </property>
+
+  <property>
+    <description>
+      The configured policy generator class, runs NoOpGlobalPolicy by default
+    </description>
+    <name>yarn.federation.gpg.policy.generator.class</name>
+    <value>org.apache.hadoop.yarn.server.globalpolicygenerator.policygenerator.NoOpGlobalPolicy</value>
+  </property>
+
+  <property>
+    <description>
+      Whether or not the policy generator is running in read only (won't modify policies), default is false
+    </description>
+    <name>yarn.federation.gpg.policy.generator.readonly</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>
+      Which sub-clusters the GPG should blacklist; default is none
+    </description>
+    <name>yarn.federation.gpg.policy.generator.blacklist</name>
+    <value></value>
+  </property>
+
+  <property>
+    <description>
        It is TimelineClient 1.5 configuration whether to store active
        application’s timeline data with in user directory i.e
        ${yarn.timeline-service.entity-group-fs-store.active-dir}/${user.name}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6800cf70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
index 4c3bed0..25a9e52 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolic
 import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationResponse;
 import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoRequest;
 import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationRequest;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
@@ -373,6 +374,18 @@ public final class FederationStateStoreFacade {
   }
 
   /**
+   * Set a policy configuration into the state store.
+   *
+   * @param policyConf the policy configuration to set
+   * @throws YarnException if the request is invalid/fails
+   */
+  public void setPolicyConfiguration(SubClusterPolicyConfiguration policyConf)
+      throws YarnException {
+    stateStore.setPolicyConfiguration(
+        SetSubClusterPolicyConfigurationRequest.newInstance(policyConf));
+  }
+
+  /**
    * Adds the home {@link SubClusterId} for the specified {@link ApplicationId}.
    *
    * @param appHomeSubCluster the mapping of the application to it's home

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6800cf70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
index 9bbb936..9398b0b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml
@@ -63,6 +63,12 @@
 
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-timelineservice</artifactId>
+      <scope>provided</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
     </dependency>
 
@@ -73,6 +79,12 @@
     </dependency>
 
     <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-yarn-server-common</artifactId>
       <type>test-jar</type>
@@ -92,6 +104,12 @@
       <plugin>
         <groupId>org.apache.rat</groupId>
         <artifactId>apache-rat-plugin</artifactId>
+        <configuration>
+          <excludes>
+            <exclude>src/test/resources/schedulerInfo1.json</exclude>
+            <exclude>src/test/resources/schedulerInfo2.json</exclude>
+          </excludes>
+        </configuration>
       </plugin>
     </plugins>
   </build>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6800cf70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContext.java
index da8a383..6b0a5a4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContext.java
@@ -28,4 +28,8 @@ public interface GPGContext {
   FederationStateStoreFacade getStateStoreFacade();
 
   void setStateStoreFacade(FederationStateStoreFacade facade);
+
+  GPGPolicyFacade getPolicyFacade();
+
+  void setPolicyFacade(GPGPolicyFacade facade);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6800cf70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContextImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContextImpl.java
index 3884ace..bb49844 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContextImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGContextImpl.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade
 public class GPGContextImpl implements GPGContext {
 
   private FederationStateStoreFacade facade;
+  private GPGPolicyFacade policyFacade;
 
   @Override
   public FederationStateStoreFacade getStateStoreFacade() {
@@ -38,4 +39,13 @@ public class GPGContextImpl implements GPGContext {
     this.facade = federationStateStoreFacade;
   }
 
+  @Override
+  public GPGPolicyFacade getPolicyFacade(){
+    return policyFacade;
+  }
+
+  @Override
+  public void setPolicyFacade(GPGPolicyFacade gpgPolicyfacade){
+    policyFacade = gpgPolicyfacade;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6800cf70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGPolicyFacade.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGPolicyFacade.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGPolicyFacade.java
new file mode 100644
index 0000000..4c61a14
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGPolicyFacade.java
@@ -0,0 +1,220 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyUtils;
+import org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo;
+import org.apache.hadoop.yarn.server.federation.policies.manager.WeightedLocalityPolicyManager;
+import org.apache.hadoop.yarn.server.federation.policies.router.FederationRouterPolicy;
+import org.apache.hadoop.yarn.server.federation.policies.amrmproxy.FederationAMRMProxyPolicy;
+import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+import org.apache.hadoop.yarn.server.federation.policies.manager.FederationPolicyManager;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration;
+import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A utility class for the GPG Policy Generator to read and write policies
+ * into the FederationStateStore. Policy specific logic is abstracted away in
+ * this class, so the PolicyGenerator can avoid dealing with policy
+ * construction, reinitialization, and serialization.
+ *
+ * There are only two exposed methods:
+ *
+ * {@link #getPolicyManager(String)}
+ * Gets the PolicyManager via queue name. Null if there is no policy
+ * configured for the specified queue. The PolicyManager can be used to
+ * extract the {@link FederationRouterPolicy} and
+ * {@link FederationAMRMProxyPolicy}, as well as any policy specific parameters
+ *
+ * {@link #setPolicyManager(FederationPolicyManager)}
+ * Sets the PolicyManager. If the policy configuration is the same, no change
+ * occurs. Otherwise, the internal cache is updated and the new configuration
+ * is written into the FederationStateStore
+ *
+ * This class assumes that the GPG is the only service
+ * writing policies. Thus, the only FederationStateStore reads occur the first
+ * time a queue policy is retrieved - after that, the GPG only writes to the
+ * FederationStateStore.
+ *
+ * The class uses a PolicyManager cache and a SubClusterPolicyConfiguration
+ * cache. The primary use for these caches is to serve reads, and to
+ * identify when the PolicyGenerator has actually changed the policy
+ * so unnecessary FederationStateStore policy writes can be avoided.
+ */
+
+public class GPGPolicyFacade {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(GPGPolicyFacade.class);
+
+  private FederationStateStoreFacade stateStore;
+
+  private Map<String, FederationPolicyManager> policyManagerMap;
+  private Map<String, SubClusterPolicyConfiguration> policyConfMap;
+
+  private boolean readOnly;
+
+  public GPGPolicyFacade(FederationStateStoreFacade stateStore,
+      Configuration conf) {
+    this.stateStore = stateStore;
+    this.policyManagerMap = new HashMap<>();
+    this.policyConfMap = new HashMap<>();
+    this.readOnly =
+        conf.getBoolean(YarnConfiguration.GPG_POLICY_GENERATOR_READONLY,
+            YarnConfiguration.DEFAULT_GPG_POLICY_GENERATOR_READONLY);
+  }
+
+  /**
+   * Provides a utility for the policy generator to read the policy manager
+   * from the FederationStateStore. Because the policy generator should be the
+   * only component updating the policy, this implementation does not use the
+   * reinitialization feature.
+   *
+   * @param queueName the name of the queue we want the policy manager for.
+   * @return the policy manager responsible for the queue policy.
+   */
+  public FederationPolicyManager getPolicyManager(String queueName)
+      throws YarnException {
+    FederationPolicyManager policyManager = policyManagerMap.get(queueName);
+    // If we don't have the policy manager cached, pull configuration
+    // from the FederationStateStore to create and cache it
+    if (policyManager == null) {
+      try {
+        // If we don't have the configuration cached, pull it
+        // from the stateStore
+        SubClusterPolicyConfiguration conf = policyConfMap.get(queueName);
+        if (conf == null) {
+          conf = stateStore.getPolicyConfiguration(queueName);
+        }
+        // If configuration is still null, it does not exist in the
+        // FederationStateStore
+        if (conf == null) {
+          LOG.info("Read null policy for queue {}", queueName);
+          return null;
+        }
+        policyManager =
+            FederationPolicyUtils.instantiatePolicyManager(conf.getType());
+        policyManager.setQueue(queueName);
+
+        // TODO there is currently no way to cleanly deserialize a policy
+        // manager sub type from just the configuration
+        if (policyManager instanceof WeightedLocalityPolicyManager) {
+          WeightedPolicyInfo wpinfo =
+              WeightedPolicyInfo.fromByteBuffer(conf.getParams());
+          WeightedLocalityPolicyManager wlpmanager =
+              (WeightedLocalityPolicyManager) policyManager;
+          LOG.info("Updating policy for queue {} to configured weights router: "
+                  + "{}, amrmproxy: {}", queueName,
+              wpinfo.getRouterPolicyWeights(),
+              wpinfo.getAMRMPolicyWeights());
+          wlpmanager.setWeightedPolicyInfo(wpinfo);
+        } else {
+          LOG.warn("Warning: FederationPolicyManager of unsupported type {}, "
+              + "initialization may be incomplete ", policyManager.getClass());
+        }
+
+        policyManagerMap.put(queueName, policyManager);
+        policyConfMap.put(queueName, conf);
+      } catch (YarnException e) {
+        LOG.error("Error reading SubClusterPolicyConfiguration from state "
+            + "store for queue: {}", queueName);
+        throw e;
+      }
+    }
+    return policyManager;
+  }
+
+  /**
+   * Provides a utility for the policy generator to write a policy manager
+   * into the FederationStateStore. The facade keeps a cache and will only write
+   * into the FederationStateStore if the policy configuration has changed.
+   *
+   * @param policyManager The policy manager we want to update into the state
+   *                      store. It contains policy information as well as
+   *                      the queue name we will update for.
+   */
+  public void setPolicyManager(FederationPolicyManager policyManager)
+      throws YarnException {
+    if (policyManager == null) {
+      LOG.warn("Attempting to set null policy manager");
+      return;
+    }
+    // Extract the configuration from the policy manager
+    String queue = policyManager.getQueue();
+    SubClusterPolicyConfiguration conf;
+    try {
+      conf = policyManager.serializeConf();
+    } catch (FederationPolicyInitializationException e) {
+      LOG.warn("Error serializing policy for queue {}", queue);
+      throw e;
+    }
+    if (conf == null) {
+      // State store does not currently support setting a policy back to null
+      // because it reads the queue name to set from the policy!
+      LOG.warn("Skip setting policy to null for queue {} into state store",
+          queue);
+      return;
+    }
+    // Compare with configuration cache, if different, write the conf into
+    // store and update our conf and manager cache
+    if (!confCacheEqual(queue, conf)) {
+      try {
+        if (readOnly) {
+          LOG.info("[read-only] Skipping policy update for queue {}", queue);
+          return;
+        }
+        LOG.info("Updating policy for queue {} into state store", queue);
+        stateStore.setPolicyConfiguration(conf);
+        policyConfMap.put(queue, conf);
+        policyManagerMap.put(queue, policyManager);
+      } catch (YarnException e) {
+        LOG.warn("Error writing SubClusterPolicyConfiguration to state "
+            + "store for queue: {}", queue);
+        throw e;
+      }
+    } else {
+      LOG.info("Setting unchanged policy - state store write skipped");
+    }
+  }
+
+  /**
+   * @param queue the queue to check the cached policy configuration for
+   * @param conf the new policy configuration
+   * @return whether or not the conf is equal to the cached conf
+   */
+  private boolean confCacheEqual(String queue,
+      SubClusterPolicyConfiguration conf) {
+    SubClusterPolicyConfiguration cachedConf = policyConfMap.get(queue);
+    if (conf == null && cachedConf == null) {
+      return true;
+    } else if (conf != null && cachedConf != null) {
+      if (conf.equals(cachedConf)) {
+        return true;
+      }
+    }
+    return false;
+  }
+}
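
A minimal usage sketch of the two exposed methods. The queue name, class name, and surrounding wiring are illustrative, not part of this patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.exceptions.YarnException;
    import org.apache.hadoop.yarn.server.federation.policies.manager.FederationPolicyManager;
    import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
    import org.apache.hadoop.yarn.server.globalpolicygenerator.GPGPolicyFacade;

    final class PolicyFacadeSketch {
      static void refreshQueuePolicy(String queueName) throws YarnException {
        Configuration conf = new YarnConfiguration();
        GPGPolicyFacade facade = new GPGPolicyFacade(
            FederationStateStoreFacade.getInstance(), conf);

        // The first read for a queue hits the FederationStateStore; later
        // reads are served from the facade's internal caches.
        FederationPolicyManager manager = facade.getPolicyManager(queueName);
        if (manager == null) {
          return; // no policy configured for this queue
        }

        // ... a GlobalPolicy implementation would adjust the manager here ...

        // The write is skipped when the configuration is unchanged or when
        // the facade was constructed in read-only mode.
        facade.setPolicyManager(manager);
      }
    }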

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6800cf70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
new file mode 100644
index 0000000..429bec4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.MediaType;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+
+import com.sun.jersey.api.client.Client;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo;
+
+/**
+ * GPGUtils contains utility functions for the GPG.
+ *
+ */
+public final class GPGUtils {
+
+  // hide constructor
+  private GPGUtils() {
+  }
+
+  /**
+   * Performs an invocation of the remote RMWebService.
+   */
+  public static <T> T invokeRMWebService(Configuration conf, String webAddr,
+      String path, final Class<T> returnType) {
+    Client client = Client.create();
+    T obj = null;
+
+    WebResource webResource = client.resource(webAddr);
+    ClientResponse response = webResource.path("ws/v1/cluster").path(path)
+        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+    if (response.getStatus() == HttpServletResponse.SC_OK) {
+      obj = response.getEntity(returnType);
+    } else {
+      throw new YarnRuntimeException("Bad response from remote web service: "
+          + response.getStatus());
+    }
+    return obj;
+  }
+
+  /**
+   * Creates a uniform weighting of 1.0 for each sub-cluster.
+   */
+  public static Map<SubClusterIdInfo, Float> createUniformWeights(
+      Set<SubClusterId> ids) {
+    Map<SubClusterIdInfo, Float> weights =
+        new HashMap<>();
+    for(SubClusterId id : ids) {
+      weights.put(new SubClusterIdInfo(id), 1.0f);
+    }
+    return weights;
+  }
+
+}
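
An illustrative use of the two helpers above. The RM address is a placeholder, and querying ClusterMetricsInfo from the "metrics" sub-path (resolved under ws/v1/cluster by invokeRMWebService) is an assumption made for the example:

    import java.util.Map;
    import java.util.Set;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
    import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo;
    import org.apache.hadoop.yarn.server.globalpolicygenerator.GPGUtils;
    import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;

    final class GPGUtilsSketch {
      static void sketch(Set<SubClusterId> activeSubClusters) {
        Configuration conf = new YarnConfiguration();

        // Fetch cluster metrics from one sub-cluster's RM REST API.
        ClusterMetricsInfo metrics = GPGUtils.invokeRMWebService(conf,
            "http://rm-host:8088", "metrics", ClusterMetricsInfo.class);

        // Give every active sub-cluster an equal weight of 1.0, e.g. as the
        // starting point for a weighted policy.
        Map<SubClusterIdInfo, Float> weights =
            GPGUtils.createUniformWeights(activeSubClusters);
      }
    }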

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6800cf70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
index f6cfba0..88b9f2b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+import org.apache.hadoop.yarn.server.globalpolicygenerator.policygenerator.PolicyGenerator;
 import org.apache.hadoop.yarn.server.globalpolicygenerator.subclustercleaner.SubClusterCleaner;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -62,6 +63,7 @@ public class GlobalPolicyGenerator extends CompositeService {
   // Scheduler service that runs tasks periodically
   private ScheduledThreadPoolExecutor scheduledExecutorService;
   private SubClusterCleaner subClusterCleaner;
+  private PolicyGenerator policyGenerator;
 
   public GlobalPolicyGenerator() {
     super(GlobalPolicyGenerator.class.getName());
@@ -73,11 +75,15 @@ public class GlobalPolicyGenerator extends CompositeService {
     // Set up the context
     this.gpgContext
         .setStateStoreFacade(FederationStateStoreFacade.getInstance());
+    this.gpgContext
+        .setPolicyFacade(new GPGPolicyFacade(
+            this.gpgContext.getStateStoreFacade(), conf));
 
     this.scheduledExecutorService = new ScheduledThreadPoolExecutor(
         conf.getInt(YarnConfiguration.GPG_SCHEDULED_EXECUTOR_THREADS,
             YarnConfiguration.DEFAULT_GPG_SCHEDULED_EXECUTOR_THREADS));
     this.subClusterCleaner = new SubClusterCleaner(conf, this.gpgContext);
+    this.policyGenerator = new PolicyGenerator(conf, this.gpgContext);
 
     DefaultMetricsSystem.initialize(METRICS_NAME);
 
@@ -99,6 +105,17 @@ public class GlobalPolicyGenerator extends CompositeService {
       LOG.info("Scheduled sub-cluster cleaner with interval: {}",
           DurationFormatUtils.formatDurationISO(scCleanerIntervalMs));
     }
+
+    // Schedule PolicyGenerator
+    long policyGeneratorIntervalMillis = getConfig().getLong(
+        YarnConfiguration.GPG_POLICY_GENERATOR_INTERVAL_MS,
+        YarnConfiguration.DEFAULT_GPG_POLICY_GENERATOR_INTERVAL_MS);
+    if(policyGeneratorIntervalMillis > 0){
+      this.scheduledExecutorService.scheduleAtFixedRate(this.policyGenerator,
+          0, policyGeneratorIntervalMillis, TimeUnit.MILLISECONDS);
+      LOG.info("Scheduled policygenerator with interval: {}",
+          DurationFormatUtils.formatDurationISO(policyGeneratorIntervalMillis));
+    }
   }
 
   @Override
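
Since the code default for the interval is -1, the task above is only scheduled when the key is set to a positive value. A minimal, illustrative way to do that programmatically (e.g. in a test); names other than the configuration key and the service class are made up:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.globalpolicygenerator.GlobalPolicyGenerator;

    final class EnablePolicyGeneratorSketch {
      static GlobalPolicyGenerator startGpg() {
        Configuration conf = new YarnConfiguration();
        // Run the PolicyGenerator once an hour; a non-positive value leaves
        // it unscheduled.
        conf.setLong(YarnConfiguration.GPG_POLICY_GENERATOR_INTERVAL_MS,
            60 * 60 * 1000L);
        GlobalPolicyGenerator gpg = new GlobalPolicyGenerator();
        gpg.init(conf);
        gpg.start();
        return gpg;
      }
    }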

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6800cf70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/GlobalPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/GlobalPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/GlobalPolicy.java
new file mode 100644
index 0000000..38d762d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/GlobalPolicy.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.globalpolicygenerator.policygenerator;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.federation.policies.manager.FederationPolicyManager;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+
+import java.util.Collections;
+import java.util.Map;
+
+/**
+ * This abstract class defines the pluggable policy that the PolicyGenerator
+ * uses to update policies in the state store.
+ */
+
+public abstract class GlobalPolicy implements Configurable {
+
+  private Configuration conf;
+
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
+  /**
+   * Return a map of the object type and RM path to request it from - the
+   * framework will query these paths and provide the objects to the policy.
+   * Delegating this responsibility to the PolicyGenerator enables us to avoid
+   * duplicate calls to the same endpoints, as the GlobalPolicy is invoked
+   * once per queue.
+   */
+  protected Map<Class, String> registerPaths() {
+    // Default register nothing
+    return Collections.emptyMap();
+  }
+
+  /**
+   * Given a queue, cluster metrics, and policy manager, update the policy
+   * to account for the cluster status. This method defines the policy generator
+   * behavior.
+   *
+   * @param queueName   name of the queue
+   * @param clusterInfo map from SubClusterId to cluster information about
+   *                    each SubCluster, used to make policy decisions
+   * @param manager     the FederationPolicyManager for the queue's existing
+   *                    policy; may be null, in which case the policy will
+   *                    need to be created
+   * @return policy manager that handles the updated (or created) policy
+   */
+  protected abstract FederationPolicyManager updatePolicy(String queueName,
+      Map<SubClusterId, Map<Class, Object>> clusterInfo,
+      FederationPolicyManager manager);
+
+}

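To illustrate the extension point defined above: a subclass overrides
registerPaths() to tell the framework what to fetch from each RM, and
updatePolicy() to react to it. The sketch below is hypothetical (the class
name is made up, and it assumes RMWSConsts.METRICS is the REST path whose
response unmarshals to a ClusterMetricsInfo, the same pairing the tests in
this patch use); it is not part of the commit.

import java.util.Collections;
import java.util.Map;

import org.apache.hadoop.yarn.server.federation.policies.manager.FederationPolicyManager;
import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Hypothetical GlobalPolicy that only inspects per-SubCluster metrics. */
public class MetricsLoggingGlobalPolicy extends GlobalPolicy {

  private static final Logger LOG =
      LoggerFactory.getLogger(MetricsLoggingGlobalPolicy.class);

  @Override
  protected Map<Class, String> registerPaths() {
    // Ask the PolicyGenerator to fetch ClusterMetricsInfo from every RM.
    return Collections.<Class, String>singletonMap(
        ClusterMetricsInfo.class, RMWSConsts.METRICS);
  }

  @Override
  protected FederationPolicyManager updatePolicy(String queueName,
      Map<SubClusterId, Map<Class, Object>> clusterInfo,
      FederationPolicyManager manager) {
    for (Map.Entry<SubClusterId, Map<Class, Object>> entry
        : clusterInfo.entrySet()) {
      ClusterMetricsInfo metrics =
          (ClusterMetricsInfo) entry.getValue().get(ClusterMetricsInfo.class);
      LOG.info("SubCluster {} reports {} pending applications",
          entry.getKey(), metrics.getAppsPending());
    }
    // Returning the manager unchanged leaves the stored policy untouched.
    return manager;
  }
}
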
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6800cf70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/NoOpGlobalPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/NoOpGlobalPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/NoOpGlobalPolicy.java
new file mode 100644
index 0000000..c2d578f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/NoOpGlobalPolicy.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.globalpolicygenerator.policygenerator;
+
+import org.apache.hadoop.yarn.server.federation.policies.manager.FederationPolicyManager;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+
+import java.util.Map;
+
+/**
+ * Default policy that does not update any policy configurations.
+ */
+public class NoOpGlobalPolicy extends GlobalPolicy {
+
+  @Override
+  public FederationPolicyManager updatePolicy(String queueName,
+      Map<SubClusterId, Map<Class, Object>> clusterInfo,
+      FederationPolicyManager manager) {
+    return null;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6800cf70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/PolicyGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/PolicyGenerator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/PolicyGenerator.java
new file mode 100644
index 0000000..5681ff0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/PolicyGenerator.java
@@ -0,0 +1,261 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator.policygenerator;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.federation.policies.manager.FederationPolicyManager;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
+import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+import org.apache.hadoop.yarn.server.globalpolicygenerator.GPGContext;
+import org.apache.hadoop.yarn.server.globalpolicygenerator.GPGUtils;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerQueueInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * The PolicyGenerator runs periodically and writes the updated policy
+ * configuration for each queue into the FederationStateStore. The policy
+ * update behavior is defined by the GlobalPolicy instance that is used.
+ */
+
+public class PolicyGenerator implements Runnable, Configurable {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(PolicyGenerator.class);
+
+  private GPGContext gpgContext;
+  private Configuration conf;
+
+  // Information request map
+  private Map<Class, String> pathMap = new HashMap<>();
+
+  // Global policy instance
+  @VisibleForTesting
+  protected GlobalPolicy policy;
+
+  /**
+   * The PolicyGenerator periodically reads SubCluster load and updates
+   * policies into the FederationStateStore.
+   */
+  public PolicyGenerator(Configuration conf, GPGContext context) {
+    setConf(conf);
+    init(context);
+  }
+
+  private void init(GPGContext context) {
+    this.gpgContext = context;
+    LOG.info("Initialized PolicyGenerator");
+  }
+
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+    this.policy = FederationStateStoreFacade
+        .createInstance(conf, YarnConfiguration.GPG_GLOBAL_POLICY_CLASS,
+            YarnConfiguration.DEFAULT_GPG_GLOBAL_POLICY_CLASS,
+            GlobalPolicy.class);
+    policy.setConf(conf);
+    pathMap.putAll(policy.registerPaths());
+  }
+
+  @Override
+  public Configuration getConf() {
+    return this.conf;
+  }
+
+  @Override
+  public final void run() {
+    Map<SubClusterId, SubClusterInfo> activeSubClusters;
+    try {
+      activeSubClusters = gpgContext.getStateStoreFacade().getSubClusters(true);
+    } catch (YarnException e) {
+      LOG.error("Error retrieving active sub-clusters", e);
+      return;
+    }
+
+    // Parse the scheduler information from all the SCs
+    Map<SubClusterId, SchedulerInfo> schedInfo =
+        getSchedulerInfo(activeSubClusters);
+
+    // Extract queue names from SubClusters running the capacity scheduler
+    Set<String> queueNames = extractQueues(schedInfo);
+
+    // Remove blacklisted SubClusters
+    activeSubClusters.keySet().removeAll(getBlackList());
+    LOG.info("Active non-blacklisted sub-clusters: {}",
+        activeSubClusters.keySet());
+
+    // Get cluster metrics information from the non-blacklisted RMs - later
+    // used to evaluate SubCluster load
+    Map<SubClusterId, Map<Class, Object>> clusterInfo =
+        getInfos(activeSubClusters);
+
+    // Update into the FederationStateStore
+    for (String queueName : queueNames) {
+      // Retrieve the manager from the policy facade
+      FederationPolicyManager manager;
+      try {
+        manager = this.gpgContext.getPolicyFacade().getPolicyManager(queueName);
+      } catch (YarnException e) {
+        LOG.error("GetPolicy for queue {} failed", queueName, e);
+        continue;
+      }
+      LOG.info("Updating policy for queue {}", queueName);
+      manager = policy.updatePolicy(queueName, clusterInfo, manager);
+      try {
+        this.gpgContext.getPolicyFacade().setPolicyManager(manager);
+      } catch (YarnException e) {
+        LOG.error("SetPolicy for queue {} failed", queueName, e);
+      }
+    }
+  }
+
+  /**
+   * Helper to retrieve metrics from the RM REST endpoints.
+   *
+   * @param activeSubClusters A map of active SubCluster IDs to info
+   */
+  @VisibleForTesting
+  protected Map<SubClusterId, Map<Class, Object>> getInfos(
+      Map<SubClusterId, SubClusterInfo> activeSubClusters) {
+
+    Map<SubClusterId, Map<Class, Object>> clusterInfo = new HashMap<>();
+    for (SubClusterInfo sci : activeSubClusters.values()) {
+      for (Map.Entry<Class, String> e : this.pathMap.entrySet()) {
+        if (!clusterInfo.containsKey(sci.getSubClusterId())) {
+          clusterInfo.put(sci.getSubClusterId(), new HashMap<Class, Object>());
+        }
+        Object ret = GPGUtils
+            .invokeRMWebService(conf, sci.getRMWebServiceAddress(),
+                e.getValue(), e.getKey());
+        clusterInfo.get(sci.getSubClusterId()).put(e.getKey(), ret);
+      }
+    }
+
+    return clusterInfo;
+  }
+
+  /**
+   * Helper to retrieve SchedulerInfos.
+   *
+   * @param activeSubClusters A map of active SubCluster IDs to info
+   */
+  @VisibleForTesting
+  protected Map<SubClusterId, SchedulerInfo> getSchedulerInfo(
+      Map<SubClusterId, SubClusterInfo> activeSubClusters) {
+    Map<SubClusterId, SchedulerInfo> schedInfo =
+        new HashMap<>();
+    for (SubClusterInfo sci : activeSubClusters.values()) {
+      SchedulerTypeInfo sti = GPGUtils
+          .invokeRMWebService(conf, sci.getRMWebServiceAddress(),
+              RMWSConsts.SCHEDULER, SchedulerTypeInfo.class);
+      if (sti != null) {
+        schedInfo.put(sci.getSubClusterId(), sti.getSchedulerInfo());
+      } else {
+        LOG.warn("Skipped null scheduler info from SubCluster {}",
+            sci.getSubClusterId());
+      }
+    }
+    return schedInfo;
+  }
+
+  /**
+   * Helper to get a set of blacklisted SubCluster Ids from configuration.
+   */
+  private Set<SubClusterId> getBlackList() {
+    String blackListParam =
+        conf.get(YarnConfiguration.GPG_POLICY_GENERATOR_BLACKLIST);
+    if (blackListParam == null) {
+      return Collections.emptySet();
+    }
+    Set<SubClusterId> blackList = new HashSet<>();
+    for (String id : blackListParam.split(",")) {
+      blackList.add(SubClusterId.newInstance(id));
+    }
+    return blackList;
+  }
+
+  /**
+   * Given the scheduler information for all RMs, extract the union of
+   * queue names - right now we only consider instances of capacity scheduler.
+   *
+   * @param schedInfo the scheduler information
+   * @return a set of queue names
+   */
+  private Set<String> extractQueues(
+      Map<SubClusterId, SchedulerInfo> schedInfo) {
+    Set<String> queueNames = new HashSet<String>();
+    for (Map.Entry<SubClusterId, SchedulerInfo> entry : schedInfo.entrySet()) {
+      if (entry.getValue() instanceof CapacitySchedulerInfo) {
+        // Flatten the queue structure and get only non leaf queues
+        queueNames.addAll(flattenQueue((CapacitySchedulerInfo) entry.getValue())
+            .get(CapacitySchedulerQueueInfo.class));
+      } else {
+        LOG.warn("Skipping SubCluster {}, not configured with capacity "
+            + "scheduler", entry.getKey());
+      }
+    }
+    return queueNames;
+  }
+
+  // Helpers to flatten the queue structure into a multimap of
+  // queue type to set of queue names
+  private Map<Class, Set<String>> flattenQueue(CapacitySchedulerInfo csi) {
+    Map<Class, Set<String>> flattened = new HashMap<Class, Set<String>>();
+    addOrAppend(flattened, csi.getClass(), csi.getQueueName());
+    for (CapacitySchedulerQueueInfo csqi : csi.getQueues().getQueueInfoList()) {
+      flattenQueue(csqi, flattened);
+    }
+    return flattened;
+  }
+
+  private void flattenQueue(CapacitySchedulerQueueInfo csi,
+      Map<Class, Set<String>> flattened) {
+    addOrAppend(flattened, csi.getClass(), csi.getQueueName());
+    if (csi.getQueues() != null) {
+      for (CapacitySchedulerQueueInfo csqi : csi.getQueues()
+          .getQueueInfoList()) {
+        flattenQueue(csqi, flattened);
+      }
+    }
+  }
+
+  private <K, V> void addOrAppend(Map<K, Set<V>> multimap, K key, V value) {
+    if (!multimap.containsKey(key)) {
+      multimap.put(key, new HashSet<V>());
+    }
+    multimap.get(key).add(value);
+  }
+
+}

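Two configuration points read by the code above are worth spelling out: the
GlobalPolicy implementation is instantiated reflectively from
GPG_GLOBAL_POLICY_CLASS in setConf(), and getBlackList() parses
GPG_POLICY_GENERATOR_BLACKLIST as a comma-separated list of SubCluster ids.
A minimal sketch of setting both follows; the SubCluster ids are illustrative
and the class name is simply the weighted-locality policy from this patch.

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class PolicyGeneratorConfigSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Pick the GlobalPolicy implementation that setConf() will instantiate.
    conf.set(YarnConfiguration.GPG_GLOBAL_POLICY_CLASS,
        "org.apache.hadoop.yarn.server.globalpolicygenerator."
            + "policygenerator.UniformWeightedLocalityGlobalPolicy");
    // Exclude two (illustrative) SubClusters from policy decisions; the
    // value is split on commas by getBlackList().
    conf.set(YarnConfiguration.GPG_POLICY_GENERATOR_BLACKLIST, "sc0,sc1");
    System.out.println(conf.get(YarnConfiguration.GPG_GLOBAL_POLICY_CLASS));
  }
}
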
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6800cf70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/UniformWeightedLocalityGlobalPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/UniformWeightedLocalityGlobalPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/UniformWeightedLocalityGlobalPolicy.java
new file mode 100644
index 0000000..826cb02
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/UniformWeightedLocalityGlobalPolicy.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.globalpolicygenerator.policygenerator;
+
+import org.apache.hadoop.yarn.server.federation.policies.manager.FederationPolicyManager;
+import org.apache.hadoop.yarn.server.federation.policies.manager.WeightedLocalityPolicyManager;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo;
+import org.apache.hadoop.yarn.server.globalpolicygenerator.GPGUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Map;
+
+/**
+ * Simple policy that generates and updates uniform weighted locality
+ * policies.
+ */
+public class UniformWeightedLocalityGlobalPolicy extends GlobalPolicy {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(UniformWeightedLocalityGlobalPolicy.class);
+
+  @Override
+  protected FederationPolicyManager updatePolicy(String queueName,
+      Map<SubClusterId, Map<Class, Object>> clusterInfo,
+      FederationPolicyManager currentManager) {
+    if (currentManager == null) {
+      // Set uniform weights for all SubClusters
+      LOG.info("Creating uniform weighted policy queue {}", queueName);
+      WeightedLocalityPolicyManager manager =
+          new WeightedLocalityPolicyManager();
+      manager.setQueue(queueName);
+      Map<SubClusterIdInfo, Float> policyWeights =
+          GPGUtils.createUniformWeights(clusterInfo.keySet());
+      manager.getWeightedPolicyInfo().setAMRMPolicyWeights(policyWeights);
+      manager.getWeightedPolicyInfo().setRouterPolicyWeights(policyWeights);
+      currentManager = manager;
+    }
+    if (currentManager instanceof WeightedLocalityPolicyManager) {
+      LOG.info("Updating policy for queue {} to default weights", queueName);
+      WeightedLocalityPolicyManager wlpmanager =
+          (WeightedLocalityPolicyManager) currentManager;
+      wlpmanager.getWeightedPolicyInfo().setAMRMPolicyWeights(
+          GPGUtils.createUniformWeights(clusterInfo.keySet()));
+      wlpmanager.getWeightedPolicyInfo().setRouterPolicyWeights(
+          GPGUtils.createUniformWeights(clusterInfo.keySet()));
+    } else {
+      LOG.info("Policy for queue {} is of type {}, expected {}",
+          queueName, currentManager.getClass(), Weight.class);
+    }
+    return currentManager;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6800cf70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/package-info.java
new file mode 100644
index 0000000..e8ff436
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/package-info.java
@@ -0,0 +1,24 @@
+/**
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Classes comprising the policy generator for the GPG. Responsibilities include
+ * generating and updating policies based on the cluster status.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator.policygenerator;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6800cf70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/TestGPGPolicyFacade.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/TestGPGPolicyFacade.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/TestGPGPolicyFacade.java
new file mode 100644
index 0000000..d78c11f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/TestGPGPolicyFacade.java
@@ -0,0 +1,202 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.federation.policies.manager.FederationPolicyManager;
+import org.apache.hadoop.yarn.server.federation.policies.manager.WeightedLocalityPolicyManager;
+import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
+import org.apache.hadoop.yarn.server.federation.store.impl.MemoryFederationStateStore;
+import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration;
+import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Matchers;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * Unit test for GPG Policy Facade.
+ */
+public class TestGPGPolicyFacade {
+
+  private Configuration conf;
+  private FederationStateStore stateStore;
+  private FederationStateStoreFacade facade =
+      FederationStateStoreFacade.getInstance();
+  private GPGPolicyFacade policyFacade;
+
+  private Set<SubClusterId> subClusterIds;
+
+  private SubClusterPolicyConfiguration testConf;
+
+  private static final String TEST_QUEUE = "test-queue";
+
+  public TestGPGPolicyFacade() {
+    conf = new Configuration();
+    conf.setInt(YarnConfiguration.FEDERATION_CACHE_TIME_TO_LIVE_SECS, 0);
+    subClusterIds = new HashSet<>();
+    subClusterIds.add(SubClusterId.newInstance("sc0"));
+    subClusterIds.add(SubClusterId.newInstance("sc1"));
+    subClusterIds.add(SubClusterId.newInstance("sc2"));
+  }
+
+  @Before
+  public void setUp() throws IOException, YarnException {
+    stateStore = new MemoryFederationStateStore();
+    stateStore.init(conf);
+    facade.reinitialize(stateStore, conf);
+    policyFacade = new GPGPolicyFacade(facade, conf);
+    WeightedLocalityPolicyManager manager =
+        new WeightedLocalityPolicyManager();
+    // Add a test policy for test queue
+    manager.setQueue(TEST_QUEUE);
+    manager.getWeightedPolicyInfo().setAMRMPolicyWeights(
+        GPGUtils.createUniformWeights(subClusterIds));
+    manager.getWeightedPolicyInfo().setRouterPolicyWeights(
+        GPGUtils.createUniformWeights(subClusterIds));
+    testConf = manager.serializeConf();
+    stateStore.setPolicyConfiguration(SetSubClusterPolicyConfigurationRequest
+        .newInstance(testConf));
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    stateStore.close();
+    stateStore = null;
+  }
+
+  @Test
+  public void testGetPolicy() throws YarnException {
+    WeightedLocalityPolicyManager manager =
+        (WeightedLocalityPolicyManager) policyFacade
+            .getPolicyManager(TEST_QUEUE);
+    Assert.assertEquals(testConf, manager.serializeConf());
+  }
+
+  /**
+   * Test that new policies are written into the state store.
+   */
+  @Test
+  public void testSetNewPolicy() throws YarnException {
+    WeightedLocalityPolicyManager manager =
+        new WeightedLocalityPolicyManager();
+    manager.setQueue(TEST_QUEUE + 0);
+    manager.getWeightedPolicyInfo().setAMRMPolicyWeights(
+        GPGUtils.createUniformWeights(subClusterIds));
+    manager.getWeightedPolicyInfo().setRouterPolicyWeights(
+        GPGUtils.createUniformWeights(subClusterIds));
+    SubClusterPolicyConfiguration policyConf = manager.serializeConf();
+    policyFacade.setPolicyManager(manager);
+
+    manager =
+        (WeightedLocalityPolicyManager) policyFacade
+            .getPolicyManager(TEST_QUEUE + 0);
+    Assert.assertEquals(policyConf, manager.serializeConf());
+  }
+
+  /**
+   * Test that overwriting policies are updated in the state store.
+   */
+  @Test
+  public void testOverwritePolicy() throws YarnException {
+    subClusterIds.add(SubClusterId.newInstance("sc3"));
+    WeightedLocalityPolicyManager manager =
+        new WeightedLocalityPolicyManager();
+    manager.setQueue(TEST_QUEUE);
+    manager.getWeightedPolicyInfo().setAMRMPolicyWeights(
+        GPGUtils.createUniformWeights(subClusterIds));
+    manager.getWeightedPolicyInfo().setRouterPolicyWeights(
+        GPGUtils.createUniformWeights(subClusterIds));
+    SubClusterPolicyConfiguration policyConf = manager.serializeConf();
+    policyFacade.setPolicyManager(manager);
+
+    manager =
+        (WeightedLocalityPolicyManager) policyFacade
+            .getPolicyManager(TEST_QUEUE);
+    Assert.assertEquals(policyConf, manager.serializeConf());
+  }
+
+  /**
+   * Test that the write through cache works.
+   */
+  @Test
+  public void testWriteCache() throws YarnException {
+    stateStore = mock(MemoryFederationStateStore.class);
+    facade.reinitialize(stateStore, conf);
+    when(stateStore.getPolicyConfiguration(Matchers.any(
+        GetSubClusterPolicyConfigurationRequest.class))).thenReturn(
+        GetSubClusterPolicyConfigurationResponse.newInstance(testConf));
+    policyFacade = new GPGPolicyFacade(facade, conf);
+
+    // Query once to fill the cache
+    FederationPolicyManager manager = policyFacade.getPolicyManager(TEST_QUEUE);
+    // State store should be contacted once
+    verify(stateStore, times(1)).getPolicyConfiguration(
+        Matchers.any(GetSubClusterPolicyConfigurationRequest.class));
+
+    // If we set the same policy, the state store should be untouched
+    policyFacade.setPolicyManager(manager);
+    verify(stateStore, times(0)).setPolicyConfiguration(
+        Matchers.any(SetSubClusterPolicyConfigurationRequest.class));
+  }
+
+  /**
+   * Test that when read only is enabled, the state store is not changed.
+   */
+  @Test
+  public void testReadOnly() throws YarnException {
+    conf.setBoolean(YarnConfiguration.GPG_POLICY_GENERATOR_READONLY, true);
+    stateStore = mock(MemoryFederationStateStore.class);
+    facade.reinitialize(stateStore, conf);
+    when(stateStore.getPolicyConfiguration(Matchers.any(
+        GetSubClusterPolicyConfigurationRequest.class))).thenReturn(
+        GetSubClusterPolicyConfigurationResponse.newInstance(testConf));
+    policyFacade = new GPGPolicyFacade(facade, conf);
+
+    // If we set a policy, the state store should be untouched
+    WeightedLocalityPolicyManager manager =
+        new WeightedLocalityPolicyManager();
+    // Add a test policy for test queue
+    manager.setQueue(TEST_QUEUE);
+    manager.getWeightedPolicyInfo().setAMRMPolicyWeights(
+        GPGUtils.createUniformWeights(subClusterIds));
+    manager.getWeightedPolicyInfo().setRouterPolicyWeights(
+        GPGUtils.createUniformWeights(subClusterIds));
+    policyFacade.setPolicyManager(manager);
+    verify(stateStore, times(0)).setPolicyConfiguration(
+        Matchers.any(SetSubClusterPolicyConfigurationRequest.class));
+  }
+
+}

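The read-only flag exercised in testReadOnly above is effectively a dry-run
switch: when GPG_POLICY_GENERATOR_READONLY is true, the policy facade computes
policies but never writes them back to the FederationStateStore. A minimal,
illustrative way to turn it on:

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class GpgReadOnlyConfigSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Compute policies but never call setPolicyConfiguration on the store,
    // matching the behavior verified in testReadOnly.
    conf.setBoolean(YarnConfiguration.GPG_POLICY_GENERATOR_READONLY, true);
    System.out.println(conf.getBoolean(
        YarnConfiguration.GPG_POLICY_GENERATOR_READONLY, false));
  }
}
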
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6800cf70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/TestPolicyGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/TestPolicyGenerator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/TestPolicyGenerator.java
new file mode 100644
index 0000000..9d27b3b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/java/org/apache/hadoop/yarn/server/globalpolicygenerator/policygenerator/TestPolicyGenerator.java
@@ -0,0 +1,338 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.globalpolicygenerator.policygenerator;
+
+import com.sun.jersey.api.json.JSONConfiguration;
+import com.sun.jersey.api.json.JSONJAXBContext;
+import com.sun.jersey.api.json.JSONUnmarshaller;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.federation.policies.manager.FederationPolicyManager;
+import org.apache.hadoop.yarn.server.federation.policies.manager.WeightedLocalityPolicyManager;
+import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
+import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState;
+import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+import org.apache.hadoop.yarn.server.globalpolicygenerator.GPGContext;
+import org.apache.hadoop.yarn.server.globalpolicygenerator.GPGContextImpl;
+import org.apache.hadoop.yarn.server.globalpolicygenerator.GPGPolicyFacade;
+import org.apache.hadoop.yarn.server.globalpolicygenerator.GPGUtils;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+
+import javax.xml.bind.JAXBException;
+import java.io.IOException;
+import java.io.StringReader;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+/**
+ * Unit test for GPG Policy Generator.
+ */
+public class TestPolicyGenerator {
+
+  private static final int NUM_SC = 3;
+
+  private Configuration conf;
+  private FederationStateStore stateStore;
+  private FederationStateStoreFacade facade =
+      FederationStateStoreFacade.getInstance();
+
+  private List<SubClusterId> subClusterIds;
+  private Map<SubClusterId, SubClusterInfo> subClusterInfos;
+  private Map<SubClusterId, Map<Class, Object>> clusterInfos;
+  private Map<SubClusterId, SchedulerInfo> schedulerInfos;
+
+  private GPGContext gpgContext;
+
+  private PolicyGenerator policyGenerator;
+
+  public TestPolicyGenerator() {
+    conf = new Configuration();
+    conf.setInt(YarnConfiguration.FEDERATION_CACHE_TIME_TO_LIVE_SECS, 0);
+
+    gpgContext = new GPGContextImpl();
+    gpgContext.setPolicyFacade(new GPGPolicyFacade(facade, conf));
+    gpgContext.setStateStoreFacade(facade);
+  }
+
+  @Before
+  public void setUp() throws IOException, YarnException, JAXBException {
+    subClusterIds = new ArrayList<>();
+    subClusterInfos = new HashMap<>();
+    clusterInfos = new HashMap<>();
+    schedulerInfos = new HashMap<>();
+
+    CapacitySchedulerInfo sti1 =
+        readJSON("src/test/resources/schedulerInfo1.json",
+            CapacitySchedulerInfo.class);
+    CapacitySchedulerInfo sti2 =
+        readJSON("src/test/resources/schedulerInfo2.json",
+            CapacitySchedulerInfo.class);
+
+    // Set up sub clusters
+    for (int i = 0; i < NUM_SC; ++i) {
+      // Sub cluster Id
+      SubClusterId id = SubClusterId.newInstance("sc" + i);
+      subClusterIds.add(id);
+
+      // Sub cluster info
+      SubClusterInfo cluster = SubClusterInfo
+          .newInstance(id, "amrm:" + i, "clientrm:" + i, "rmadmin:" + i,
+              "rmweb:" + i, SubClusterState.SC_RUNNING, 0, "");
+      subClusterInfos.put(id, cluster);
+
+      // Cluster metrics info
+      ClusterMetricsInfo metricsInfo = new ClusterMetricsInfo();
+      metricsInfo.setAppsPending(2000);
+      if (!clusterInfos.containsKey(id)) {
+        clusterInfos.put(id, new HashMap<Class, Object>());
+      }
+      clusterInfos.get(id).put(ClusterMetricsInfo.class, metricsInfo);
+
+      schedulerInfos.put(id, sti1);
+    }
+
+    // Change one of the sub cluster schedulers
+    schedulerInfos.put(subClusterIds.get(0), sti2);
+
+    stateStore = mock(FederationStateStore.class);
+    when(stateStore.getSubClusters((GetSubClustersInfoRequest) any()))
+        .thenReturn(GetSubClustersInfoResponse.newInstance(
+            new ArrayList<SubClusterInfo>(subClusterInfos.values())));
+    facade.reinitialize(stateStore, conf);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    stateStore.close();
+    stateStore = null;
+  }
+
+  private <T> T readJSON(String pathname, Class<T> classy)
+      throws IOException, JAXBException {
+
+    JSONJAXBContext jc =
+        new JSONJAXBContext(JSONConfiguration.mapped().build(), classy);
+    JSONUnmarshaller unmarshaller = jc.createJSONUnmarshaller();
+    String contents = new String(Files.readAllBytes(Paths.get(pathname)));
+    return unmarshaller.unmarshalFromJSON(new StringReader(contents), classy);
+
+  }
+
+  @Test
+  public void testPolicyGenerator() throws YarnException {
+    policyGenerator = new TestablePolicyGenerator();
+    policyGenerator.policy = mock(GlobalPolicy.class);
+    policyGenerator.run();
+    verify(policyGenerator.policy, times(1))
+        .updatePolicy("default", clusterInfos, null);
+    verify(policyGenerator.policy, times(1))
+        .updatePolicy("default2", clusterInfos, null);
+  }
+
+  @Test
+  public void testBlacklist() throws YarnException {
+    conf.set(YarnConfiguration.GPG_POLICY_GENERATOR_BLACKLIST,
+        subClusterIds.get(0).toString());
+    Map<SubClusterId, Map<Class, Object>> blacklistedCMI =
+        new HashMap<>(clusterInfos);
+    blacklistedCMI.remove(subClusterIds.get(0));
+    policyGenerator = new TestablePolicyGenerator();
+    policyGenerator.policy = mock(GlobalPolicy.class);
+    policyGenerator.run();
+    verify(policyGenerator.policy, times(1))
+        .updatePolicy("default", blacklistedCMI, null);
+    verify(policyGenerator.policy, times(0))
+        .updatePolicy("default", clusterInfos, null);
+  }
+
+  @Test
+  public void testBlacklistTwo() throws YarnException {
+    conf.set(YarnConfiguration.GPG_POLICY_GENERATOR_BLACKLIST,
+        subClusterIds.get(0).toString() + "," + subClusterIds.get(1)
+            .toString());
+    Map<SubClusterId, Map<Class, Object>> blacklistedCMI =
+        new HashMap<>(clusterInfos);
+    blacklistedCMI.remove(subClusterIds.get(0));
+    blacklistedCMI.remove(subClusterIds.get(1));
+    policyGenerator = new TestablePolicyGenerator();
+    policyGenerator.policy = mock(GlobalPolicy.class);
+    policyGenerator.run();
+    verify(policyGenerator.policy, times(1))
+        .updatePolicy("default", blacklistedCMI, null);
+    verify(policyGenerator.policy, times(0))
+        .updatePolicy("default", clusterInfos, null);
+  }
+
+  @Test
+  public void testExistingPolicy() throws YarnException {
+    WeightedLocalityPolicyManager manager = new WeightedLocalityPolicyManager();
+    // Add a test policy for test queue
+    manager.setQueue("default");
+    manager.getWeightedPolicyInfo().setAMRMPolicyWeights(GPGUtils
+        .createUniformWeights(new HashSet<SubClusterId>(subClusterIds)));
+    manager.getWeightedPolicyInfo().setRouterPolicyWeights(GPGUtils
+        .createUniformWeights(new HashSet<SubClusterId>(subClusterIds)));
+    SubClusterPolicyConfiguration testConf = manager.serializeConf();
+    when(stateStore.getPolicyConfiguration(
+        GetSubClusterPolicyConfigurationRequest.newInstance("default")))
+        .thenReturn(
+            GetSubClusterPolicyConfigurationResponse.newInstance(testConf));
+
+    policyGenerator = new TestablePolicyGenerator();
+    policyGenerator.policy = mock(GlobalPolicy.class);
+    policyGenerator.run();
+
+    ArgumentCaptor<FederationPolicyManager> argCaptor =
+        ArgumentCaptor.forClass(FederationPolicyManager.class);
+    verify(policyGenerator.policy, times(1))
+        .updatePolicy(eq("default"), eq(clusterInfos), argCaptor.capture());
+    assertEquals(argCaptor.getValue().getClass(), manager.getClass());
+    assertEquals(argCaptor.getValue().serializeConf(), manager.serializeConf());
+  }
+
+  @Test
+  public void testCallRM() {
+
+    CapacitySchedulerConfiguration csConf =
+        new CapacitySchedulerConfiguration();
+
+    final String a = CapacitySchedulerConfiguration.ROOT + ".a";
+    final String b = CapacitySchedulerConfiguration.ROOT + ".b";
+    final String a1 = a + ".a1";
+    final String a2 = a + ".a2";
+    final String b1 = b + ".b1";
+    final String b2 = b + ".b2";
+    final String b3 = b + ".b3";
+    float aCapacity = 10.5f;
+    float bCapacity = 89.5f;
+    float a1Capacity = 30;
+    float a2Capacity = 70;
+    float b1Capacity = 79.2f;
+    float b2Capacity = 0.8f;
+    float b3Capacity = 20;
+
+    // Define top-level queues
+    csConf.setQueues(CapacitySchedulerConfiguration.ROOT,
+        new String[] {"a", "b"});
+
+    csConf.setCapacity(a, aCapacity);
+    csConf.setCapacity(b, bCapacity);
+
+    // Define 2nd-level queues
+    csConf.setQueues(a, new String[] {"a1", "a2"});
+    csConf.setCapacity(a1, a1Capacity);
+    csConf.setUserLimitFactor(a1, 100.0f);
+    csConf.setCapacity(a2, a2Capacity);
+    csConf.setUserLimitFactor(a2, 100.0f);
+
+    csConf.setQueues(b, new String[] {"b1", "b2", "b3"});
+    csConf.setCapacity(b1, b1Capacity);
+    csConf.setUserLimitFactor(b1, 100.0f);
+    csConf.setCapacity(b2, b2Capacity);
+    csConf.setUserLimitFactor(b2, 100.0f);
+    csConf.setCapacity(b3, b3Capacity);
+    csConf.setUserLimitFactor(b3, 100.0f);
+
+    YarnConfiguration rmConf = new YarnConfiguration(csConf);
+
+    ResourceManager resourceManager = new ResourceManager();
+    rmConf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
+    resourceManager.init(rmConf);
+    resourceManager.start();
+
+    String rmAddress = WebAppUtils.getRMWebAppURLWithScheme(this.conf);
+    SchedulerTypeInfo sti = GPGUtils
+        .invokeRMWebService(conf, rmAddress, RMWSConsts.SCHEDULER,
+            SchedulerTypeInfo.class);
+
+    Assert.assertNotNull(sti);
+  }
+
+  /**
+   * Testable policy generator overrides the methods that communicate
+   * with the RM REST endpoint, allowing us to inject faked responses.
+   */
+  class TestablePolicyGenerator extends PolicyGenerator {
+
+    TestablePolicyGenerator() {
+      super(conf, gpgContext);
+    }
+
+    @Override
+    protected Map<SubClusterId, Map<Class, Object>> getInfos(
+        Map<SubClusterId, SubClusterInfo> activeSubClusters) {
+      Map<SubClusterId, Map<Class, Object>> ret = new HashMap<>();
+      for (SubClusterId id : activeSubClusters.keySet()) {
+        if (!ret.containsKey(id)) {
+          ret.put(id, new HashMap<Class, Object>());
+        }
+        ret.get(id).put(ClusterMetricsInfo.class,
+            clusterInfos.get(id).get(ClusterMetricsInfo.class));
+      }
+      return ret;
+    }
+
+    @Override
+    protected Map<SubClusterId, SchedulerInfo> getSchedulerInfo(
+        Map<SubClusterId, SubClusterInfo> activeSubClusters) {
+      Map<SubClusterId, SchedulerInfo> ret =
+          new HashMap<SubClusterId, SchedulerInfo>();
+      for (SubClusterId id : activeSubClusters.keySet()) {
+        ret.put(id, schedulerInfos.get(id));
+      }
+      return ret;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6800cf70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo1.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo1.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo1.json
new file mode 100644
index 0000000..3ad4594
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo1.json
@@ -0,0 +1,134 @@
+{
+  "capacity": 100.0,
+  "usedCapacity": 0.0,
+  "maxCapacity": 100.0,
+  "queueName": "root",
+  "queues": {
+    "queue": [
+      {
+        "type": "capacitySchedulerLeafQueueInfo",
+        "capacity": 100.0,
+        "usedCapacity": 0.0,
+        "maxCapacity": 100.0,
+        "absoluteCapacity": 100.0,
+        "absoluteMaxCapacity": 100.0,
+        "absoluteUsedCapacity": 0.0,
+        "numApplications": 484,
+        "queueName": "default",
+        "state": "RUNNING",
+        "resourcesUsed": {
+          "memory": 0,
+          "vCores": 0
+        },
+        "hideReservationQueues": false,
+        "nodeLabels": [
+          "*"
+        ],
+        "numActiveApplications": 484,
+        "numPendingApplications": 0,
+        "numContainers": 0,
+        "maxApplications": 10000,
+        "maxApplicationsPerUser": 10000,
+        "userLimit": 100,
+        "users": {
+          "user": [
+            {
+              "username": "Default",
+              "resourcesUsed": {
+                "memory": 0,
+                "vCores": 0
+              },
+              "numPendingApplications": 0,
+              "numActiveApplications": 468,
+              "AMResourceUsed": {
+                "memory": 30191616,
+                "vCores": 468
+              },
+              "userResourceLimit": {
+                "memory": 31490048,
+                "vCores": 7612
+              }
+            }
+          ]
+        },
+        "userLimitFactor": 1.0,
+        "AMResourceLimit": {
+          "memory": 31490048,
+          "vCores": 7612
+        },
+        "usedAMResource": {
+          "memory": 30388224,
+          "vCores": 532
+        },
+        "userAMResourceLimit": {
+          "memory": 31490048,
+          "vCores": 7612
+        },
+        "preemptionDisabled": true
+      }
+    ]
+  },
+  "health": {
+    "lastrun": 1517951638085,
+    "operationsInfo": {
+      "entry": {
+        "key": "last-allocation",
+        "value": {
+          "nodeId": "node0:0",
+          "containerId": "container_e61477_1517922128312_0340_01_000001",
+          "queue": "root.default"
+        }
+      },
+      "entry": {
+        "key": "last-reservation",
+        "value": {
+          "nodeId": "node0:1",
+          "containerId": "container_e61477_1517879828320_0249_01_000001",
+          "queue": "root.default"
+        }
+      },
+      "entry": {
+        "key": "last-release",
+        "value": {
+          "nodeId": "node0:2",
+          "containerId": "container_e61477_1517922128312_0340_01_000001",
+          "queue": "root.default"
+        }
+      },
+      "entry": {
+        "key": "last-preemption",
+        "value": {
+          "nodeId": "N/A",
+          "containerId": "N/A",
+          "queue": "N/A"
+        }
+      }
+    },
+    "lastRunDetails": [
+      {
+        "operation": "releases",
+        "count": 0,
+        "resources": {
+          "memory": 0,
+          "vCores": 0
+        }
+      },
+      {
+        "operation": "allocations",
+        "count": 0,
+        "resources": {
+          "memory": 0,
+          "vCores": 0
+        }
+      },
+      {
+        "operation": "reservations",
+        "count": 0,
+        "resources": {
+          "memory": 0,
+          "vCores": 0
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file




[46/50] [abbrv] hadoop git commit: fix build after rebase

Posted by bo...@apache.org.
fix build after rebase


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8e71808
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8e71808
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8e71808

Branch: refs/heads/YARN-7402
Commit: b8e718082bb6a6a361deb64cd142019b09e5b3d5
Parents: 3213acd
Author: Botong Huang <bo...@apache.org>
Authored: Fri Jul 13 21:29:19 2018 -0700
Committer: Botong Huang <bo...@apache.org>
Committed: Thu Aug 2 09:59:48 2018 -0700

----------------------------------------------------------------------
 .../yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java   | 2 +-
 .../globalpolicygenerator/subclustercleaner/SubClusterCleaner.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8e71808/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
index 88b9f2b..1ae07f3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java
@@ -22,7 +22,7 @@ import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.commons.lang.time.DurationFormatUtils;
+import org.apache.commons.lang3.time.DurationFormatUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.service.CompositeService;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8e71808/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java
index dad5121..6410a6d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.yarn.server.globalpolicygenerator.subclustercleaner;
 import java.util.Date;
 import java.util.Map;
 
-import org.apache.commons.lang.time.DurationFormatUtils;
+import org.apache.commons.lang3.time.DurationFormatUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;




[12/50] [abbrv] hadoop git commit: HDFS-12716. 'dfs.datanode.failed.volumes.tolerated' to support minimum number of volumes to be available. Contributed by Ranith Sardar and usharani

Posted by bo...@apache.org.
HDFS-12716. 'dfs.datanode.failed.volumes.tolerated' to support minimum number of volumes to be available. Contributed by Ranith Sardar and usharani


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3108d27e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3108d27e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3108d27e

Branch: refs/heads/YARN-7402
Commit: 3108d27edde941d153a58f71fb1096cce2995531
Parents: 63e08ec
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Mon Jul 30 15:50:04 2018 +0530
Committer: Brahma Reddy Battula <br...@apache.org>
Committed: Mon Jul 30 15:50:04 2018 +0530

----------------------------------------------------------------------
 .../hadoop/hdfs/server/datanode/DataNode.java   |  7 +++-
 .../datanode/checker/DatasetVolumeChecker.java  |  6 ++-
 .../checker/StorageLocationChecker.java         | 28 ++++++++++----
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 40 ++++++++++++++++----
 .../src/main/resources/hdfs-default.xml         |  2 +
 .../TestDataNodeVolumeFailureToleration.java    |  6 ++-
 6 files changed, 68 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3108d27e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 7df92f6..1e9c57a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -293,6 +293,8 @@ public class DataNode extends ReconfigurableBase
       "  and rolling upgrades.";
 
   static final int CURRENT_BLOCK_FORMAT_VERSION = 1;
+  public static final int MAX_VOLUME_FAILURE_TOLERATED_LIMIT = -1;
+  public static final String MAX_VOLUME_FAILURES_TOLERATED_MSG = "should not be less than -1";
 
   /** A list of property that are reconfigurable at runtime. */
   private static final List<String> RECONFIGURABLE_PROPERTIES =
@@ -1389,10 +1391,11 @@ public class DataNode extends ReconfigurableBase
 
     int volFailuresTolerated = dnConf.getVolFailuresTolerated();
     int volsConfigured = dnConf.getVolsConfigured();
-    if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
+    if (volFailuresTolerated < MAX_VOLUME_FAILURE_TOLERATED_LIMIT
+        || volFailuresTolerated >= volsConfigured) {
       throw new DiskErrorException("Invalid value configured for "
           + "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated
-          + ". Value configured is either less than 0 or >= "
+          + ". Value configured is either less than -1 or >= "
           + "to the number of configured volumes (" + volsConfigured + ").");
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3108d27e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
index 3889e23..30602c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
@@ -28,6 +28,7 @@ import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -153,10 +154,11 @@ public class DatasetVolumeChecker {
 
     lastAllVolumesCheck = timer.monotonicNow() - minDiskCheckGapMs;
 
-    if (maxVolumeFailuresTolerated < 0) {
+    if (maxVolumeFailuresTolerated < DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
       throw new DiskErrorException("Invalid value configured for "
           + DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - "
-          + maxVolumeFailuresTolerated + " (should be non-negative)");
+          + maxVolumeFailuresTolerated + " "
+          + DataNode.MAX_VOLUME_FAILURES_TOLERATED_MSG);
     }
 
     delegateChecker = new ThrottledAsyncChecker<>(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3108d27e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
index 81575e2..dabaa83 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation.CheckContext;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -105,10 +106,11 @@ public class StorageLocationChecker {
         DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
         DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT);
 
-    if (maxVolumeFailuresTolerated < 0) {
+    if (maxVolumeFailuresTolerated < DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
       throw new DiskErrorException("Invalid value configured for "
           + DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - "
-          + maxVolumeFailuresTolerated + " (should be non-negative)");
+          + maxVolumeFailuresTolerated + " "
+          + DataNode.MAX_VOLUME_FAILURES_TOLERATED_MSG);
     }
 
     this.timer = timer;
@@ -213,12 +215,22 @@ public class StorageLocationChecker {
       }
     }
 
-    if (failedLocations.size() > maxVolumeFailuresTolerated) {
-      throw new DiskErrorException("Too many failed volumes - "
-          + "current valid volumes: " + goodLocations.size()
-          + ", volumes configured: " + dataDirs.size()
-          + ", volumes failed: " + failedLocations.size()
-          + ", volume failures tolerated: " + maxVolumeFailuresTolerated);
+    if (maxVolumeFailuresTolerated == DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
+      if (dataDirs.size() == failedLocations.size()) {
+        throw new DiskErrorException("Too many failed volumes - "
+            + "current valid volumes: " + goodLocations.size()
+            + ", volumes configured: " + dataDirs.size()
+            + ", volumes failed: " + failedLocations.size()
+            + ", volume failures tolerated: " + maxVolumeFailuresTolerated);
+      }
+    } else {
+      if (failedLocations.size() > maxVolumeFailuresTolerated) {
+        throw new DiskErrorException("Too many failed volumes - "
+            + "current valid volumes: " + goodLocations.size()
+            + ", volumes configured: " + dataDirs.size()
+            + ", volumes failed: " + failedLocations.size()
+            + ", volume failures tolerated: " + maxVolumeFailuresTolerated);
+      }
     }
 
     if (goodLocations.size() == 0) {

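Both branches above throw the same DiskErrorException and differ only in the triggering condition; the rule they encode can be restated in one place (hypothetical helper, not part of the patch):

    // -1 (DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) means all but one volume may fail.
    static boolean tooManyFailedVolumes(int volsConfigured, int volsFailed,
        int volFailuresTolerated) {
      if (volFailuresTolerated == -1) {
        return volsFailed == volsConfigured;   // invalid only when no volume is left
      }
      return volsFailed > volFailuresTolerated;
    }
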
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3108d27e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 89c278a..d7f133e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -237,6 +237,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   final FsDatasetCache cacheManager;
   private final Configuration conf;
   private final int volFailuresTolerated;
+  private final int volsConfigured;
   private volatile boolean fsRunning;
 
   final ReplicaMap volumeMap;
@@ -285,15 +286,32 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     List<VolumeFailureInfo> volumeFailureInfos = getInitialVolumeFailureInfos(
         dataLocations, storage);
 
-    int volsConfigured = datanode.getDnConf().getVolsConfigured();
+    volsConfigured = datanode.getDnConf().getVolsConfigured();
     int volsFailed = volumeFailureInfos.size();
 
-    if (volsFailed > volFailuresTolerated) {
-      throw new DiskErrorException("Too many failed volumes - "
-          + "current valid volumes: " + storage.getNumStorageDirs() 
-          + ", volumes configured: " + volsConfigured 
-          + ", volumes failed: " + volsFailed
-          + ", volume failures tolerated: " + volFailuresTolerated);
+    if (volFailuresTolerated < DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT
+        || volFailuresTolerated >= volsConfigured) {
+      throw new DiskErrorException("Invalid value configured for "
+          + "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated
+          + ". Value configured is either less than maxVolumeFailureLimit (-1) or greater than or equal "
+          + "to the number of configured volumes (" + volsConfigured + ").");
+    }
+    if (volFailuresTolerated == DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
+      if (volsConfigured == volsFailed) {
+        throw new DiskErrorException(
+            "Too many failed volumes - " + "current valid volumes: "
+                + storage.getNumStorageDirs() + ", volumes configured: "
+                + volsConfigured + ", volumes failed: " + volsFailed
+                + ", volume failures tolerated: " + volFailuresTolerated);
+      }
+    } else {
+      if (volsFailed > volFailuresTolerated) {
+        throw new DiskErrorException(
+            "Too many failed volumes - " + "current valid volumes: "
+                + storage.getNumStorageDirs() + ", volumes configured: "
+                + volsConfigured + ", volumes failed: " + volsFailed
+                + ", volume failures tolerated: " + volFailuresTolerated);
+      }
     }
 
     storageMap = new ConcurrentHashMap<String, DatanodeStorage>();
@@ -597,7 +615,13 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    */
   @Override // FsDatasetSpi
   public boolean hasEnoughResource() {
-    return getNumFailedVolumes() <= volFailuresTolerated;
+    if (volFailuresTolerated == DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
+      // If volFailuresTolerated is configured as maxVolumeFailureLimit (-1),
+      // at least one valid volume is required.
+      return volumes.getVolumes().size() >= 1;
+    } else {
+      return getNumFailedVolumes() <= volFailuresTolerated;
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3108d27e/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index a10be27..9e73197 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1276,6 +1276,8 @@
   <description>The number of volumes that are allowed to
   fail before a datanode stops offering service. By default
   any volume failure will cause a datanode to shutdown.
+  The value -1 is also allowed; it means the datanode keeps
+  serving as long as at least one valid volume remains.
   </description>
 </property>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3108d27e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
index f83609a..825887c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
@@ -201,7 +201,11 @@ public class TestDataNodeVolumeFailureToleration {
   @Test
   public void testVolumeAndTolerableConfiguration() throws Exception {
     // Check if Block Pool Service exit for an invalid conf value.
-    testVolumeConfig(-1, 0, false, true);
+    testVolumeConfig(-2, 0, false, true);
+    // Test for one good volume at least
+    testVolumeConfig(-1, 0, true, true);
+    testVolumeConfig(-1, 1, true, true);
+    testVolumeConfig(-1, 2, false, true);
 
     // Ditto if the value is too big.
     testVolumeConfig(100, 0, false, true);




[23/50] [abbrv] hadoop git commit: YARN-8418. App local logs could be leaked if log aggregation fails to initialize for the app. (Bibin A Chundatt via wangda)

Posted by bo...@apache.org.
YARN-8418. App local logs could be leaked if log aggregation fails to initialize for the app. (Bibin A Chundatt via wangda)

Change-Id: I29a23ca4b219b48c92e7975cd44cddb8b0e04104


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b540bbf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b540bbf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b540bbf

Branch: refs/heads/YARN-7402
Commit: 4b540bbfcf02d828052999215c6135603d98f5db
Parents: 8aa93a5
Author: Wangda Tan <wa...@apache.org>
Authored: Tue Jul 31 12:07:51 2018 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Tue Jul 31 12:08:00 2018 -0700

----------------------------------------------------------------------
 .../LogAggregationFileController.java           |  7 ++
 .../nodemanager/NodeStatusUpdaterImpl.java      |  1 +
 .../containermanager/ContainerManager.java      |  1 +
 .../containermanager/ContainerManagerImpl.java  | 13 ++-
 .../logaggregation/AppLogAggregator.java        |  8 ++
 .../logaggregation/AppLogAggregatorImpl.java    | 15 ++++
 .../logaggregation/LogAggregationService.java   | 83 ++++++++++++++++----
 .../containermanager/loghandler/LogHandler.java |  7 ++
 .../loghandler/NonAggregatingLogHandler.java    |  9 +++
 .../loghandler/event/LogHandlerEventType.java   |  4 +-
 .../event/LogHandlerTokenUpdatedEvent.java      | 26 ++++++
 .../nodemanager/DummyContainerManager.java      |  7 ++
 .../TestLogAggregationService.java              | 34 +++++---
 13 files changed, 187 insertions(+), 28 deletions(-)
----------------------------------------------------------------------
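
For orientation: the fix parks applications whose remote log-dir creation failed with an invalid token, and re-drives them once the NodeManager receives refreshed system credentials. The trigger side, condensed from the ContainerManagerImpl change below (the method name here is illustrative):

    void onSystemCredentialsRefreshed(LogHandler logHandler, AsyncDispatcher dispatcher) {
      // Fire the new event only when some app is actually waiting on a fresh token.
      if (!logHandler.getInvalidTokenApps().isEmpty()) {
        dispatcher.getEventHandler().handle(new LogHandlerTokenUpdatedEvent());
      }
    }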


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b540bbf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
index b047b1c..6b3c9a4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
@@ -43,11 +43,14 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils;
 import org.apache.hadoop.yarn.webapp.View.ViewContext;
@@ -365,6 +368,10 @@ public abstract class LogAggregationFileController {
         }
       });
     } catch (Exception e) {
+      if (e instanceof RemoteException) {
+        throw new YarnRuntimeException(((RemoteException) e)
+            .unwrapRemoteException(SecretManager.InvalidToken.class));
+      }
       throw new YarnRuntimeException(e);
     }
   }
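
Unwrapping the RemoteException is what lets callers tell a stale-token failure apart from any other createAppDir error; LogAggregationService later in this patch keys off the cause in exactly the way this hypothetical helper shows:

    // Assumes org.apache.hadoop.security.token.SecretManager is imported.
    private static boolean isInvalidTokenFailure(Exception e) {
      return e.getCause() instanceof SecretManager.InvalidToken;
    }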

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b540bbf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
index 8154723..faf7adb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
@@ -1135,6 +1135,7 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
             if (systemCredentials != null && !systemCredentials.isEmpty()) {
               ((NMContext) context).setSystemCrendentialsForApps(
                   parseCredentials(systemCredentials));
+              context.getContainerManager().handleCredentialUpdate();
             }
             List<org.apache.hadoop.yarn.api.records.Container>
                 containersToUpdate = response.getContainersToUpdate();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b540bbf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManager.java
index 2aeb245..356c2e0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManager.java
@@ -44,4 +44,5 @@ public interface ContainerManager extends ServiceStateChangeListener,
 
   ContainerScheduler getContainerScheduler();
 
+  void handleCredentialUpdate();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b540bbf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index ce240bc..8b35258 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.ByteString;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.UpdateContainerTokenEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerTokenUpdatedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler.ContainerSchedulerEvent;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -170,7 +171,6 @@ import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -214,6 +214,7 @@ public class ContainerManagerImpl extends CompositeService implements
   protected final AsyncDispatcher dispatcher;
 
   private final DeletionService deletionService;
+  private LogHandler logHandler;
   private boolean serviceStopped = false;
   private final ReadLock readLock;
   private final WriteLock writeLock;
@@ -292,7 +293,7 @@ public class ContainerManagerImpl extends CompositeService implements
   @Override
   public void serviceInit(Configuration conf) throws Exception {
 
-    LogHandler logHandler =
+    logHandler =
       createLogHandler(conf, this.context, this.deletionService);
     addIfService(logHandler);
     dispatcher.register(LogHandlerEventType.class, logHandler);
@@ -1904,4 +1905,12 @@ public class ContainerManagerImpl extends CompositeService implements
   public ContainerScheduler getContainerScheduler() {
     return this.containerScheduler;
   }
+
+  @Override
+  public void handleCredentialUpdate() {
+    Set<ApplicationId> invalidApps = logHandler.getInvalidTokenApps();
+    if (!invalidApps.isEmpty()) {
+      dispatcher.getEventHandler().handle(new LogHandlerTokenUpdatedEvent());
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b540bbf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregator.java
index 0178699..93436fa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregator.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation;
 
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.server.api.ContainerLogContext;
 
 public interface AppLogAggregator extends Runnable {
@@ -29,4 +31,10 @@ public interface AppLogAggregator extends Runnable {
   void finishLogAggregation();
 
   void disableLogAggregation();
+
+  void enableLogAggregation();
+
+  boolean isAggregationEnabled();
+
+  UserGroupInformation updateCredentials(Credentials cred);
 }
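
The three methods added to the interface compose into a single re-enable path once refreshed credentials arrive; a condensed sketch of how LogAggregationService (later in this patch) uses them, with the method name being illustrative:

    void reenable(AppLogAggregator aggregator, Credentials refreshed, ApplicationId appId,
        LogAggregationFileController controller) throws Exception {
      UserGroupInformation ugi = aggregator.updateCredentials(refreshed);
      controller.createAppDir(ugi.getShortUserName(), appId, ugi);   // retry with new token
      aggregator.enableLogAggregation();
    }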

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b540bbf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
index 6630ba6..04503ef 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
@@ -561,6 +561,16 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
     this.logAggregationDisabled = true;
   }
 
+  @Override
+  public void enableLogAggregation() {
+    this.logAggregationDisabled = false;
+  }
+
+  @Override
+  public boolean isAggregationEnabled() {
+    return !logAggregationDisabled;
+  }
+
   @Private
   @VisibleForTesting
   // This is only used for testing.
@@ -643,6 +653,11 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
     return this.userUgi;
   }
 
+  public UserGroupInformation updateCredentials(Credentials cred) {
+    this.userUgi.addCredentials(cred);
+    return userUgi;
+  }
+
   @Private
   @VisibleForTesting
   public int getLogAggregationTimes() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b540bbf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
index dcc165f..d8db967 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
@@ -20,10 +20,14 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregatio
 
 import java.io.IOException;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.security.token.SecretManager;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -58,6 +62,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.eve
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerContainerFinishedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerEvent;
 
+
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
@@ -83,6 +88,9 @@ public class LogAggregationService extends AbstractService implements
 
   private final ConcurrentMap<ApplicationId, AppLogAggregator> appLogAggregators;
 
+  // Holds applications whose aggregation is disabled due to an invalid token
+  private final Set<ApplicationId> invalidTokenApps;
+
   @VisibleForTesting
   ExecutorService threadPool;
   
@@ -95,6 +103,7 @@ public class LogAggregationService extends AbstractService implements
     this.dirsHandler = dirsHandler;
     this.appLogAggregators =
         new ConcurrentHashMap<ApplicationId, AppLogAggregator>();
+    this.invalidTokenApps = ConcurrentHashMap.newKeySet();
   }
 
   protected void serviceInit(Configuration conf) throws Exception {
@@ -224,8 +233,8 @@ public class LogAggregationService extends AbstractService implements
       userUgi.addCredentials(credentials);
     }
 
-    LogAggregationFileController logAggregationFileController
-        = getLogAggregationFileController(getConfig());
+    LogAggregationFileController logAggregationFileController =
+        getLogAggregationFileController(getConfig());
     logAggregationFileController.verifyAndCreateRemoteLogDir();
     // New application
     final AppLogAggregator appLogAggregator =
@@ -245,14 +254,16 @@ public class LogAggregationService extends AbstractService implements
       logAggregationFileController.createAppDir(user, appId, userUgi);
     } catch (Exception e) {
       appLogAggregator.disableLogAggregation();
+
+      // add to disabled aggregators if due to InvalidToken
+      if (e.getCause() instanceof SecretManager.InvalidToken) {
+        invalidTokenApps.add(appId);
+      }
       if (!(e instanceof YarnRuntimeException)) {
         appDirException = new YarnRuntimeException(e);
       } else {
         appDirException = (YarnRuntimeException)e;
       }
-      appLogAggregators.remove(appId);
-      closeFileSystems(userUgi);
-      throw appDirException;
     }
 
     // TODO Get the user configuration for the list of containers that need log
@@ -270,6 +281,10 @@ public class LogAggregationService extends AbstractService implements
       }
     };
     this.threadPool.execute(aggregatorWrapper);
+
+    if (appDirException != null) {
+      throw appDirException;
+    }
   }
 
   protected void closeFileSystems(final UserGroupInformation userUgi) {
@@ -307,17 +322,20 @@ public class LogAggregationService extends AbstractService implements
 
     // App is complete. Finish up any containers' pending log aggregation and
     // close the application specific logFile.
-
-    AppLogAggregator aggregator = this.appLogAggregators.get(appId);
-    if (aggregator == null) {
-      LOG.warn("Log aggregation is not initialized for " + appId
-          + ", did it fail to start?");
-      this.dispatcher.getEventHandler().handle(
-          new ApplicationEvent(appId,
-              ApplicationEventType.APPLICATION_LOG_HANDLING_FAILED));
-      return;
+    try {
+      AppLogAggregator aggregator = this.appLogAggregators.get(appId);
+      if (aggregator == null) {
+        LOG.warn("Log aggregation is not initialized for " + appId
+            + ", did it fail to start?");
+        this.dispatcher.getEventHandler().handle(new ApplicationEvent(appId,
+            ApplicationEventType.APPLICATION_LOG_HANDLING_FAILED));
+        return;
+      }
+      aggregator.finishLogAggregation();
+    } finally {
+      // Remove invalid Token Apps
+      invalidTokenApps.remove(appId);
     }
-    aggregator.finishLogAggregation();
   }
 
   @Override
@@ -344,12 +362,47 @@ public class LogAggregationService extends AbstractService implements
             (LogHandlerAppFinishedEvent) event;
         stopApp(appFinishedEvent.getApplicationId());
         break;
+      case LOG_AGG_TOKEN_UPDATE:
+        checkAndEnableAppAggregators();
+        break;
       default:
         ; // Ignore
     }
 
   }
 
+  private void checkAndEnableAppAggregators() {
+    for (ApplicationId appId : invalidTokenApps) {
+      try {
+        AppLogAggregator aggregator = appLogAggregators.get(appId);
+        if (aggregator != null) {
+          Credentials credentials =
+              context.getSystemCredentialsForApps().get(appId);
+          if (credentials != null) {
+            // Create the app dir again with the refreshed credentials
+            LogAggregationFileController logAggregationFileController =
+                getLogAggregationFileController(getConfig());
+            UserGroupInformation userUgi =
+                aggregator.updateCredentials(credentials);
+            logAggregationFileController
+                .createAppDir(userUgi.getShortUserName(), appId, userUgi);
+            aggregator.enableLogAggregation();
+          }
+          invalidTokenApps.remove(appId);
+          LOG.info("LogAggregation enabled for application {}", appId);
+        }
+      } catch (Exception e) {
+        // Ignore; the app stays in invalidTokenApps and is retried on the next update.
+        LOG.warn("Failed to re-enable log aggregation for {}", appId, e);
+      }
+    }
+  }
+
+  @Override
+  public Set<ApplicationId> getInvalidTokenApps() {
+    return invalidTokenApps;
+  }
+
   @VisibleForTesting
   public ConcurrentMap<ApplicationId, AppLogAggregator> getAppLogAggregators() {
     return this.appLogAggregators;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b540bbf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/LogHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/LogHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/LogHandler.java
index 6eb3fb4..459fdf4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/LogHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/LogHandler.java
@@ -18,9 +18,16 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler;
 
+import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerEvent;
 
+
+
+import java.util.Set;
+
 public interface LogHandler extends EventHandler<LogHandlerEvent> {
   public void handle(LogHandlerEvent event);
+
+  public Set<ApplicationId> getInvalidTokenApps();
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b540bbf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
index 9c43dde..d66aa12 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
@@ -19,8 +19,12 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.ThreadFactory;
@@ -204,6 +208,11 @@ public class NonAggregatingLogHandler extends AbstractService implements
     }
   }
 
+  @Override
+  public Set<ApplicationId> getInvalidTokenApps() {
+    return Collections.emptySet();
+  }
+
   ScheduledThreadPoolExecutor createScheduledThreadPoolExecutor(
       Configuration conf) {
     ThreadFactory tf =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b540bbf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/event/LogHandlerEventType.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/event/LogHandlerEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/event/LogHandlerEventType.java
index 684d6b2..ec477c2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/event/LogHandlerEventType.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/event/LogHandlerEventType.java
@@ -19,5 +19,7 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event;
 
 public enum LogHandlerEventType {
-  APPLICATION_STARTED, CONTAINER_FINISHED, APPLICATION_FINISHED
+  APPLICATION_STARTED,
+  CONTAINER_FINISHED,
+  APPLICATION_FINISHED, LOG_AGG_TOKEN_UPDATE
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b540bbf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/event/LogHandlerTokenUpdatedEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/event/LogHandlerTokenUpdatedEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/event/LogHandlerTokenUpdatedEvent.java
new file mode 100644
index 0000000..772a463
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/event/LogHandlerTokenUpdatedEvent.java
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event;
+
+public class LogHandlerTokenUpdatedEvent extends LogHandlerEvent {
+
+  public LogHandlerTokenUpdatedEvent() {
+    super(LogHandlerEventType.LOG_AGG_TOKEN_UPDATE);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b540bbf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java
index b5cb43b..feabeb1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java
@@ -24,6 +24,8 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.Collection;
+import java.util.Collections;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -187,6 +189,11 @@ public class DummyContainerManager extends ContainerManagerImpl {
             // Ignore
           }
       }
+
+      @Override
+      public Set<ApplicationId> getInvalidTokenApps() {
+        return Collections.emptySet();
+      }
     };
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b540bbf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
index 6268ad9..8b2e3cc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
@@ -73,6 +73,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
@@ -128,6 +129,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.Tes
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppFinishedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppStartedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerContainerFinishedEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerTokenUpdatedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.ConverterUtils;
@@ -823,7 +825,8 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
         .getFileControllerForWrite();
     LogAggregationFileController spyLogAggregationFileFormat =
         spy(logAggregationFileFormat);
-    Exception e = new RuntimeException("KABOOM!");
+    Exception e =
+        new YarnRuntimeException(new SecretManager.InvalidToken("KABOOM!"));
     doThrow(e).when(spyLogAggregationFileFormat)
         .createAppDir(any(String.class), any(ApplicationId.class),
             any(UserGroupInformation.class));
@@ -862,29 +865,40 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
     };
     checkEvents(appEventHandler, expectedEvents, false,
         "getType", "getApplicationID", "getDiagnostic");
-
+    Assert.assertEquals(1, logAggregationService.getInvalidTokenApps().size());
     // verify trying to collect logs for containers/apps we don't know about
     // doesn't blow up and tear down the NM
     logAggregationService.handle(new LogHandlerContainerFinishedEvent(
         BuilderUtils.newContainerId(4, 1, 1, 1),
         ContainerType.APPLICATION_MASTER, 0));
     dispatcher.await();
+
+    AppLogAggregator appAgg =
+        logAggregationService.getAppLogAggregators().get(appId);
+    Assert.assertFalse("Aggregation should be disabled",
+        appAgg.isAggregationEnabled());
+
+    // Deliver the token-update event to re-enable aggregation
+    logAggregationService.handle(new LogHandlerTokenUpdatedEvent());
+    dispatcher.await();
+
+    appAgg =
+        logAggregationService.getAppLogAggregators().get(appId);
+    Assert.assertFalse("Aggregation stays disabled until credentials are pushed",
+        appAgg.isAggregationEnabled());
+
+    // Check disabled apps are cleared
+    Assert.assertEquals(0, logAggregationService.getInvalidTokenApps().size());
+
     logAggregationService.handle(new LogHandlerAppFinishedEvent(
         BuilderUtils.newApplicationId(1, 5)));
     dispatcher.await();
 
     logAggregationService.stop();
     assertEquals(0, logAggregationService.getNumAggregators());
-    // local log dir shouldn't be deleted given log aggregation cannot
-    // continue due to aggregated log dir creation failure on remoteFS.
-    FileDeletionTask deletionTask = new FileDeletionTask(spyDelSrvc, user,
-        null, null);
-    verify(spyDelSrvc, never()).delete(deletionTask);
+    verify(spyDelSrvc).delete(any(FileDeletionTask.class));
     verify(logAggregationService).closeFileSystems(
         any(UserGroupInformation.class));
-    // make sure local log dir is not deleted in case log aggregation
-    // service cannot be initiated.
-    assertTrue(appLogDir.exists());
   }
 
   private void writeContainerLogs(File appLogDir, ContainerId containerId,




[44/50] [abbrv] hadoop git commit: YARN-7402. [GPG] Fix potential connection leak in GPGUtils. Contributed by Giovanni Matteo Fumarola.

Posted by bo...@apache.org.
YARN-7402. [GPG] Fix potential connection leak in GPGUtils. Contributed by Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f83fc85b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f83fc85b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f83fc85b

Branch: refs/heads/YARN-7402
Commit: f83fc85bae757f4cd54156eccb8bec692aaf6a21
Parents: 6800cf7
Author: Botong Huang <bo...@apache.org>
Authored: Wed May 23 12:45:32 2018 -0700
Committer: Botong Huang <bo...@apache.org>
Committed: Thu Aug 2 09:59:48 2018 -0700

----------------------------------------------------------------------
 .../server/globalpolicygenerator/GPGUtils.java  | 31 +++++++++++++-------
 1 file changed, 20 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f83fc85b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
index 429bec4..31cee1c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java
@@ -18,21 +18,22 @@
 
 package org.apache.hadoop.yarn.server.globalpolicygenerator;
 
+import static javax.servlet.http.HttpServletResponse.SC_OK;
+
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
 
-import javax.servlet.http.HttpServletResponse;
 import javax.ws.rs.core.MediaType;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo;
 
 import com.sun.jersey.api.client.Client;
 import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.api.client.WebResource;
-import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
-import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo;
 
 /**
  * GPGUtils contains utility functions for the GPG.
@@ -53,15 +54,23 @@ public final class GPGUtils {
     T obj = null;
 
     WebResource webResource = client.resource(webAddr);
-    ClientResponse response = webResource.path("ws/v1/cluster").path(path)
-        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
-    if (response.getStatus() == HttpServletResponse.SC_OK) {
-      obj = response.getEntity(returnType);
-    } else {
-      throw new YarnRuntimeException("Bad response from remote web service: "
-          + response.getStatus());
+    ClientResponse response = null;
+    try {
+      response = webResource.path("ws/v1/cluster").path(path)
+          .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+      if (response.getStatus() == SC_OK) {
+        obj = response.getEntity(returnType);
+      } else {
+        throw new YarnRuntimeException(
+            "Bad response from remote web service: " + response.getStatus());
+      }
+      return obj;
+    } finally {
+      if (response != null) {
+        response.close();
+      }
+      client.destroy();
     }
-    return obj;
   }
 
   /**


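The same release-in-finally discipline applies to any Jersey 1.x call site; a self-contained sketch of the pattern (hypothetical method, same classes and static imports as used in the patch):

    static String fetchClusterInfoXml(String webAddr) {
      Client client = Client.create();
      ClientResponse response = null;
      try {
        response = client.resource(webAddr).path("ws/v1/cluster")
            .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
        if (response.getStatus() != SC_OK) {
          throw new YarnRuntimeException(
              "Bad response from remote web service: " + response.getStatus());
        }
        return response.getEntity(String.class);
      } finally {
        // Close the response and destroy the client even when the call throws,
        // so pooled HTTP connections are not leaked.
        if (response != null) {
          response.close();
        }
        client.destroy();
      }
    }
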


[13/50] [abbrv] hadoop git commit: HDDS-302. Fix javadoc and add implementation details in ContainerStateMachine. Contributed by Shashikant Banerjee.

Posted by bo...@apache.org.
HDDS-302. Fix javadoc and add implementation details in ContainerStateMachine. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/952dc2fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/952dc2fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/952dc2fd

Branch: refs/heads/YARN-7402
Commit: 952dc2fd557f9aaf0f144ee32d0b7731a84bad73
Parents: 3108d27
Author: Mukul Kumar Singh <ms...@apache.org>
Authored: Mon Jul 30 18:45:58 2018 +0530
Committer: Mukul Kumar Singh <ms...@apache.org>
Committed: Mon Jul 30 18:45:58 2018 +0530

----------------------------------------------------------------------
 .../hadoop/hdds/scm/XceiverClientRatis.java     | 30 ++----------------
 .../java/org/apache/hadoop/hdds/HddsUtils.java  | 33 ++++++++++++++++++++
 .../server/ratis/ContainerStateMachine.java     | 14 ++++++++-
 3 files changed, 49 insertions(+), 28 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/952dc2fd/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index 0effa8f..2541415 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdds.scm;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.ratis.shaded.com.google.protobuf
     .InvalidProtocolBufferException;
 import org.apache.hadoop.conf.Configuration;
@@ -183,34 +184,9 @@ public final class XceiverClientRatis extends XceiverClientSpi {
     return Objects.requireNonNull(client.get(), "client is null");
   }
 
-  private boolean isReadOnly(ContainerCommandRequestProto proto) {
-    switch (proto.getCmdType()) {
-    case ReadContainer:
-    case ReadChunk:
-    case ListKey:
-    case GetKey:
-    case GetSmallFile:
-    case ListContainer:
-    case ListChunk:
-      return true;
-    case CloseContainer:
-    case WriteChunk:
-    case UpdateContainer:
-    case CompactChunk:
-    case CreateContainer:
-    case DeleteChunk:
-    case DeleteContainer:
-    case DeleteKey:
-    case PutKey:
-    case PutSmallFile:
-    default:
-      return false;
-    }
-  }
-
   private RaftClientReply sendRequest(ContainerCommandRequestProto request)
       throws IOException {
-    boolean isReadOnlyRequest = isReadOnly(request);
+    boolean isReadOnlyRequest = HddsUtils.isReadOnly(request);
     ByteString byteString = request.toByteString();
     LOG.debug("sendCommand {} {}", isReadOnlyRequest, request);
     final RaftClientReply reply =  isReadOnlyRequest ?
@@ -222,7 +198,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
 
   private CompletableFuture<RaftClientReply> sendRequestAsync(
       ContainerCommandRequestProto request) throws IOException {
-    boolean isReadOnlyRequest = isReadOnly(request);
+    boolean isReadOnlyRequest = HddsUtils.isReadOnly(request);
     ByteString byteString = request.toByteString();
     LOG.debug("sendCommandAsync {} {}", isReadOnlyRequest, request);
     return isReadOnlyRequest ? getClient().sendReadOnlyAsync(() -> byteString) :

http://git-wip-us.apache.org/repos/asf/hadoop/blob/952dc2fd/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 48c6dce..33bf90c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -24,6 +24,7 @@ import com.google.common.net.HostAndPort;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
@@ -315,4 +316,36 @@ public final class HddsUtils {
     return name;
   }
 
+  /**
+   * Checks if the container command is read only or not.
+   * @param proto ContainerCommand Request proto
+   * @return True if it is read-only, false otherwise.
+   */
+  public static boolean isReadOnly(
+      ContainerProtos.ContainerCommandRequestProto proto) {
+    switch (proto.getCmdType()) {
+    case ReadContainer:
+    case ReadChunk:
+    case ListKey:
+    case GetKey:
+    case GetSmallFile:
+    case ListContainer:
+    case ListChunk:
+    case GetCommittedBlockLength:
+      return true;
+    case CloseContainer:
+    case WriteChunk:
+    case UpdateContainer:
+    case CompactChunk:
+    case CreateContainer:
+    case DeleteChunk:
+    case DeleteContainer:
+    case DeleteKey:
+    case PutKey:
+    case PutSmallFile:
+    default:
+      return false;
+    }
+  }
+
 }
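For readers skimming the digest, here is a minimal, hypothetical sketch (not part of the patch) of how a caller can branch on the relocated helper, in the same way XceiverClientRatis does above; the RequestRouter class and its labels are illustrative assumptions, only HddsUtils.isReadOnly is the real API.

import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;

final class RequestRouter {
  private RequestRouter() { }

  // Returns a label for the path the request would take; the labels mirror
  // the read-only vs. write split in XceiverClientRatis#sendRequest.
  static String route(ContainerProtos.ContainerCommandRequestProto request) {
    return HddsUtils.isReadOnly(request) ? "read-only path" : "write path";
  }
}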

http://git-wip-us.apache.org/repos/asf/hadoop/blob/952dc2fd/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index ac7aa57..c0dd0ba 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -57,7 +57,7 @@ import java.util.concurrent.ThreadPoolExecutor;
  * requests.
  *
  * Read only requests are classified in
- * {@link org.apache.hadoop.hdds.scm.XceiverClientRatis#isReadOnly}
+ * {@link org.apache.hadoop.hdds.HddsUtils#isReadOnly}
  * and these readonly requests are replied from the {@link #query(Message)}.
  *
  * The write requests can be divided into requests with user data
@@ -84,6 +84,11 @@ import java.util.concurrent.ThreadPoolExecutor;
  * 2) Write chunk commit operation is executed after write chunk state machine
  * operation. This will ensure that commit operation is sync'd with the state
  * machine operation.
+ *
+ * Synchronization between {@link #writeStateMachineData} and
+ * {@link #applyTransaction} needs to be enforced in the StateMachine
+ * implementation. For example, synchronization between writeChunk and
+ * createContainer in {@link ContainerStateMachine}.
  * */
 public class ContainerStateMachine extends BaseStateMachine {
   static final Logger LOG = LoggerFactory.getLogger(
@@ -213,6 +218,10 @@ public class ContainerStateMachine extends BaseStateMachine {
     return CompletableFuture.completedFuture(() -> ByteString.EMPTY);
   }
 
+  /*
+   * writeStateMachineData calls are not synchronized with each other,
+   * nor with applyTransaction.
+   */
   @Override
   public CompletableFuture<Message> writeStateMachineData(LogEntryProto entry) {
     try {
@@ -244,6 +253,9 @@ public class ContainerStateMachine extends BaseStateMachine {
     }
   }
 
+  /*
+   * ApplyTransaction calls in Ratis are sequential.
+   */
   @Override
   public CompletableFuture<Message> applyTransaction(TransactionContext trx) {
     try {




[14/50] [abbrv] hadoop git commit: HDDS-287. Add Close ContainerAction to Datanode#StateContext when the container gets full. Contributed by Nanda kumar.

Posted by bo...@apache.org.
HDDS-287. Add Close ContainerAction to Datanode#StateContext when the container gets full. Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3517a478
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3517a478
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3517a478

Branch: refs/heads/YARN-7402
Commit: 3517a47897457c11096ab57a4cb0b096a838a3ec
Parents: 952dc2f
Author: Nanda kumar <na...@apache.org>
Authored: Mon Jul 30 21:18:42 2018 +0530
Committer: Nanda kumar <na...@apache.org>
Committed: Mon Jul 30 21:18:42 2018 +0530

----------------------------------------------------------------------
 .../container/common/impl/HddsDispatcher.java   |  63 +++++++-
 .../statemachine/DatanodeStateMachine.java      |   2 +-
 .../common/statemachine/StateContext.java       |  14 +-
 .../container/ozoneimpl/OzoneContainer.java     |   6 +-
 .../common/impl/TestHddsDispatcher.java         | 152 +++++++++++++++++++
 .../container/common/impl/package-info.java     |  22 +++
 .../common/interfaces/TestHandler.java          |   4 +-
 .../container/ozoneimpl/TestOzoneContainer.java |   2 +-
 .../ozone/container/common/TestEndPoint.java    |  12 +-
 .../common/impl/TestCloseContainerHandler.java  |   2 +-
 .../container/metrics/TestContainerMetrics.java |   2 +-
 .../container/ozoneimpl/TestOzoneContainer.java |   2 +-
 .../container/server/TestContainerServer.java   |   2 +-
 .../genesis/BenchMarkDatanodeDispatcher.java    |   6 +-
 14 files changed, 270 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3517a478/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index 3d418e5..ee232db 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -21,12 +21,21 @@ package org.apache.hadoop.ozone.container.common.impl;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Maps;
+import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerInfo;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerAction;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
+import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@@ -35,11 +44,14 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerType;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ContainerLifeCycleState;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.util.Map;
+import java.util.Optional;
 
 /**
  * Ozone Container dispatcher takes a call from the netty server and routes it
@@ -53,6 +65,8 @@ public class HddsDispatcher implements ContainerDispatcher {
   private final Configuration conf;
   private final ContainerSet containerSet;
   private final VolumeSet volumeSet;
+  private final StateContext context;
+  private final float containerCloseThreshold;
   private String scmID;
   private ContainerMetrics metrics;
 
@@ -61,10 +75,11 @@ public class HddsDispatcher implements ContainerDispatcher {
    * XceiverServerHandler.
    */
   public HddsDispatcher(Configuration config, ContainerSet contSet,
-      VolumeSet volumes) {
+      VolumeSet volumes, StateContext context) {
     this.conf = config;
     this.containerSet = contSet;
     this.volumeSet = volumes;
+    this.context = context;
     this.handlers = Maps.newHashMap();
     this.metrics = ContainerMetrics.create(conf);
     for (ContainerType containerType : ContainerType.values()) {
@@ -72,6 +87,9 @@ public class HddsDispatcher implements ContainerDispatcher {
           Handler.getHandlerForContainerType(
               containerType, conf, containerSet, volumeSet, metrics));
     }
+    this.containerCloseThreshold = conf.getFloat(
+        ScmConfigKeys.OZONE_SCM_CONTAINER_CLOSE_THRESHOLD,
+        ScmConfigKeys.OZONE_SCM_CONTAINER_CLOSE_THRESHOLD_DEFAULT);
 
   }
 
@@ -113,7 +131,11 @@ public class HddsDispatcher implements ContainerDispatcher {
     } catch (StorageContainerException ex) {
       return ContainerUtils.logAndReturnError(LOG, ex, msg);
     }
-
+    // Small performance optimization. We check if the operation is of type
+    // write before trying to send CloseContainerAction.
+    if (!HddsUtils.isReadOnly(msg)) {
+      sendCloseContainerActionIfNeeded(container);
+    }
     Handler handler = getHandler(containerType);
     if (handler == null) {
       StorageContainerException ex = new StorageContainerException("Invalid " +
@@ -130,6 +152,43 @@ public class HddsDispatcher implements ContainerDispatcher {
     }
   }
 
+  /**
+   * If the container usage reaches the close threshold, we send a Close
+   * ContainerAction to SCM.
+   *
+   * @param container current state of container
+   */
+  private void sendCloseContainerActionIfNeeded(Container container) {
+    // We have to find a more efficient way to close a container.
+    Boolean isOpen = Optional.ofNullable(container)
+        .map(cont -> cont.getContainerState() == ContainerLifeCycleState.OPEN)
+        .orElse(Boolean.FALSE);
+    if (isOpen) {
+      ContainerData containerData = container.getContainerData();
+      double containerUsedPercentage = 1.0f * containerData.getBytesUsed() /
+          StorageUnit.GB.toBytes(containerData.getMaxSizeGB());
+      if (containerUsedPercentage >= containerCloseThreshold) {
+
+        ContainerInfo containerInfo = ContainerInfo.newBuilder()
+            .setContainerID(containerData.getContainerID())
+            .setReadCount(containerData.getReadCount())
+            .setWriteCount(containerData.getWriteCount())
+            .setReadBytes(containerData.getReadBytes())
+            .setWriteBytes(containerData.getWriteBytes())
+            .setUsed(containerData.getBytesUsed())
+            .setState(HddsProtos.LifeCycleState.OPEN)
+            .build();
+
+        ContainerAction action = ContainerAction.newBuilder()
+            .setContainer(containerInfo)
+            .setAction(ContainerAction.Action.CLOSE)
+            .setReason(ContainerAction.Reason.CONTAINER_FULL)
+            .build();
+        context.addContainerActionIfAbsent(action);
+      }
+    }
+  }
+
   @Override
   public Handler getHandler(ContainerProtos.ContainerType containerType) {
     return handlers.get(containerType);
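A small, hypothetical sketch (not from the patch) of the close-threshold arithmetic used by sendCloseContainerActionIfNeeded above; the real code converts the configured max size with StorageUnit.GB.toBytes, and the 0.9 threshold plus the 950 MB / 1 GB figures are assumptions taken from the default config key and the new test added later in this commit.

public final class CloseThresholdCheck {

  private CloseThresholdCheck() { }

  // bytesUsed: current container usage in bytes; maxSizeGB: configured max size.
  static boolean shouldRequestClose(long bytesUsed, int maxSizeGB,
      float closeThreshold) {
    double usedFraction = 1.0 * bytesUsed / (maxSizeGB * 1024L * 1024 * 1024);
    return usedFraction >= closeThreshold;
  }

  public static void main(String[] args) {
    // 950 MB used in a 1 GB container crosses an assumed 0.9 threshold,
    // so a CloseContainerAction would be queued (prints "true").
    System.out.println(shouldRequestClose(950L * 1024 * 1024, 1, 0.9f));
  }
}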

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3517a478/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index 69a243e..1ac42dd 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -89,7 +89,7 @@ public class DatanodeStateMachine implements Closeable {
     heartbeatFrequency = TimeUnit.SECONDS.toMillis(
         getScmHeartbeatInterval(conf));
     container = new OzoneContainer(this.datanodeDetails,
-        new OzoneConfiguration(conf));
+        new OzoneConfiguration(conf), context);
     nextHB = new AtomicLong(Time.monotonicNow());
 
      // When we add new handlers just adding a new handler here should do the

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3517a478/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
index 7862cc6..19c9496 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.ArrayList;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Queue;
@@ -213,6 +212,19 @@ public class StateContext {
   }
 
   /**
+   * Add ContainerAction to ContainerAction queue if it's not present.
+   *
+   * @param containerAction ContainerAction to be added
+   */
+  public void addContainerActionIfAbsent(ContainerAction containerAction) {
+    synchronized (containerActions) {
+      if (!containerActions.contains(containerAction)) {
+        containerActions.add(containerAction);
+      }
+    }
+  }
+
+  /**
    * Returns all the pending ContainerActions from the ContainerAction queue,
    * or empty list if the queue is empty.
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3517a478/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index 30fe113..85c947f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
+import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServer;
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc;
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
@@ -70,7 +71,7 @@ public class OzoneContainer {
    * @throws IOException
    */
   public OzoneContainer(DatanodeDetails datanodeDetails, OzoneConfiguration
-      conf) throws IOException {
+      conf, StateContext context) throws IOException {
     this.dnDetails = datanodeDetails;
     this.config = conf;
     this.volumeSet = new VolumeSet(datanodeDetails.getUuidString(), conf);
@@ -79,7 +80,8 @@ public class OzoneContainer {
         ScmConfigKeys.DFS_CONTAINER_GRPC_ENABLED_KEY,
         ScmConfigKeys.DFS_CONTAINER_GRPC_ENABLED_DEFAULT);
     buildContainerSet();
-    hddsDispatcher = new HddsDispatcher(config, containerSet, volumeSet);
+    hddsDispatcher = new HddsDispatcher(config, containerSet, volumeSet,
+        context);
     server = new XceiverServerSpi[]{
         useGrpc ? new XceiverServerGrpc(datanodeDetails, this.config, this
             .hddsDispatcher) :

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3517a478/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
new file mode 100644
index 0000000..b107782
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
@@ -0,0 +1,152 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.impl;
+
+import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto
+    .ContainerProtos.ContainerCommandResponseProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ContainerCommandRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .WriteChunkRequestProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerAction;
+import org.apache.hadoop.ozone.container.common.interfaces.Container;
+import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
+import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.ratis.shaded.com.google.protobuf.ByteString;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.UUID;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+/**
+ * Test-cases to verify the functionality of HddsDispatcher.
+ */
+public class TestHddsDispatcher {
+
+  @Test
+  public void testContainerCloseActionWhenFull() throws IOException {
+    String testDir = GenericTestUtils.getTempPath(
+        TestHddsDispatcher.class.getSimpleName());
+    try {
+      UUID scmId = UUID.randomUUID();
+      OzoneConfiguration conf = new OzoneConfiguration();
+      conf.set(HDDS_DATANODE_DIR_KEY, testDir);
+      DatanodeDetails dd = randomDatanodeDetails();
+      ContainerSet containerSet = new ContainerSet();
+      VolumeSet volumeSet = new VolumeSet(dd.getUuidString(), conf);
+      StateContext context = Mockito.mock(StateContext.class);
+      KeyValueContainerData containerData = new KeyValueContainerData(1L, 1);
+      Container container = new KeyValueContainer(containerData, conf);
+      container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
+          scmId.toString());
+      containerSet.addContainer(container);
+      HddsDispatcher hddsDispatcher = new HddsDispatcher(
+          conf, containerSet, volumeSet, context);
+      hddsDispatcher.setScmId(scmId.toString());
+      ContainerCommandResponseProto responseOne = hddsDispatcher.dispatch(
+          getWriteChunkRequest(dd.getUuidString(), 1L, 1L));
+      Assert.assertEquals(ContainerProtos.Result.SUCCESS,
+          responseOne.getResult());
+      verify(context, times(0))
+          .addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
+      containerData.setBytesUsed(Double.valueOf(
+          StorageUnit.MB.toBytes(950)).longValue());
+      ContainerCommandResponseProto responseTwo = hddsDispatcher.dispatch(
+          getWriteChunkRequest(dd.getUuidString(), 1L, 2L));
+      Assert.assertEquals(ContainerProtos.Result.SUCCESS,
+          responseTwo.getResult());
+      verify(context, times(1))
+          .addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
+
+    } finally {
+      FileUtils.deleteDirectory(new File(testDir));
+    }
+
+  }
+
+  // This method has to be removed once we move scm/TestUtils.java
+  // from server-scm project to container-service or to common project.
+  private static DatanodeDetails randomDatanodeDetails() {
+    DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.STANDALONE, 0);
+    DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.RATIS, 0);
+    DatanodeDetails.Port restPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.REST, 0);
+    DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
+    builder.setUuid(UUID.randomUUID().toString())
+        .setHostName("localhost")
+        .setIpAddress("127.0.0.1")
+        .addPort(containerPort)
+        .addPort(ratisPort)
+        .addPort(restPort);
+    return builder.build();
+  }
+
+  private ContainerCommandRequestProto getWriteChunkRequest(
+      String datanodeId, Long containerId, Long localId) {
+
+    ByteString data = ByteString.copyFrom(
+        UUID.randomUUID().toString().getBytes());
+    ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo
+        .newBuilder()
+        .setChunkName(
+            DigestUtils.md5Hex("dummy-key") + "_stream_"
+                + containerId + "_chunk_" + localId)
+        .setOffset(0)
+        .setLen(data.size())
+        .build();
+
+    WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto
+        .newBuilder()
+        .setBlockID(new BlockID(containerId, localId)
+            .getDatanodeBlockIDProtobuf())
+        .setChunkData(chunk)
+        .setData(data);
+
+    return ContainerCommandRequestProto
+        .newBuilder()
+        .setContainerID(containerId)
+        .setCmdType(ContainerProtos.Type.WriteChunk)
+        .setTraceID(UUID.randomUUID().toString())
+        .setDatanodeUuid(datanodeId)
+        .setWriteChunk(writeChunkRequest)
+        .build();
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3517a478/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/package-info.java
new file mode 100644
index 0000000..07c78c0
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/package-info.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Datanode container related test-cases.
+ */
+package org.apache.hadoop.ozone.container.common.impl;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3517a478/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
index 6660e9b..c9733f8 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
@@ -52,15 +52,13 @@ public class TestHandler {
   private VolumeSet volumeSet;
   private Handler handler;
 
-  private final static String DATANODE_UUID = UUID.randomUUID().toString();
-
   @Before
   public void setup() throws Exception {
     this.conf = new Configuration();
     this.containerSet = Mockito.mock(ContainerSet.class);
     this.volumeSet = Mockito.mock(VolumeSet.class);
 
-    this.dispatcher = new HddsDispatcher(conf, containerSet, volumeSet);
+    this.dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, null);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3517a478/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 284ffa3..19ec6a2 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -86,7 +86,7 @@ public class TestOzoneContainer {
     // When OzoneContainer is started, the containers from disk should be
     // loaded into the containerSet.
     OzoneContainer ozoneContainer = new
-        OzoneContainer(datanodeDetails, conf);
+        OzoneContainer(datanodeDetails, conf, null);
     ContainerSet containerset = ozoneContainer.getContainerSet();
     assertEquals(10, containerset.containerCount());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3517a478/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index e24e73e..e9359b8 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -151,8 +151,8 @@ public class TestEndPoint {
     OzoneConfiguration conf = SCMTestUtils.getConf();
     try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
         serverAddress, 1000)) {
-      OzoneContainer ozoneContainer = new OzoneContainer(TestUtils.randomDatanodeDetails(),
-          conf);
+      OzoneContainer ozoneContainer = new OzoneContainer(
+          TestUtils.randomDatanodeDetails(), conf, null);
       rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
       VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
           conf, ozoneContainer);
@@ -176,7 +176,7 @@ public class TestEndPoint {
       GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
           .captureLogs(VersionEndpointTask.LOG);
       OzoneContainer ozoneContainer = new OzoneContainer(TestUtils
-          .randomDatanodeDetails(), conf);
+          .randomDatanodeDetails(), conf, null);
       rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
       VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
           conf, ozoneContainer);
@@ -228,7 +228,7 @@ public class TestEndPoint {
         nonExistentServerAddress, 1000)) {
       rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
       OzoneContainer ozoneContainer = new OzoneContainer(TestUtils.randomDatanodeDetails(),
-          conf);
+          conf, null);
       VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
           conf, ozoneContainer);
       EndpointStateMachine.EndPointStates newState = versionTask.call();
@@ -254,8 +254,8 @@ public class TestEndPoint {
     try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
         serverAddress, (int) rpcTimeout)) {
       rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
-      OzoneContainer ozoneContainer = new OzoneContainer(TestUtils.randomDatanodeDetails(),
-          conf);
+      OzoneContainer ozoneContainer = new OzoneContainer(
+          TestUtils.randomDatanodeDetails(), conf, null);
       VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
           conf, ozoneContainer);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3517a478/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
index d67cf88..73fa70d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
@@ -86,7 +86,7 @@ public class TestCloseContainerHandler {
             .setHostName("localhost").setIpAddress("127.0.0.1").build();
     volumeSet = new VolumeSet(datanodeDetails.getUuidString(), conf);
 
-    dispatcher = new HddsDispatcher(conf, containerSet, volumeSet);
+    dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, null);
     handler = (KeyValueHandler) dispatcher
         .getHandler(ContainerProtos.ContainerType.KeyValueContainer);
     openContainerBlockMap = handler.getOpenContainerBlockMap();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3517a478/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
index 13ed192..19b561a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
@@ -78,7 +78,7 @@ public class TestContainerMetrics {
           datanodeDetails.getUuidString(), conf);
       ContainerSet containerSet = new ContainerSet();
       HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet,
-          volumeSet);
+          volumeSet, null);
       dispatcher.setScmId(UUID.randomUUID().toString());
 
       server = new XceiverServer(datanodeDetails, conf, dispatcher);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3517a478/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index d271ed3..215dd21 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -72,7 +72,7 @@ public class TestOzoneContainer {
       conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);
 
       container = new OzoneContainer(TestUtils.randomDatanodeDetails(),
-          conf);
+          conf, null);
       //Setting scmId, as we start manually ozone container.
       container.getDispatcher().setScmId(UUID.randomUUID().toString());
       container.start();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3517a478/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
index bdb26fb..ebcc930 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
@@ -214,7 +214,7 @@ public class TestContainerServer {
               .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
 
       HddsDispatcher dispatcher = new HddsDispatcher(
-          conf, mock(ContainerSet.class), mock(VolumeSet.class));
+          conf, mock(ContainerSet.class), mock(VolumeSet.class), null);
       dispatcher.init();
       DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
       server = new XceiverServer(datanodeDetails, conf, dispatcher);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3517a478/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
index e757a7f..3c49fb6 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
@@ -20,6 +20,9 @@ package org.apache.hadoop.ozone.genesis;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
+import org.apache.hadoop.ozone.container.common.statemachine
+    .DatanodeStateMachine.DatanodeStates;
+import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.ratis.shaded.com.google.protobuf.ByteString;
 import org.apache.commons.codec.digest.DigestUtils;
@@ -104,7 +107,8 @@ public class BenchMarkDatanodeDispatcher {
     ContainerSet containerSet = new ContainerSet();
     VolumeSet volumeSet = new VolumeSet(datanodeUuid, conf);
 
-    dispatcher = new HddsDispatcher(conf, containerSet, volumeSet);
+    dispatcher = new HddsDispatcher(conf, containerSet, volumeSet,
+        new StateContext(conf, DatanodeStates.RUNNING, null));
     dispatcher.init();
 
     containerCount = new AtomicInteger();




[38/50] [abbrv] hadoop git commit: YARN-8594. [UI2] Display current logged in user. Contributed by Akhil PB.

Posted by bo...@apache.org.
YARN-8594. [UI2] Display current logged in user. Contributed by Akhil PB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ea81169
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ea81169
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ea81169

Branch: refs/heads/YARN-7402
Commit: 1ea81169bad5bd6433348ef8e5e7ac12c5a9cb5e
Parents: 41da205
Author: Sunil G <su...@apache.org>
Authored: Thu Aug 2 12:41:06 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Thu Aug 2 12:41:06 2018 +0530

----------------------------------------------------------------------
 .../webapp/app/adapters/cluster-user-info.js    | 29 +++++++++++++
 .../main/webapp/app/controllers/application.js  | 10 ++++-
 .../main/webapp/app/models/cluster-user-info.js | 24 +++++++++++
 .../src/main/webapp/app/routes/application.js   |  6 ++-
 .../webapp/app/serializers/cluster-user-info.js | 43 ++++++++++++++++++++
 .../src/main/webapp/app/styles/app.scss         | 12 +++++-
 .../main/webapp/app/templates/application.hbs   | 15 +++++--
 7 files changed, 132 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea81169/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/cluster-user-info.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/cluster-user-info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/cluster-user-info.js
new file mode 100644
index 0000000..a49c0f5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/cluster-user-info.js
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import RESTAbstractAdapter from './restabstract';
+
+export default RESTAbstractAdapter.extend({
+  address: "rmWebAddress",
+  restNameSpace: "cluster",
+  serverName: "RM",
+
+  pathForType(/*modelName*/) {
+    return 'userinfo';
+  }
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea81169/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
index 986b1fd..75b072a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js
@@ -58,5 +58,13 @@ export default Ember.Controller.extend({
       return true;
     }
     return false;
-  }.property('currentPath')
+  }.property('currentPath'),
+
+  clusterInfo: function() {
+    return this.model.clusterInfo.get('firstObject');
+  }.property('model.clusterInfo'),
+
+  userInfo: function() {
+    return this.model.userInfo.get('firstObject');
+  }.property('model.userInfo'),
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea81169/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/cluster-user-info.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/cluster-user-info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/cluster-user-info.js
new file mode 100644
index 0000000..c2867f8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/cluster-user-info.js
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import DS from 'ember-data';
+
+export default DS.Model.extend({
+    rmLoginUser: DS.attr('string'),
+    requestedUser: DS.attr('string')
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea81169/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/application.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/application.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/application.js
index 596b303..e30baaa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/application.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/application.js
@@ -21,7 +21,10 @@ import AbstractRoute from './abstract';
 
 export default AbstractRoute.extend({
   model() {
-    return this.store.findAll('ClusterInfo', {reload: true});
+    return Ember.RSVP.hash({
+      clusterInfo: this.store.findAll('ClusterInfo', {reload: true}),
+      userInfo: this.store.findAll('cluster-user-info', {reload: true})
+    });
   },
 
   actions: {
@@ -46,5 +49,6 @@ export default AbstractRoute.extend({
 
   unloadAll: function() {
     this.store.unloadAll('ClusterInfo');
+    this.store.unloadAll('cluster-user-info');
   },
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea81169/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/cluster-user-info.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/cluster-user-info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/cluster-user-info.js
new file mode 100644
index 0000000..617e960
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/cluster-user-info.js
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import DS from 'ember-data';
+
+export default DS.JSONAPISerializer.extend({
+  normalizeSingleResponse(store, primaryModelClass, payload, id,
+    requestType) {
+    var fixedPayload = {
+      id: id,
+      type: primaryModelClass.modelName,
+      attributes: payload
+    };
+
+    return this._super(store, primaryModelClass, fixedPayload, id, requestType);
+  },
+
+  normalizeArrayResponse(store, primaryModelClass, payload, id,
+    requestType) {
+    // return expected is { data: [ {}, {} ] }
+    var normalizedArrayResponse = {};
+
+    normalizedArrayResponse.data = [
+      this.normalizeSingleResponse(store, primaryModelClass, payload, Date.now(), requestType)
+    ];
+    return normalizedArrayResponse;
+  }
+});
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea81169/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.scss
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.scss b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.scss
index c0aaebe..59f6245 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.scss
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.scss
@@ -472,7 +472,8 @@ div.attempt-info-panel table > tbody > tr > td:last-of-type {
 }
 .yarn-cluster-info {
   display: flex;
-  margin-left: auto
+  margin-left: auto;
+  margin-top: -7px;
 }
 
 .yarn-ui-footer {
@@ -729,6 +730,15 @@ div.service-action-mask img {
   overflow: scroll;
 }
 
+div.loggedin-user {
+  float: right;
+  padding: 15px 5px;
+  color: #555;
+  .username {
+    font-weight: bold;
+  }
+}
+
 .diagnostic-info {
   pre {
     margin-bottom: 0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea81169/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
index 56fef26..ecb1481 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs
@@ -72,6 +72,11 @@
             {{/link-to}}
           {{/link-to}}
         </ul>
+        {{#if userInfo}}
+        <div class="loggedin-user">
+          Logged in as: <span class="username">{{userInfo.requestedUser}}</span>
+        </div>
+        {{/if}}
       </div><!-- /.navbar-collapse -->
     </div><!-- /.container-fluid -->
   </nav>
@@ -97,13 +102,15 @@
     </div>
   </div>
   <div class="yarn-cluster-info">
+    {{#if clusterInfo}}
     <div>
-      <strong>v{{model.firstObject.hadoopVersion}}</strong>
-      <span class="yarn-cluster-status yarn-tooltip" title="Hadoop Version: {{model.firstObject.getYARNBuildHash}} &#10;Started on: {{date-formatter model.firstObject.startedOn}}" data-toggle="tooltip" data-placement="top">
-        <i class={{lower model.firstObject.state}} />
+      <strong>v{{clusterInfo.hadoopVersion}}</strong>
+      <span class="yarn-cluster-status yarn-tooltip" title="Hadoop Version: {{clusterInfo.getYARNBuildHash}} &#10;Started on: {{date-formatter clusterInfo.startedOn}}" data-toggle="tooltip" data-placement="top">
+        <i class={{lower clusterInfo.state}} />
       </span>
-      <div>Started at {{date-formatter model.firstObject.startedOn}}</div>
+      <div>Started at {{date-formatter clusterInfo.startedOn}}</div>
     </div>
+    {{/if}}
   </div>
 </div>
 




[37/50] [abbrv] hadoop git commit: HDDS-310. VolumeSet shutdown hook fails on datanode restart. Contributed by Bharat Viswanadham.

Posted by bo...@apache.org.
HDDS-310. VolumeSet shutdown hook fails on datanode restart. Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/41da2050
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/41da2050
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/41da2050

Branch: refs/heads/YARN-7402
Commit: 41da2050bdec14709a86fa8a5cf7da82415fd989
Parents: 735b492
Author: Nanda kumar <na...@apache.org>
Authored: Thu Aug 2 11:35:22 2018 +0530
Committer: Nanda kumar <na...@apache.org>
Committed: Thu Aug 2 11:35:22 2018 +0530

----------------------------------------------------------------------
 .../ozone/container/common/volume/VolumeSet.java     | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/41da2050/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
index 4a1487b..06f48fc 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
@@ -167,7 +167,7 @@ public class VolumeSet {
 
     // Ensure volume threads are stopped and scm df is saved during shutdown.
     shutdownHook = () -> {
-      shutdown();
+      saveVolumeSetUsed();
     };
     ShutdownHookManager.get().addShutdownHook(shutdownHook,
         SHUTDOWN_HOOK_PRIORITY);
@@ -303,7 +303,11 @@ public class VolumeSet {
     return choosingPolicy.chooseVolume(getVolumesList(), containerSize);
   }
 
-  public void shutdown() {
+  /**
+   * This method calls shutdown on each volume to stop the volume usage
+   * thread and to write scmUsed on each volume.
+   */
+  private void saveVolumeSetUsed() {
     for (HddsVolume hddsVolume : volumeMap.values()) {
       try {
         hddsVolume.shutdown();
@@ -312,7 +316,14 @@ public class VolumeSet {
             ex);
       }
     }
+  }
 
+  /**
+   * Shuts down the VolumeSet: calls
+   * {@link VolumeSet#saveVolumeSetUsed} and removes the shutdown hook.
+   */
+  public void shutdown() {
+    saveVolumeSetUsed();
     if (shutdownHook != null) {
       ShutdownHookManager.get().removeShutdownHook(shutdownHook);
     }
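A hypothetical sketch (not part of the patch) of the hook-lifecycle pattern this fix relies on: the registered hook only persists usage, and an explicit shutdown removes the hook so a datanode restart does not trip over a stale hook. Class and method names below are illustrative assumptions; only ShutdownHookManager is the real Hadoop API, and the priority value is a placeholder.

import org.apache.hadoop.util.ShutdownHookManager;

final class HookLifecycleSketch {
  private static final int SHUTDOWN_HOOK_PRIORITY = 0; // assumed priority
  private Runnable shutdownHook;

  void start() {
    // The hook only persists per-volume usage (scmUsed); it does not tear
    // down the VolumeSet, mirroring the change above.
    shutdownHook = this::saveVolumeSetUsed;
    ShutdownHookManager.get().addShutdownHook(shutdownHook,
        SHUTDOWN_HOOK_PRIORITY);
  }

  void shutdown() {
    saveVolumeSetUsed();
    if (shutdownHook != null) {
      ShutdownHookManager.get().removeShutdownHook(shutdownHook);
    }
  }

  private void saveVolumeSetUsed() {
    // persist scmUsed for each volume here
  }
}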




[41/50] [abbrv] hadoop git commit: HDDS-290. putKey is failing with KEY_ALLOCATION_ERROR. Contributed by Xiaoyu Yao.

Posted by bo...@apache.org.
HDDS-290. putKey is failing with KEY_ALLOCATION_ERROR. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e83719c8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e83719c8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e83719c8

Branch: refs/heads/YARN-7402
Commit: e83719c830dd4927c8eef26062c56c0d62b2f04f
Parents: 7c36857
Author: Nanda kumar <na...@apache.org>
Authored: Thu Aug 2 19:02:25 2018 +0530
Committer: Nanda kumar <na...@apache.org>
Committed: Thu Aug 2 19:02:25 2018 +0530

----------------------------------------------------------------------
 .../src/main/compose/ozone/docker-config        |  1 +
 .../acceptance/ozonefs/ozonesinglenode.robot    | 49 ++++++++++++++++++++
 .../apache/hadoop/ozone/web/ozShell/Shell.java  |  4 ++
 .../ozone/web/ozShell/keys/PutKeyHandler.java   | 16 +++++--
 4 files changed, 66 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e83719c8/hadoop-dist/src/main/compose/ozone/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/ozone/docker-config b/hadoop-dist/src/main/compose/ozone/docker-config
index 50abb18..1b75c01 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-config
+++ b/hadoop-dist/src/main/compose/ozone/docker-config
@@ -22,6 +22,7 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm
 OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
 OZONE-SITE.XML_ozone.handler.type=distributed
 OZONE-SITE.XML_ozone.scm.client.address=scm
+OZONE-SITE.XML_ozone.replication=1
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true
 HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
 LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e83719c8/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonesinglenode.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonesinglenode.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonesinglenode.robot
new file mode 100644
index 0000000..b844cee
--- /dev/null
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonesinglenode.robot
@@ -0,0 +1,49 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation       Ozonefs Single Node Test
+Library             OperatingSystem
+Suite Setup         Startup Ozone cluster with size          1
+Suite Teardown      Teardown Ozone cluster
+Resource            ../commonlib.robot
+
+*** Variables ***
+${COMPOSEFILE}          ${CURDIR}/docker-compose.yaml
+${PROJECTDIR}           ${CURDIR}/../../../../../..
+
+
+*** Test Cases ***
+Create volume and bucket
+    Execute on          datanode        ozone oz -createVolume http://ozoneManager/fstest -user bilbo -quota 100TB -root
+    Execute on          datanode        ozone oz -createBucket http://ozoneManager/fstest/bucket1
+
+Check volume from ozonefs
+    ${result} =         Execute on          hadooplast        hdfs dfs -ls o3://bucket1.fstest/
+
+Create directory from ozonefs
+                        Execute on          hadooplast        hdfs dfs -mkdir -p o3://bucket1.fstest/testdir/deep
+    ${result} =         Execute on          ozoneManager      ozone oz -listKey o3://ozoneManager/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
+                                            Should contain    ${result}         testdir/deep
+Test key handling
+                    Execute on          datanode        ozone oz -putKey o3://ozoneManager/fstest/bucket1/key1 -file NOTICE.txt -replicationFactor 1
+                    Execute on          datanode        rm -f NOTICE.txt.1
+                    Execute on          datanode        ozone oz -getKey o3://ozoneManager/fstest/bucket1/key1 -file NOTICE.txt.1
+                    Execute on          datanode        ls -l NOTICE.txt.1
+    ${result} =     Execute on          datanode        ozone oz -infoKey o3://ozoneManager/fstest/bucket1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")'
+                    Should contain      ${result}       createdOn
+    ${result} =     Execute on          datanode        ozone oz -listKey o3://ozoneManager/fstest/bucket1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName'
+                    Should Be Equal     ${result}       key1
+                    Execute on          datanode        ozone oz -deleteKey o3://ozoneManager/fstest/bucket1/key1 -v

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e83719c8/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
index 726f4ca..41eef1a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
@@ -92,6 +92,7 @@ public class Shell extends Configured implements Tool {
   public static final String DELETE_KEY = "deleteKey";
   public static final String LIST_KEY = "listKey";
   public static final String FILE = "file";
+  public static final String REPLICATION_FACTOR = "replicationFactor";
 
   // Listing related command line arguments
   public static final String LIST_LENGTH = "length";
@@ -292,6 +293,9 @@ public class Shell extends Configured implements Tool {
         new Option(FILE, true, "Data file path");
     opts.addOption(fileArgument);
 
+    Option repFactor =
+        new Option(REPLICATION_FACTOR, true, "Replication factor (1 or 3)");
+    opts.addOption(repFactor);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e83719c8/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
index ed8cc88..c73307d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
@@ -44,7 +44,9 @@ import java.nio.file.Paths;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_DEFAULT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT;
 
 /**
  * Puts a file into an ozone bucket.
@@ -103,11 +105,17 @@ public class PutKeyHandler extends Handler {
     }
 
     Configuration conf = new OzoneConfiguration();
-    ReplicationFactor replicationFactor = ReplicationFactor.valueOf(
-        conf.getInt(OZONE_REPLICATION, ReplicationFactor.THREE.getValue()));
-    ReplicationType replicationType = ReplicationType.valueOf(
-        conf.get(OZONE_REPLICATION_TYPE, ReplicationType.RATIS.toString()));
+    ReplicationFactor replicationFactor;
+    if (cmd.hasOption(Shell.REPLICATION_FACTOR)) {
+      replicationFactor = ReplicationFactor.valueOf(Integer.parseInt(cmd
+          .getOptionValue(Shell.REPLICATION_FACTOR)));
+    } else {
+      replicationFactor = ReplicationFactor.valueOf(
+          conf.getInt(OZONE_REPLICATION, OZONE_REPLICATION_DEFAULT));
+    }
 
+    ReplicationType replicationType = ReplicationType.valueOf(
+        conf.get(OZONE_REPLICATION_TYPE, OZONE_REPLICATION_TYPE_DEFAULT));
     OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
     OzoneBucket bucket = vol.getBucket(bucketName);
     OzoneOutputStream outputStream = bucket
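
As a side note on how the new option is meant to resolve (a hedged sketch, not the actual Shell/PutKeyHandler wiring): an explicit -replicationFactor on the command line takes precedence, and only in its absence does the configured ozone.replication default apply. The class name, the hard-coded default value, and the standalone main() below are illustrative assumptions for a self-contained example.

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;

public class ReplicationFactorResolution {
  private static final String REPLICATION_FACTOR = "replicationFactor";
  // Stand-in for the ozone.replication value read from configuration.
  private static final int CONFIGURED_DEFAULT = 1;

  public static void main(String[] args) throws Exception {
    Options opts = new Options();
    opts.addOption(new Option(REPLICATION_FACTOR, true, "Replication factor (1 or 3)"));
    CommandLine cmd = new DefaultParser().parse(opts, args);

    // Command-line value wins; otherwise fall back to the configured default.
    int factor = cmd.hasOption(REPLICATION_FACTOR)
        ? Integer.parseInt(cmd.getOptionValue(REPLICATION_FACTOR))
        : CONFIGURED_DEFAULT;
    System.out.println("Effective replication factor: " + factor);
  }
}

For example, passing "-replicationFactor 1" yields 1 even when the configured default is 3, which is what the single-node robot test above relies on.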




[19/50] [abbrv] hadoop git commit: HDDS-305. Datanode StateContext#addContainerActionIfAbsent will add container action even if there already is a ContainerAction. Contributed by Nanda kumar.

Posted by bo...@apache.org.
HDDS-305. Datanode StateContext#addContainerActionIfAbsent will add container action even if there already is a ContainerAction. Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7631e0ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7631e0ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7631e0ad

Branch: refs/heads/YARN-7402
Commit: 7631e0adaefcccdbee693089b4c391bea4107a19
Parents: 3e06a5d
Author: Nanda kumar <na...@apache.org>
Authored: Tue Jul 31 17:27:51 2018 +0530
Committer: Nanda kumar <na...@apache.org>
Committed: Tue Jul 31 17:27:51 2018 +0530

----------------------------------------------------------------------
 .../ozone/container/common/impl/HddsDispatcher.java    | 13 +------------
 .../main/proto/StorageContainerDatanodeProtocol.proto  |  2 +-
 .../states/endpoint/TestHeartbeatEndpointTask.java     |  5 +----
 3 files changed, 3 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7631e0ad/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index ee232db..d92eb17 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -168,19 +168,8 @@ public class HddsDispatcher implements ContainerDispatcher {
       double containerUsedPercentage = 1.0f * containerData.getBytesUsed() /
           StorageUnit.GB.toBytes(containerData.getMaxSizeGB());
       if (containerUsedPercentage >= containerCloseThreshold) {
-
-        ContainerInfo containerInfo = ContainerInfo.newBuilder()
-            .setContainerID(containerData.getContainerID())
-            .setReadCount(containerData.getReadCount())
-            .setWriteCount(containerData.getWriteCount())
-            .setReadBytes(containerData.getReadBytes())
-            .setWriteBytes(containerData.getWriteBytes())
-            .setUsed(containerData.getBytesUsed())
-            .setState(HddsProtos.LifeCycleState.OPEN)
-            .build();
-
         ContainerAction action = ContainerAction.newBuilder()
-            .setContainer(containerInfo)
+            .setContainerID(containerData.getContainerID())
             .setAction(ContainerAction.Action.CLOSE)
             .setReason(ContainerAction.Reason.CONTAINER_FULL)
             .build();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7631e0ad/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
index 0c52efb..71c41e3 100644
--- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
+++ b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
@@ -157,7 +157,7 @@ message ContainerAction {
     CONTAINER_FULL = 1;
   }
 
-  required ContainerInfo container = 1;
+  required int64 containerID = 1;
   required Action action = 2;
   optional Reason reason = 3;
 }
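
One plausible reading of why carrying only the containerID fixes the duplicate actions (this is an inference from the diff, not something the patch states): once the queued action is keyed by a stable identifier rather than a full ContainerInfo whose usage counters change between heartbeats, two reports for the same container compare equal and "add if absent" can actually suppress the second one. The sketch below is a plain-Java stand-in with an illustrative CloseAction type, not the protobuf-generated class or the real StateContext.

import java.util.LinkedHashSet;
import java.util.Objects;
import java.util.Set;

public class ContainerActionDedup {
  // Stand-in for an action identified solely by its container ID.
  static final class CloseAction {
    final long containerId;
    CloseAction(long containerId) { this.containerId = containerId; }
    @Override public boolean equals(Object o) {
      return o instanceof CloseAction && ((CloseAction) o).containerId == containerId;
    }
    @Override public int hashCode() { return Objects.hash(containerId); }
  }

  public static void main(String[] args) {
    Set<CloseAction> pending = new LinkedHashSet<>();
    // The same container reported twice yields a single pending action.
    pending.add(new CloseAction(1L));
    pending.add(new CloseAction(1L));
    System.out.println("Pending close actions: " + pending.size()); // prints 1
  }
}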

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7631e0ad/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
index b4d718d..13de11f 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
@@ -289,10 +289,7 @@ public class TestHeartbeatEndpointTask {
 
   private ContainerAction getContainerAction() {
     ContainerAction.Builder builder = ContainerAction.newBuilder();
-    ContainerInfo containerInfo = ContainerInfo.newBuilder()
-        .setContainerID(1L)
-        .build();
-    builder.setContainer(containerInfo)
+    builder.setContainerID(1L)
         .setAction(ContainerAction.Action.CLOSE)
         .setReason(ContainerAction.Reason.CONTAINER_FULL);
     return builder.build();




[33/50] [abbrv] hadoop git commit: YARN-8600. RegistryDNS hang when remote lookup does not reply. Contributed by Eric Yang

Posted by bo...@apache.org.
YARN-8600. RegistryDNS hang when remote lookup does not reply. Contributed by Eric Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/603a5747
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/603a5747
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/603a5747

Branch: refs/heads/YARN-7402
Commit: 603a57476ce0bf9514f0432a235f29432ca4c323
Parents: 67c65da
Author: Shane Kumpf <sk...@apache.org>
Authored: Wed Aug 1 12:22:01 2018 -0600
Committer: Shane Kumpf <sk...@apache.org>
Committed: Wed Aug 1 12:22:01 2018 -0600

----------------------------------------------------------------------
 .../hadoop/registry/server/dns/LookupTask.java  | 39 ++++++++++++++++++++
 .../hadoop/registry/server/dns/RegistryDNS.java | 21 ++++++++---
 .../registry/server/dns/TestRegistryDNS.java    |  8 ++++
 3 files changed, 63 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/603a5747/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/LookupTask.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/LookupTask.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/LookupTask.java
new file mode 100644
index 0000000..c2fc4a9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/LookupTask.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.registry.server.dns;
+
+import java.util.concurrent.Callable;
+
+import org.xbill.DNS.Lookup;
+import org.xbill.DNS.Name;
+import org.xbill.DNS.Record;
+
+public class LookupTask implements Callable<Record[]> {
+
+  private Name name;
+  private int type;
+
+  public LookupTask(Name name, int type) {
+    this.name = name;
+    this.type = type;
+  }
+
+  @Override
+  public Record[] call() throws Exception {
+    return new Lookup(name, type).run();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/603a5747/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
index 0022843..52e49a3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
@@ -99,9 +99,13 @@ import java.util.Properties;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -941,7 +945,7 @@ public class RegistryDNS extends AbstractService implements DNSOperations,
    * @param port    local port.
    * @throws IOException if the UDP processing fails.
    */
-  private void serveNIOUDP(DatagramChannel channel,
+  private synchronized void serveNIOUDP(DatagramChannel channel,
       InetAddress addr, int port) throws Exception {
     SocketAddress remoteAddress = null;
     try {
@@ -1177,13 +1181,20 @@ public class RegistryDNS extends AbstractService implements DNSOperations,
    * @return DNS records
    */
   protected Record[] getRecords(Name name, int type) {
+    Record[] result = null;
+    ExecutorService executor = Executors.newSingleThreadExecutor();
+    Future<Record[]> future = executor.submit(new LookupTask(name, type));
     try {
-      return new Lookup(name, type).run();
-    } catch (NullPointerException |
+      result = future.get(1500, TimeUnit.MILLISECONDS);
+      return result;
+    } catch (InterruptedException | ExecutionException |
+        TimeoutException | NullPointerException |
         ExceptionInInitializerError e) {
-      LOG.error("Fail to lookup: " + name, e);
+      LOG.warn("Failed to lookup: {} type: {}", name, Type.string(type), e);
+      return result;
+    } finally {
+      executor.shutdown();
     }
-    return null;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/603a5747/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
index 969faf9..a0c4ca3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java
@@ -697,6 +697,14 @@ public class TestRegistryDNS extends Assert {
     assertTrue("not an ARecord", recs[0] instanceof ARecord);
     assertTrue("not an ARecord", recs[1] instanceof ARecord);
   }
+
+  @Test(timeout=5000)
+  public void testUpstreamFault() throws Exception {
+    Name name = Name.fromString("19.0.17.172.in-addr.arpa.");
+    Record[] recs = getRegistryDNS().getRecords(name, Type.CNAME);
+    assertNull("Record is not null", recs);
+  }
+
   public RegistryDNS getRegistryDNS() {
     return registryDNS;
   }
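
The general pattern applied here, shown as a self-contained sketch (the class, the simulated lookup, and the printed messages are illustrative; only the 1500 ms bound mirrors the diff): hand the potentially blocking resolver call to a worker thread and wait on the Future with a timeout, so an upstream server that never answers cannot stall the serving loop.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class BoundedLookup {
  // Stands in for LookupTask: any lookup that may block indefinitely.
  static String slowLookup() throws InterruptedException {
    Thread.sleep(10_000); // simulate an upstream server that never replies
    return "answer";
  }

  public static void main(String[] args) {
    ExecutorService executor = Executors.newSingleThreadExecutor();
    Future<String> future = executor.submit((Callable<String>) BoundedLookup::slowLookup);
    try {
      System.out.println(future.get(1500, TimeUnit.MILLISECONDS));
    } catch (TimeoutException e) {
      System.out.println("lookup timed out; answering with no records"); // mirrors the null result
    } catch (InterruptedException | ExecutionException e) {
      System.out.println("lookup failed: " + e);
    } finally {
      executor.shutdownNow(); // do not leak the worker thread
    }
  }
}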

