Posted to common-commits@hadoop.apache.org by sn...@apache.org on 2021/12/14 21:01:17 UTC

[hadoop] branch trunk updated: Clean up checkstyle warnings from YARN-11024/10907/10929. Contributed by Benjamin Teke

This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 843f66f  Clean up checkstyle warnings from YARN-11024/10907/10929. Contributed by Benjamin Teke
843f66f is described below

commit 843f66f4dc012dff402bfebc183d46673cd47419
Author: Szilard Nemeth <sn...@apache.org>
AuthorDate: Tue Dec 14 22:00:43 2021 +0100

    Clean up checkstyle warnings from YARN-11024/10907/10929. Contributed by Benjamin Teke
---
 .../scheduler/capacity/AbstractCSQueue.java        |  3 +-
 .../scheduler/capacity/AbstractLeafQueue.java      | 38 +++++++++-------------
 .../GuaranteedOrZeroCapacityOverTimePolicy.java    |  1 -
 .../scheduler/capacity/TestApplicationLimits.java  | 35 +++++++++-----------
 .../capacity/TestApplicationLimitsByPartition.java |  1 -
 .../scheduler/capacity/TestReservations.java       |  1 -
 6 files changed, 33 insertions(+), 46 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 3a0e2ae..809a860 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -145,7 +145,8 @@ public abstract class AbstractCSQueue implements CSQueue {
     CSQueueMetrics metrics = old != null ?
         (CSQueueMetrics) old.getMetrics() :
         CSQueueMetrics.forQueue(getQueuePath(), parent,
-            queueContext.getConfiguration().getEnableUserMetrics(), queueContext.getConfiguration());
+            queueContext.getConfiguration().getEnableUserMetrics(),
+            queueContext.getConfiguration());
     this.usageTracker = new CSQueueUsageTracker(metrics);
 
     this.queueCapacities = new QueueCapacities(parent == null);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractLeafQueue.java
index 8b31241..e194800 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractLeafQueue.java
@@ -453,8 +453,7 @@ public class AbstractLeafQueue extends AbstractCSQueue {
   }
 
   @Override
-  public List<QueueUserACLInfo>
-  getQueueUserAclInfo(UserGroupInformation user) {
+  public List<QueueUserACLInfo> getQueueUserAclInfo(UserGroupInformation user) {
     readLock.lock();
     try {
       QueueUserACLInfo userAclInfo = recordFactory.newRecordInstance(
@@ -527,8 +526,8 @@ public class AbstractLeafQueue extends AbstractCSQueue {
   }
 
   @Override
-  public void reinitialize(CSQueue newlyParsedQueue, Resource clusterResource) throws
-      IOException {
+  public void reinitialize(CSQueue newlyParsedQueue, Resource clusterResource)
+      throws IOException {
 
     writeLock.lock();
     try {
@@ -634,7 +633,8 @@ public class AbstractLeafQueue extends AbstractCSQueue {
 
       // Check submission limits for queues
       //TODO recalculate max applications because they can depend on capacity
-      if (getNumApplications() >= getMaxApplications() && !(this instanceof AutoCreatedLeafQueue)) {
+      if (getNumApplications() >= getMaxApplications() &&
+          !(this instanceof AutoCreatedLeafQueue)) {
         String msg =
             "Queue " + getQueuePath() + " already has " + getNumApplications()
                 + " applications,"
@@ -646,7 +646,8 @@ public class AbstractLeafQueue extends AbstractCSQueue {
       // Check submission limits for the user on this queue
       User user = usersManager.getUserAndAddIfAbsent(userName);
       //TODO recalculate max applications because they can depend on capacity
-      if (user.getTotalApplications() >= getMaxApplicationsPerUser() &&  !(this instanceof AutoCreatedLeafQueue)) {
+      if (user.getTotalApplications() >= getMaxApplicationsPerUser() &&
+          !(this instanceof AutoCreatedLeafQueue)) {
         String msg = "Queue " + getQueuePath() + " already has " + user
             .getTotalApplications() + " applications from user " + userName
             + " cannot accept submission of application: " + applicationId;
@@ -825,10 +826,9 @@ public class AbstractLeafQueue extends AbstractCSQueue {
         calculateAndGetAMResourceLimitPerPartition(nodePartition);
       }
 
-      for (Iterator<FiCaSchedulerApp> fsApp =
-           getPendingAppsOrderingPolicy()
+      for (Iterator<FiCaSchedulerApp> fsApp = getPendingAppsOrderingPolicy()
                .getAssignmentIterator(IteratorSelector.EMPTY_ITERATOR_SELECTOR);
-           fsApp.hasNext(); ) {
+           fsApp.hasNext();) {
         FiCaSchedulerApp application = fsApp.next();
         ApplicationId applicationId = application.getApplicationId();
 
@@ -864,7 +864,8 @@ public class AbstractLeafQueue extends AbstractCSQueue {
                 + " skipping enforcement to allow at least one application"
                 + " to start");
           } else{
-            application.updateAMContainerDiagnostics(SchedulerApplicationAttempt.AMState.INACTIVATED,
+            application.updateAMContainerDiagnostics(
+                SchedulerApplicationAttempt.AMState.INACTIVATED,
                 CSAMContainerLaunchDiagnosticsConstants.QUEUE_AM_RESOURCE_LIMIT_EXCEED);
             LOG.debug("Not activating application {} as  amIfStarted: {}"
                 + " exceeds amLimit: {}", applicationId, amIfStarted, amLimit);
@@ -1189,9 +1190,8 @@ public class AbstractLeafQueue extends AbstractCSQueue {
     boolean needAssignToQueueCheck = true;
     IteratorSelector sel = new IteratorSelector();
     sel.setPartition(candidates.getPartition());
-    for (Iterator<FiCaSchedulerApp> assignmentIterator =
-         orderingPolicy.getAssignmentIterator(sel);
-         assignmentIterator.hasNext(); ) {
+    for (Iterator<FiCaSchedulerApp> assignmentIterator = orderingPolicy.getAssignmentIterator(sel);
+         assignmentIterator.hasNext();) {
       FiCaSchedulerApp application = assignmentIterator.next();
 
       ActivitiesLogger.APP.startAppAllocationRecording(activitiesManager,
@@ -1821,13 +1821,8 @@ public class AbstractLeafQueue extends AbstractCSQueue {
       if (null != rmContainer && rmContainer.getNodeLabelExpression().equals(
           RMNodeLabelsManager.NO_LABEL) && !nodePartition.equals(
           RMNodeLabelsManager.NO_LABEL)) {
-        TreeSet<RMContainer> rmContainers = null;
-        if (null == (rmContainers = ignorePartitionExclusivityRMContainers.get(
-            nodePartition))) {
-          rmContainers = new TreeSet<>();
-          ignorePartitionExclusivityRMContainers.put(nodePartition,
-              rmContainers);
-        }
+        TreeSet<RMContainer> rmContainers = ignorePartitionExclusivityRMContainers.computeIfAbsent(
+            nodePartition, k -> new TreeSet<>());
         rmContainers.add(rmContainer);
       }
 
@@ -2195,8 +2190,7 @@ public class AbstractLeafQueue extends AbstractCSQueue {
    * @return all ignored partition exclusivity RMContainers in the LeafQueue,
    *         this will be used by preemption policy.
    */
-  public Map<String, TreeSet<RMContainer>>
-  getIgnoreExclusivityRMContainers() {
+  public Map<String, TreeSet<RMContainer>> getIgnoreExclusivityRMContainers() {
     Map<String, TreeSet<RMContainer>> clonedMap = new HashMap<>();
 
     readLock.lock();
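
Most hunks in this commit are checkstyle line-wrapping and unused-import removals; the one substantive refactor is in the @@ -1821 hunk of AbstractLeafQueue.java above, where a manual get/null-check/put sequence on ignorePartitionExclusivityRMContainers is replaced with Map.computeIfAbsent from java.util.Map. A minimal, self-contained sketch of that before/after pattern follows; the map of container ids keyed by partition name is a hypothetical stand-in for the actual RMContainer types, not code from the commit.

    import java.util.HashMap;
    import java.util.Map;
    import java.util.TreeSet;

    public class ComputeIfAbsentSketch {
      public static void main(String[] args) {
        // Hypothetical stand-in for ignorePartitionExclusivityRMContainers:
        // partition name -> sorted set of container ids.
        Map<String, TreeSet<Integer>> containersByPartition = new HashMap<>();

        // Old style: explicit null check plus put, as in the removed lines.
        TreeSet<Integer> containers = containersByPartition.get("gpu");
        if (containers == null) {
          containers = new TreeSet<>();
          containersByPartition.put("gpu", containers);
        }
        containers.add(1);

        // New style: computeIfAbsent creates and stores the set only when the
        // key is missing, then returns the existing or new value in one call.
        containersByPartition.computeIfAbsent("gpu", k -> new TreeSet<>()).add(2);

        System.out.println(containersByPartition);  // {gpu=[1, 2]}
      }
    }

Behavior is identical; the change only replaces the duplicated null-check-and-put idiom with the single java.util.Map convenience call.
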
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/queuemanagement/GuaranteedOrZeroCapacityOverTimePolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/queuemanagement/GuaranteedOrZeroCapacityOverTimePolicy.java
index 14d3555..3527557 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/queuemanagement/GuaranteedOrZeroCapacityOverTimePolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/queuemanagement/GuaranteedOrZeroCapacityOverTimePolicy.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerDynamicEditException;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.AbstractLeafQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueueUtils;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerContext;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.AbstractAutoCreatedLeafQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.AutoCreatedLeafQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.AutoCreatedLeafQueueConfig;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
index 33134ba..7cb0ccd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
@@ -275,8 +275,9 @@ public class TestApplicationLimits {
     Resource clusterResource = 
       Resources.createResource(100 * 16 * GB, 100 * 16);
 
-    CapacitySchedulerContext csContext = createCSContext(csConf, resourceCalculator, Resources.createResource(GB, 1),
-        Resources.createResource(16*GB, 16), clusterResource);
+    CapacitySchedulerContext csContext = createCSContext(csConf, resourceCalculator,
+        Resources.createResource(GB, 1), Resources.createResource(16*GB, 16),
+        clusterResource);
     CapacitySchedulerQueueManager queueManager = csContext.getCapacitySchedulerQueueManager();
     CapacitySchedulerQueueContext queueContext = new CapacitySchedulerQueueContext(csContext);
 
@@ -299,7 +300,7 @@ public class TestApplicationLimits {
     assertThat(queue.calculateAndGetAMResourceLimit()).
         isEqualTo(amResourceLimit);
     assertThat(queue.getUserAMResourceLimit()).isEqualTo(
-      Resource.newInstance(80*GB, 1));
+        Resource.newInstance(80*GB, 1));
     
     // Assert in metrics
     assertThat(queue.getMetrics().getAMResourceLimitMB()).isEqualTo(
@@ -307,10 +308,8 @@ public class TestApplicationLimits {
     assertThat(queue.getMetrics().getAMResourceLimitVCores()).isEqualTo(
         amResourceLimit.getVirtualCores());
 
-    assertEquals(
-        (int)(clusterResource.getMemorySize() * queue.getAbsoluteCapacity()),
-        queue.getMetrics().getAvailableMB()
-        );
+    assertEquals((int)(clusterResource.getMemorySize() * queue.getAbsoluteCapacity()),
+        queue.getMetrics().getAvailableMB());
     
     // Add some nodes to the cluster & test new limits
     clusterResource = Resources.createResource(120 * 16 * GB);
@@ -322,10 +321,8 @@ public class TestApplicationLimits {
     assertThat(queue.getUserAMResourceLimit()).isEqualTo(
         Resource.newInstance(96*GB, 1));
     
-    assertEquals(
-        (int)(clusterResource.getMemorySize() * queue.getAbsoluteCapacity()),
-        queue.getMetrics().getAvailableMB()
-        );
+    assertEquals((int)(clusterResource.getMemorySize() * queue.getAbsoluteCapacity()),
+        queue.getMetrics().getAvailableMB());
 
     // should return -1 if per queue setting not set
     assertEquals(
@@ -343,11 +340,10 @@ public class TestApplicationLimits {
     assertEquals(expectedMaxAppsPerUser, queue.getMaxApplicationsPerUser());
 
     // should default to global setting if per queue setting not set
-    assertEquals(
-        (long)CapacitySchedulerConfiguration.DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT, 
+    assertEquals((long)
+            CapacitySchedulerConfiguration.DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT,
         (long)csConf.getMaximumApplicationMasterResourcePerQueuePercent(
-            queue.getQueuePath())
-            );
+            queue.getQueuePath()));
 
     // Change the per-queue max AM resources percentage.
     csConf.setFloat(PREFIX + queue.getQueuePath()
@@ -365,10 +361,9 @@ public class TestApplicationLimits {
 
     queue = (LeafQueue)queues.get(A);
 
-    assertEquals((long) 0.5, 
+    assertEquals((long) 0.5,
         (long) csConf.getMaximumApplicationMasterResourcePerQueuePercent(
-          queue.getQueuePath())
-        );
+          queue.getQueuePath()));
 
     assertThat(queue.calculateAndGetAMResourceLimit()).isEqualTo(
         Resource.newInstance(800 * GB, 1));
@@ -579,8 +574,8 @@ public class TestApplicationLimits {
     // Say cluster has 100 nodes of 16G each
     Resource clusterResource = Resources.createResource(100 * 16 * GB);
 
-    CapacitySchedulerContext csContext = createCSContext(csConf, resourceCalculator, Resources.createResource(GB),
-            Resources.createResource(16*GB), clusterResource);
+    CapacitySchedulerContext csContext = createCSContext(csConf, resourceCalculator,
+        Resources.createResource(GB), Resources.createResource(16*GB), clusterResource);
     CapacitySchedulerQueueManager queueManager = csContext.getCapacitySchedulerQueueManager();
     CapacitySchedulerQueueContext queueContext = new CapacitySchedulerQueueContext(csContext);
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimitsByPartition.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimitsByPartition.java
index 4c2ec87..ef50e52 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimitsByPartition.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimitsByPartition.java
@@ -57,7 +57,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.AMState;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
index 5662df4..53b1d16 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
@@ -58,7 +58,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.AMState;
 
 import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org