Posted to common-issues@hadoop.apache.org by GitBox <gi...@apache.org> on 2022/01/12 16:54:20 UTC

[GitHub] [hadoop] GauthamBanasandra commented on a change in pull request #3779: YARN-11015. Decouple queue capacity with ability to run OPPORTUNISTIC container

GauthamBanasandra commented on a change in pull request #3779:
URL: https://github.com/apache/hadoop/pull/3779#discussion_r783172687



##########
File path: hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerSchedulerTest.java
##########
@@ -0,0 +1,202 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager;
+
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.ConfigurationException;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.security.NMTokenIdentifier;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerStateTransitionListener;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitor;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler.TestContainerSchedulerQueuing;
+import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import static org.mockito.Mockito.spy;
+
+public class BaseContainerSchedulerTest extends BaseContainerManagerTest {

Review comment:
       Please document what this class is for.
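       For example, a short class-level Javadoc along these lines would help (wording is only a sketch):
   ```java
   /**
    * Base class for NodeManager container scheduler tests. Provides the
    * common container-manager/executor setup that the scheduler tests
    * (e.g. queuing of OPPORTUNISTIC containers) build on.
    */
   public class BaseContainerSchedulerTest extends BaseContainerManagerTest {
   ```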

##########
File path: hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/OpportunisticContainersQueuePolicy.java
##########
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler;
+
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+/**
+ * Determines how to schedule opportunistic containers at the NodeManager,
+ * i.e., whether or not to accept, queue, or reject a container run request.
+ */
+public enum OpportunisticContainersQueuePolicy {
+  /**
+   * Determines whether or not to run a container by the queue capacity:
+   * {@link YarnConfiguration#NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH}.
+   * If there's enough capacity in the queue,
+   * queues the container, otherwise rejects it.
+   */
+  BY_QUEUE_LEN,
+  /**
+   * Determines wheether or not to run a container based on the amount of

Review comment:
       Typo "wheether"

##########
File path: hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerOppContainersByResources.java
##########
@@ -0,0 +1,260 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler;
+
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ContainerSubState;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerSchedulerTest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Tests the behavior of {@link ContainerScheduler} when the max queue length
+ * is set to {@literal < 0} such that the NM only queues
+ * containers if there's enough resources on the node to start
+ * all queued containers.
+ */
+public class TestContainerSchedulerOppContainersByResources
+    extends BaseContainerSchedulerTest {
+  public TestContainerSchedulerOppContainersByResources()
+      throws UnsupportedFileSystemException {
+  }
+
+  @Override
+  public void setup() throws IOException {
+    conf.set(YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_QUEUE_POLICY,
+        OpportunisticContainersQueuePolicy.BY_RESOURCES.name());
+    super.setup();
+  }
+
+  private static boolean isSuccessfulRun(final ContainerStatus containerStatus) {
+    final org.apache.hadoop.yarn.api.records.ContainerState state =
+        containerStatus.getState();
+    final ContainerSubState subState = containerStatus.getContainerSubState();
+    switch (subState) {
+    case RUNNING:
+    case COMPLETING:
+    case DONE:
+      if (subState == ContainerSubState.DONE) {
+        return state ==
+            org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE;
+      }
+
+      return true;
+    default:
+      return false;

Review comment:
       It would be better to throw a "Not yet implemented" exception here.
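       For example (just a sketch of that suggestion; `subState` is already in scope):
   ```java
   default:
     throw new UnsupportedOperationException(
         "Unhandled container sub-state: " + subState);
   ```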

##########
File path: hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerOppContainersByResources.java
##########
@@ -0,0 +1,260 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler;
+
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ContainerSubState;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerSchedulerTest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Tests the behavior of {@link ContainerScheduler} when the max queue length
+ * is set to {@literal < 0} such that the NM only queues
+ * containers if there's enough resources on the node to start
+ * all queued containers.
+ */
+public class TestContainerSchedulerOppContainersByResources
+    extends BaseContainerSchedulerTest {
+  public TestContainerSchedulerOppContainersByResources()
+      throws UnsupportedFileSystemException {
+  }
+
+  @Override
+  public void setup() throws IOException {
+    conf.set(YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_QUEUE_POLICY,
+        OpportunisticContainersQueuePolicy.BY_RESOURCES.name());
+    super.setup();
+  }
+
+  private static boolean isSuccessfulRun(final ContainerStatus containerStatus) {
+    final org.apache.hadoop.yarn.api.records.ContainerState state =
+        containerStatus.getState();
+    final ContainerSubState subState = containerStatus.getContainerSubState();
+    switch (subState) {
+    case RUNNING:
+    case COMPLETING:
+    case DONE:
+      if (subState == ContainerSubState.DONE) {
+        return state ==
+            org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE;
+      }
+
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  private void verifyRunAndKilledContainers(
+      final List<ContainerId> statList,
+      final int numExpectedContainers, final Set<ContainerId> runContainers,
+      final Set<ContainerId> killedContainers)
+      throws TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(
+        () -> {
+          GetContainerStatusesRequest statRequest =
+              GetContainerStatusesRequest.newInstance(statList);
+          final List<ContainerStatus> containerStatuses;
+          try {
+            containerStatuses = containerManager
+                .getContainerStatuses(statRequest).getContainerStatuses();
+          } catch (final Exception e) {
+            return false;
+          }
+
+          if (numExpectedContainers != containerStatuses.size()) {
+            return false;
+          }
+
+          for (final ContainerStatus status : containerStatuses) {
+            if (runContainers.contains(status.getContainerId())) {
+              if (!isSuccessfulRun(status)) {
+                return false;
+              }
+            } else if (killedContainers.contains(status.getContainerId())) {
+              if (!status.getDiagnostics()
+                  .contains("Opportunistic container queue is full")) {
+                return false;
+              }
+            } else {
+              return false;
+            }
+          }
+
+          return true;
+        }, 1000, 10000);
+  }
+
+  /**
+   * Tests that newly arrived containers after the resources are filled up
+   * get killed and never get killed.
+   */
+  @Test
+  public void testOpportunisticRunsWhenResourcesAvailable() throws Exception {
+    containerManager.start();

Review comment:
       This is repeated in both tests and can be moved to @Before (the JUnit test setup method).
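       For example, the existing `setup()` override could start the container manager itself (sketch only, assuming `super.setup()` leaves `containerManager` initialized):
   ```java
   @Override
   public void setup() throws IOException {
     conf.set(YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_QUEUE_POLICY,
         OpportunisticContainersQueuePolicy.BY_RESOURCES.name());
     super.setup();
     containerManager.start();
   }
   ```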

##########
File path: hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerOppContainersByResources.java
##########
@@ -0,0 +1,260 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler;
+
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ContainerSubState;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerSchedulerTest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Tests the behavior of {@link ContainerScheduler} when the max queue length
+ * is set to {@literal < 0} such that the NM only queues
+ * containers if there's enough resources on the node to start
+ * all queued containers.
+ */
+public class TestContainerSchedulerOppContainersByResources
+    extends BaseContainerSchedulerTest {
+  public TestContainerSchedulerOppContainersByResources()
+      throws UnsupportedFileSystemException {
+  }
+
+  @Override
+  public void setup() throws IOException {
+    conf.set(YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_QUEUE_POLICY,
+        OpportunisticContainersQueuePolicy.BY_RESOURCES.name());
+    super.setup();
+  }
+
+  private static boolean isSuccessfulRun(final ContainerStatus containerStatus) {
+    final org.apache.hadoop.yarn.api.records.ContainerState state =
+        containerStatus.getState();
+    final ContainerSubState subState = containerStatus.getContainerSubState();
+    switch (subState) {
+    case RUNNING:
+    case COMPLETING:
+    case DONE:
+      if (subState == ContainerSubState.DONE) {
+        return state ==
+            org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE;
+      }

Review comment:
       The `if` check here is redundant, since the condition is already satisfied by `switch-case`. You can just have
   ```java
   case DONE:
     return state == org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE;
   ```

##########
File path: hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerOppContainersByResources.java
##########
@@ -0,0 +1,260 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler;
+
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ContainerSubState;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerSchedulerTest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Tests the behavior of {@link ContainerScheduler} when the max queue length
+ * is set to {@literal < 0} such that the NM only queues
+ * containers if there's enough resources on the node to start
+ * all queued containers.
+ */
+public class TestContainerSchedulerOppContainersByResources
+    extends BaseContainerSchedulerTest {
+  public TestContainerSchedulerOppContainersByResources()
+      throws UnsupportedFileSystemException {
+  }
+
+  @Override
+  public void setup() throws IOException {
+    conf.set(YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_QUEUE_POLICY,
+        OpportunisticContainersQueuePolicy.BY_RESOURCES.name());
+    super.setup();
+  }
+
+  private static boolean isSuccessfulRun(final ContainerStatus containerStatus) {
+    final org.apache.hadoop.yarn.api.records.ContainerState state =
+        containerStatus.getState();
+    final ContainerSubState subState = containerStatus.getContainerSubState();
+    switch (subState) {
+    case RUNNING:
+    case COMPLETING:
+    case DONE:
+      if (subState == ContainerSubState.DONE) {
+        return state ==
+            org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE;
+      }
+
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  private void verifyRunAndKilledContainers(
+      final List<ContainerId> statList,
+      final int numExpectedContainers, final Set<ContainerId> runContainers,
+      final Set<ContainerId> killedContainers)
+      throws TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(
+        () -> {
+          GetContainerStatusesRequest statRequest =
+              GetContainerStatusesRequest.newInstance(statList);
+          final List<ContainerStatus> containerStatuses;
+          try {
+            containerStatuses = containerManager
+                .getContainerStatuses(statRequest).getContainerStatuses();
+          } catch (final Exception e) {
+            return false;
+          }
+
+          if (numExpectedContainers != containerStatuses.size()) {
+            return false;
+          }
+
+          for (final ContainerStatus status : containerStatuses) {
+            if (runContainers.contains(status.getContainerId())) {
+              if (!isSuccessfulRun(status)) {
+                return false;
+              }
+            } else if (killedContainers.contains(status.getContainerId())) {
+              if (!status.getDiagnostics()
+                  .contains("Opportunistic container queue is full")) {
+                return false;
+              }
+            } else {
+              return false;
+            }
+          }
+
+          return true;
+        }, 1000, 10000);
+  }
+
+  /**
+   * Tests that newly arrived containers after the resources are filled up
+   * get killed and never get killed.
+   */
+  @Test
+  public void testOpportunisticRunsWhenResourcesAvailable() throws Exception {
+    containerManager.start();
+    List<StartContainerRequest> list = new ArrayList<>();

Review comment:
       Please give this variable a more specific name; `list` is quite generic.
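       For example (the name is only a suggestion):
   ```java
   List<StartContainerRequest> startRequests = new ArrayList<>();
   ```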

##########
File path: hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerOppContainersByResources.java
##########
@@ -0,0 +1,260 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler;
+
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ContainerSubState;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerSchedulerTest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Tests the behavior of {@link ContainerScheduler} when the max queue length
+ * is set to {@literal < 0} such that the NM only queues
+ * containers if there's enough resources on the node to start
+ * all queued containers.
+ */
+public class TestContainerSchedulerOppContainersByResources
+    extends BaseContainerSchedulerTest {
+  public TestContainerSchedulerOppContainersByResources()
+      throws UnsupportedFileSystemException {
+  }
+
+  @Override
+  public void setup() throws IOException {
+    conf.set(YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_QUEUE_POLICY,
+        OpportunisticContainersQueuePolicy.BY_RESOURCES.name());
+    super.setup();
+  }
+
+  private static boolean isSuccessfulRun(final ContainerStatus containerStatus) {
+    final org.apache.hadoop.yarn.api.records.ContainerState state =
+        containerStatus.getState();
+    final ContainerSubState subState = containerStatus.getContainerSubState();
+    switch (subState) {
+    case RUNNING:
+    case COMPLETING:
+    case DONE:
+      if (subState == ContainerSubState.DONE) {
+        return state ==
+            org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE;
+      }
+
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  private void verifyRunAndKilledContainers(
+      final List<ContainerId> statList,
+      final int numExpectedContainers, final Set<ContainerId> runContainers,
+      final Set<ContainerId> killedContainers)
+      throws TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(
+        () -> {
+          GetContainerStatusesRequest statRequest =
+              GetContainerStatusesRequest.newInstance(statList);
+          final List<ContainerStatus> containerStatuses;
+          try {
+            containerStatuses = containerManager
+                .getContainerStatuses(statRequest).getContainerStatuses();
+          } catch (final Exception e) {
+            return false;
+          }
+
+          if (numExpectedContainers != containerStatuses.size()) {
+            return false;
+          }
+
+          for (final ContainerStatus status : containerStatuses) {
+            if (runContainers.contains(status.getContainerId())) {
+              if (!isSuccessfulRun(status)) {
+                return false;
+              }
+            } else if (killedContainers.contains(status.getContainerId())) {
+              if (!status.getDiagnostics()
+                  .contains("Opportunistic container queue is full")) {
+                return false;
+              }
+            } else {
+              return false;
+            }
+          }
+
+          return true;
+        }, 1000, 10000);
+  }
+
+  /**
+   * Tests that newly arrived containers after the resources are filled up
+   * get killed and never get killed.

Review comment:
       Could you please clarify the part `get killed and never get killed` a little more?

##########
File path: hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerOppContainersByResources.java
##########
@@ -0,0 +1,260 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler;
+
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ContainerSubState;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerSchedulerTest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Tests the behavior of {@link ContainerScheduler} when the max queue length
+ * is set to {@literal < 0} such that the NM only queues
+ * containers if there's enough resources on the node to start
+ * all queued containers.
+ */
+public class TestContainerSchedulerOppContainersByResources
+    extends BaseContainerSchedulerTest {
+  public TestContainerSchedulerOppContainersByResources()
+      throws UnsupportedFileSystemException {
+  }
+
+  @Override
+  public void setup() throws IOException {
+    conf.set(YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_QUEUE_POLICY,
+        OpportunisticContainersQueuePolicy.BY_RESOURCES.name());
+    super.setup();
+  }
+
+  private static boolean isSuccessfulRun(final ContainerStatus containerStatus) {
+    final org.apache.hadoop.yarn.api.records.ContainerState state =
+        containerStatus.getState();
+    final ContainerSubState subState = containerStatus.getContainerSubState();
+    switch (subState) {
+    case RUNNING:
+    case COMPLETING:
+    case DONE:
+      if (subState == ContainerSubState.DONE) {
+        return state ==
+            org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE;
+      }
+
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  private void verifyRunAndKilledContainers(
+      final List<ContainerId> statList,
+      final int numExpectedContainers, final Set<ContainerId> runContainers,
+      final Set<ContainerId> killedContainers)
+      throws TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(
+        () -> {
+          GetContainerStatusesRequest statRequest =
+              GetContainerStatusesRequest.newInstance(statList);
+          final List<ContainerStatus> containerStatuses;
+          try {
+            containerStatuses = containerManager
+                .getContainerStatuses(statRequest).getContainerStatuses();
+          } catch (final Exception e) {
+            return false;

Review comment:
       I just saw that this lambda gets invoked periodically. Please ignore the above comment if the exception is expected to be thrown frequently, in the interest of not flooding the logs.

##########
File path: hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerOppContainersByResources.java
##########
@@ -0,0 +1,260 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler;
+
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ContainerSubState;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerSchedulerTest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Tests the behavior of {@link ContainerScheduler} when the max queue length
+ * is set to {@literal < 0} such that the NM only queues
+ * containers if there's enough resources on the node to start
+ * all queued containers.
+ */
+public class TestContainerSchedulerOppContainersByResources
+    extends BaseContainerSchedulerTest {
+  public TestContainerSchedulerOppContainersByResources()
+      throws UnsupportedFileSystemException {
+  }
+
+  @Override
+  public void setup() throws IOException {
+    conf.set(YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_QUEUE_POLICY,
+        OpportunisticContainersQueuePolicy.BY_RESOURCES.name());
+    super.setup();
+  }
+
+  private static boolean isSuccessfulRun(final ContainerStatus containerStatus) {
+    final org.apache.hadoop.yarn.api.records.ContainerState state =
+        containerStatus.getState();
+    final ContainerSubState subState = containerStatus.getContainerSubState();
+    switch (subState) {
+    case RUNNING:
+    case COMPLETING:
+    case DONE:
+      if (subState == ContainerSubState.DONE) {
+        return state ==
+            org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE;
+      }
+
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  private void verifyRunAndKilledContainers(
+      final List<ContainerId> statList,
+      final int numExpectedContainers, final Set<ContainerId> runContainers,
+      final Set<ContainerId> killedContainers)
+      throws TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(
+        () -> {
+          GetContainerStatusesRequest statRequest =
+              GetContainerStatusesRequest.newInstance(statList);
+          final List<ContainerStatus> containerStatuses;
+          try {
+            containerStatuses = containerManager
+                .getContainerStatuses(statRequest).getContainerStatuses();
+          } catch (final Exception e) {
+            return false;
+          }
+
+          if (numExpectedContainers != containerStatuses.size()) {
+            return false;
+          }
+
+          for (final ContainerStatus status : containerStatuses) {
+            if (runContainers.contains(status.getContainerId())) {
+              if (!isSuccessfulRun(status)) {
+                return false;
+              }
+            } else if (killedContainers.contains(status.getContainerId())) {
+              if (!status.getDiagnostics()
+                  .contains("Opportunistic container queue is full")) {
+                return false;
+              }
+            } else {
+              return false;
+            }
+          }
+
+          return true;
+        }, 1000, 10000);
+  }
+
+  /**
+   * Tests that newly arrived containers after the resources are filled up
+   * get killed and never get killed.
+   */
+  @Test
+  public void testOpportunisticRunsWhenResourcesAvailable() throws Exception {
+    containerManager.start();
+    List<StartContainerRequest> list = new ArrayList<>();
+    final int numContainers = 8;
+    final int numContainersQueued = 4;
+    final Set<ContainerId> runContainers = new HashSet<>();
+    final Set<ContainerId> killedContainers = new HashSet<>();
+
+    for (int i = 0; i < numContainers; i++) {
+      // OContainers that should be run
+      list.add(StartContainerRequest.newInstance(
+          recordFactory.newRecordInstance(ContainerLaunchContext.class),
+          createContainerToken(createContainerId(i), DUMMY_RM_IDENTIFIER,
+              context.getNodeId(),
+              user, BuilderUtils.newResource(512, 1),
+              context.getContainerTokenSecretManager(), null,
+              ExecutionType.OPPORTUNISTIC)));
+    }
+
+    StartContainersRequest allRequests =
+        StartContainersRequest.newInstance(list);
+    containerManager.startContainers(allRequests);
+
+    // Wait for containers to start
+    for (int i = 0; i < numContainersQueued; i++) {
+      final ContainerId containerId = createContainerId(i);
+      BaseContainerManagerTest
+          .waitForNMContainerState(containerManager, containerId,
+              ContainerState.RUNNING, 40);
+      runContainers.add(containerId);
+    }
+
+    // Wait for containers to be killed
+    for (int i = numContainersQueued; i < numContainers; i++) {
+      final ContainerId containerId = createContainerId(i);
+      BaseContainerManagerTest
+          .waitForNMContainerState(containerManager, createContainerId(i),
+              ContainerState.DONE, 40);
+      killedContainers.add(containerId);
+    }
+
+    Thread.sleep(5000);
+
+    // Get container statuses.
+    List<ContainerId> statList = new ArrayList<>();
+    for (int i = 0; i < numContainers; i++) {
+      statList.add(createContainerId(i));
+    }
+
+    verifyRunAndKilledContainers(
+        statList, numContainers, runContainers, killedContainers);
+
+    ContainerScheduler containerScheduler =
+        containerManager.getContainerScheduler();
+    Assert.assertEquals(0,
+        containerScheduler.getNumQueuedContainers());
+    Assert.assertEquals(0,
+        containerScheduler.getNumQueuedGuaranteedContainers());
+    Assert.assertEquals(0,
+        containerScheduler.getNumQueuedOpportunisticContainers());
+    Assert.assertEquals(0,
+        metrics.getQueuedOpportunisticContainers());
+    Assert.assertEquals(0, metrics.getQueuedGuaranteedContainers());

Review comment:
       This is repeated in both tests and can be moved to @After (the JUnit test tear-down method).
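       For example, an `@After` method along these lines could hold the shared assertions (sketch only; the method name is a placeholder and `org.junit.After` would need to be imported):
   ```java
   @After
   public void verifyNoContainersQueued() {
     final ContainerScheduler containerScheduler =
         containerManager.getContainerScheduler();
     Assert.assertEquals(0, containerScheduler.getNumQueuedContainers());
     Assert.assertEquals(0,
         containerScheduler.getNumQueuedGuaranteedContainers());
     Assert.assertEquals(0,
         containerScheduler.getNumQueuedOpportunisticContainers());
     Assert.assertEquals(0, metrics.getQueuedOpportunisticContainers());
     Assert.assertEquals(0, metrics.getQueuedGuaranteedContainers());
   }
   ```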

##########
File path: hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerOppContainersByResources.java
##########
@@ -0,0 +1,260 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler;
+
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ContainerSubState;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerSchedulerTest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Tests the behavior of {@link ContainerScheduler} when the max queue length
+ * is set to {@literal < 0} such that the NM only queues
+ * containers if there's enough resources on the node to start
+ * all queued containers.
+ */
+public class TestContainerSchedulerOppContainersByResources
+    extends BaseContainerSchedulerTest {
+  public TestContainerSchedulerOppContainersByResources()
+      throws UnsupportedFileSystemException {
+  }
+
+  @Override
+  public void setup() throws IOException {
+    conf.set(YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_QUEUE_POLICY,
+        OpportunisticContainersQueuePolicy.BY_RESOURCES.name());
+    super.setup();
+  }
+
+  private static boolean isSuccessfulRun(final ContainerStatus containerStatus) {
+    final org.apache.hadoop.yarn.api.records.ContainerState state =
+        containerStatus.getState();
+    final ContainerSubState subState = containerStatus.getContainerSubState();
+    switch (subState) {
+    case RUNNING:
+    case COMPLETING:
+    case DONE:
+      if (subState == ContainerSubState.DONE) {
+        return state ==
+            org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE;
+      }
+
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  private void verifyRunAndKilledContainers(
+      final List<ContainerId> statList,
+      final int numExpectedContainers, final Set<ContainerId> runContainers,
+      final Set<ContainerId> killedContainers)
+      throws TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(
+        () -> {
+          GetContainerStatusesRequest statRequest =
+              GetContainerStatusesRequest.newInstance(statList);
+          final List<ContainerStatus> containerStatuses;
+          try {
+            containerStatuses = containerManager
+                .getContainerStatuses(statRequest).getContainerStatuses();
+          } catch (final Exception e) {
+            return false;

Review comment:
       It would be good to log the exception here.
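       For example (assuming a `LOG` field is available from the base test class; otherwise declare one):
   ```java
   } catch (final Exception e) {
     LOG.warn("Failed to fetch container statuses", e);
     return false;
   }
   ```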

##########
File path: hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerOppContainersByResources.java
##########
@@ -0,0 +1,260 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler;
+
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ContainerSubState;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerSchedulerTest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Tests the behavior of {@link ContainerScheduler} when the max queue length
+ * is set to {@literal < 0} such that the NM only queues
+ * containers if there's enough resources on the node to start
+ * all queued containers.
+ */
+public class TestContainerSchedulerOppContainersByResources
+    extends BaseContainerSchedulerTest {
+  public TestContainerSchedulerOppContainersByResources()
+      throws UnsupportedFileSystemException {
+  }
+
+  @Override
+  public void setup() throws IOException {
+    conf.set(YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_QUEUE_POLICY,
+        OpportunisticContainersQueuePolicy.BY_RESOURCES.name());
+    super.setup();
+  }
+
+  private static boolean isSuccessfulRun(final ContainerStatus containerStatus) {
+    final org.apache.hadoop.yarn.api.records.ContainerState state =
+        containerStatus.getState();
+    final ContainerSubState subState = containerStatus.getContainerSubState();
+    switch (subState) {
+    case RUNNING:
+    case COMPLETING:
+    case DONE:
+      if (subState == ContainerSubState.DONE) {
+        return state ==
+            org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE;
+      }
+
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  private void verifyRunAndKilledContainers(
+      final List<ContainerId> statList,
+      final int numExpectedContainers, final Set<ContainerId> runContainers,
+      final Set<ContainerId> killedContainers)
+      throws TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(
+        () -> {
+          GetContainerStatusesRequest statRequest =
+              GetContainerStatusesRequest.newInstance(statList);
+          final List<ContainerStatus> containerStatuses;
+          try {
+            containerStatuses = containerManager
+                .getContainerStatuses(statRequest).getContainerStatuses();
+          } catch (final Exception e) {
+            return false;
+          }
+
+          if (numExpectedContainers != containerStatuses.size()) {
+            return false;
+          }
+
+          for (final ContainerStatus status : containerStatuses) {
+            if (runContainers.contains(status.getContainerId())) {
+              if (!isSuccessfulRun(status)) {
+                return false;
+              }
+            } else if (killedContainers.contains(status.getContainerId())) {
+              if (!status.getDiagnostics()
+                  .contains("Opportunistic container queue is full")) {
+                return false;
+              }
+            } else {
+              return false;
+            }
+          }
+
+          return true;
+        }, 1000, 10000);
+  }
+
+  /**
+   * Tests that newly arrived containers after the resources are filled up
+   * get killed and never get killed.
+   */
+  @Test
+  public void testOpportunisticRunsWhenResourcesAvailable() throws Exception {
+    containerManager.start();
+    List<StartContainerRequest> list = new ArrayList<>();
+    final int numContainers = 8;
+    final int numContainersQueued = 4;
+    final Set<ContainerId> runContainers = new HashSet<>();
+    final Set<ContainerId> killedContainers = new HashSet<>();
+
+    for (int i = 0; i < numContainers; i++) {
+      // OContainers that should be run
+      list.add(StartContainerRequest.newInstance(
+          recordFactory.newRecordInstance(ContainerLaunchContext.class),
+          createContainerToken(createContainerId(i), DUMMY_RM_IDENTIFIER,
+              context.getNodeId(),
+              user, BuilderUtils.newResource(512, 1),
+              context.getContainerTokenSecretManager(), null,
+              ExecutionType.OPPORTUNISTIC)));
+    }
+
+    StartContainersRequest allRequests =
+        StartContainersRequest.newInstance(list);
+    containerManager.startContainers(allRequests);
+
+    // Wait for containers to start
+    for (int i = 0; i < numContainersQueued; i++) {
+      final ContainerId containerId = createContainerId(i);
+      BaseContainerManagerTest
+          .waitForNMContainerState(containerManager, containerId,
+              ContainerState.RUNNING, 40);
+      runContainers.add(containerId);
+    }
+
+    // Wait for containers to be killed
+    for (int i = numContainersQueued; i < numContainers; i++) {
+      final ContainerId containerId = createContainerId(i);
+      BaseContainerManagerTest
+          .waitForNMContainerState(containerManager, createContainerId(i),
+              ContainerState.DONE, 40);
+      killedContainers.add(containerId);
+    }
+
+    Thread.sleep(5000);
+
+    // Get container statuses.
+    List<ContainerId> statList = new ArrayList<>();
+    for (int i = 0; i < numContainers; i++) {
+      statList.add(createContainerId(i));
+    }
+
+    verifyRunAndKilledContainers(
+        statList, numContainers, runContainers, killedContainers);
+
+    ContainerScheduler containerScheduler =
+        containerManager.getContainerScheduler();
+    Assert.assertEquals(0,
+        containerScheduler.getNumQueuedContainers());
+    Assert.assertEquals(0,
+        containerScheduler.getNumQueuedGuaranteedContainers());
+    Assert.assertEquals(0,
+        containerScheduler.getNumQueuedOpportunisticContainers());
+    Assert.assertEquals(0,
+        metrics.getQueuedOpportunisticContainers());
+    Assert.assertEquals(0, metrics.getQueuedGuaranteedContainers());
+  }
+
+  /**
+   * Sets the max queue length to negative such that the NM only queues
+   * containers if there's enough resources on the node to start
+   * all queued containers.
+   * Tests that newly arrived containers after the resources are filled up
+   * get killed and never get killed.
+   */
+  @Test
+  public void testKillOpportunisticWhenNoResourcesAvailable() throws Exception {
+    containerManager.start();
+    List<StartContainerRequest> list = new ArrayList<>();

Review comment:
       Please use a more specific variable name here.




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: common-issues-unsubscribe@hadoop.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



---------------------------------------------------------------------
To unsubscribe, e-mail: common-issues-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-issues-help@hadoop.apache.org