You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@mesos.apache.org by ch...@apache.org on 2018/06/15 23:33:10 UTC

mesos git commit: Fixed the flakiness in the `NVIDIA_GPU_NvidiaDockerImage` test.

Repository: mesos
Updated Branches:
  refs/heads/master 594ddb30c -> 9da882d4d


Fixed the flakiness in the `NVIDIA_GPU_NvidiaDockerImage` test.

This test is flaky because it tries to download the 1GB 'nvidia/cuda'
image from Docker Hub, which might take more than 1 minute and leave
the command executor unable to register in time.

This patch fixes this problem by using the default executor, which does
not wait for fetching task images before registration. If the image
fetch stalls more than 1 minute, the container will fail because of the
`--fetcher_stall_timeout` agent flag.

The time we wait for `TASK_FINISHED` is also extended to 180 seconds.

Review: https://reviews.apache.org/r/67596


Project: http://git-wip-us.apache.org/repos/asf/mesos/repo
Commit: http://git-wip-us.apache.org/repos/asf/mesos/commit/9da882d4
Tree: http://git-wip-us.apache.org/repos/asf/mesos/tree/9da882d4
Diff: http://git-wip-us.apache.org/repos/asf/mesos/diff/9da882d4

Branch: refs/heads/master
Commit: 9da882d4dac21e932ffa75a59362e615d9c43445
Parents: 594ddb3
Author: Chun-Hung Hsiao <ch...@mesosphere.io>
Authored: Tue Jun 12 10:49:24 2018 -0700
Committer: Chun-Hung Hsiao <ch...@mesosphere.io>
Committed: Fri Jun 15 16:32:27 2018 -0700

----------------------------------------------------------------------
 .../containerizer/nvidia_gpu_isolator_tests.cpp | 185 +++++++++++--------
 1 file changed, 105 insertions(+), 80 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/mesos/blob/9da882d4/src/tests/containerizer/nvidia_gpu_isolator_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/containerizer/nvidia_gpu_isolator_tests.cpp b/src/tests/containerizer/nvidia_gpu_isolator_tests.cpp
index d8c3e6d..040453e 100644
--- a/src/tests/containerizer/nvidia_gpu_isolator_tests.cpp
+++ b/src/tests/containerizer/nvidia_gpu_isolator_tests.cpp
@@ -41,6 +41,8 @@
 #include "slave/containerizer/containerizer.hpp"
 #include "slave/containerizer/fetcher.hpp"
 
+#include "slave/containerizer/mesos/containerizer.hpp"
+
 #include "slave/containerizer/mesos/isolators/gpu/nvidia.hpp"
 
 #include "tests/mesos.hpp"
@@ -66,6 +68,9 @@ using std::string;
 using std::vector;
 
 using testing::_;
+using testing::AllOf;
+using testing::AtMost;
+using testing::DoAll;
 using testing::Eq;
 using testing::Return;
 
@@ -73,7 +78,7 @@ namespace mesos {
 namespace internal {
 namespace tests {
 
-class NvidiaGpuTest : public MesosTest {};
+class NvidiaGpuTest : public ContainerizerTest<slave::MesosContainerizer> {};
 
 
 // This test verifies that we are able to enable the Nvidia GPU
@@ -208,102 +213,122 @@ TEST_F(NvidiaGpuTest, ROOT_INTERNET_CURL_CGROUPS_NVIDIA_GPU_NvidiaDockerImage)
   Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
   ASSERT_SOME(slave);
 
-  MockScheduler sched;
+  // NOTE: We use the default executor (and thus v1 API) in this test to avoid
+  // executor registration timing out due to fetching the 'nvidia/cuda' image
+  // over a slow connection.
+  auto scheduler = std::make_shared<v1::MockHTTPScheduler>();
 
-  FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
+  v1::FrameworkInfo frameworkInfo = v1::DEFAULT_FRAMEWORK_INFO;
   frameworkInfo.add_capabilities()->set_type(
-      FrameworkInfo::Capability::GPU_RESOURCES);
+      v1::FrameworkInfo::Capability::GPU_RESOURCES);
 
-  MesosSchedulerDriver driver(
-      &sched, frameworkInfo, master.get()->pid, DEFAULT_CREDENTIAL);
+  EXPECT_CALL(*scheduler, connected(_))
+    .WillOnce(v1::scheduler::SendSubscribe(frameworkInfo));
 
-  Future<Nothing> schedRegistered;
-  EXPECT_CALL(sched, registered(_, _, _))
-    .WillOnce(FutureSatisfy(&schedRegistered));
-
-  Future<vector<Offer>> offers1, offers2;
-  EXPECT_CALL(sched, resourceOffers(_, _))
-    .WillOnce(FutureArg<1>(&offers1))
-    .WillOnce(FutureArg<1>(&offers2))
-    .WillRepeatedly(Return());      // Ignore subsequent offers.
+  Future<v1::scheduler::Event::Subscribed> subscribed;
+  EXPECT_CALL(*scheduler, subscribed(_, _))
+    .WillOnce(FutureArg<1>(&subscribed));
 
-  driver.start();
+  Future<v1::scheduler::Event::Offers> offers;
+  EXPECT_CALL(*scheduler, offers(_, _))
+    .WillOnce(FutureArg<1>(&offers))
+    .WillRepeatedly(Return()); // Ignore subsequent offers.
 
-  AWAIT_READY(schedRegistered);
+  EXPECT_CALL(*scheduler, heartbeat(_))
+    .WillRepeatedly(Return()); // Ignore heartbeats.
 
-  Image image;
-  image.set_type(Image::DOCKER);
-  image.mutable_docker()->set_name("nvidia/cuda");
+  EXPECT_CALL(*scheduler, failure(_, _))
+    .Times(AtMost(2));
 
-  // Launch a task requesting 1 GPU and verify
-  // that `nvidia-smi` lists exactly one GPU.
-  AWAIT_READY(offers1);
-  ASSERT_EQ(1u, offers1->size());
+  v1::scheduler::TestMesos mesos(
+    master.get()->pid,
+    ContentType::PROTOBUF,
+    scheduler);
 
-  TaskInfo task1 = createTask(
-      offers1->at(0).slave_id(),
-      Resources::parse("cpus:1;mem:128;gpus:1").get(),
-      "NUM_GPUS=`nvidia-smi --list-gpus | wc -l`;\n"
-      "if [ \"$NUM_GPUS\" != \"1\" ]; then\n"
-      "  exit 1;\n"
-      "fi");
+  AWAIT_READY(subscribed);
 
-  ContainerInfo* container = task1.mutable_container();
-  container->set_type(ContainerInfo::MESOS);
-  container->mutable_mesos()->mutable_image()->CopyFrom(image);
+  const v1::FrameworkID& frameworkId = subscribed->framework_id();
 
-  Future<TaskStatus> statusStarting1, statusRunning1, statusFinished1;
-  EXPECT_CALL(sched, statusUpdate(_, _))
-    .WillOnce(FutureArg<1>(&statusStarting1))
-    .WillOnce(FutureArg<1>(&statusRunning1))
-    .WillOnce(FutureArg<1>(&statusFinished1));
-
-  driver.launchTasks(offers1->at(0).id(), {task1});
-
-  // We wait wait up to 120 seconds
-  // to download the docker image.
-  AWAIT_READY_FOR(statusStarting1, Seconds(120));
-  ASSERT_EQ(TASK_STARTING, statusStarting1->state());
+  AWAIT_READY(offers);
+  ASSERT_FALSE(offers->offers().empty());
 
-  AWAIT_READY_FOR(statusRunning1, Seconds(120));
-  ASSERT_EQ(TASK_RUNNING, statusRunning1->state());
+  const v1::AgentID& agentId = offers->offers(0).agent_id();
 
-  AWAIT_READY(statusFinished1);
-  ASSERT_EQ(TASK_FINISHED, statusFinished1->state());
+  mesos::v1::Image image;
+  image.set_type(mesos::v1::Image::DOCKER);
+  image.mutable_docker()->set_name("nvidia/cuda");
 
-  // Launch a task requesting no GPUs and
-  // verify that running `nvidia-smi` fails.
-  AWAIT_READY(offers2);
-  EXPECT_EQ(1u, offers2->size());
+  // Launch a task requesting 1 GPU and verify that `nvidia-smi` lists exactly
+  // one GPU.
+  v1::ExecutorInfo executor1 = v1::createExecutorInfo(
+      id::UUID::random().toString(),
+      None(),
+      "cpus:0.1;mem:32;disk:32",
+      v1::ExecutorInfo::DEFAULT,
+      frameworkId);
+
+  v1::TaskInfo task1 = v1::createTask(
+      agentId,
+      v1::Resources::parse("cpus:0.1;mem:32;gpus:1").get(),
+      "NUM_GPUS=`nvidia-smi --list-gpus | wc -l`;\n"
+      "if [ \"$NUM_GPUS\" != \"1\" ]; then\n"
+      "  exit 1;\n"
+      "fi");
 
-  TaskInfo task2 = createTask(
-      offers2->at(0).slave_id(),
-      Resources::parse("cpus:1;mem:128").get(),
+  mesos::v1::ContainerInfo* container1 = task1.mutable_container();
+  container1->set_type(mesos::v1::ContainerInfo::MESOS);
+  container1->mutable_mesos()->mutable_image()->CopyFrom(image);
+
+  // Launch a task requesting no GPU and verify that running `nvidia-smi` fails.
+  v1::ExecutorInfo executor2 = v1::createExecutorInfo(
+      id::UUID::random().toString(),
+      None(),
+      "cpus:0.1;mem:32;disk:32",
+      v1::ExecutorInfo::DEFAULT,
+      frameworkId);
+
+  v1::TaskInfo task2 = v1::createTask(
+      agentId,
+      v1::Resources::parse("cpus:0.1;mem:32").get(),
       "nvidia-smi");
 
-  container = task2.mutable_container();
-  container->set_type(ContainerInfo::MESOS);
-  container->mutable_mesos()->mutable_image()->CopyFrom(image);
-
-  Future<TaskStatus> statusStarting2, statusRunning2, statusFailed2;
-  EXPECT_CALL(sched, statusUpdate(&driver, _))
-    .WillOnce(FutureArg<1>(&statusStarting2))
-    .WillOnce(FutureArg<1>(&statusRunning2))
-    .WillOnce(FutureArg<1>(&statusFailed2));
-
-  driver.launchTasks(offers2->at(0).id(), {task2});
-
-  AWAIT_READY_FOR(statusStarting2, Seconds(120));
-  ASSERT_EQ(TASK_STARTING, statusStarting2->state());
-
-  AWAIT_READY_FOR(statusRunning2, Seconds(120));
-  ASSERT_EQ(TASK_RUNNING, statusRunning2->state());
-
-  AWAIT_READY(statusFailed2);
-  ASSERT_EQ(TASK_FAILED, statusFailed2->state());
-
-  driver.stop();
-  driver.join();
+  mesos::v1::ContainerInfo* container2 = task2.mutable_container();
+  container2->set_type(mesos::v1::ContainerInfo::MESOS);
+  container2->mutable_mesos()->mutable_image()->CopyFrom(image);
+
+  EXPECT_CALL(*scheduler, update(_, TaskStatusUpdateStateEq(v1::TASK_STARTING)))
+    .Times(2)
+    .WillRepeatedly(v1::scheduler::SendAcknowledge(frameworkId, agentId));
+
+  EXPECT_CALL(*scheduler, update(_, TaskStatusUpdateStateEq(v1::TASK_RUNNING)))
+    .Times(2)
+    .WillRepeatedly(v1::scheduler::SendAcknowledge(frameworkId, agentId));
+
+  Future<Nothing> task1Finished;
+  EXPECT_CALL(*scheduler, update(_, AllOf(
+      TaskStatusUpdateTaskIdEq(task1),
+      TaskStatusUpdateStateEq(v1::TASK_FINISHED))))
+    .WillOnce(DoAll(
+        FutureSatisfy(&task1Finished),
+        v1::scheduler::SendAcknowledge(frameworkId, agentId)));
+
+  Future<Nothing> task2Failed;
+  EXPECT_CALL(*scheduler, update(_, AllOf(
+      TaskStatusUpdateTaskIdEq(task2),
+      TaskStatusUpdateStateEq(v1::TASK_FAILED))))
+    .WillOnce(DoAll(
+        FutureSatisfy(&task2Failed),
+        v1::scheduler::SendAcknowledge(frameworkId, agentId)));
+
+  mesos.send(v1::createCallAccept(
+      frameworkId,
+      offers->offers(0),
+      {v1::LAUNCH_GROUP(executor1, v1::createTaskGroupInfo({task1})),
+       v1::LAUNCH_GROUP(executor2, v1::createTaskGroupInfo({task2}))}));
+
+  // We wait up to 180 seconds to download the docker image.
+  AWAIT_READY_FOR(task1Finished, Seconds(180));
+  AWAIT_READY(task2Failed);
 }