You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@mesos.apache.org by vi...@apache.org on 2016/03/04 01:01:13 UTC

mesos git commit: Prevents early container destruction in MemoryPressureTests.

Repository: mesos
Updated Branches:
  refs/heads/master 0af8bd6fe -> 5387a4d8a


Prevents early container destruction in MemoryPressureTests.

Prevents the container from being reaped by pausing the clock before
killing the task, so that measurements from the containerizer can be
taken even if the executor has already exited.

Review: https://reviews.apache.org/r/44362/


Project: http://git-wip-us.apache.org/repos/asf/mesos/repo
Commit: http://git-wip-us.apache.org/repos/asf/mesos/commit/5387a4d8
Tree: http://git-wip-us.apache.org/repos/asf/mesos/tree/5387a4d8
Diff: http://git-wip-us.apache.org/repos/asf/mesos/diff/5387a4d8

Branch: refs/heads/master
Commit: 5387a4d8ac44a8ba7a7f7d62bee0f7276b82545a
Parents: 0af8bd6
Author: Alexander Rojas <al...@mesosphere.io>
Authored: Thu Mar 3 16:00:53 2016 -0800
Committer: Vinod Kone <vi...@gmail.com>
Committed: Thu Mar 3 16:00:53 2016 -0800

----------------------------------------------------------------------
 .../containerizer/memory_pressure_tests.cpp     | 53 +++++++++++---------
 1 file changed, 29 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/mesos/blob/5387a4d8/src/tests/containerizer/memory_pressure_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/containerizer/memory_pressure_tests.cpp b/src/tests/containerizer/memory_pressure_tests.cpp
index 79f1349..03879d9 100644
--- a/src/tests/containerizer/memory_pressure_tests.cpp
+++ b/src/tests/containerizer/memory_pressure_tests.cpp
@@ -87,14 +87,9 @@ TEST_F(MemoryPressureMesosTest, CGROUPS_ROOT_Statistics)
 
   ASSERT_SOME(containerizer);
 
-  Future<SlaveRegisteredMessage> registered =
-      FUTURE_PROTOBUF(SlaveRegisteredMessage(), master.get(), _);
-
   Try<PID<Slave>> slave = StartSlave(containerizer.get(), flags);
   ASSERT_SOME(slave);
 
-  AWAIT_READY(registered);
-
   MockScheduler sched;
 
   MesosSchedulerDriver driver(
@@ -159,6 +154,11 @@ TEST_F(MemoryPressureMesosTest, CGROUPS_ROOT_Statistics)
 
   EXPECT_LE(waited, Seconds(5));
 
+  // Pause the clock to ensure that the reaper doesn't reap the exited
+  // command executor and inform the containerizer/slave.
+  Clock::pause();
+  Clock::settle();
+
   // Stop the memory-hammering task.
   driver.killTask(task.task_id());
 
@@ -175,6 +175,8 @@ TEST_F(MemoryPressureMesosTest, CGROUPS_ROOT_Statistics)
   EXPECT_GE(usage.get().mem_medium_pressure_counter(),
             usage.get().mem_critical_pressure_counter());
 
+  Clock::resume();
+
   driver.stop();
   driver.join();
 
@@ -202,14 +204,9 @@ TEST_F(MemoryPressureMesosTest, CGROUPS_ROOT_SlaveRecovery)
 
   ASSERT_SOME(containerizer1);
 
-  Future<SlaveRegisteredMessage> registered =
-      FUTURE_PROTOBUF(SlaveRegisteredMessage(), master.get(), _);
-
   Try<PID<Slave>> slave = StartSlave(containerizer1.get(), flags);
   ASSERT_SOME(slave);
 
-  AWAIT_READY(registered);
-
   MockScheduler sched;
 
   // Enable checkpointing for the framework.
@@ -241,18 +238,12 @@ TEST_F(MemoryPressureMesosTest, CGROUPS_ROOT_SlaveRecovery)
       "while true; do dd count=512 bs=1M if=/dev/zero of=./temp; done");
 
   Future<TaskStatus> running;
-  Promise<TaskStatus> killed;
   EXPECT_CALL(sched, statusUpdate(&driver, _))
-    .WillOnce(FutureArg<1>(&running))
-    .WillRepeatedly(DoAll(
-        Invoke([&killed](Unused, const TaskStatus& status) {
-          // More than one TASK_RUNNING status can arrive
-          // before the TASK_KILLED does.
-          if (status.state() == TASK_KILLED) {
-            killed.set(status);
-          }
-        }),
-        Return()));
+    .WillOnce(FutureArg<1>(&running));
+
+
+  Future<Nothing> _statusUpdateAcknowledgement =
+    FUTURE_DISPATCH(_, &Slave::_statusUpdateAcknowledgement);
 
   driver.launchTasks(offers.get()[0].id(), {task});
 
@@ -260,6 +251,9 @@ TEST_F(MemoryPressureMesosTest, CGROUPS_ROOT_SlaveRecovery)
   EXPECT_EQ(task.task_id(), running.get().task_id());
   EXPECT_EQ(TASK_RUNNING, running.get().state());
 
+  // Wait for the ACK to be checkpointed.
+  AWAIT_READY(_statusUpdateAcknowledgement);
+
   // We restart the slave to let it recover.
   Stop(slave.get());
   delete containerizer1.get();
@@ -310,12 +304,21 @@ TEST_F(MemoryPressureMesosTest, CGROUPS_ROOT_SlaveRecovery)
 
   EXPECT_LE(waited, Seconds(5));
 
+  // Pause the clock to ensure that the reaper doesn't reap the exited
+  // command executor and inform the containerizer/slave.
+  Clock::pause();
+  Clock::settle();
+
+  Future<TaskStatus> killed;
+  EXPECT_CALL(sched, statusUpdate(&driver, _))
+    .WillOnce(FutureArg<1>(&killed));
+
   // Stop the memory-hammering task.
   driver.killTask(task.task_id());
 
-  AWAIT_READY(killed.future());
-  EXPECT_EQ(task.task_id(), killed.future()->task_id());
-  EXPECT_EQ(TASK_KILLED, killed.future()->state());
+  AWAIT_READY(killed);
+  EXPECT_EQ(task.task_id(), killed->task_id());
+  EXPECT_EQ(TASK_KILLED, killed->state());
 
   // Now check the correctness of the memory pressure counters.
   Future<ResourceStatistics> usage = containerizer2.get()->usage(containerId);
@@ -326,6 +329,8 @@ TEST_F(MemoryPressureMesosTest, CGROUPS_ROOT_SlaveRecovery)
   EXPECT_GE(usage.get().mem_medium_pressure_counter(),
             usage.get().mem_critical_pressure_counter());
 
+  Clock::resume();
+
   driver.stop();
   driver.join();