You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@mesos.apache.org by ji...@apache.org on 2016/10/08 21:21:41 UTC
[1/3] mesos git commit: Added test case
`CgroupsIsolatorTest.ROOT_CGROUPS_MemoryForward`.
Repository: mesos
Updated Branches:
refs/heads/master 1fe48d388 -> e7703d1c8
Added test case `CgroupsIsolatorTest.ROOT_CGROUPS_MemoryForward`.
Review: https://reviews.apache.org/r/52243/
Project: http://git-wip-us.apache.org/repos/asf/mesos/repo
Commit: http://git-wip-us.apache.org/repos/asf/mesos/commit/96c866d3
Tree: http://git-wip-us.apache.org/repos/asf/mesos/tree/96c866d3
Diff: http://git-wip-us.apache.org/repos/asf/mesos/diff/96c866d3
Branch: refs/heads/master
Commit: 96c866d36e49dd76a5819154a023c5a23d8ae074
Parents: 1fe48d3
Author: haosdent huang <ha...@gmail.com>
Authored: Sat Oct 8 10:45:07 2016 -0700
Committer: Jie Yu <yu...@gmail.com>
Committed: Sat Oct 8 10:45:07 2016 -0700
----------------------------------------------------------------------
.../containerizer/cgroups_isolator_tests.cpp | 171 +++++++++++++++++++
1 file changed, 171 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/mesos/blob/96c866d3/src/tests/containerizer/cgroups_isolator_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/containerizer/cgroups_isolator_tests.cpp b/src/tests/containerizer/cgroups_isolator_tests.cpp
index f1035a0..1bb4ad2 100644
--- a/src/tests/containerizer/cgroups_isolator_tests.cpp
+++ b/src/tests/containerizer/cgroups_isolator_tests.cpp
@@ -43,6 +43,7 @@ using mesos::internal::slave::CGROUP_SUBSYSTEM_NET_CLS_NAME;
using mesos::internal::slave::CGROUP_SUBSYSTEM_PERF_EVENT_NAME;
using mesos::internal::slave::CPU_SHARES_PER_CPU_REVOCABLE;
using mesos::internal::slave::DEFAULT_EXECUTOR_CPUS;
+using mesos::internal::slave::EXECUTOR_REREGISTER_TIMEOUT;
using mesos::internal::slave::Containerizer;
using mesos::internal::slave::Fetcher;
@@ -54,6 +55,7 @@ using mesos::internal::slave::Slave;
using mesos::master::detector::MasterDetector;
+using process::Clock;
using process::Future;
using process::Owned;
using process::Queue;
@@ -1119,6 +1121,175 @@ TEST_F(CgroupsIsolatorTest, ROOT_CGROUPS_PERF_PerfForward)
driver.join();
}
+
+// Test that the memory subsystem can be enabled after the agent
+// restart. Previously created containers will not perform memory
+// isolation but newly created containers will.
+TEST_F(CgroupsIsolatorTest, ROOT_CGROUPS_MemoryForward)
+{
+ Try<Owned<cluster::Master>> master = StartMaster();
+ ASSERT_SOME(master);
+
+ // Start an agent using a containerizer without the memory isolation.
+ slave::Flags flags = CreateSlaveFlags();
+ flags.isolation = "cgroups/cpu";
+
+ Fetcher fetcher;
+
+ Try<MesosContainerizer*> create =
+ MesosContainerizer::create(flags, true, &fetcher);
+
+ ASSERT_SOME(create);
+
+ Owned<slave::Containerizer> containerizer(create.get());
+
+ Owned<MasterDetector> detector = master.get()->createDetector();
+
+ Try<Owned<cluster::Slave>> slave = StartSlave(
+ detector.get(),
+ containerizer.get(),
+ flags);
+
+ ASSERT_SOME(slave);
+
+ // Enable checkpointing for the framework.
+ FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
+ frameworkInfo.set_checkpoint(true);
+
+ MockScheduler sched;
+
+ MesosSchedulerDriver driver(
+ &sched,
+ frameworkInfo,
+ master.get()->pid,
+ DEFAULT_CREDENTIAL);
+
+ EXPECT_CALL(sched, registered(&driver, _, _));
+
+ Future<vector<Offer>> offers1;
+ EXPECT_CALL(sched, resourceOffers(_, _))
+ .WillOnce(FutureArg<1>(&offers1))
+ .WillRepeatedly(Return()); // Ignore subsequent offers.
+
+ driver.start();
+
+ AWAIT_READY(offers1);
+ EXPECT_NE(0u, offers1->size());
+
+ Future<TaskStatus> statusRunning1;
+ EXPECT_CALL(sched, statusUpdate(&driver, _))
+ .WillOnce(FutureArg<1>(&statusRunning1))
+ .WillRepeatedly(Return());
+
+ TaskInfo task1 = createTask(
+ offers1.get()[0].slave_id(),
+ Resources::parse("cpus:0.5;mem:128").get(),
+ "sleep 1000");
+
+ // We want to be notified immediately with a new offer.
+ Filters filters;
+ filters.set_refuse_seconds(0);
+
+ driver.launchTasks(offers1.get()[0].id(), {task1}, filters);
+
+ AWAIT_READY(statusRunning1);
+ EXPECT_EQ(TASK_RUNNING, statusRunning1->state());
+
+ Future<hashset<ContainerID>> containers = containerizer->containers();
+
+ AWAIT_READY(containers);
+ EXPECT_EQ(1u, containers->size());
+
+ ContainerID containerId1 = *(containers->begin());
+
+ Future<ResourceStatistics> usage = containerizer->usage(containerId1);
+ AWAIT_READY(usage);
+
+ // There should not be any memory statistics.
+ EXPECT_FALSE(usage->has_mem_total_bytes());
+
+ slave.get()->terminate();
+
+ Future<vector<Offer>> offers2;
+ EXPECT_CALL(sched, resourceOffers(_, _))
+ .WillOnce(FutureArg<1>(&offers2))
+ .WillRepeatedly(Return()); // Ignore subsequent offers.
+
+ // Set up this to speed up the recovery of the agent.
+ Future<ReregisterExecutorMessage> reregisterExecutorMessage =
+ FUTURE_PROTOBUF(ReregisterExecutorMessage(), _, _);
+
+ Future<Nothing> __recover = FUTURE_DISPATCH(_, &Slave::__recover);
+
+ // Start an agent using a containerizer with the memory isolation.
+ flags.isolation = "cgroups/cpu,cgroups/mem";
+
+ containerizer.reset();
+
+ create = MesosContainerizer::create(flags, true, &fetcher);
+ ASSERT_SOME(create);
+
+ containerizer.reset(create.get());
+
+ slave = StartSlave(detector.get(), containerizer.get(), flags);
+ ASSERT_SOME(slave);
+
+ Clock::pause();
+
+ // Wait for the executor to re-register.
+ AWAIT_READY(reregisterExecutorMessage);
+
+ // Ensure the agent considers itself recovered.
+ Clock::advance(EXECUTOR_REREGISTER_TIMEOUT);
+ Clock::resume();
+
+ // Wait until agent recovery is complete.
+ AWAIT_READY(__recover);
+
+ AWAIT_READY(offers2);
+ EXPECT_NE(0u, offers2->size());
+
+ // The first container should not report memory statistics.
+ usage = containerizer->usage(containerId1);
+ AWAIT_READY(usage);
+
+ EXPECT_FALSE(usage->has_mem_total_bytes());
+
+ // Start a new container which will start reporting memory statistics.
+ TaskInfo task2 = createTask(offers2.get()[0], "sleep 1000");
+
+ Future<TaskStatus> statusRunning2;
+ EXPECT_CALL(sched, statusUpdate(&driver, _))
+ .WillOnce(FutureArg<1>(&statusRunning2))
+ .WillRepeatedly(Return()); // Ignore subsequent updates.
+
+ driver.launchTasks(offers2.get()[0].id(), {task2});
+
+ AWAIT_READY(statusRunning2);
+ EXPECT_EQ(TASK_RUNNING, statusRunning2.get().state());
+
+ containers = containerizer->containers();
+
+ AWAIT_READY(containers);
+ EXPECT_EQ(2u, containers.get().size());
+ EXPECT_TRUE(containers.get().contains(containerId1));
+
+ ContainerID containerId2;
+ foreach (const ContainerID containerId, containers.get()) {
+ if (containerId != containerId1) {
+ containerId2 = containerId;
+ }
+ }
+
+ usage = containerizer->usage(containerId2);
+ AWAIT_READY(usage);
+
+ EXPECT_TRUE(usage->has_mem_total_bytes());
+
+ driver.stop();
+ driver.join();
+}
+
} // namespace tests {
} // namespace internal {
} // namespace mesos {
[2/3] mesos git commit: Added test case
`CgroupsIsolatorTest.ROOT_CGROUPS_MemoryBackward`.
Posted by ji...@apache.org.
Added test case `CgroupsIsolatorTest.ROOT_CGROUPS_MemoryBackward`.
Review: https://reviews.apache.org/r/52244/
Project: http://git-wip-us.apache.org/repos/asf/mesos/repo
Commit: http://git-wip-us.apache.org/repos/asf/mesos/commit/9118eded
Tree: http://git-wip-us.apache.org/repos/asf/mesos/tree/9118eded
Diff: http://git-wip-us.apache.org/repos/asf/mesos/diff/9118eded
Branch: refs/heads/master
Commit: 9118eded36b3e61a0727fa328540e7da1b9402f0
Parents: 96c866d
Author: haosdent huang <ha...@gmail.com>
Authored: Sat Oct 8 10:45:18 2016 -0700
Committer: Jie Yu <yu...@gmail.com>
Committed: Sat Oct 8 10:45:18 2016 -0700
----------------------------------------------------------------------
.../containerizer/cgroups_isolator_tests.cpp | 197 +++++++++++++++++++
1 file changed, 197 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/mesos/blob/9118eded/src/tests/containerizer/cgroups_isolator_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/containerizer/cgroups_isolator_tests.cpp b/src/tests/containerizer/cgroups_isolator_tests.cpp
index 1bb4ad2..af0dcd4 100644
--- a/src/tests/containerizer/cgroups_isolator_tests.cpp
+++ b/src/tests/containerizer/cgroups_isolator_tests.cpp
@@ -1290,6 +1290,203 @@ TEST_F(CgroupsIsolatorTest, ROOT_CGROUPS_MemoryForward)
driver.join();
}
+
+// Test that the memory subsystem can be disabled after the agent
+// restart. Previously created containers will perform memory isolation
+// but newly created containers will not.
+TEST_F(CgroupsIsolatorTest, ROOT_CGROUPS_MemoryBackward)
+{
+ Try<Owned<cluster::Master>> master = StartMaster();
+ ASSERT_SOME(master);
+
+ // Start an agent using a containerizer with the memory isolation.
+ slave::Flags flags = CreateSlaveFlags();
+ flags.isolation = "cgroups/cpu,cgroups/mem";
+
+ Fetcher fetcher;
+
+ Try<MesosContainerizer*> create =
+ MesosContainerizer::create(flags, true, &fetcher);
+
+ ASSERT_SOME(create);
+
+ Owned<slave::Containerizer> containerizer(create.get());
+
+ Owned<MasterDetector> detector = master.get()->createDetector();
+
+ Try<Owned<cluster::Slave>> slave = StartSlave(
+ detector.get(),
+ containerizer.get(),
+ flags);
+
+ ASSERT_SOME(slave);
+
+ // Enable checkpointing for the framework.
+ FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
+ frameworkInfo.set_checkpoint(true);
+
+ MockScheduler sched;
+
+ MesosSchedulerDriver driver(
+ &sched,
+ frameworkInfo,
+ master.get()->pid,
+ DEFAULT_CREDENTIAL);
+
+ EXPECT_CALL(sched, registered(&driver, _, _));
+
+ Future<vector<Offer>> offers1;
+ EXPECT_CALL(sched, resourceOffers(_, _))
+ .WillOnce(FutureArg<1>(&offers1))
+ .WillRepeatedly(Return()); // Ignore subsequent offers.
+
+ driver.start();
+
+ AWAIT_READY(offers1);
+ EXPECT_NE(0u, offers1->size());
+
+ Future<TaskStatus> statusRunning1;
+ EXPECT_CALL(sched, statusUpdate(&driver, _))
+ .WillOnce(FutureArg<1>(&statusRunning1))
+ .WillRepeatedly(Return());
+
+ TaskInfo task1 = createTask(
+ offers1.get()[0].slave_id(),
+ Resources::parse("cpus:0.5;mem:128").get(),
+ "sleep 1000");
+
+ // We want to be notified immediately with a new offer.
+ Filters filters;
+ filters.set_refuse_seconds(0);
+
+ driver.launchTasks(offers1.get()[0].id(), {task1}, filters);
+
+ AWAIT_READY(statusRunning1);
+ EXPECT_EQ(TASK_RUNNING, statusRunning1->state());
+
+ Future<hashset<ContainerID>> containers = containerizer->containers();
+
+ AWAIT_READY(containers);
+ EXPECT_EQ(1u, containers->size());
+
+ ContainerID containerId1 = *(containers->begin());
+
+ Future<ResourceStatistics> usage = containerizer->usage(containerId1);
+ AWAIT_READY(usage);
+
+ EXPECT_TRUE(usage.get().has_mem_total_bytes());
+
+ slave.get()->terminate();
+
+ Future<vector<Offer>> offers2;
+ EXPECT_CALL(sched, resourceOffers(_, _))
+ .WillOnce(FutureArg<1>(&offers2))
+ .WillRepeatedly(Return()); // Ignore subsequent offers.
+
+ // Set up this to speed up the recovery of the agent.
+ Future<ReregisterExecutorMessage> reregisterExecutorMessage =
+ FUTURE_PROTOBUF(ReregisterExecutorMessage(), _, _);
+
+ Future<Nothing> __recover = FUTURE_DISPATCH(_, &Slave::__recover);
+
+ // Start an agent using a containerizer without the memory isolation.
+ flags.isolation = "cgroups/cpu";
+
+ containerizer.reset();
+
+ create = MesosContainerizer::create(flags, true, &fetcher);
+ ASSERT_SOME(create);
+
+ containerizer.reset(create.get());
+
+ slave = StartSlave(detector.get(), containerizer.get(), flags);
+ ASSERT_SOME(slave);
+
+ Clock::pause();
+
+ // Wait for the executor to re-register.
+ AWAIT_READY(reregisterExecutorMessage);
+
+ // Ensure the agent considers itself recovered.
+ Clock::advance(EXECUTOR_REREGISTER_TIMEOUT);
+ Clock::resume();
+
+ // Wait until agent recovery is complete.
+ AWAIT_READY(__recover);
+
+ AWAIT_READY(offers2);
+ EXPECT_NE(0u, offers2->size());
+
+ // The first container should not report memory statistics.
+ usage = containerizer->usage(containerId1);
+ AWAIT_READY(usage);
+
+ // After restarting the agent without the memory isolation,
+ // the container should not report memory statistics.
+ EXPECT_FALSE(usage->has_mem_total_bytes());
+
+ TaskInfo task2 = createTask(offers2.get()[0], "sleep 1000");
+
+ Future<TaskStatus> statusRunning2;
+ EXPECT_CALL(sched, statusUpdate(&driver, _))
+ .WillOnce(FutureArg<1>(&statusRunning2))
+ .WillRepeatedly(Return()); // Ignore subsequent updates.
+
+ driver.launchTasks(offers2.get()[0].id(), {task2});
+
+ AWAIT_READY(statusRunning2);
+ EXPECT_EQ(TASK_RUNNING, statusRunning2.get().state());
+
+ containers = containerizer->containers();
+
+ AWAIT_READY(containers);
+ EXPECT_EQ(2u, containers.get().size());
+ EXPECT_TRUE(containers.get().contains(containerId1));
+
+ ContainerID containerId2;
+ foreach (const ContainerID containerId, containers.get()) {
+ if (containerId != containerId1) {
+ containerId2 = containerId;
+ }
+ }
+
+ usage = containerizer->usage(containerId2);
+ AWAIT_READY(usage);
+
+ // After restarting the agent without the memory isolation,
+ // the container should not report memory statistics.
+ EXPECT_FALSE(usage->has_mem_total_bytes());
+
+ driver.stop();
+ driver.join();
+
+ slave.get()->terminate();
+
+ __recover = FUTURE_DISPATCH(_, &Slave::__recover);
+
+ // Restart an agent using a containerizer with the memory isolation to
+ // clean up the orphan cgroups.
+ flags.isolation = "cgroups/mem";
+
+ containerizer.reset();
+
+ create = MesosContainerizer::create(flags, true, &fetcher);
+ ASSERT_SOME(create);
+
+ containerizer.reset(create.get());
+
+ slave = StartSlave(detector.get(), containerizer.get(), flags);
+ ASSERT_SOME(slave);
+
+ Clock::pause();
+ Clock::settle();
+ // Ensure the agent considers itself recovered.
+ Clock::advance(EXECUTOR_REREGISTER_TIMEOUT);
+ Clock::resume();
+
+ AWAIT_READY(__recover);
+}
+
} // namespace tests {
} // namespace internal {
} // namespace mesos {
[3/3] mesos git commit: Made some adjustments to the cgroups isolator
tests.
Posted by ji...@apache.org.
Made some adjustments to the cgroups isolator tests.
This patch makes sure that the cgroups will be cleaned up for each
test by using the ContainerizerTest fixture.
Project: http://git-wip-us.apache.org/repos/asf/mesos/repo
Commit: http://git-wip-us.apache.org/repos/asf/mesos/commit/e7703d1c
Tree: http://git-wip-us.apache.org/repos/asf/mesos/tree/e7703d1c
Diff: http://git-wip-us.apache.org/repos/asf/mesos/diff/e7703d1c
Branch: refs/heads/master
Commit: e7703d1c88936d2b684b43c4a902333b33c808ff
Parents: 9118ede
Author: Jie Yu <yu...@gmail.com>
Authored: Sat Oct 8 14:20:47 2016 -0700
Committer: Jie Yu <yu...@gmail.com>
Committed: Sat Oct 8 14:20:51 2016 -0700
----------------------------------------------------------------------
.../containerizer/cgroups_isolator_tests.cpp | 67 ++++----------------
1 file changed, 11 insertions(+), 56 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/mesos/blob/e7703d1c/src/tests/containerizer/cgroups_isolator_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/containerizer/cgroups_isolator_tests.cpp b/src/tests/containerizer/cgroups_isolator_tests.cpp
index af0dcd4..b579757 100644
--- a/src/tests/containerizer/cgroups_isolator_tests.cpp
+++ b/src/tests/containerizer/cgroups_isolator_tests.cpp
@@ -24,7 +24,6 @@
#include "slave/containerizer/mesos/containerizer.hpp"
#include "slave/containerizer/mesos/isolators/cgroups/constants.hpp"
-
#include "slave/containerizer/mesos/isolators/cgroups/subsystems/net_cls.hpp"
#include "tests/mesos.hpp"
@@ -43,7 +42,6 @@ using mesos::internal::slave::CGROUP_SUBSYSTEM_NET_CLS_NAME;
using mesos::internal::slave::CGROUP_SUBSYSTEM_PERF_EVENT_NAME;
using mesos::internal::slave::CPU_SHARES_PER_CPU_REVOCABLE;
using mesos::internal::slave::DEFAULT_EXECUTOR_CPUS;
-using mesos::internal::slave::EXECUTOR_REREGISTER_TIMEOUT;
using mesos::internal::slave::Containerizer;
using mesos::internal::slave::Fetcher;
@@ -55,7 +53,6 @@ using mesos::internal::slave::Slave;
using mesos::master::detector::MasterDetector;
-using process::Clock;
using process::Future;
using process::Owned;
using process::Queue;
@@ -80,7 +77,8 @@ TEST_SCRIPT(ContainerizerTest,
"balloon_framework_test.sh")
-class CgroupsIsolatorTest : public MesosTest {};
+class CgroupsIsolatorTest
+ : public ContainerizerTest<MesosContainerizer> {};
// This test starts the agent with cgroups isolation and launches a
@@ -271,6 +269,7 @@ TEST_F(CgroupsIsolatorTest, ROOT_CGROUPS_RevocableCpu)
FrameworkInfo::Capability::REVOCABLE_RESOURCES);
MockScheduler sched;
+
MesosSchedulerDriver driver(
&sched,
frameworkInfo,
@@ -371,6 +370,7 @@ TEST_F(CgroupsIsolatorTest, ROOT_CGROUPS_CFS_EnableCfs)
ASSERT_SOME(slave);
MockScheduler sched;
+
MesosSchedulerDriver driver(
&sched,
DEFAULT_FRAMEWORK_INFO,
@@ -471,6 +471,7 @@ TEST_F(CgroupsIsolatorTest, ROOT_CGROUPS_PidsAndTids)
ASSERT_SOME(slave);
MockScheduler sched;
+
MesosSchedulerDriver driver(
&sched,
DEFAULT_FRAMEWORK_INFO,
@@ -973,6 +974,12 @@ TEST_F(CgroupsIsolatorTest, ROOT_CGROUPS_PERF_PerfForward)
slave::Flags flags = CreateSlaveFlags();
flags.isolation = "cgroups/cpu,cgroups/mem";
+ // TODO(jieyu): This is necessary because currently, we don't have a
+ // way to kill and wait for the perf process to finish, and cgroups
+ // cleanup function does not yet support killing processes without a
+ // freezer cgroup.
+ flags.agent_subsystems = None();
+
Fetcher fetcher;
Try<MesosContainerizer*> create =
@@ -1215,10 +1222,6 @@ TEST_F(CgroupsIsolatorTest, ROOT_CGROUPS_MemoryForward)
.WillOnce(FutureArg<1>(&offers2))
.WillRepeatedly(Return()); // Ignore subsequent offers.
- // Set up this to speed up the recovery of the agent.
- Future<ReregisterExecutorMessage> reregisterExecutorMessage =
- FUTURE_PROTOBUF(ReregisterExecutorMessage(), _, _);
-
Future<Nothing> __recover = FUTURE_DISPATCH(_, &Slave::__recover);
// Start an agent using a containerizer with the memory isolation.
@@ -1234,15 +1237,6 @@ TEST_F(CgroupsIsolatorTest, ROOT_CGROUPS_MemoryForward)
slave = StartSlave(detector.get(), containerizer.get(), flags);
ASSERT_SOME(slave);
- Clock::pause();
-
- // Wait for the executor to re-register.
- AWAIT_READY(reregisterExecutorMessage);
-
- // Ensure the agent considers itself recovered.
- Clock::advance(EXECUTOR_REREGISTER_TIMEOUT);
- Clock::resume();
-
// Wait until agent recovery is complete.
AWAIT_READY(__recover);
@@ -1383,10 +1377,6 @@ TEST_F(CgroupsIsolatorTest, ROOT_CGROUPS_MemoryBackward)
.WillOnce(FutureArg<1>(&offers2))
.WillRepeatedly(Return()); // Ignore subsequent offers.
- // Set up this to speed up the recovery of the agent.
- Future<ReregisterExecutorMessage> reregisterExecutorMessage =
- FUTURE_PROTOBUF(ReregisterExecutorMessage(), _, _);
-
Future<Nothing> __recover = FUTURE_DISPATCH(_, &Slave::__recover);
// Start an agent using a containerizer without the memory isolation.
@@ -1402,15 +1392,6 @@ TEST_F(CgroupsIsolatorTest, ROOT_CGROUPS_MemoryBackward)
slave = StartSlave(detector.get(), containerizer.get(), flags);
ASSERT_SOME(slave);
- Clock::pause();
-
- // Wait for the executor to re-register.
- AWAIT_READY(reregisterExecutorMessage);
-
- // Ensure the agent considers itself recovered.
- Clock::advance(EXECUTOR_REREGISTER_TIMEOUT);
- Clock::resume();
-
// Wait until agent recovery is complete.
AWAIT_READY(__recover);
@@ -1459,32 +1440,6 @@ TEST_F(CgroupsIsolatorTest, ROOT_CGROUPS_MemoryBackward)
driver.stop();
driver.join();
-
- slave.get()->terminate();
-
- __recover = FUTURE_DISPATCH(_, &Slave::__recover);
-
- // Restart an agent using a containerizer with the memory isolation to
- // clean up the orphan cgroups.
- flags.isolation = "cgroups/mem";
-
- containerizer.reset();
-
- create = MesosContainerizer::create(flags, true, &fetcher);
- ASSERT_SOME(create);
-
- containerizer.reset(create.get());
-
- slave = StartSlave(detector.get(), containerizer.get(), flags);
- ASSERT_SOME(slave);
-
- Clock::pause();
- Clock::settle();
- // Ensure the agent considers itself recovered.
- Clock::advance(EXECUTOR_REREGISTER_TIMEOUT);
- Clock::resume();
-
- AWAIT_READY(__recover);
}
} // namespace tests {