You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@mesos.apache.org by be...@apache.org on 2013/05/26 18:57:37 UTC
[12/28] git commit: Refactored MesosTest/MesosClusterTest into a
generic fixture for launching in-memory Mesos clusters and updated all tests
appropriately.
Refactored MesosTest/MesosClusterTest into a generic fixture for
launching in-memory Mesos clusters and updated all tests
appropriately.
Review: https://reviews.apache.org/r/11273
Project: http://git-wip-us.apache.org/repos/asf/incubator-mesos/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-mesos/commit/6b1b8208
Tree: http://git-wip-us.apache.org/repos/asf/incubator-mesos/tree/6b1b8208
Diff: http://git-wip-us.apache.org/repos/asf/incubator-mesos/diff/6b1b8208
Branch: refs/heads/master
Commit: 6b1b82085a6049944cb3342a415829fe3d0e9320
Parents: b553a07
Author: Benjamin Hindman <be...@twitter.com>
Authored: Wed Apr 24 22:06:48 2013 -0700
Committer: Benjamin Hindman <be...@twitter.com>
Committed: Sun May 26 00:13:35 2013 -0700
----------------------------------------------------------------------
src/Makefile.am | 2 +
src/master/hierarchical_allocator_process.hpp | 2 +-
src/tests/allocator_tests.cpp | 166 +++---
src/tests/allocator_zookeeper_tests.cpp | 151 +++--
src/tests/cgroups_tests.cpp | 5 +-
src/tests/cluster.hpp | 247 ++------
src/tests/configurator_tests.cpp | 12 +-
src/tests/exception_tests.cpp | 16 +-
src/tests/fault_tolerance_tests.cpp | 301 +++------
src/tests/files_tests.cpp | 2 +
src/tests/flags_tests.cpp | 3 +-
src/tests/gc_tests.cpp | 88 ++-
src/tests/group_tests.cpp | 3 +-
src/tests/isolator.hpp | 9 +
src/tests/isolator_tests.cpp | 62 +-
src/tests/log_tests.cpp | 5 +-
src/tests/logging_tests.cpp | 5 +-
src/tests/main.cpp | 3 +-
src/tests/master_detector_tests.cpp | 17 +-
src/tests/master_tests.cpp | 120 ++--
src/tests/mesos.cpp | 240 +++++++
src/tests/mesos.hpp | 676 ++++++++++++++++++
src/tests/monitor_tests.cpp | 7 +-
src/tests/paths_tests.cpp | 2 -
src/tests/protobuf_io_tests.cpp | 3 +-
src/tests/reaper_tests.cpp | 15 +-
src/tests/resource_offers_tests.cpp | 24 +-
src/tests/script.cpp | 4 +-
src/tests/slave_recovery_tests.cpp | 335 ++++++----
src/tests/state_tests.cpp | 3 +-
src/tests/status_update_manager_tests.cpp | 52 +-
src/tests/utils.cpp | 1 +
src/tests/utils.hpp | 727 +-------------------
src/tests/zookeeper.cpp | 2 +-
src/tests/zookeeper.hpp | 10 +-
src/tests/zookeeper_tests.cpp | 3 +-
src/zookeeper/authentication.hpp | 19 +-
src/zookeeper/url.hpp | 9 +
38 files changed, 1719 insertions(+), 1632 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-mesos/blob/6b1b8208/src/Makefile.am
----------------------------------------------------------------------
diff --git a/src/Makefile.am b/src/Makefile.am
index bc0c5c6..0f7794e 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -233,6 +233,7 @@ libmesos_no_third_party_la_SOURCES += common/attributes.hpp \
tests/zookeeper.hpp tests/flags.hpp tests/utils.hpp \
tests/cluster.hpp \
tests/isolator.hpp \
+ tests/mesos.hpp \
tests/zookeeper_test_server.hpp zookeeper/authentication.hpp \
zookeeper/group.hpp zookeeper/watcher.hpp \
zookeeper/zookeeper.hpp zookeeper/url.hpp
@@ -780,6 +781,7 @@ check_PROGRAMS += mesos-tests
mesos_tests_SOURCES = tests/main.cpp tests/utils.cpp \
tests/environment.cpp \
tests/flags.cpp \
+ tests/mesos.cpp \
tests/master_tests.cpp tests/state_tests.cpp \
tests/paths_tests.cpp \
tests/reaper_tests.cpp \
http://git-wip-us.apache.org/repos/asf/incubator-mesos/blob/6b1b8208/src/master/hierarchical_allocator_process.hpp
----------------------------------------------------------------------
diff --git a/src/master/hierarchical_allocator_process.hpp b/src/master/hierarchical_allocator_process.hpp
index f4afb71..ebd97e4 100644
--- a/src/master/hierarchical_allocator_process.hpp
+++ b/src/master/hierarchical_allocator_process.hpp
@@ -30,6 +30,7 @@
#include "common/resources.hpp"
#include "master/allocator.hpp"
+#include "master/drf_sorter.hpp"
#include "master/master.hpp"
#include "master/sorter.hpp"
@@ -38,7 +39,6 @@ namespace internal {
namespace master {
// Forward declarations.
-class DRFSorter;
class Filter;
http://git-wip-us.apache.org/repos/asf/incubator-mesos/blob/6b1b8208/src/tests/allocator_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/allocator_tests.cpp b/src/tests/allocator_tests.cpp
index 540c05a..5a0de33 100644
--- a/src/tests/allocator_tests.cpp
+++ b/src/tests/allocator_tests.cpp
@@ -34,8 +34,8 @@
#include "master/hierarchical_allocator_process.hpp"
#include "master/master.hpp"
-#include "tests/cluster.hpp"
-#include "tests/utils.hpp"
+#include "tests/isolator.hpp"
+#include "tests/mesos.hpp"
using namespace mesos;
using namespace mesos::internal;
@@ -66,7 +66,7 @@ using testing::Return;
using testing::SaveArg;
-class DRFAllocatorTest : public MesosClusterTest {};
+class DRFAllocatorTest : public MesosTest {};
// Checks that the DRF allocator implements the DRF algorithm
@@ -82,16 +82,15 @@ TEST_F(DRFAllocatorTest, DRFAllocatorProcess)
EXPECT_CALL(allocator, initialize(_, _));
- Try<PID<Master> > master = cluster.masters.start(&allocator);
+ Try<PID<Master> > master = StartMaster(&allocator);
ASSERT_SOME(master);
- TestingIsolator isolator1;
- slave::Flags flags1 = cluster.slaves.flags;
+ slave::Flags flags1 = CreateSlaveFlags();
flags1.resources = Option<string>("cpus:2;mem:1024;disk:0");
EXPECT_CALL(allocator, slaveAdded(_, _, _));
- Try<PID<Slave> > slave1 = cluster.slaves.start(flags1, &isolator1);
+ Try<PID<Slave> > slave1 = StartSlave(flags1);
ASSERT_SOME(slave1);
// Total cluster resources now cpus=2, mem=1024.
@@ -135,8 +134,7 @@ TEST_F(DRFAllocatorTest, DRFAllocatorProcess)
AWAIT_READY(frameworkAdded2);
- TestingIsolator isolator2;
- slave::Flags flags2 = cluster.slaves.flags;
+ slave::Flags flags2 = CreateSlaveFlags();
flags2.resources = Option<string>("cpus:1;mem:512;disk:0");
EXPECT_CALL(allocator, slaveAdded(_, _, _));
@@ -145,7 +143,7 @@ TEST_F(DRFAllocatorTest, DRFAllocatorProcess)
EXPECT_CALL(sched2, resourceOffers(_, _))
.WillOnce(FutureArg<1>(&offers2));
- Try<PID<Slave> > slave2 = cluster.slaves.start(flags2, &isolator2);
+ Try<PID<Slave> > slave2 = StartSlave(flags2);
ASSERT_SOME(slave2);
// Total cluster resources now cpus=3, mem=1536.
// framework1 share = 0.66
@@ -159,8 +157,7 @@ TEST_F(DRFAllocatorTest, DRFAllocatorProcess)
// framework1 share = 0.66
// framework2 share = 0.33
- TestingIsolator isolator3;
- slave::Flags flags3 = cluster.slaves.flags;
+ slave::Flags flags3 = CreateSlaveFlags();
flags3.resources = Option<string>("cpus:3;mem:2048;disk:0");
EXPECT_CALL(allocator, slaveAdded(_, _, _));
@@ -169,7 +166,7 @@ TEST_F(DRFAllocatorTest, DRFAllocatorProcess)
EXPECT_CALL(sched2, resourceOffers(_, _))
.WillOnce(FutureArg<1>(&offers3));
- Try<PID<Slave> > slave3 = cluster.slaves.start(flags3, &isolator3);
+ Try<PID<Slave> > slave3 = StartSlave(flags3);
ASSERT_SOME(slave3);
// Total cluster resources now cpus=6, mem=3584.
// framework1 share = 0.33
@@ -200,8 +197,7 @@ TEST_F(DRFAllocatorTest, DRFAllocatorProcess)
AWAIT_READY(frameworkAdded3);
- TestingIsolator isolator4;
- slave::Flags flags4 = cluster.slaves.flags;
+ slave::Flags flags4 = CreateSlaveFlags();
flags4.resources = Option<string>("cpus:4;mem:4096;disk:0");
EXPECT_CALL(allocator, slaveAdded(_, _, _));
@@ -210,7 +206,7 @@ TEST_F(DRFAllocatorTest, DRFAllocatorProcess)
EXPECT_CALL(sched3, resourceOffers(_, _))
.WillOnce(FutureArg<1>(&offers4));
- Try<PID<Slave> > slave4 = cluster.slaves.start(flags4, &isolator4);
+ Try<PID<Slave> > slave4 = StartSlave(flags4);
ASSERT_SOME(slave4);
// Total cluster resources now cpus=10, mem=7680.
// framework1 share = 0.2
@@ -240,17 +236,17 @@ TEST_F(DRFAllocatorTest, DRFAllocatorProcess)
driver2.stop();
driver3.stop();
- cluster.shutdown();
+ Shutdown();
}
template <typename T>
-class AllocatorTest : public MesosClusterTest
+class AllocatorTest : public MesosTest
{
protected:
virtual void SetUp()
{
- MesosClusterTest::SetUp();
+ MesosTest::SetUp();
a = new Allocator(&allocator);
}
@@ -258,7 +254,7 @@ protected:
virtual void TearDown()
{
delete a;
- MesosClusterTest::TearDown();
+ MesosTest::TearDown();
}
MockAllocatorProcess<T> allocator;
@@ -277,17 +273,15 @@ TYPED_TEST(AllocatorTest, MockAllocator)
{
EXPECT_CALL(this->allocator, initialize(_, _));
- Try<PID<Master> > master = this->cluster.masters.start(&this->allocator);
+ Try<PID<Master> > master = this->StartMaster(&this->allocator);
ASSERT_SOME(master);
- MockExecutor exec;
- TestingIsolator isolator(DEFAULT_EXECUTOR_ID, &exec);
- slave::Flags flags = this->cluster.slaves.flags;
+ slave::Flags flags = this->CreateSlaveFlags();
flags.resources = Option<string>("cpus:2;mem:1024;disk:0");
EXPECT_CALL(this->allocator, slaveAdded(_, _, _));
- Try<PID<Slave> > slave = this->cluster.slaves.start(flags, &isolator);
+ Try<PID<Slave> > slave = this->StartSlave(flags);
ASSERT_SOME(slave);
MockScheduler sched;
@@ -327,11 +321,11 @@ TYPED_TEST(AllocatorTest, MockAllocator)
EXPECT_CALL(this->allocator, slaveRemoved(_))
.WillOnce(FutureSatisfy(&slaveRemoved));
- this->cluster.slaves.shutdown();
+ this->ShutdownSlaves();
AWAIT_READY(slaveRemoved);
- this->cluster.masters.shutdown();
+ this->ShutdownMasters();
}
@@ -342,17 +336,17 @@ TYPED_TEST(AllocatorTest, ResourcesUnused)
{
EXPECT_CALL(this->allocator, initialize(_, _));
- Try<PID<Master> > master = this->cluster.masters.start(&this->allocator);
+ Try<PID<Master> > master = this->StartMaster(&this->allocator);
ASSERT_SOME(master);
- MockExecutor exec;
- TestingIsolator isolator(DEFAULT_EXECUTOR_ID, &exec);
- slave::Flags flags1 = this->cluster.slaves.flags;
+ MockExecutor exec(DEFAULT_EXECUTOR_ID);
+
+ slave::Flags flags1 = this->CreateSlaveFlags();
flags1.resources = Option<string>("cpus:2;mem:1024");
EXPECT_CALL(this->allocator, slaveAdded(_, _, _));
- Try<PID<Slave> > slave1 = this->cluster.slaves.start(flags1, &isolator);
+ Try<PID<Slave> > slave1 = this->StartSlave(&exec, flags1);
ASSERT_SOME(slave1);
MockScheduler sched1;
@@ -431,7 +425,7 @@ TYPED_TEST(AllocatorTest, ResourcesUnused)
EXPECT_CALL(this->allocator, slaveRemoved(_))
.Times(AtMost(1));
- this->cluster.shutdown();
+ this->Shutdown();
}
@@ -442,16 +436,15 @@ TYPED_TEST(AllocatorTest, OutOfOrderDispatch)
{
EXPECT_CALL(this->allocator, initialize(_, _));
- Try<PID<Master> > master = this->cluster.masters.start(&this->allocator);
+ Try<PID<Master> > master = this->StartMaster(&this->allocator);
ASSERT_SOME(master);
- TestingIsolator isolator;
- slave::Flags flags1 = this->cluster.slaves.flags;
+ slave::Flags flags1 = this->CreateSlaveFlags();
flags1.resources = Option<string>("cpus:2;mem:1024");
EXPECT_CALL(this->allocator, slaveAdded(_, _, _));
- Try<PID<Slave> > slave1 = this->cluster.slaves.start(flags1, &isolator);
+ Try<PID<Slave> > slave1 = this->StartSlave(flags1);
ASSERT_SOME(slave1);
FrameworkInfo frameworkInfo1;
@@ -553,7 +546,7 @@ TYPED_TEST(AllocatorTest, OutOfOrderDispatch)
EXPECT_CALL(this->allocator, slaveRemoved(_))
.Times(AtMost(1));
- this->cluster.shutdown();
+ this->Shutdown();
}
@@ -564,17 +557,18 @@ TYPED_TEST(AllocatorTest, SchedulerFailover)
{
EXPECT_CALL(this->allocator, initialize(_, _));
- Try<PID<Master> > master = this->cluster.masters.start(&this->allocator);
+ Try<PID<Master> > master = this->StartMaster(&this->allocator);
ASSERT_SOME(master);
- MockExecutor exec;
- TestingIsolator isolator(DEFAULT_EXECUTOR_ID, &exec);
- slave::Flags flags = this->cluster.slaves.flags;
+ MockExecutor exec(DEFAULT_EXECUTOR_ID);
+ TestingIsolator isolator(&exec);
+
+ slave::Flags flags = this->CreateSlaveFlags();
flags.resources = Option<string>("cpus:3;mem:1024");
EXPECT_CALL(this->allocator, slaveAdded(_, _, _));
- Try<PID<Slave> > slave = this->cluster.slaves.start(flags, &isolator);
+ Try<PID<Slave> > slave = this->StartSlave(&isolator, flags);
ASSERT_SOME(slave);
FrameworkInfo frameworkInfo1;
@@ -680,7 +674,7 @@ TYPED_TEST(AllocatorTest, SchedulerFailover)
EXPECT_CALL(this->allocator, slaveRemoved(_))
.Times(AtMost(1));
- this->cluster.shutdown();
+ this->Shutdown();
}
@@ -690,12 +684,12 @@ TYPED_TEST(AllocatorTest, FrameworkExited)
{
EXPECT_CALL(this->allocator, initialize(_, _));
- master::Flags masterFlags = this->cluster.masters.flags;
+ master::Flags masterFlags = this->CreateMasterFlags();
masterFlags.allocation_interval = Duration::parse("50ms").get();
- Try<PID<Master> > master = this->cluster.masters.start(&this->allocator, masterFlags);
+ Try<PID<Master> > master = this->StartMaster(&this->allocator, masterFlags);
ASSERT_SOME(master);
- MockExecutor exec;
+ MockExecutor exec(DEFAULT_EXECUTOR_ID);
EXPECT_CALL(exec, registered(_, _, _, _))
.Times(2);
@@ -707,8 +701,9 @@ TYPED_TEST(AllocatorTest, FrameworkExited)
EXPECT_CALL(exec, shutdown(_))
.Times(AtMost(2));
- TestingIsolator isolator(DEFAULT_EXECUTOR_ID, &exec);
- slave::Flags flags = this->cluster.slaves.flags;
+ TestingIsolator isolator(&exec);
+
+ slave::Flags flags = this->CreateSlaveFlags();
flags.resources = Option<string>("cpus:3;mem:1024");
EXPECT_CALL(isolator, resourcesChanged(_, _, _))
@@ -716,7 +711,7 @@ TYPED_TEST(AllocatorTest, FrameworkExited)
EXPECT_CALL(this->allocator, slaveAdded(_, _, _));
- Try<PID<Slave> > slave = this->cluster.slaves.start(flags, &isolator);
+ Try<PID<Slave> > slave = this->StartSlave(&isolator, flags);
ASSERT_SOME(slave);
MockScheduler sched1;
@@ -809,7 +804,7 @@ TYPED_TEST(AllocatorTest, FrameworkExited)
EXPECT_CALL(this->allocator, slaveRemoved(_))
.Times(AtMost(1));
- this->cluster.shutdown();
+ this->Shutdown();
}
@@ -821,17 +816,18 @@ TYPED_TEST(AllocatorTest, SlaveLost)
{
EXPECT_CALL(this->allocator, initialize(_, _));
- Try<PID<Master> > master = this->cluster.masters.start(&this->allocator);
+ Try<PID<Master> > master = this->StartMaster(&this->allocator);
ASSERT_SOME(master);
- MockExecutor exec;
- TestingIsolator isolator(DEFAULT_EXECUTOR_ID, &exec);
- slave::Flags flags1 = this->cluster.slaves.flags;
+ MockExecutor exec(DEFAULT_EXECUTOR_ID);
+ TestingIsolator isolator(&exec);
+
+ slave::Flags flags1 = this->CreateSlaveFlags();
flags1.resources = Option<string>("cpus:2;mem:1024");
EXPECT_CALL(this->allocator, slaveAdded(_, _, _));
- Try<PID<Slave> > slave1 = this->cluster.slaves.start(flags1, &isolator);
+ Try<PID<Slave> > slave1 = this->StartSlave(&isolator, flags1);
ASSERT_SOME(slave1);
MockScheduler sched1;
@@ -887,15 +883,15 @@ TYPED_TEST(AllocatorTest, SlaveLost)
EXPECT_CALL(sched1, slaveLost(_, _));
- this->cluster.slaves.shutdown();
+ this->ShutdownSlaves();
AWAIT_READY(slaveRemoved1);
AWAIT_READY(shutdownCall);
- MockExecutor exec2;
- TestingIsolator isolator2(DEFAULT_EXECUTOR_ID, &exec2);
- slave::Flags flags2 = this->cluster.slaves.flags;
+ MockExecutor exec2(DEFAULT_EXECUTOR_ID);
+
+ slave::Flags flags2 = this->CreateSlaveFlags();
flags2.resources = Option<string>("cpus:3;mem:256");
EXPECT_CALL(this->allocator, slaveAdded(_, _, _));
@@ -907,7 +903,7 @@ TYPED_TEST(AllocatorTest, SlaveLost)
EXPECT_CALL(sched1, resourceOffers(_, OfferEq(3, 256)))
.WillOnce(FutureArg<1>(&resourceOffers2));
- Try<PID<Slave> > slave2 = this->cluster.slaves.start(flags2, &isolator2);
+ Try<PID<Slave> > slave2 = this->StartSlave(&exec2, flags2);
ASSERT_SOME(slave2);
AWAIT_READY(resourceOffers2);
@@ -935,7 +931,7 @@ TYPED_TEST(AllocatorTest, SlaveLost)
EXPECT_CALL(this->allocator, slaveRemoved(_))
.Times(AtMost(1));
- this->cluster.shutdown();
+ this->Shutdown();
}
@@ -946,19 +942,20 @@ TYPED_TEST(AllocatorTest, SlaveAdded)
{
EXPECT_CALL(this->allocator, initialize(_, _));
- master::Flags masterFlags = this->cluster.masters.flags;
+ master::Flags masterFlags = this->CreateMasterFlags();
masterFlags.allocation_interval = Duration::parse("50ms").get();
- Try<PID<Master> > master = this->cluster.masters.start(&this->allocator, masterFlags);
+ Try<PID<Master> > master = this->StartMaster(&this->allocator, masterFlags);
ASSERT_SOME(master);
- MockExecutor exec;
- TestingIsolator isolator(DEFAULT_EXECUTOR_ID, &exec);
- slave::Flags flags1 = this->cluster.slaves.flags;
+ MockExecutor exec(DEFAULT_EXECUTOR_ID);
+ TestingIsolator isolator(&exec);
+
+ slave::Flags flags1 = this->CreateSlaveFlags();
flags1.resources = Option<string>("cpus:3;mem:1024");
EXPECT_CALL(this->allocator, slaveAdded(_, _, _));
- Try<PID<Slave> > slave1 = this->cluster.slaves.start(flags1, &isolator);
+ Try<PID<Slave> > slave1 = this->StartSlave(&isolator, flags1);
ASSERT_SOME(slave1);
MockScheduler sched1;
@@ -1004,7 +1001,7 @@ TYPED_TEST(AllocatorTest, SlaveAdded)
AWAIT_READY(launchTask);
- slave::Flags flags2 = this->cluster.slaves.flags;
+ slave::Flags flags2 = this->CreateSlaveFlags();
flags2.resources = Option<string>("cpus:4;mem:2048");
EXPECT_CALL(this->allocator, slaveAdded(_, _, _));
@@ -1016,7 +1013,7 @@ TYPED_TEST(AllocatorTest, SlaveAdded)
EXPECT_CALL(sched1, resourceOffers(_, OfferEq(5, 2560)))
.WillOnce(FutureSatisfy(&resourceOffers2));
- Try<PID<Slave> > slave2 = this->cluster.slaves.start(flags2, &isolator);
+ Try<PID<Slave> > slave2 = this->StartSlave(flags2);
ASSERT_SOME(slave2);
AWAIT_READY(resourceOffers2);
@@ -1042,7 +1039,7 @@ TYPED_TEST(AllocatorTest, SlaveAdded)
EXPECT_CALL(this->allocator, slaveRemoved(_))
.Times(AtMost(2));
- this->cluster.shutdown();
+ this->Shutdown();
}
@@ -1052,19 +1049,20 @@ TYPED_TEST(AllocatorTest, TaskFinished)
{
EXPECT_CALL(this->allocator, initialize(_, _));
- master::Flags masterFlags = this->cluster.masters.flags;
+ master::Flags masterFlags = this->CreateMasterFlags();
masterFlags.allocation_interval = Duration::parse("50ms").get();
- Try<PID<Master> > master = this->cluster.masters.start(&this->allocator, masterFlags);
+ Try<PID<Master> > master = this->StartMaster(&this->allocator, masterFlags);
ASSERT_SOME(master);
- MockExecutor exec;
- TestingIsolator isolator(DEFAULT_EXECUTOR_ID, &exec);
- slave::Flags flags = this->cluster.slaves.flags;
+ MockExecutor exec(DEFAULT_EXECUTOR_ID);
+ TestingIsolator isolator(&exec);
+
+ slave::Flags flags = this->CreateSlaveFlags();
flags.resources = Option<string>("cpus:3;mem:1024");
EXPECT_CALL(this->allocator, slaveAdded(_, _, _));
- Try<PID<Slave> > slave = this->cluster.slaves.start(flags, &isolator);
+ Try<PID<Slave> > slave = this->StartSlave(&isolator, flags);
ASSERT_SOME(slave);
MockScheduler sched1;
@@ -1156,7 +1154,7 @@ TYPED_TEST(AllocatorTest, TaskFinished)
EXPECT_CALL(this->allocator, slaveRemoved(_))
.Times(AtMost(1));
- this->cluster.shutdown();
+ this->Shutdown();
}
@@ -1170,7 +1168,7 @@ TYPED_TEST(AllocatorTest, WhitelistSlave)
string path = "whitelist.txt";
ASSERT_SOME(os::write(path, hosts)) << "Error writing whitelist";
- master::Flags masterFlags = this->cluster.masters.flags;
+ master::Flags masterFlags = this->CreateMasterFlags();
masterFlags.whitelist = "file://" + path; // TODO(benh): Put in /tmp.
EXPECT_CALL(this->allocator, initialize(_, _));
@@ -1180,17 +1178,15 @@ TYPED_TEST(AllocatorTest, WhitelistSlave)
.WillOnce(DoAll(InvokeUpdateWhitelist(&this->allocator),
FutureSatisfy(&updateWhitelist1)));
- Try<PID<Master> > master = this->cluster.masters.start(&this->allocator, masterFlags);
+ Try<PID<Master> > master = this->StartMaster(&this->allocator, masterFlags);
ASSERT_SOME(master);
- MockExecutor exec;
- TestingIsolator isolator(DEFAULT_EXECUTOR_ID, &exec);
- slave::Flags flags = this->cluster.slaves.flags;
+ slave::Flags flags = this->CreateSlaveFlags();
flags.resources = Option<string>("cpus:2;mem:1024");
EXPECT_CALL(this->allocator, slaveAdded(_, _, _));
- Try<PID<Slave> > slave = this->cluster.slaves.start(flags, &isolator);
+ Try<PID<Slave> > slave = this->StartSlave(flags);
ASSERT_SOME(slave);
MockScheduler sched;
@@ -1256,7 +1252,7 @@ TYPED_TEST(AllocatorTest, WhitelistSlave)
EXPECT_CALL(this->allocator, slaveRemoved(_))
.Times(AtMost(1));
- this->cluster.shutdown();
+ this->Shutdown();
os::rm(path);
}
http://git-wip-us.apache.org/repos/asf/incubator-mesos/blob/6b1b8208/src/tests/allocator_zookeeper_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/allocator_zookeeper_tests.cpp b/src/tests/allocator_zookeeper_tests.cpp
index d03a564..31f4dc6 100644
--- a/src/tests/allocator_zookeeper_tests.cpp
+++ b/src/tests/allocator_zookeeper_tests.cpp
@@ -22,16 +22,15 @@
#include <process/gmock.hpp>
#include <process/message.hpp>
-#include "detector/detector.hpp"
+#include <stout/option.hpp>
+#include <stout/try.hpp>
#include "master/allocator.hpp"
#include "master/master.hpp"
-#include "tests/utils.hpp"
+#include "tests/mesos.hpp"
#include "tests/zookeeper.hpp"
-#include "zookeeper/url.hpp"
-
using namespace mesos;
using namespace mesos::internal;
using namespace mesos::internal::tests;
@@ -45,7 +44,6 @@ using mesos::internal::slave::Slave;
using process::Future;
using process::PID;
-using std::map;
using std::string;
using std::vector;
@@ -53,20 +51,49 @@ using testing::_;
using testing::AtMost;
using testing::DoAll;
using testing::DoDefault;
-using testing::Eq;
-using testing::Return;
-using testing::SaveArg;
template <typename T = AllocatorProcess>
-class AllocatorZooKeeperTest : public ZooKeeperTest
+class AllocatorZooKeeperTest : public MesosTest
{
+public:
+ static void SetUpTestCase()
+ {
+ // Make sure the JVM is created.
+ ZooKeeperTest::SetUpTestCase();
+
+ // Launch the ZooKeeper test server.
+ server = new ZooKeeperTestServer();
+ server->startNetwork();
+
+ Try<zookeeper::URL> parse = zookeeper::URL::parse(
+ "zk://" + server->connectString() + "/znode");
+ ASSERT_SOME(parse);
+
+ url = parse.get();
+ }
+
+ static void TearDownTestCase()
+ {
+ delete server;
+ server = NULL;
+ }
+
protected:
- T allocator1;
- MockAllocatorProcess<T> allocator2;
+ AllocatorZooKeeperTest() : MesosTest(url) {}
+
+ static ZooKeeperTestServer* server;
+ static Option<zookeeper::URL> url;
};
+template <typename T>
+ZooKeeperTestServer* AllocatorZooKeeperTest<T>::server = NULL;
+
+template <typename T>
+Option<zookeeper::URL> AllocatorZooKeeperTest<T>::url;
+
+
// Runs TYPED_TEST(AllocatorZooKeeperTest, ...) on all AllocatorTypes.
TYPED_TEST_CASE(AllocatorZooKeeperTest, AllocatorTypes);
@@ -77,25 +104,22 @@ TYPED_TEST_CASE(AllocatorZooKeeperTest, AllocatorTypes);
// accounted for correctly.
TYPED_TEST(AllocatorZooKeeperTest, FrameworkReregistersFirst)
{
- string zk = "zk://" + this->server->connectString() + "/znode";
- Try<zookeeper::URL> url = zookeeper::URL::parse(zk);
- ASSERT_SOME(url);
+ TypeParam allocator1;
- Cluster cluster(url.get());
-
- Try<PID<Master> > master = cluster.masters.start(&this->allocator1);
+ Try<PID<Master> > master = this->StartMaster(&allocator1);
ASSERT_SOME(master);
- MockExecutor exec;
- TestingIsolator isolator(DEFAULT_EXECUTOR_ID, &exec);
- slave::Flags flags = cluster.slaves.flags;
+ MockExecutor exec(DEFAULT_EXECUTOR_ID);
+
+ slave::Flags flags = this->CreateSlaveFlags();
flags.resources = Option<string>("cpus:2;mem:1024");
- Try<PID<Slave> > slave = cluster.slaves.start(flags, &isolator);
+ Try<PID<Slave> > slave = this->StartSlave(&exec, flags);
ASSERT_SOME(slave);
MockScheduler sched;
- MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, zk);
+ MesosSchedulerDriver driver(
+ &sched, DEFAULT_FRAMEWORK_INFO, stringify(this->url.get()));
Future<Nothing> registered;
EXPECT_CALL(sched, registered(&driver, _, _))
@@ -141,32 +165,34 @@ TYPED_TEST(AllocatorZooKeeperTest, FrameworkReregistersFirst)
// Stop the failing master from telling the slave to shut down when
// it is killed.
- Future<process::Message> shutdownMsg =
- DROP_MESSAGE(Eq(ShutdownMessage().GetTypeName()), _, _);
+ Future<ShutdownMessage> shutdownMessage =
+ DROP_PROTOBUF(ShutdownMessage(), _, _);
// Stop the slave from reregistering with the new master until the
// framework has reregistered.
- DROP_MESSAGES(Eq(ReregisterSlaveMessage().GetTypeName()), _, _);
+ DROP_PROTOBUFS(ReregisterSlaveMessage(), _, _);
- cluster.masters.shutdown();
+ this->ShutdownMasters();
- AWAIT_READY(shutdownMsg);
+ AWAIT_READY(shutdownMessage);
- EXPECT_CALL(this->allocator2, initialize(_, _));
+ MockAllocatorProcess<TypeParam> allocator2;
- Try<PID<Master> > master2 = cluster.masters.start(&this->allocator2);
+ EXPECT_CALL(allocator2, initialize(_, _));
+
+ Try<PID<Master> > master2 = this->StartMaster(&allocator2);
ASSERT_SOME(master2);
Future<Nothing> frameworkAdded;
- EXPECT_CALL(this->allocator2, frameworkAdded(_, _, _))
- .WillOnce(DoAll(InvokeFrameworkAdded(&this->allocator2),
+ EXPECT_CALL(allocator2, frameworkAdded(_, _, _))
+ .WillOnce(DoAll(InvokeFrameworkAdded(&allocator2),
FutureSatisfy(&frameworkAdded)));
EXPECT_CALL(sched, reregistered(&driver, _));
AWAIT_READY(frameworkAdded);
- EXPECT_CALL(this->allocator2, slaveAdded(_, _, _));
+ EXPECT_CALL(allocator2, slaveAdded(_, _, _));
Future<vector<Offer> > resourceOffers2;
EXPECT_CALL(sched, resourceOffers(&driver, _))
@@ -184,13 +210,13 @@ TYPED_TEST(AllocatorZooKeeperTest, FrameworkReregistersFirst)
EXPECT_THAT(resourceOffers2.get(), OfferEq(1, 524));
// Shut everything down.
- EXPECT_CALL(this->allocator2, resourcesRecovered(_, _, _))
+ EXPECT_CALL(allocator2, resourcesRecovered(_, _, _))
.WillRepeatedly(DoDefault());
- EXPECT_CALL(this->allocator2, frameworkDeactivated(_));
+ EXPECT_CALL(allocator2, frameworkDeactivated(_));
Future<Nothing> frameworkRemoved;
- EXPECT_CALL(this->allocator2, frameworkRemoved(_))
+ EXPECT_CALL(allocator2, frameworkRemoved(_))
.WillOnce(FutureSatisfy(&frameworkRemoved));
EXPECT_CALL(exec, shutdown(_))
@@ -201,10 +227,10 @@ TYPED_TEST(AllocatorZooKeeperTest, FrameworkReregistersFirst)
AWAIT_READY(frameworkRemoved);
- EXPECT_CALL(this->allocator2, slaveRemoved(_))
+ EXPECT_CALL(allocator2, slaveRemoved(_))
.Times(AtMost(1));
- cluster.shutdown();
+ this->Shutdown();
}
@@ -214,25 +240,22 @@ TYPED_TEST(AllocatorZooKeeperTest, FrameworkReregistersFirst)
// accounted for correctly.
TYPED_TEST(AllocatorZooKeeperTest, SlaveReregistersFirst)
{
- string zk = "zk://" + this->server->connectString() + "/znode";
- Try<zookeeper::URL> url = zookeeper::URL::parse(zk);
- ASSERT_SOME(url);
-
- Cluster cluster(url.get());
+ TypeParam allocator1;
- Try<PID<Master> > master = cluster.masters.start(&this->allocator1);
+ Try<PID<Master> > master = this->StartMaster(&allocator1);
ASSERT_SOME(master);
- MockExecutor exec;
- TestingIsolator isolator(DEFAULT_EXECUTOR_ID, &exec);
- slave::Flags flags = cluster.slaves.flags;
+ MockExecutor exec(DEFAULT_EXECUTOR_ID);
+
+ slave::Flags flags = this->CreateSlaveFlags();
flags.resources = Option<string>("cpus:2;mem:1024");
- Try<PID<Slave> > slave = cluster.slaves.start(flags, &isolator);
+ Try<PID<Slave> > slave = this->StartSlave(&exec, flags);
ASSERT_SOME(slave);
MockScheduler sched;
- MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO,zk);
+ MesosSchedulerDriver driver(
+ &sched, DEFAULT_FRAMEWORK_INFO, stringify(this->url.get()));
Future<Nothing> registered;
EXPECT_CALL(sched, registered(&driver, _, _))
@@ -278,32 +301,34 @@ TYPED_TEST(AllocatorZooKeeperTest, SlaveReregistersFirst)
// Stop the failing master from telling the slave to shut down when
// it is killed.
- Future<process::Message> shutdownMsg =
- DROP_MESSAGE(Eq(ShutdownMessage().GetTypeName()), _, _);
+ Future<ShutdownMessage> shutdownMessage =
+ DROP_PROTOBUF(ShutdownMessage(), _, _);
// Stop the framework from reregistering with the new master until the
// slave has reregistered.
- DROP_MESSAGES(Eq(ReregisterFrameworkMessage().GetTypeName()), _, _);
+ DROP_PROTOBUFS(ReregisterFrameworkMessage(), _, _);
+
+ this->ShutdownMasters();
- cluster.masters.shutdown();
+ AWAIT_READY(shutdownMessage);
- AWAIT_READY(shutdownMsg);
+ MockAllocatorProcess<TypeParam> allocator2;
- EXPECT_CALL(this->allocator2, initialize(_, _));
+ EXPECT_CALL(allocator2, initialize(_, _));
- Try<PID<Master> > master2 = cluster.masters.start(&this->allocator2);
+ Try<PID<Master> > master2 = this->StartMaster(&allocator2);
ASSERT_SOME(master2);
Future<Nothing> slaveAdded;
- EXPECT_CALL(this->allocator2, slaveAdded(_, _, _))
- .WillOnce(DoAll(InvokeSlaveAdded(&this->allocator2),
+ EXPECT_CALL(allocator2, slaveAdded(_, _, _))
+ .WillOnce(DoAll(InvokeSlaveAdded(&allocator2),
FutureSatisfy(&slaveAdded)));
EXPECT_CALL(sched, reregistered(&driver, _));
AWAIT_READY(slaveAdded);
- EXPECT_CALL(this->allocator2, frameworkAdded(_, _, _));
+ EXPECT_CALL(allocator2, frameworkAdded(_, _, _));
Future<vector<Offer> > resourceOffers2;
EXPECT_CALL(sched, resourceOffers(&driver, _))
@@ -321,13 +346,13 @@ TYPED_TEST(AllocatorZooKeeperTest, SlaveReregistersFirst)
EXPECT_THAT(resourceOffers2.get(), OfferEq(1, 524));
// Shut everything down.
- EXPECT_CALL(this->allocator2, resourcesRecovered(_, _, _))
+ EXPECT_CALL(allocator2, resourcesRecovered(_, _, _))
.WillRepeatedly(DoDefault());
- EXPECT_CALL(this->allocator2, frameworkDeactivated(_));
+ EXPECT_CALL(allocator2, frameworkDeactivated(_));
Future<Nothing> frameworkRemoved;
- EXPECT_CALL(this->allocator2, frameworkRemoved(_))
+ EXPECT_CALL(allocator2, frameworkRemoved(_))
.WillOnce(FutureSatisfy(&frameworkRemoved));
EXPECT_CALL(exec, shutdown(_))
@@ -338,8 +363,8 @@ TYPED_TEST(AllocatorZooKeeperTest, SlaveReregistersFirst)
AWAIT_READY(frameworkRemoved);
- EXPECT_CALL(this->allocator2, slaveRemoved(_))
+ EXPECT_CALL(allocator2, slaveRemoved(_))
.Times(AtMost(1));
- cluster.shutdown();
+ this->Shutdown();
}
http://git-wip-us.apache.org/repos/asf/incubator-mesos/blob/6b1b8208/src/tests/cgroups_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/cgroups_tests.cpp b/src/tests/cgroups_tests.cpp
index db33738..f062fe6 100644
--- a/src/tests/cgroups_tests.cpp
+++ b/src/tests/cgroups_tests.cpp
@@ -35,6 +35,9 @@
#include <gmock/gmock.h>
+#include <process/gtest.hpp>
+
+#include <stout/gtest.hpp>
#include <stout/hashmap.hpp>
#include <stout/option.hpp>
#include <stout/os.hpp>
@@ -44,7 +47,7 @@
#include "linux/cgroups.hpp"
-#include "tests/utils.hpp"
+#include "tests/mesos.hpp" // For TEST_CGROUPS_(HIERARCHY|ROOT).
using namespace mesos::internal::tests;
http://git-wip-us.apache.org/repos/asf/incubator-mesos/blob/6b1b8208/src/tests/cluster.hpp
----------------------------------------------------------------------
diff --git a/src/tests/cluster.hpp b/src/tests/cluster.hpp
index 682b7d6..25cd554 100644
--- a/src/tests/cluster.hpp
+++ b/src/tests/cluster.hpp
@@ -43,10 +43,9 @@
#include "slave/flags.hpp"
#include "slave/isolator.hpp"
+#include "slave/process_isolator.hpp"
#include "slave/slave.hpp"
-#include "tests/isolator.hpp" // For TestingIsolator.
-
#include "zookeeper/url.hpp"
namespace mesos {
@@ -56,7 +55,6 @@ namespace tests {
class Cluster
{
public:
- // TODO(benh): Take flags and make const in Masters and Slaves.
Cluster(const Option<zookeeper::URL>& url = None())
: masters(this, url),
slaves(this, &masters) {}
@@ -70,23 +68,17 @@ public:
void shutdown();
- // Start and manage a new master.
- Try<process::PID<master::Master> > start();
-
// Start and manage a new master using the specified flags.
- Try<process::PID<master::Master> > start(const master::Flags& flags);
-
- // Start and manage a new master injecting the specified allocator
- // process. The allocator process is expected to outlive the
- // launched master (i.e., until it is stopped via Masters::stop).
Try<process::PID<master::Master> > start(
- master::AllocatorProcess* allocatorProcess);
+ const master::Flags& flags = master::Flags());
- // Start and manage a new master using the specified flags
- // and injecting the allocator process as above.
+ // Start and manage a new master injecting the specified allocator
+ // process and using the specified flags. The allocator process is
+ // expected to outlive the launched master (i.e., until it is
+ // stopped via Masters::stop).
Try<process::PID<master::Master> > start(
master::AllocatorProcess* allocatorProcess,
- const master::Flags& flags);
+ const master::Flags& flags = master::Flags());
// Stops and cleans up a master at the specified PID.
Try<Nothing> stop(const process::PID<master::Master>& pid);
@@ -94,10 +86,7 @@ public:
// Returns a new master detector for this instance of masters.
Owned<MasterDetector> detector(
const process::PID<slave::Slave>& pid,
- bool quiet);
-
- // "Default" flags used for creating masters.
- master::Flags flags;
+ const slave::Flags& flags);
private:
// Not copyable, not assignable.
@@ -107,6 +96,7 @@ public:
Cluster* cluster; // Enclosing class.
Option<zookeeper::URL> url;
+ // Encapsulates a single master's dependencies.
struct Master
{
Master()
@@ -134,42 +124,24 @@ public:
// Stop and clean up all slaves.
void shutdown();
- // Start and manage a new slave.
- Try<process::PID<slave::Slave> > start();
-
- // Start and manage a new slave using the specified flags.
- Try<process::PID<slave::Slave> > start(const slave::Flags& flags);
-
- // Start and manage a new slave with a testing isolator that uses
- // the specified executor for the specified ID. The executor is
- // expected to outlive the launched slave (i.e., until it is
- // stopped via Slaves::stop).
+ // Start and manage a new slave with a process isolator using the
+ // specified flags.
Try<process::PID<slave::Slave> > start(
- const ExecutorID& executorId,
- Executor* executor);
-
- // Start and manage a new slave using the specified flags with a
- // testing isolator that uses the specified executor for the
- // specified ID. The executor is expected to outlive the launched
- // slave (i.e., until it is stopped via Slaves::stop).
- Try<process::PID<slave::Slave> > start(
- const slave::Flags& flags,
- const ExecutorID& executorId,
- Executor* executor);
+ const slave::Flags& flags = slave::Flags());
// Start and manage a new slave injecting the specified isolator.
// The isolator is expected to outlive the launched slave (i.e.,
// until it is stopped via Slaves::stop).
- Try<process::PID<slave::Slave> > start(slave::Isolator* isolator);
Try<process::PID<slave::Slave> > start(
- const slave::Flags& flags,
- slave::Isolator* isolator);
+ slave::Isolator* isolator,
+ const slave::Flags& flags = slave::Flags());
- // Stops and cleans up a slave at the specified PID.
- Try<Nothing> stop(const process::PID<slave::Slave>& pid);
-
- // "Default" flags used for creating slaves.
- slave::Flags flags;
+ // Stops and cleans up a slave at the specified PID. If 'shutdown'
+ // is true then the slave is sent a shutdown message instead of
+ // being terminated.
+ Try<Nothing> stop(
+ const process::PID<slave::Slave>& pid,
+ bool shutdown = false);
private:
// Not copyable, not assignable.
@@ -179,15 +151,16 @@ public:
Cluster* cluster; // Enclosing class.
Masters* masters; // Used to create MasterDetector instances.
+ // Encapsulates a single slave's dependencies.
struct Slave
{
Slave()
- : slave(NULL),
- isolator(NULL),
+ : isolator(NULL),
+ slave(NULL),
detector(NULL) {}
- slave::Slave* slave;
slave::Isolator* isolator;
+ slave::Slave* slave;
Owned<MasterDetector> detector;
};
@@ -238,32 +211,6 @@ inline void Cluster::Masters::shutdown()
}
-inline Try<process::PID<master::Master> > Cluster::Masters::start()
-{
- // Disallow multiple masters when not using ZooKeeper.
- if (!masters.empty() && url.isNone()) {
- return Error("Can not start multiple masters when not using ZooKeeper");
- }
-
- Master master;
- master.allocatorProcess = new master::HierarchicalDRFAllocatorProcess();
- master.allocator = new master::Allocator(master.allocatorProcess);
- master.master = new master::Master(master.allocator, &cluster->files, flags);
-
- process::PID<master::Master> pid = process::spawn(master.master);
-
- if (url.isSome()) {
- master.detector = new ZooKeeperMasterDetector(url.get(), pid, true, true);
- } else {
- master.detector = new BasicMasterDetector(pid);
- }
-
- masters[pid] = master;
-
- return pid;
-}
-
-
inline Try<process::PID<master::Master> > Cluster::Masters::start(
const master::Flags& flags)
{
@@ -273,6 +220,7 @@ inline Try<process::PID<master::Master> > Cluster::Masters::start(
}
Master master;
+
master.allocatorProcess = new master::HierarchicalDRFAllocatorProcess();
master.allocator = new master::Allocator(master.allocatorProcess);
master.master = new master::Master(master.allocator, &cluster->files, flags);
@@ -292,13 +240,6 @@ inline Try<process::PID<master::Master> > Cluster::Masters::start(
inline Try<process::PID<master::Master> > Cluster::Masters::start(
- master::AllocatorProcess* allocatorProcess)
-{
- return Cluster::Masters::start(allocatorProcess, flags);
-}
-
-
-inline Try<process::PID<master::Master> > Cluster::Masters::start(
master::AllocatorProcess* allocatorProcess,
const master::Flags& flags)
{
@@ -339,11 +280,8 @@ inline Try<Nothing> Cluster::Masters::stop(
process::wait(master.master);
delete master.master;
- delete master.allocator; // Terminates and waits for the allocator process.
-
- if (master.allocatorProcess != NULL) {
- delete master.allocatorProcess;
- }
+ delete master.allocator; // Terminates and waits for allocator process.
+ delete master.allocatorProcess; // May be NULL.
delete master.detector;
@@ -355,10 +293,10 @@ inline Try<Nothing> Cluster::Masters::stop(
inline Owned<MasterDetector> Cluster::Masters::detector(
const process::PID<slave::Slave>& pid,
- bool quiet)
+ const slave::Flags& flags)
{
if (url.isSome()) {
- return new ZooKeeperMasterDetector(url.get(), pid, false, quiet);
+ return new ZooKeeperMasterDetector(url.get(), pid, false, flags.quiet);
}
CHECK(masters.size() == 1);
@@ -387,52 +325,22 @@ inline void Cluster::Slaves::shutdown()
}
-inline Try<process::PID<slave::Slave> > Cluster::Slaves::start()
-{
- // TODO(benh): Check that we dont have another slave already running
- // with flags that conflict (e.g., work_dir).
-
- Slave slave;
-
- slave.isolator = new TestingIsolator();
-
- process::spawn(slave.isolator);
-
- // TODO(benh): Create a work directory for each slave.
-
- slave.slave = new slave::Slave(flags, true, slave.isolator, &cluster->files);
-
- process::PID<slave::Slave> pid = process::spawn(slave.slave);
-
- // Get a detector for the master(s).
- slave.detector = masters->detector(pid, flags.quiet);
-
- slaves[pid] = slave;
-
- return pid;
-}
-
-
inline Try<process::PID<slave::Slave> > Cluster::Slaves::start(
const slave::Flags& flags)
{
- // TODO(benh): Check that we dont have another slave already running
- // with flags that conflict (e.g., work_dir).
+ // TODO(benh): Create a work directory if using the default.
Slave slave;
- slave.isolator = new TestingIsolator();
-
+ // Create a new process isolator for this slave.
+ slave.isolator = new slave::ProcessIsolator();
process::spawn(slave.isolator);
- // TODO(benh): Create a work directory for each slave.
-
slave.slave = new slave::Slave(flags, true, slave.isolator, &cluster->files);
-
process::PID<slave::Slave> pid = process::spawn(slave.slave);
// Get a detector for the master(s).
- slave.detector = masters->detector(pid, flags.quiet);
+ slave.detector = masters->detector(pid, flags);
slaves[pid] = slave;
@@ -441,82 +349,18 @@ inline Try<process::PID<slave::Slave> > Cluster::Slaves::start(
inline Try<process::PID<slave::Slave> > Cluster::Slaves::start(
- const ExecutorID& executorId,
- Executor* executor)
-{
- return start(flags, executorId, executor);
-}
-
-
-inline Try<process::PID<slave::Slave> > Cluster::Slaves::start(
- const slave::Flags& flags,
- const ExecutorID& executorId,
- Executor* executor)
-{
- // TODO(benh): Check that we dont have another slave already running
- // with flags that conflict (e.g., work_dir).
-
- Slave slave;
-
- slave.isolator = new TestingIsolator(executorId, executor);
-
- process::spawn(slave.isolator);
-
- // TODO(benh): Create a work directory for each slave.
-
- slave.slave = new slave::Slave(flags, true, slave.isolator, &cluster->files);
-
- process::PID<slave::Slave> pid = process::spawn(slave.slave);
-
- // Get a detector for the master(s).
- slave.detector = masters->detector(pid, flags.quiet);
-
- slaves[pid] = slave;
-
- return pid;
-}
-
-
-inline Try<process::PID<slave::Slave> > Cluster::Slaves::start(
- slave::Isolator* isolator)
-{
- // TODO(benh): Check that we dont have another slave already running
- // with flags that conflict (e.g., work_dir).
-
- Slave slave;
-
- // TODO(benh): Create a work directory for each slave.
-
- slave.slave = new slave::Slave(flags, true, isolator, &cluster->files);
-
- process::PID<slave::Slave> pid = process::spawn(slave.slave);
-
- // Get a detector for the master(s).
- slave.detector = masters->detector(pid, flags.quiet);
-
- slaves[pid] = slave;
-
- return pid;
-}
-
-
-inline Try<process::PID<slave::Slave> > Cluster::Slaves::start(
- const slave::Flags& flags,
- slave::Isolator* isolator)
+ slave::Isolator* isolator,
+ const slave::Flags& flags)
{
- // TODO(benh): Check that we dont have another slave already running
- // with flags that conflict (e.g., work_dir).
+ // TODO(benh): Create a work directory if using the default.
Slave slave;
- // TODO(benh): Create a work directory for each slave.
-
slave.slave = new slave::Slave(flags, true, isolator, &cluster->files);
-
process::PID<slave::Slave> pid = process::spawn(slave.slave);
// Get a detector for the master(s).
- slave.detector = masters->detector(pid, flags.quiet);
+ slave.detector = masters->detector(pid, flags);
slaves[pid] = slave;
@@ -525,7 +369,8 @@ inline Try<process::PID<slave::Slave> > Cluster::Slaves::start(
inline Try<Nothing> Cluster::Slaves::stop(
- const process::PID<slave::Slave>& pid)
+ const process::PID<slave::Slave>& pid,
+ bool shutdown)
{
if (slaves.count(pid) == 0) {
return Error("No slave found to stop");
@@ -533,17 +378,15 @@ inline Try<Nothing> Cluster::Slaves::stop(
Slave slave = slaves[pid];
- process::terminate(slave.slave);
+ if (shutdown) {
+ process::dispatch(slave.slave, &slave::Slave::shutdown);
+ } else {
+ process::terminate(slave.slave);
+ }
process::wait(slave.slave);
delete slave.slave;
- if (slave.isolator != NULL) {
- // TODO(benh): Terminate and wait for the isolator once the slave
- // is no longer doing so.
- // process::terminate(slave.isolator);
- // process::wait(slave.isolator);
- delete slave.isolator;
- }
+ delete slave.isolator; // May be NULL.
slaves.erase(pid);
http://git-wip-us.apache.org/repos/asf/incubator-mesos/blob/6b1b8208/src/tests/configurator_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/configurator_tests.cpp b/src/tests/configurator_tests.cpp
index e8ba936..b82f5c2 100644
--- a/src/tests/configurator_tests.cpp
+++ b/src/tests/configurator_tests.cpp
@@ -18,17 +18,13 @@
#include <gtest/gtest.h>
-#include <fstream>
-
-#include <boost/lexical_cast.hpp>
+#include <stout/gtest.hpp>
+#include <stout/os.hpp>
#include "configurator/configurator.hpp"
#include "tests/utils.hpp"
-using boost::lexical_cast;
-
-using std::ofstream;
using std::string;
using namespace mesos;
@@ -36,9 +32,7 @@ using namespace mesos::internal;
using namespace mesos::internal::tests;
-class ConfiguratorTest : public TemporaryDirectoryTest
-{
-};
+class ConfiguratorTest : public TemporaryDirectoryTest {};
TEST_F(ConfiguratorTest, Environment)
http://git-wip-us.apache.org/repos/asf/incubator-mesos/blob/6b1b8208/src/tests/exception_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/exception_tests.cpp b/src/tests/exception_tests.cpp
index 911e786..60bb690 100644
--- a/src/tests/exception_tests.cpp
+++ b/src/tests/exception_tests.cpp
@@ -22,17 +22,14 @@
#include <mesos/scheduler.hpp>
#include <process/gmock.hpp>
-
-#include "detector/detector.hpp"
+#include <process/pid.hpp>
+#include <process/process.hpp>
#include "local/local.hpp"
#include "master/master.hpp"
-#include "slave/process_isolator.hpp"
-#include "slave/slave.hpp"
-
-#include "tests/utils.hpp"
+#include "tests/mesos.hpp"
using namespace mesos;
using namespace mesos::internal;
@@ -40,8 +37,6 @@ using namespace mesos::internal::tests;
using mesos::internal::master::Master;
-using mesos::internal::slave::Slave;
-
using process::Future;
using process::PID;
@@ -50,14 +45,9 @@ using std::map;
using std::vector;
using testing::_;
-using testing::AnyOf;
using testing::AtMost;
-using testing::DoAll;
-using testing::ElementsAre;
using testing::Eq;
-using testing::Not;
using testing::Return;
-using testing::SaveArg;
TEST(ExceptionTest, DeactivateFrameworkOnAbort)
http://git-wip-us.apache.org/repos/asf/incubator-mesos/blob/6b1b8208/src/tests/fault_tolerance_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/fault_tolerance_tests.cpp b/src/tests/fault_tolerance_tests.cpp
index bcfe5db..e41a044 100644
--- a/src/tests/fault_tolerance_tests.cpp
+++ b/src/tests/fault_tolerance_tests.cpp
@@ -38,26 +38,21 @@
#include "common/protobuf_utils.hpp"
-#include "detector/detector.hpp"
-
#include "local/local.hpp"
-#include "master/allocator.hpp"
-#include "master/hierarchical_allocator_process.hpp"
#include "master/master.hpp"
#include "slave/isolator.hpp"
#include "slave/slave.hpp"
-#include "tests/utils.hpp"
+#include "tests/isolator.hpp"
+#include "tests/mesos.hpp"
using namespace mesos;
using namespace mesos::internal;
using namespace mesos::internal::protobuf;
using namespace mesos::internal::tests;
-using mesos::internal::master::Allocator;
-using mesos::internal::master::HierarchicalDRFAllocatorProcess;
using mesos::internal::master::Master;
using mesos::internal::slave::Isolator;
@@ -92,21 +87,14 @@ class FaultToleranceTest : public MesosTest {};
// its offer(s) is rescinded.
TEST_F(FaultToleranceTest, SlaveLost)
{
- HierarchicalDRFAllocatorProcess allocator;
- Allocator a(&allocator);
- Files files;
- Master m(&a, &files);
- PID<Master> master = process::spawn(&m);
-
- TestingIsolator isolator;
-
- Slave s(slaveFlags, true, &isolator, &files);
- PID<Slave> slave = process::spawn(&s);
+ Try<PID<Master> > master = StartMaster();
+ ASSERT_SOME(master);
- BasicMasterDetector detector(master, slave, true);
+ Try<PID<Slave> > slave = StartSlave();
+ ASSERT_SOME(slave);
MockScheduler sched;
- MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master);
+ MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master.get());
EXPECT_CALL(sched, registered(&driver, _, _));
@@ -128,7 +116,7 @@ TEST_F(FaultToleranceTest, SlaveLost)
EXPECT_CALL(sched, slaveLost(&driver, offers.get()[0].slave_id()))
.WillOnce(FutureSatisfy(&slaveLost));
- process::terminate(slave);
+ ShutdownSlaves();
AWAIT_READY(offerRescinded);
AWAIT_READY(slaveLost);
@@ -136,10 +124,7 @@ TEST_F(FaultToleranceTest, SlaveLost)
driver.stop();
driver.join();
- process::wait(slave);
-
- process::terminate(master);
- process::wait(master);
+ Shutdown();
}
@@ -206,9 +191,6 @@ TEST_F(FaultToleranceTest, PartitionedSlave)
}
-// TODO(bmahler): Remove this when all the tests are refactored.
-class FaultToleranceClusterTest : public MesosClusterTest {};
-
// The purpose of this test is to ensure that when slaves are removed
// from the master, and then attempt to re-register, we deny the
// re-registration by sending a ShutdownMessage to the slave.
@@ -218,9 +200,9 @@ class FaultToleranceClusterTest : public MesosClusterTest {};
// re-register with its running tasks. We've already notified
// frameworks that these tasks were LOST, so we have to have the
// slave shut down.
-TEST_F(FaultToleranceClusterTest, PartitionedSlaveReregistration)
+TEST_F(FaultToleranceTest, PartitionedSlaveReregistration)
{
- Try<PID<Master> > master = cluster.masters.start();
+ Try<PID<Master> > master = StartMaster();
ASSERT_SOME(master);
// Allow the master to PING the slave, but drop all PONG messages
@@ -230,8 +212,9 @@ TEST_F(FaultToleranceClusterTest, PartitionedSlaveReregistration)
Future<Message> ping = FUTURE_MESSAGE(Eq("PING"), _, _);
DROP_MESSAGES(Eq("PONG"), _, _);
- MockExecutor exec;
- Try<PID<Slave> > slave = cluster.slaves.start(DEFAULT_EXECUTOR_ID, &exec);
+ MockExecutor exec(DEFAULT_EXECUTOR_ID);
+
+ Try<PID<Slave> > slave = StartSlave(&exec);
ASSERT_SOME(slave);
MockScheduler sched;
@@ -355,7 +338,7 @@ TEST_F(FaultToleranceClusterTest, PartitionedSlaveReregistration)
driver.stop();
driver.join();
- cluster.shutdown();
+ Shutdown();
}
@@ -367,9 +350,9 @@ TEST_F(FaultToleranceClusterTest, PartitionedSlaveReregistration)
// the slave may attempt to send updates if it was unaware that the
// master deactivated it. We've already notified frameworks that these
// tasks were LOST, so we have to have the slave shut down.
-TEST_F(FaultToleranceClusterTest, PartitionedSlaveStatusUpdates)
+TEST_F(FaultToleranceTest, PartitionedSlaveStatusUpdates)
{
- Try<PID<Master> > master = cluster.masters.start();
+ Try<PID<Master> > master = StartMaster();
ASSERT_SOME(master);
// Allow the master to PING the slave, but drop all PONG messages
@@ -382,8 +365,9 @@ TEST_F(FaultToleranceClusterTest, PartitionedSlaveStatusUpdates)
Future<SlaveRegisteredMessage> slaveRegisteredMessage =
FUTURE_PROTOBUF(SlaveRegisteredMessage(), _, _);
- MockExecutor exec;
- Try<PID<Slave> > slave = cluster.slaves.start(DEFAULT_EXECUTOR_ID, &exec);
+ MockExecutor exec(DEFAULT_EXECUTOR_ID);
+
+ Try<PID<Slave> > slave = StartSlave(&exec);
ASSERT_SOME(slave);
AWAIT_READY(slaveRegisteredMessage);
@@ -466,7 +450,7 @@ TEST_F(FaultToleranceClusterTest, PartitionedSlaveStatusUpdates)
driver.stop();
driver.join();
- cluster.shutdown();
+ Shutdown();
}
@@ -479,9 +463,9 @@ TEST_F(FaultToleranceClusterTest, PartitionedSlaveStatusUpdates)
// it was unaware that the master deactivated it. We've already
// notified frameworks that the tasks under the executors were LOST,
// so we have to have the slave shut down.
-TEST_F(FaultToleranceClusterTest, PartitionedSlaveExitedExecutor)
+TEST_F(FaultToleranceTest, PartitionedSlaveExitedExecutor)
{
- Try<PID<Master> > master = cluster.masters.start();
+ Try<PID<Master> > master = StartMaster();
ASSERT_SOME(master);
// Allow the master to PING the slave, but drop all PONG messages
@@ -491,10 +475,10 @@ TEST_F(FaultToleranceClusterTest, PartitionedSlaveExitedExecutor)
Future<Message> ping = FUTURE_MESSAGE(Eq("PING"), _, _);
DROP_MESSAGES(Eq("PONG"), _, _);
- MockExecutor exec;
- TestingIsolator* isolator = new TestingIsolator(DEFAULT_EXECUTOR_ID, &exec);
- process::spawn(isolator);
- Try<PID<Slave> > slave = cluster.slaves.start(isolator);
+ MockExecutor exec(DEFAULT_EXECUTOR_ID);
+ TestingIsolator isolator(&exec);
+
+ Try<PID<Slave> > slave = StartSlave(&isolator);
ASSERT_SOME(slave);
MockScheduler sched;
@@ -602,21 +586,15 @@ TEST_F(FaultToleranceClusterTest, PartitionedSlaveExitedExecutor)
driver.stop();
driver.join();
- cluster.shutdown();
-
- // TODO(benh): Terminate and wait for the isolator once the slave
- // is no longer doing so.
- // process::terminate(isolator);
- // process::wait(isolator);
- delete isolator;
+ Shutdown();
}
// This test ensures that a framework connecting with a
// failed over master gets a re-registered callback.
-TEST_F(FaultToleranceClusterTest, MasterFailover)
+TEST_F(FaultToleranceTest, MasterFailover)
{
- Try<PID<Master> > master = cluster.masters.start();
+ Try<PID<Master> > master = StartMaster();
ASSERT_SOME(master);
MockScheduler sched;
@@ -632,10 +610,12 @@ TEST_F(FaultToleranceClusterTest, MasterFailover)
AWAIT_READY(frameworkRegisteredMessage);
// Simulate failed over master by restarting the master.
- ASSERT_SOME(cluster.masters.stop(master.get()));
- master = cluster.masters.start();
+ Stop(master.get());
+ master = StartMaster();
ASSERT_SOME(master);
+ EXPECT_CALL(sched, disconnected(&driver));
+
Future<Nothing> reregistered;
EXPECT_CALL(sched, reregistered(&driver, _))
.WillOnce(FutureSatisfy(&reregistered));
@@ -652,7 +632,7 @@ TEST_F(FaultToleranceClusterTest, MasterFailover)
driver.stop();
driver.join();
- cluster.shutdown();
+ Shutdown();
}
@@ -818,21 +798,14 @@ TEST_F(FaultToleranceTest, FrameworkReregister)
TEST_F(FaultToleranceTest, TaskLost)
{
- HierarchicalDRFAllocatorProcess allocator;
- Allocator a(&allocator);
- Files files;
- Master m(&a, &files);
- PID<Master> master = process::spawn(&m);
-
- TestingIsolator isolator;
-
- Slave s(slaveFlags, true, &isolator, &files);
- PID<Slave> slave = process::spawn(&s);
+ Try<PID<Master> > master = StartMaster();
+ ASSERT_SOME(master);
- BasicMasterDetector detector(master, slave, true);
+ Try<PID<Slave> > slave = StartSlave();
+ ASSERT_SOME(slave);
MockScheduler sched;
- MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master);
+ MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master.get());
EXPECT_CALL(sched, registered(&driver, _, _));
@@ -882,11 +855,7 @@ TEST_F(FaultToleranceTest, TaskLost)
driver.stop();
driver.join();
- process::terminate(slave);
- process::wait(slave);
-
- process::terminate(master);
- process::wait(master);
+ Shutdown();
}
@@ -896,23 +865,17 @@ TEST_F(FaultToleranceTest, SchedulerFailoverStatusUpdate)
{
Clock::pause();
- HierarchicalDRFAllocatorProcess allocator;
- Allocator a(&allocator);
- Files files;
- Master m(&a, &files);
- PID<Master> master = process::spawn(&m);
-
- MockExecutor exec;
- TestingIsolator isolator(DEFAULT_EXECUTOR_ID, &exec);
+ Try<PID<Master> > master = StartMaster();
+ ASSERT_SOME(master);
- Slave s(slaveFlags, true, &isolator, &files);
- PID<Slave> slave = process::spawn(&s);
+ MockExecutor exec(DEFAULT_EXECUTOR_ID);
- BasicMasterDetector detector(master, slave, true);
+ Try<PID<Slave> > slave = StartSlave(&exec);
+ ASSERT_SOME(slave);
// Launch the first (i.e., failing) scheduler.
MockScheduler sched1;
- MesosSchedulerDriver driver1(&sched1, DEFAULT_FRAMEWORK_INFO, master);
+ MesosSchedulerDriver driver1(&sched1, DEFAULT_FRAMEWORK_INFO, master.get());
FrameworkID frameworkId;
EXPECT_CALL(sched1, registered(&driver1, _, _))
@@ -948,7 +911,7 @@ TEST_F(FaultToleranceTest, SchedulerFailoverStatusUpdate)
// Drop the first status update message
// between master and the scheduler.
Future<StatusUpdateMessage> statusUpdateMessage =
- DROP_PROTOBUF(StatusUpdateMessage(), _, Not(AnyOf(Eq(master), Eq(slave))));
+ DROP_PROTOBUF(StatusUpdateMessage(), _, Not(AnyOf(Eq(master.get()), Eq(slave.get()))));
driver1.launchTasks(offers.get()[0].id(), tasks);
@@ -964,7 +927,7 @@ TEST_F(FaultToleranceTest, SchedulerFailoverStatusUpdate)
framework2 = DEFAULT_FRAMEWORK_INFO;
framework2.mutable_id()->MergeFrom(frameworkId);
- MesosSchedulerDriver driver2(&sched2, framework2, master);
+ MesosSchedulerDriver driver2(&sched2, framework2, master.get());
Future<Nothing> registered2;
EXPECT_CALL(sched2, registered(&driver2, frameworkId, _))
@@ -999,11 +962,7 @@ TEST_F(FaultToleranceTest, SchedulerFailoverStatusUpdate)
AWAIT_READY(shutdown); // Ensures MockExecutor can be deallocated.
- process::terminate(slave);
- process::wait(slave);
-
- process::terminate(master);
- process::wait(master);
+ Shutdown();
Clock::resume();
}
@@ -1011,22 +970,16 @@ TEST_F(FaultToleranceTest, SchedulerFailoverStatusUpdate)
TEST_F(FaultToleranceTest, ForwardStatusUpdateUnknownExecutor)
{
- HierarchicalDRFAllocatorProcess allocator;
- Allocator a(&allocator);
- Files files;
- Master m(&a, &files);
- PID<Master> master = process::spawn(&m);
-
- MockExecutor exec;
- TestingIsolator isolator(DEFAULT_EXECUTOR_ID, &exec);
+ Try<PID<Master> > master = StartMaster();
+ ASSERT_SOME(master);
- Slave s(slaveFlags, true, &isolator, &files);
- PID<Slave> slave = process::spawn(&s);
+ MockExecutor exec(DEFAULT_EXECUTOR_ID);
- BasicMasterDetector detector(master, slave, true);
+ Try<PID<Slave> > slave = StartSlave(&exec);
+ ASSERT_SOME(slave);
MockScheduler sched;
- MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master);
+ MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master.get());
FrameworkID frameworkId;
EXPECT_CALL(sched, registered(&driver, _, _))
@@ -1078,7 +1031,7 @@ TEST_F(FaultToleranceTest, ForwardStatusUpdateUnknownExecutor)
StatusUpdate statusUpdate2 = createStatusUpdate(
frameworkId, offer.slave_id(), taskId, TASK_RUNNING, "Dummy update");
- process::dispatch(slave, &Slave::statusUpdate, statusUpdate2);
+ process::dispatch(slave.get(), &Slave::statusUpdate, statusUpdate2);
// Ensure that the scheduler receives task2's update.
AWAIT_READY(status);
@@ -1094,32 +1047,22 @@ TEST_F(FaultToleranceTest, ForwardStatusUpdateUnknownExecutor)
AWAIT_READY(shutdown); // Ensures MockExecutor can be deallocated.
- process::terminate(slave);
- process::wait(slave);
-
- process::terminate(master);
- process::wait(master);
+ Shutdown();
}
TEST_F(FaultToleranceTest, SchedulerFailoverFrameworkMessage)
{
- HierarchicalDRFAllocatorProcess allocator;
- Allocator a(&allocator);
- Files files;
- Master m(&a, &files);
- PID<Master> master = process::spawn(&m);
-
- MockExecutor exec;
- TestingIsolator isolator(DEFAULT_EXECUTOR_ID, &exec);
+ Try<PID<Master> > master = StartMaster();
+ ASSERT_SOME(master);
- Slave s(slaveFlags, true, &isolator, &files);
- PID<Slave> slave = process::spawn(&s);
+ MockExecutor exec(DEFAULT_EXECUTOR_ID);
- BasicMasterDetector detector(master, slave, true);
+ Try<PID<Slave> > slave = StartSlave(&exec);
+ ASSERT_SOME(slave);
MockScheduler sched1;
- MesosSchedulerDriver driver1(&sched1, DEFAULT_FRAMEWORK_INFO, master);
+ MesosSchedulerDriver driver1(&sched1, DEFAULT_FRAMEWORK_INFO, master.get());
FrameworkID frameworkId;
EXPECT_CALL(sched1, registered(&driver1, _, _))
@@ -1167,7 +1110,7 @@ TEST_F(FaultToleranceTest, SchedulerFailoverFrameworkMessage)
framework2 = DEFAULT_FRAMEWORK_INFO;
framework2.mutable_id()->MergeFrom(frameworkId);
- MesosSchedulerDriver driver2(&sched2, framework2, master);
+ MesosSchedulerDriver driver2(&sched2, framework2, master.get());
Future<Nothing> registered;
EXPECT_CALL(sched2, registered(&driver2, frameworkId, _))
@@ -1199,34 +1142,23 @@ TEST_F(FaultToleranceTest, SchedulerFailoverFrameworkMessage)
AWAIT_READY(shutdown); // Ensures MockExecutor can be deallocated.
- process::terminate(slave);
- process::wait(slave);
-
- process::terminate(master);
- process::wait(master);
+ Shutdown();
}
// This test checks that a scheduler exit shuts down the executor.
TEST_F(FaultToleranceTest, SchedulerExit)
{
- HierarchicalDRFAllocatorProcess allocator;
- Allocator a(&allocator);
- Files files;
- Master m(&a, &files);
- PID<Master> master = process::spawn(&m);
-
- MockExecutor exec;
-
- TestingIsolator isolator(DEFAULT_EXECUTOR_ID, &exec);
+ Try<PID<Master> > master = StartMaster();
+ ASSERT_SOME(master);
- Slave s(slaveFlags, true, &isolator, &files);
- PID<Slave> slave = process::spawn(&s);
+ MockExecutor exec(DEFAULT_EXECUTOR_ID);
- BasicMasterDetector detector(master, slave, true);
+ Try<PID<Slave> > slave = StartSlave(&exec);
+ ASSERT_SOME(slave);
MockScheduler sched;
- MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master);
+ MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master.get());
EXPECT_CALL(sched, registered(&driver, _, _));
@@ -1275,11 +1207,7 @@ TEST_F(FaultToleranceTest, SchedulerExit)
AWAIT_READY(shutdown);
- process::terminate(slave);
- process::wait(slave);
-
- process::terminate(master);
- process::wait(master);
+ Shutdown();
}
@@ -1287,25 +1215,18 @@ TEST_F(FaultToleranceTest, SlaveReliableRegistration)
{
Clock::pause();
- HierarchicalDRFAllocatorProcess allocator;
- Allocator a(&allocator);
- Files files;
- Master m(&a, &files);
- PID<Master> master = process::spawn(&m);
-
- TestingIsolator isolator;
+ Try<PID<Master> > master = StartMaster();
+ ASSERT_SOME(master);
// Drop the first slave registered message, allow subsequent messages.
Future<SlaveRegisteredMessage> slaveRegisteredMessage =
DROP_PROTOBUF(SlaveRegisteredMessage(), _, _);
- Slave s(slaveFlags, true, &isolator, &files);
- PID<Slave> slave = process::spawn(&s);
-
- BasicMasterDetector detector(master, slave, true);
+ Try<PID<Slave> > slave = StartSlave();
+ ASSERT_SOME(slave);
MockScheduler sched;
- MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master);
+ MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master.get());
EXPECT_CALL(sched, registered(&driver, _, _));
@@ -1325,11 +1246,7 @@ TEST_F(FaultToleranceTest, SlaveReliableRegistration)
driver.stop();
driver.join();
- process::terminate(slave);
- process::wait(slave);
-
- process::terminate(master);
- process::wait(master);
+ Shutdown();
Clock::resume();
}
@@ -1337,21 +1254,14 @@ TEST_F(FaultToleranceTest, SlaveReliableRegistration)
TEST_F(FaultToleranceTest, SlaveReregisterOnZKExpiration)
{
- HierarchicalDRFAllocatorProcess allocator;
- Allocator a(&allocator);
- Files files;
- Master m(&a, &files);
- PID<Master> master = process::spawn(&m);
-
- TestingIsolator isolator;
-
- Slave s(slaveFlags, true, &isolator, &files);
- PID<Slave> slave = process::spawn(&s);
+ Try<PID<Master> > master = StartMaster();
+ ASSERT_SOME(master);
- BasicMasterDetector detector(master, slave, true);
+ Try<PID<Slave> > slave = StartSlave();
+ ASSERT_SOME(slave);
MockScheduler sched;
- MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master);
+ MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master.get());
EXPECT_CALL(sched, registered(&driver, _, _));
@@ -1371,44 +1281,32 @@ TEST_F(FaultToleranceTest, SlaveReregisterOnZKExpiration)
// expiration) at the slave.
NewMasterDetectedMessage message;
- message.set_pid(master);
+ message.set_pid(master.get());
- process::post(slave, message);
+ process::post(slave.get(), message);
AWAIT_READY(slaveReregisteredMessage);
driver.stop();
driver.join();
- process::terminate(slave);
- process::wait(slave);
-
- process::terminate(master);
- process::wait(master);
+ Shutdown();
}
// This test verifies that the master sends TASK_LOST updates
// for tasks in the master absent from the re-registered slave.
// We do this by dropping RunTaskMessage from master to the slave.
-// TODO(vinod): Use 'Cluster' abstraction.
TEST_F(FaultToleranceTest, ConsolidateTasksOnSlaveReregistration)
{
- HierarchicalDRFAllocatorProcess allocator;
- Allocator a(&allocator);
- Files files;
- Master m(&a, &files);
- PID<Master> master = process::spawn(&m);
-
- TestingIsolator isolator;
-
- Slave s(slaveFlags, true, &isolator, &files);
- PID<Slave> slave = process::spawn(&s);
+ Try<PID<Master> > master = StartMaster();
+ ASSERT_SOME(master);
- BasicMasterDetector detector(master, slave, true);
+ Try<PID<Slave> > slave = StartSlave();
+ ASSERT_SOME(slave);
MockScheduler sched;
- MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master);
+ MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master.get());
EXPECT_CALL(sched, registered(&driver, _, _));
@@ -1435,14 +1333,15 @@ TEST_F(FaultToleranceTest, ConsolidateTasksOnSlaveReregistration)
// We now launch a task and drop the corresponding RunTaskMessage on
// the slave, to ensure that only the master knows about this task.
- Future<RunTaskMessage> runTaskMessage = DROP_PROTOBUF(RunTaskMessage(), _, _);
+ Future<RunTaskMessage> runTaskMessage =
+ DROP_PROTOBUF(RunTaskMessage(), _, _);
driver.launchTasks(offers.get()[0].id(), tasks);
AWAIT_READY(runTaskMessage);
Future<SlaveReregisteredMessage> slaveReregisteredMessage =
- FUTURE_PROTOBUF(SlaveReregisteredMessage(), _, _);
+ FUTURE_PROTOBUF(SlaveReregisteredMessage(), _, _);
Future<TaskStatus> status;
EXPECT_CALL(sched, statusUpdate(&driver, _))
@@ -1452,9 +1351,9 @@ TEST_F(FaultToleranceTest, ConsolidateTasksOnSlaveReregistration)
// expiration) at the slave to force re-registration.
NewMasterDetectedMessage message;
- message.set_pid(master);
+ message.set_pid(master.get());
- process::post(slave, message);
+ process::post(slave.get(), message);
AWAIT_READY(slaveReregisteredMessage);
@@ -1466,9 +1365,5 @@ TEST_F(FaultToleranceTest, ConsolidateTasksOnSlaveReregistration)
driver.stop();
driver.join();
- process::terminate(slave);
- process::wait(slave);
-
- process::terminate(master);
- process::wait(master);
+ Shutdown();
}
http://git-wip-us.apache.org/repos/asf/incubator-mesos/blob/6b1b8208/src/tests/files_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/files_tests.cpp b/src/tests/files_tests.cpp
index 5679ecd..a696aa2 100644
--- a/src/tests/files_tests.cpp
+++ b/src/tests/files_tests.cpp
@@ -21,10 +21,12 @@
#include <gmock/gmock.h>
#include <process/future.hpp>
+#include <process/gtest.hpp>
#include <process/http.hpp>
#include <process/pid.hpp>
#include <process/process.hpp>
+#include <stout/gtest.hpp>
#include <stout/json.hpp>
#include <stout/os.hpp>
#include <stout/stringify.hpp>
http://git-wip-us.apache.org/repos/asf/incubator-mesos/blob/6b1b8208/src/tests/flags_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/flags_tests.cpp b/src/tests/flags_tests.cpp
index e07dbcc..d52d4db 100644
--- a/src/tests/flags_tests.cpp
+++ b/src/tests/flags_tests.cpp
@@ -23,6 +23,7 @@
#include <string>
#include <stout/duration.hpp>
+#include <stout/gtest.hpp>
#include <stout/none.hpp>
#include <stout/option.hpp>
@@ -31,8 +32,6 @@
#include "flags/flags.hpp"
-#include "tests/utils.hpp"
-
using namespace flags;
class TestFlags : public virtual FlagsBase
http://git-wip-us.apache.org/repos/asf/incubator-mesos/blob/6b1b8208/src/tests/gc_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/gc_tests.cpp b/src/tests/gc_tests.cpp
index 949678c..43c3e5d 100644
--- a/src/tests/gc_tests.cpp
+++ b/src/tests/gc_tests.cpp
@@ -55,6 +55,8 @@
#include "slave/paths.hpp"
#include "slave/slave.hpp"
+#include "tests/isolator.hpp"
+#include "tests/mesos.hpp"
#include "tests/utils.hpp"
using namespace mesos;
@@ -248,20 +250,24 @@ TEST_F(GarbageCollectorTest, Prune)
}
-class GarbageCollectorIntegrationTest : public MesosClusterTest {};
+class GarbageCollectorIntegrationTest : public MesosTest {};
TEST_F(GarbageCollectorIntegrationTest, Restart)
{
- Try<PID<Master> > master = cluster.masters.start();
+ Try<PID<Master> > master = StartMaster();
ASSERT_SOME(master);
Future<SlaveRegisteredMessage> slaveRegisteredMessage =
FUTURE_PROTOBUF(SlaveRegisteredMessage(), _, _);
- MockExecutor exec;
+ MockExecutor exec(DEFAULT_EXECUTOR_ID);
- Try<PID<Slave> > slave = cluster.slaves.start(DEFAULT_EXECUTOR_ID, &exec);
+ // Need to create our own flags because we want to reuse them when
+ // we (re)start the slave below.
+ slave::Flags flags = CreateSlaveFlags();
+
+ Try<PID<Slave> > slave = StartSlave(&exec, flags);
ASSERT_SOME(slave);
AWAIT_READY(slaveRegisteredMessage);
@@ -272,7 +278,7 @@ TEST_F(GarbageCollectorIntegrationTest, Restart)
EXPECT_CALL(sched, registered(_, _, _))
.Times(1);
- Resources resources = Resources::parse(cluster.slaves.flags.resources.get());
+ Resources resources = Resources::parse(flags.resources.get());
double cpus = resources.get("cpus", Value::Scalar()).value();
double mem = resources.get("mem", Value::Scalar()).value();
@@ -300,7 +306,7 @@ TEST_F(GarbageCollectorIntegrationTest, Restart)
// until the task is launched. We get the slave ID from the
// SlaveRegisteredMessage.
const std::string& slaveDir = slave::paths::getSlavePath(
- cluster.slaves.flags.work_dir,
+ flags.work_dir,
slaveRegisteredMessage.get().slave_id());
ASSERT_TRUE(os::exists(slaveDir));
@@ -318,7 +324,7 @@ TEST_F(GarbageCollectorIntegrationTest, Restart)
EXPECT_CALL(sched, slaveLost(_, _))
.WillOnce(FutureSatisfy(&slaveLost));
- cluster.slaves.stop(slave.get());
+ Stop(slave.get());
AWAIT_READY(shutdown); // Ensures MockExecutor can be deallocated.
@@ -327,14 +333,14 @@ TEST_F(GarbageCollectorIntegrationTest, Restart)
Future<Nothing> schedule =
FUTURE_DISPATCH(_, &GarbageCollectorProcess::schedule);
- slave = cluster.slaves.start();
+ slave = StartSlave(flags);
ASSERT_SOME(slave);
AWAIT_READY(schedule);
Clock::settle(); // Wait for GarbageCollectorProcess::schedule to complete.
- Clock::advance(cluster.slaves.flags.gc_delay);
+ Clock::advance(flags.gc_delay);
Clock::settle();
@@ -346,21 +352,23 @@ TEST_F(GarbageCollectorIntegrationTest, Restart)
driver.stop();
driver.join();
- cluster.shutdown();
+ Shutdown();
}
TEST_F(GarbageCollectorIntegrationTest, ExitedFramework)
{
- Try<PID<Master> > master = cluster.masters.start();
+ Try<PID<Master> > master = StartMaster();
ASSERT_SOME(master);
Future<SlaveRegisteredMessage> slaveRegisteredMessage =
FUTURE_PROTOBUF(SlaveRegisteredMessage(), _, _);
- MockExecutor exec;
+ MockExecutor exec(DEFAULT_EXECUTOR_ID);
+
+ slave::Flags flags = CreateSlaveFlags();
- Try<PID<Slave> > slave = cluster.slaves.start(DEFAULT_EXECUTOR_ID, &exec);
+ Try<PID<Slave> > slave = StartSlave(&exec, flags);
ASSERT_SOME(slave);
AWAIT_READY(slaveRegisteredMessage);
@@ -374,7 +382,7 @@ TEST_F(GarbageCollectorIntegrationTest, ExitedFramework)
EXPECT_CALL(sched, registered(_, _, _))
.WillOnce(SaveArg<1>(&frameworkId));
- Resources resources = Resources::parse(cluster.slaves.flags.resources.get());
+ Resources resources = Resources::parse(flags.resources.get());
double cpus = resources.get("cpus", Value::Scalar()).value();
double mem = resources.get("mem", Value::Scalar()).value();
@@ -418,7 +426,7 @@ TEST_F(GarbageCollectorIntegrationTest, ExitedFramework)
FUTURE_DISPATCH(_, &GarbageCollectorProcess::schedule);
// Advance clock to kill executor via isolator.
- Clock::advance(cluster.slaves.flags.executor_shutdown_grace_period);
+ Clock::advance(flags.executor_shutdown_grace_period);
Clock::settle();
@@ -426,13 +434,13 @@ TEST_F(GarbageCollectorIntegrationTest, ExitedFramework)
Clock::settle(); // Wait for GarbageCollectorProcess::schedule to complete.
- Clock::advance(cluster.slaves.flags.gc_delay);
+ Clock::advance(flags.gc_delay);
Clock::settle();
// Framework's directory should be gc'ed by now.
const string& frameworkDir = slave::paths::getFrameworkPath(
- cluster.slaves.flags.work_dir, slaveId, frameworkId);
+ flags.work_dir, slaveId, frameworkId);
ASSERT_FALSE(os::exists(frameworkDir));
@@ -443,20 +451,22 @@ TEST_F(GarbageCollectorIntegrationTest, ExitedFramework)
Clock::resume();
- cluster.shutdown(); // Must shutdown before 'isolator' gets deallocated.
+ Shutdown(); // Must shutdown before 'isolator' gets deallocated.
}
TEST_F(GarbageCollectorIntegrationTest, ExitedExecutor)
{
- Try<PID<Master> > master = cluster.masters.start();
+ Try<PID<Master> > master = StartMaster();
ASSERT_SOME(master);
- MockExecutor exec;
+ MockExecutor exec(DEFAULT_EXECUTOR_ID);
+
+ TestingIsolator isolator(&exec);
- TestingIsolator isolator(DEFAULT_EXECUTOR_ID, &exec);
+ slave::Flags flags = CreateSlaveFlags();
- Try<PID<Slave> > slave = cluster.slaves.start(&isolator);
+ Try<PID<Slave> > slave = StartSlave(&isolator);
ASSERT_SOME(slave);
MockScheduler sched;
@@ -466,7 +476,7 @@ TEST_F(GarbageCollectorIntegrationTest, ExitedExecutor)
EXPECT_CALL(sched, registered(_, _, _))
.WillOnce(FutureArg<1>(&frameworkId));
- Resources resources = Resources::parse(cluster.slaves.flags.resources.get());
+ Resources resources = Resources::parse(flags.resources.get());
double cpus = resources.get("cpus", Value::Scalar()).value();
double mem = resources.get("mem", Value::Scalar()).value();
@@ -518,7 +528,7 @@ TEST_F(GarbageCollectorIntegrationTest, ExitedExecutor)
Clock::settle(); // Wait for GarbageCollectorProcess::schedule to complete.
- Clock::advance(cluster.slaves.flags.gc_delay);
+ Clock::advance(flags.gc_delay);
Clock::settle();
@@ -533,20 +543,22 @@ TEST_F(GarbageCollectorIntegrationTest, ExitedExecutor)
driver.stop();
driver.join();
- cluster.shutdown(); // Must shutdown before 'isolator' gets deallocated.
+ Shutdown(); // Must shutdown before 'isolator' gets deallocated.
}
TEST_F(GarbageCollectorIntegrationTest, DiskUsage)
{
- Try<PID<Master> > master = cluster.masters.start();
+ Try<PID<Master> > master = StartMaster();
ASSERT_SOME(master);
- MockExecutor exec;
+ MockExecutor exec(DEFAULT_EXECUTOR_ID);
- TestingIsolator isolator(DEFAULT_EXECUTOR_ID, &exec);
+ TestingIsolator isolator(&exec);
- Try<PID<Slave> > slave = cluster.slaves.start(&isolator);
+ slave::Flags flags = CreateSlaveFlags();
+
+ Try<PID<Slave> > slave = StartSlave(&isolator, flags);
ASSERT_SOME(slave);
MockScheduler sched;
@@ -556,7 +568,7 @@ TEST_F(GarbageCollectorIntegrationTest, DiskUsage)
EXPECT_CALL(sched, registered(_, _, _))
.WillOnce(FutureArg<1>(&frameworkId));
- Resources resources = Resources::parse(cluster.slaves.flags.resources.get());
+ Resources resources = Resources::parse(flags.resources.get());
double cpus = resources.get("cpus", Value::Scalar()).value();
double mem = resources.get("mem", Value::Scalar()).value();
@@ -632,7 +644,7 @@ TEST_F(GarbageCollectorIntegrationTest, DiskUsage)
driver.stop();
driver.join();
- cluster.shutdown(); // Must shutdown before 'isolator' gets deallocated.
+ Shutdown(); // Must shutdown before 'isolator' gets deallocated.
}
@@ -641,17 +653,19 @@ TEST_F(GarbageCollectorIntegrationTest, DiskUsage)
// created by an old executor (with the same id).
TEST_F(GarbageCollectorIntegrationTest, Unschedule)
{
- Try<PID<Master> > master = cluster.masters.start();
+ Try<PID<Master> > master = StartMaster();
ASSERT_SOME(master);
Future<SlaveRegisteredMessage> slaveRegistered =
FUTURE_PROTOBUF(SlaveRegisteredMessage(), _, _);
- MockExecutor exec;
+ MockExecutor exec(DEFAULT_EXECUTOR_ID);
+
+ TestingIsolator isolator(&exec);
- TestingIsolator isolator(DEFAULT_EXECUTOR_ID, &exec);
+ slave::Flags flags = CreateSlaveFlags();
- Try<PID<Slave> > slave = cluster.slaves.start(&isolator);
+ Try<PID<Slave> > slave = StartSlave(&isolator, flags);
ASSERT_SOME(slave);
AWAIT_READY(slaveRegistered);
@@ -663,7 +677,7 @@ TEST_F(GarbageCollectorIntegrationTest, Unschedule)
EXPECT_CALL(sched, registered(_, _, _))
.WillOnce(FutureArg<1>(&frameworkId));
- Resources resources = Resources::parse(cluster.slaves.flags.resources.get());
+ Resources resources = Resources::parse(flags.resources.get());
double cpus = resources.get("cpus", Value::Scalar()).value();
double mem = resources.get("mem", Value::Scalar()).value();
@@ -755,5 +769,5 @@ TEST_F(GarbageCollectorIntegrationTest, Unschedule)
driver.stop();
driver.join();
- cluster.shutdown(); // Must shutdown before 'isolator' gets deallocated.
+ Shutdown(); // Must shutdown before 'isolator' gets deallocated.
}
http://git-wip-us.apache.org/repos/asf/incubator-mesos/blob/6b1b8208/src/tests/group_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/group_tests.cpp b/src/tests/group_tests.cpp
index d32b74f..c7f789c 100644
--- a/src/tests/group_tests.cpp
+++ b/src/tests/group_tests.cpp
@@ -23,10 +23,11 @@
#include <string>
#include <process/future.hpp>
+#include <process/gtest.hpp>
+#include <stout/gtest.hpp>
#include <stout/option.hpp>
-#include "tests/utils.hpp"
#include "tests/zookeeper.hpp"
#include "zookeeper/authentication.hpp"
http://git-wip-us.apache.org/repos/asf/incubator-mesos/blob/6b1b8208/src/tests/isolator.hpp
----------------------------------------------------------------------
diff --git a/src/tests/isolator.hpp b/src/tests/isolator.hpp
index 17dc7b3..ebfc485 100644
--- a/src/tests/isolator.hpp
+++ b/src/tests/isolator.hpp
@@ -26,6 +26,7 @@
#include <process/future.hpp>
#include <process/pid.hpp>
+#include <stout/os.hpp>
#include <stout/try.hpp>
#include <stout/uuid.hpp>
@@ -34,6 +35,8 @@
#include "slave/isolator.hpp"
+#include "tests/mesos.hpp" // For MockExecutor.
+
namespace mesos {
namespace internal {
namespace tests {
@@ -58,6 +61,12 @@ public:
setup();
}
+ TestingIsolator(MockExecutor* executor)
+ {
+ executors[executor->id] = executor;
+ setup();
+ }
+
virtual ~TestingIsolator() {}
virtual void initialize(
http://git-wip-us.apache.org/repos/asf/incubator-mesos/blob/6b1b8208/src/tests/isolator_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/isolator_tests.cpp b/src/tests/isolator_tests.cpp
index 4715fb7..aae8b2f 100644
--- a/src/tests/isolator_tests.cpp
+++ b/src/tests/isolator_tests.cpp
@@ -32,9 +32,6 @@
#include "detector/detector.hpp"
-#include "master/allocator.hpp"
-#include "master/flags.hpp"
-#include "master/hierarchical_allocator_process.hpp"
#include "master/master.hpp"
#include "slave/flags.hpp"
@@ -44,7 +41,7 @@
#include "slave/process_isolator.hpp"
#include "slave/slave.hpp"
-#include "tests/utils.hpp"
+#include "tests/mesos.hpp"
using namespace mesos;
using namespace mesos::internal;
@@ -52,8 +49,6 @@ using namespace mesos::internal::tests;
using namespace process;
-using mesos::internal::master::Allocator;
-using mesos::internal::master::HierarchicalDRFAllocatorProcess;
using mesos::internal::master::Master;
#ifdef __linux__
@@ -80,6 +75,7 @@ typedef ::testing::Types<ProcessIsolator> IsolatorTypes;
TYPED_TEST_CASE(IsolatorTest, IsolatorTypes);
+
// TODO(bmahler): This test is disabled on OSX, until proc::children
// is implemented for OSX.
#ifdef __APPLE__
@@ -88,20 +84,18 @@ TYPED_TEST(IsolatorTest, DISABLED_Usage)
TYPED_TEST(IsolatorTest, Usage)
#endif
{
- HierarchicalDRFAllocatorProcess allocator;
- Allocator a(&allocator);
- Files files;
- Master m(&a, &files);
- PID<Master> master = process::spawn(&m);
+ Try<PID<Master> > master = this->StartMaster();
+ ASSERT_SOME(master);
TypeParam isolator;
- Slave s(this->slaveFlags, true, &isolator, &files);
- PID<Slave> slave = process::spawn(&s);
- BasicMasterDetector detector(master, slave, true);
+ slave::Flags flags = this->CreateSlaveFlags();
+
+ Try<PID<Slave> > slave = this->StartSlave(&isolator, flags);
+ ASSERT_SOME(slave);
MockScheduler sched;
- MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master);
+ MesosSchedulerDriver driver(&sched, DEFAULT_FRAMEWORK_INFO, master.get());
Future<FrameworkID> frameworkId;
EXPECT_CALL(sched, registered(&driver, _, _))
@@ -112,11 +106,6 @@ TYPED_TEST(IsolatorTest, Usage)
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
- Future<TaskStatus> status1, status2;
- EXPECT_CALL(sched, statusUpdate(&driver, _))
- .WillOnce(FutureArg<1>(&status1))
- .WillOnce(FutureArg<1>(&status2));
-
driver.start();
AWAIT_READY(frameworkId);
@@ -130,7 +119,7 @@ TYPED_TEST(IsolatorTest, Usage)
task.mutable_slave_id()->MergeFrom(offers.get()[0].slave_id());
task.mutable_resources()->MergeFrom(offers.get()[0].resources());
- const std::string& file = path::join(this->slaveFlags.work_dir, "ready");
+ const std::string& file = path::join(flags.work_dir, "ready");
// This task induces user/system load in a child process by
// running top in a child process for ten seconds.
@@ -148,11 +137,15 @@ TYPED_TEST(IsolatorTest, Usage)
vector<TaskInfo> tasks;
tasks.push_back(task);
+ Future<TaskStatus> status;
+ EXPECT_CALL(sched, statusUpdate(&driver, _))
+ .WillOnce(FutureArg<1>(&status));
+
driver.launchTasks(offers.get()[0].id(), tasks);
- AWAIT_READY(status1);
+ AWAIT_READY(status);
- EXPECT_EQ(TASK_RUNNING, status1.get().state());
+ EXPECT_EQ(TASK_RUNNING, status.get().state());
// Wait for the task to begin inducing cpu time.
while (!os::exists(file));
@@ -167,8 +160,12 @@ TYPED_TEST(IsolatorTest, Usage)
ResourceStatistics statistics;
Duration waited = Duration::zero();
do {
- const Future<ResourceStatistics>& usage =
- isolator.usage(frameworkId.get(), executorId);
+ Future<ResourceStatistics> usage =
+ process::dispatch(
+ (Isolator*) &isolator, // TODO(benh): Fix after reaper changes.
+ &Isolator::usage,
+ frameworkId.get(),
+ executorId);
AWAIT_READY(usage);
@@ -182,27 +179,26 @@ TYPED_TEST(IsolatorTest, Usage)
}
os::sleep(Milliseconds(100));
- waited = waited + Milliseconds(100);
+ waited += Milliseconds(100);
} while (waited < Seconds(10));
EXPECT_GE(statistics.memory_rss(), 1024u);
EXPECT_GE(statistics.cpu_user_time(), 0.125);
EXPECT_GE(statistics.cpu_system_time(), 0.125);
+ EXPECT_CALL(sched, statusUpdate(&driver, _))
+ .WillOnce(FutureArg<1>(&status));
+
driver.killTask(task.task_id());
- AWAIT_READY(status2);
+ AWAIT_READY(status);
// TODO(bmahler): The command executor is buggy in that it does not
// send TASK_KILLED for a non-zero exit code due to a kill.
- EXPECT_EQ(TASK_FAILED, status2.get().state());
+ EXPECT_EQ(TASK_FAILED, status.get().state());
driver.stop();
driver.join();
- process::terminate(slave);
- process::wait(slave);
-
- process::terminate(master);
- process::wait(master);
+ this->Shutdown(); // Must shutdown before 'isolator' gets deallocated.
}
http://git-wip-us.apache.org/repos/asf/incubator-mesos/blob/6b1b8208/src/tests/log_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/log_tests.cpp b/src/tests/log_tests.cpp
index 208e83e..dbd12b8 100644
--- a/src/tests/log_tests.cpp
+++ b/src/tests/log_tests.cpp
@@ -24,10 +24,12 @@
#include <process/clock.hpp>
#include <process/future.hpp>
#include <process/gmock.hpp>
+#include <process/gtest.hpp>
#include <process/pid.hpp>
#include <process/protobuf.hpp>
#include <process/timeout.hpp>
+#include <stout/gtest.hpp>
#include <stout/option.hpp>
#include <stout/os.hpp>
@@ -39,12 +41,9 @@
#include "messages/messages.hpp"
-#include "tests/utils.hpp"
-
using namespace mesos;
using namespace mesos::internal;
using namespace mesos::internal::log;
-using namespace mesos::internal::tests;
using process::Clock;
using process::Future;
http://git-wip-us.apache.org/repos/asf/incubator-mesos/blob/6b1b8208/src/tests/logging_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/logging_tests.cpp b/src/tests/logging_tests.cpp
index 57eae79..58e9b33 100644
--- a/src/tests/logging_tests.cpp
+++ b/src/tests/logging_tests.cpp
@@ -19,15 +19,14 @@
#include <gmock/gmock.h>
#include <process/future.hpp>
+#include <process/gtest.hpp>
#include <process/http.hpp>
#include <process/pid.hpp>
+#include <process/process.hpp>
#include "logging/logging.hpp"
-#include "tests/utils.hpp"
-
using namespace mesos::internal;
-using namespace mesos::internal::tests;
using process::http::BadRequest;
using process::http::OK;
http://git-wip-us.apache.org/repos/asf/incubator-mesos/blob/6b1b8208/src/tests/main.cpp
----------------------------------------------------------------------
diff --git a/src/tests/main.cpp b/src/tests/main.cpp
index b06c0d1..868bdd5 100644
--- a/src/tests/main.cpp
+++ b/src/tests/main.cpp
@@ -29,9 +29,10 @@
#include "logging/logging.hpp"
+#include "messages/messages.hpp" // For GOOGLE_PROTOBUF_VERIFY_VERSION.
+
#include "tests/environment.hpp"
#include "tests/flags.hpp"
-#include "tests/utils.hpp"
using namespace mesos::internal;
using namespace mesos::internal::tests;
http://git-wip-us.apache.org/repos/asf/incubator-mesos/blob/6b1b8208/src/tests/master_detector_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/master_detector_tests.cpp b/src/tests/master_detector_tests.cpp
index c9d6d20..57f4e3e 100644
--- a/src/tests/master_detector_tests.cpp
+++ b/src/tests/master_detector_tests.cpp
@@ -48,7 +48,8 @@
#include "slave/slave.hpp"
-#include "tests/utils.hpp"
+#include "tests/isolator.hpp"
+#include "tests/mesos.hpp"
#ifdef MESOS_HAS_JAVA
#include "tests/zookeeper.hpp"
#endif
@@ -75,16 +76,20 @@ using testing::AtMost;
using testing::Return;
-class MasterDetectorTest : public MesosClusterTest {};
+class MasterDetectorTest : public MesosTest {};
TEST_F(MasterDetectorTest, File)
{
- Try<PID<Master> > master = cluster.masters.start();
+ Try<PID<Master> > master = StartMaster();
ASSERT_SOME(master);
+ Files files;
TestingIsolator isolator;
- Slave s(cluster.slaves.flags, true, &isolator, &cluster.files);
+
+ slave::Flags flags = CreateSlaveFlags();
+
+ Slave s(flags, true, &isolator, &files);
PID<Slave> slave = process::spawn(&s);
// Write "master" to a file and use the "file://" mechanism to
@@ -92,7 +97,7 @@ TEST_F(MasterDetectorTest, File)
// detector for the master first.
BasicMasterDetector detector1(master.get(), vector<UPID>(), true);
- const string& path = path::join(cluster.slaves.flags.work_dir, "master");
+ const string& path = path::join(flags.work_dir, "master");
ASSERT_SOME(os::write(path, stringify(master.get())));
Try<MasterDetector*> detector =
@@ -119,7 +124,7 @@ TEST_F(MasterDetectorTest, File)
driver.stop();
driver.join();
- cluster.shutdown();
+ Shutdown();
process::terminate(slave);
process::wait(slave);