You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@mesos.apache.org by gr...@apache.org on 2017/07/19 00:19:53 UTC
[3/3] mesos git commit: Added test cases for /slaves, /containers,
/frameworks endpoints.
Added test cases for /slaves, /containers, /frameworks endpoints.
Added query parameter test cases for '/slaves' and '/frameworks' on
the master, and '/containers' on the agent.
Review: https://reviews.apache.org/r/60847/
Project: http://git-wip-us.apache.org/repos/asf/mesos/repo
Commit: http://git-wip-us.apache.org/repos/asf/mesos/commit/916a5c9f
Tree: http://git-wip-us.apache.org/repos/asf/mesos/tree/916a5c9f
Diff: http://git-wip-us.apache.org/repos/asf/mesos/diff/916a5c9f
Branch: refs/heads/master
Commit: 916a5c9fdbc7619b7c9356c21afb83e043feef88
Parents: 8363449
Author: Quinn Leng <qu...@gmail.com>
Authored: Tue Jul 18 17:07:02 2017 -0700
Committer: Greg Mann <gr...@gmail.com>
Committed: Tue Jul 18 17:11:34 2017 -0700
----------------------------------------------------------------------
src/tests/master_tests.cpp | 317 +++++++++++++++++++++++++++++++++++++---
src/tests/slave_tests.cpp | 306 ++++++++++++++++++++++++++++----------
2 files changed, 526 insertions(+), 97 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/mesos/blob/916a5c9f/src/tests/master_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/master_tests.cpp b/src/tests/master_tests.cpp
index 6e6461c..5742860 100644
--- a/src/tests/master_tests.cpp
+++ b/src/tests/master_tests.cpp
@@ -2505,6 +2505,125 @@ TEST_F(MasterTest, SlavesEndpointTwoSlaves)
}
+// Ensures that the '/slaves' endpoint returns the correct slave and it's in
+// the correct field of the response when provided with a slave ID query
+// parameter.
+TEST_F(MasterTest, SlavesEndpointQuerySlave)
+{
+ master::Flags masterFlags = CreateMasterFlags();
+
+ // Ensure that master can recover from the same work_dir.
+ masterFlags.registry = "replicated_log";
+ Try<Owned<cluster::Master>> master = StartMaster(masterFlags);
+ ASSERT_SOME(master);
+
+ Owned<MasterDetector> detector = master.get()->createDetector();
+
+ // Start two agents.
+
+ Future<SlaveRegisteredMessage> slave1RegisteredMessage =
+ FUTURE_PROTOBUF(SlaveRegisteredMessage(), master.get()->pid, _);
+
+ Try<Owned<cluster::Slave>> slave1 = StartSlave(detector.get());
+ ASSERT_SOME(slave1);
+
+ AWAIT_READY(slave1RegisteredMessage);
+
+ Future<SlaveRegisteredMessage> slave2RegisteredMessage =
+ FUTURE_PROTOBUF(
+ SlaveRegisteredMessage(),
+ master.get()->pid,
+ Not(slave1.get()->pid));
+
+ Try<Owned<cluster::Slave>> slave2 = StartSlave(detector.get());
+ ASSERT_SOME(slave2);
+
+ AWAIT_READY(slave2RegisteredMessage);
+
+ // Query the information about the first agent.
+ {
+ string slaveId = slave1RegisteredMessage->slave_id().value();
+
+ Future<Response> response = process::http::get(
+ master.get()->pid,
+ "slaves?slave_id=" + slaveId,
+ None(),
+ createBasicAuthHeaders(DEFAULT_CREDENTIAL));
+
+ AWAIT_EXPECT_RESPONSE_STATUS_EQ(OK().status, response);
+ AWAIT_EXPECT_RESPONSE_HEADER_EQ(APPLICATION_JSON, "Content-Type", response);
+
+ const Try<JSON::Value> value = JSON::parse<JSON::Value>(response->body);
+
+ ASSERT_SOME(value);
+
+ Try<JSON::Object> object = value->as<JSON::Object>();
+
+ Result<JSON::Array> array = object->find<JSON::Array>("slaves");
+ ASSERT_SOME(array);
+ EXPECT_EQ(1u, array->values.size());
+
+ Try<JSON::Value> expected = JSON::parse(
+ "{"
+ "\"slaves\":"
+ "[{"
+ "\"id\":\"" + slaveId + "\""
+ "}]"
+ "}");
+
+ ASSERT_SOME(expected);
+
+ EXPECT_TRUE(value->contains(expected.get()));
+ }
+
+ // Stop agents while the master is down.
+ master->reset();
+ slave1.get()->terminate();
+ slave1->reset();
+ slave2.get()->terminate();
+ slave2->reset();
+
+ // Restart the master, now two agents should be in the 'recovered' state.
+ master = StartMaster(masterFlags);
+ ASSERT_SOME(master);
+
+ // Check if the second agent is in the 'recovered_slaves' field.
+ {
+ string slaveId = slave2RegisteredMessage->slave_id().value();
+
+ Future<Response> response = process::http::get(
+ master.get()->pid,
+ "slaves?slave_id=" + slaveId,
+ None(),
+ createBasicAuthHeaders(DEFAULT_CREDENTIAL));
+
+ AWAIT_EXPECT_RESPONSE_STATUS_EQ(OK().status, response);
+ AWAIT_EXPECT_RESPONSE_HEADER_EQ(APPLICATION_JSON, "Content-Type", response);
+
+ const Try<JSON::Value> value = JSON::parse<JSON::Value>(response->body);
+
+ ASSERT_SOME(value);
+ Try<JSON::Object> object = value->as<JSON::Object>();
+
+ Result<JSON::Array> array = object->find<JSON::Array>("recovered_slaves");
+ ASSERT_SOME(array);
+ EXPECT_EQ(1u, array->values.size());
+
+ Try<JSON::Value> expected = JSON::parse(
+ "{"
+ "\"recovered_slaves\":"
+ "[{"
+ "\"id\":\"" + slaveId + "\""
+ "}]"
+ "}");
+
+ ASSERT_SOME(expected);
+
+ EXPECT_TRUE(value->contains(expected.get()));
+ }
+}
+
+
// This test ensures that when a slave is recovered from the registry
// but does not re-register with the master, it is marked unreachable
// in the registry, the framework is informed that the slave is lost,
@@ -5848,43 +5967,195 @@ TEST_F(MasterTest, FrameworksEndpointWithoutFrameworks)
}
-TEST_F(MasterTest, FrameworksEndpointOneFramework)
+// Ensures that the '/master/frameworks' endpoint returns the correct framework
+// when provided with a framework ID query parameter.
+TEST_F(MasterTest, FrameworksEndpointMultipleFrameworks)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
- FrameworkInfo framework = DEFAULT_FRAMEWORK_INFO;
+ // Start a slave to receive shutdown message when framework is terminated.
+ Owned<MasterDetector> detector = master.get()->createDetector();
+ Try<Owned<cluster::Slave>> slave = StartSlave(detector.get());
+ ASSERT_SOME(slave);
- MockScheduler sched;
- MesosSchedulerDriver driver(
- &sched, framework, master.get()->pid, DEFAULT_CREDENTIAL);
+ Future<RegisterSlaveMessage> registerSlaveMessage =
+ FUTURE_PROTOBUF(RegisterSlaveMessage(), _, _);
- Future<Nothing> registered;
- EXPECT_CALL(sched, registered(&driver, _, _))
+ .WillOnce(FutureSatisfy(&registered));
+ AWAIT_READY(registerSlaveMessage);
- driver.start();
+ // Start two frameworks.
- AWAIT_READY(registered);
+ Future<FrameworkID> frameworkId1;
+ Future<FrameworkID> frameworkId2;
- Future<Response> response = process::http::get(
+ MockScheduler sched1;
+ MesosSchedulerDriver driver1(
+ &sched1,
+ DEFAULT_FRAMEWORK_INFO,
master.get()->pid,
- "frameworks",
- None(),
- createBasicAuthHeaders(DEFAULT_CREDENTIAL));
+ DEFAULT_CREDENTIAL);
- AWAIT_EXPECT_RESPONSE_STATUS_EQ(OK().status, response);
- AWAIT_EXPECT_RESPONSE_HEADER_EQ(APPLICATION_JSON, "Content-Type", response);
+ EXPECT_CALL(sched1, registered(_, _, _))
+ .WillOnce(FutureArg<1>(&frameworkId1));
- Try<JSON::Object> parse = JSON::parse<JSON::Object>(response->body);
- ASSERT_SOME(parse);
+ // Ignore any incoming resource offers to the scheduler.
+ EXPECT_CALL(sched1, resourceOffers(_, _))
+ .WillRepeatedly(Return());
- Result<JSON::Array> array = parse->find<JSON::Array>("frameworks");
- ASSERT_SOME(array);
- EXPECT_EQ(1u, array->values.size());
+ driver1.start();
- driver.stop();
- driver.join();
+ MockScheduler sched2;
+ MesosSchedulerDriver driver2(
+ &sched2,
+ DEFAULT_FRAMEWORK_INFO,
+ master.get()->pid,
+ DEFAULT_CREDENTIAL);
+
+ EXPECT_CALL(sched2, registered(_, _, _))
+ .WillOnce(FutureArg<1>(&frameworkId2));
+
+ // Ignore any incoming resource offers to the scheduler.
+ EXPECT_CALL(sched2, resourceOffers(_, _))
+ .WillRepeatedly(Return());
+
+ driver2.start();
+
+ AWAIT_READY(frameworkId1);
+ AWAIT_READY(frameworkId2);
+
+ // Request with no query parameter.
+ {
+ Future<Response> response = process::http::get(
+ master.get()->pid,
+ "frameworks",
+ None(),
+ createBasicAuthHeaders(DEFAULT_CREDENTIAL));
+
+ AWAIT_EXPECT_RESPONSE_STATUS_EQ(OK().status, response);
+ AWAIT_EXPECT_RESPONSE_HEADER_EQ(APPLICATION_JSON, "Content-Type", response);
+
+ Try<JSON::Value> value = JSON::parse<JSON::Value>(response->body);
+ ASSERT_SOME(value);
+
+ JSON::Object object = value->as<JSON::Object>();
+
+ Result<JSON::Array> array = object.find<JSON::Array>("frameworks");
+ ASSERT_SOME(array);
+ EXPECT_EQ(2u, array->values.size());
+
+ Try<JSON::Value> frameworkJson1 = JSON::parse(
+ "{"
+ "\"id\":\"" + frameworkId1->value() + "\","
+ "\"name\":\"default\""
+ "}");
+
+ Try<JSON::Value> frameworkJson2 = JSON::parse(
+ "{"
+ "\"id\":\"" + frameworkId2->value() + "\","
+ "\"name\":\"default\""
+ "}");
+
+ ASSERT_SOME(frameworkJson1);
+ ASSERT_SOME(frameworkJson2);
+
+ // Since frameworks are stored in a hashmap, there is no strict guarantee of
+ // their ordering when listed. For this reason, we test both possibilities.
+ if (array->values[0].contains(frameworkJson1.get())) {
+ ASSERT_TRUE(array->values[1].contains(frameworkJson2.get()));
+ } else {
+ ASSERT_TRUE(array->values[0].contains(frameworkJson2.get()));
+ ASSERT_TRUE(array->values[1].contains(frameworkJson1.get()));
+ }
+ }
+
+ // Query the first framework.
+ {
+ Future<Response> response = process::http::get(
+ master.get()->pid,
+ "frameworks?framework_id=" + frameworkId1->value(),
+ None(),
+ createBasicAuthHeaders(DEFAULT_CREDENTIAL));
+
+ AWAIT_EXPECT_RESPONSE_STATUS_EQ(OK().status, response);
+ AWAIT_EXPECT_RESPONSE_HEADER_EQ(APPLICATION_JSON, "Content-Type", response);
+
+ Try<JSON::Value> value = JSON::parse<JSON::Value>(response->body);
+ ASSERT_SOME(value);
+
+ JSON::Object object = value->as<JSON::Object>();
+
+ Result<JSON::Array> array = object.find<JSON::Array>("frameworks");
+ ASSERT_SOME(array);
+ EXPECT_EQ(1u, array->values.size());
+
+ Try<JSON::Value> expected = JSON::parse(
+ "{"
+ "\"frameworks\":"
+ "[{"
+ "\"id\":\"" + frameworkId1->value() + "\","
+ "\"name\":\"default\""
+ "}]"
+ "}");
+
+ ASSERT_SOME(expected);
+
+ EXPECT_TRUE(value->contains(expected.get()));
+ }
+
+ // Expect a teardown call and a shutdown message to ensure that the
+ // master has marked the framework as completed.
+ Future<mesos::scheduler::Call> teardownCall = FUTURE_CALL(
+ mesos::scheduler::Call(), mesos::scheduler::Call::TEARDOWN, _, _);
+ Future<ShutdownFrameworkMessage> shutdownFrameworkMessage =
+ FUTURE_PROTOBUF(ShutdownFrameworkMessage(), _, _);
+
+ // Complete the first framework. As a result, it will appear in the response's
+ // 'completed_frameworks' field.
+ driver1.stop();
+ driver1.join();
+
+ AWAIT_READY(teardownCall);
+
+ AWAIT_READY(shutdownFrameworkMessage);
+
+ // Query the first framework.
+ {
+ Future<Response> response = process::http::get(
+ master.get()->pid,
+ "frameworks?framework_id=" + frameworkId1->value(),
+ None(),
+ createBasicAuthHeaders(DEFAULT_CREDENTIAL));
+
+ AWAIT_EXPECT_RESPONSE_STATUS_EQ(OK().status, response);
+ AWAIT_EXPECT_RESPONSE_HEADER_EQ(APPLICATION_JSON, "Content-Type", response);
+
+ Try<JSON::Value> value = JSON::parse<JSON::Value>(response->body);
+ ASSERT_SOME(value);
+
+ JSON::Object object = value->as<JSON::Object>();
+
+ Result<JSON::Array> array =
+ object.find<JSON::Array>("completed_frameworks");
+ ASSERT_SOME(array);
+ EXPECT_EQ(1u, array->values.size());
+
+ Try<JSON::Value> expected = JSON::parse(
+ "{"
+ "\"completed_frameworks\":"
+ "[{"
+ "\"id\":\"" + frameworkId1->value() + "\","
+ "\"name\":\"default\""
+ "}]"
+ "}");
+
+ ASSERT_SOME(expected);
+
+ EXPECT_TRUE(value->contains(expected.get()));
+ }
+
+ driver2.stop();
+ driver2.join();
}
http://git-wip-us.apache.org/repos/asf/mesos/blob/916a5c9f/src/tests/slave_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/slave_tests.cpp b/src/tests/slave_tests.cpp
index 053a14d..e1cc96d 100644
--- a/src/tests/slave_tests.cpp
+++ b/src/tests/slave_tests.cpp
@@ -2358,30 +2358,39 @@ TEST_F(SlaveTest, ContainersEndpointNoExecutor)
// This is an end-to-end test that verifies that the slave returns the
-// correct container status and resource statistics based on the
-// currently running executors, and the values returned by the
-// '/containers' endpoint are as expected.
+// correct container status and resource statistics based on the currently
+// running executors, and ensures that '/containers' endpoint returns the
+// correct container when it is provided a container ID query parameter.
TEST_F(SlaveTest, ContainersEndpoint)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
- MockExecutor exec(DEFAULT_EXECUTOR_ID);
- TestContainerizer containerizer(&exec);
- StandaloneMasterDetector detector(master.get()->pid);
+ // Create two executors so that we can launch tasks in two separate
+ // containers.
+ ExecutorInfo executor1 = createExecutorInfo("executor-1", "exit 1");
+ ExecutorInfo executor2 = createExecutorInfo("executor-2", "exit 1");
- MockSlave slave(CreateSlaveFlags(), &detector, &containerizer);
- spawn(slave);
+ MockExecutor exec1(executor1.executor_id());
+ MockExecutor exec2(executor2.executor_id());
+
+ hashmap<ExecutorID, Executor*> execs;
+ execs[executor1.executor_id()] = &exec1;
+ execs[executor2.executor_id()] = &exec2;
+
+ TestContainerizer containerizer(execs);
+
+ Owned<MasterDetector> detector = master.get()->createDetector();
+ Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), &containerizer);
+ ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
- EXPECT_CALL(sched, registered(_, _, _));
- EXPECT_CALL(exec, registered(_, _, _, _));
+ EXPECT_CALL(sched, registered(&driver, _, _));
Future<vector<Offer>> offers;
-
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
@@ -2389,98 +2398,247 @@ TEST_F(SlaveTest, ContainersEndpoint)
driver.start();
AWAIT_READY(offers);
- EXPECT_NE(0u, offers->size());
+ ASSERT_NE(0u, offers->size());
- const Offer& offer = offers.get()[0];
+ // Launch two tasks, each under a different executor.
+ vector<TaskInfo> tasks;
- TaskInfo task = createTask(
- offer.slave_id(),
- Resources::parse("cpus:0.1;mem:32").get(),
- SLEEP_COMMAND(1000),
- exec.id);
+ TaskInfo task1;
+ {
+ task1.set_name("");
+ task1.mutable_task_id()->set_value("1");
+ task1.mutable_slave_id()->MergeFrom(offers->front().slave_id());
+ task1.mutable_resources()->MergeFrom(
+ Resources::parse("cpus:1;mem:512").get());
+ task1.mutable_executor()->MergeFrom(executor1);
+ tasks.push_back(task1);
+ }
- EXPECT_CALL(exec, launchTask(_, _))
- .WillOnce(SendStatusUpdateFromTask(TASK_RUNNING));
+ TaskInfo task2;
+ {
+ task2.set_name("");
+ task2.mutable_task_id()->set_value("2");
+ task2.mutable_slave_id()->MergeFrom(offers->front().slave_id());
+ task2.mutable_resources()->MergeFrom(
+ Resources::parse("cpus:1;mem:512").get());
+ task2.mutable_executor()->MergeFrom(executor2);
+ tasks.push_back(task2);
+ }
- Future<TaskStatus> status;
+ EXPECT_CALL(exec1, registered(_, _, _, _));
+
+ Future<TaskInfo> launchedTask1;
+ EXPECT_CALL(exec1, launchTask(_, _))
+ .WillOnce(DoAll(SendStatusUpdateFromTask(TASK_RUNNING),
+ FutureArg<1>(&launchedTask1)));
+
+ EXPECT_CALL(exec2, registered(_, _, _, _));
+
+ Future<TaskInfo> launchedTask2;
+ EXPECT_CALL(exec2, launchTask(_, _))
+ .WillOnce(DoAll(SendStatusUpdateFromTask(TASK_RUNNING),
+ FutureArg<1>(&launchedTask2)));
+
+ Future<TaskStatus> status1, status2;
EXPECT_CALL(sched, statusUpdate(&driver, _))
- .WillOnce(FutureArg<1>(&status));
+ .WillOnce(FutureArg<1>(&status1))
+ .WillOnce(FutureArg<1>(&status2));
- driver.launchTasks(offer.id(), {task});
+ driver.launchTasks(offers->front().id(), tasks);
- AWAIT_READY(status);
- EXPECT_EQ(TASK_RUNNING, status->state());
+ AWAIT_READY(launchedTask1);
+ EXPECT_EQ(task1.task_id(), launchedTask1->task_id());
+
+ AWAIT_READY(launchedTask2);
+ EXPECT_EQ(task2.task_id(), launchedTask2->task_id());
+
+ AWAIT_READY(status1);
+ EXPECT_EQ(TASK_RUNNING, status1->state());
+
+ AWAIT_READY(status2);
+ EXPECT_EQ(TASK_RUNNING, status2->state());
+
+ // Prepare container statistics.
+ ResourceStatistics statistics1;
+ statistics1.set_mem_limit_bytes(2048);
- ResourceStatistics statistics;
- statistics.set_mem_limit_bytes(2048);
+ ResourceStatistics statistics2;
+ statistics2.set_mem_limit_bytes(2048);
+ // Get the container ID and return simulated statistics.
+ Future<ContainerID> containerId1;
+ Future<ContainerID> containerId2;
+
+ // Will be called twice during the first request. We extract the assigned
+ // container IDs for use when requesting information on a single container.
EXPECT_CALL(containerizer, usage(_))
- .WillOnce(Return(statistics));
+ .WillOnce(DoAll(FutureArg<0>(&containerId1), Return(statistics1)))
+ .WillOnce(DoAll(FutureArg<0>(&containerId2), Return(statistics2)));
- ContainerStatus containerStatus;
+ // Construct the container statuses to be returned. Note that
+ // these container IDs will be different than the actual container
+ // IDs assigned by the agent, but creating them here allows us to
+ // easily confirm the output of '/containers'.
+ ContainerStatus containerStatus1;
+ ContainerStatus containerStatus2;
ContainerID parent;
- ContainerID child;
parent.set_value("parent");
- child.set_value("child");
- child.mutable_parent()->CopyFrom(parent);
- containerStatus.mutable_container_id()->CopyFrom(child);
- CgroupInfo* cgroupInfo = containerStatus.mutable_cgroup_info();
- CgroupInfo::NetCls* netCls = cgroupInfo->mutable_net_cls();
- netCls->set_classid(42);
+ {
+ ContainerID child;
+ child.set_value("child1");
+ child.mutable_parent()->CopyFrom(parent);
+ containerStatus1.mutable_container_id()->CopyFrom(child);
+
+ CgroupInfo* cgroupInfo = containerStatus1.mutable_cgroup_info();
+ CgroupInfo::NetCls* netCls = cgroupInfo->mutable_net_cls();
+ netCls->set_classid(42);
+
+ NetworkInfo* networkInfo = containerStatus1.add_network_infos();
+ NetworkInfo::IPAddress* ipAddr = networkInfo->add_ip_addresses();
+ ipAddr->set_ip_address("192.168.1.20");
+ }
- NetworkInfo* networkInfo = containerStatus.add_network_infos();
- NetworkInfo::IPAddress* ipAddr = networkInfo->add_ip_addresses();
- ipAddr->set_ip_address("192.168.1.20");
+ {
+ ContainerID child;
+ child.set_value("child2");
+ child.mutable_parent()->CopyFrom(parent);
+ containerStatus2.mutable_container_id()->CopyFrom(child);
+
+ CgroupInfo* cgroupInfo = containerStatus2.mutable_cgroup_info();
+ CgroupInfo::NetCls* netCls = cgroupInfo->mutable_net_cls();
+ netCls->set_classid(42);
+
+ NetworkInfo* networkInfo = containerStatus2.add_network_infos();
+ NetworkInfo::IPAddress* ipAddr = networkInfo->add_ip_addresses();
+ ipAddr->set_ip_address("192.168.1.21");
+ }
+ // Will be called twice during the first request.
EXPECT_CALL(containerizer, status(_))
- .WillOnce(Return(containerStatus));
+ .WillOnce(Return(containerStatus1))
+ .WillOnce(Return(containerStatus2));
- Future<Response> response = process::http::get(
- slave.self(),
- "containers",
- None(),
- createBasicAuthHeaders(DEFAULT_CREDENTIAL));
+ // Request information about all containers.
+ {
+ Future<Response> response = process::http::get(
+ slave.get()->pid,
+ "containers",
+ None(),
+ createBasicAuthHeaders(DEFAULT_CREDENTIAL));
- AWAIT_READY(response);
- AWAIT_EXPECT_RESPONSE_STATUS_EQ(OK().status, response);
- AWAIT_EXPECT_RESPONSE_HEADER_EQ(APPLICATION_JSON, "Content-Type", response);
+ Try<JSON::Value> value = JSON::parse<JSON::Value>(response->body);
+ ASSERT_SOME(value);
+
+ JSON::Array array = value->as<JSON::Array>();
+
+ EXPECT_TRUE(array.values.size() == 2);
+
+ Try<JSON::Value> containerJson1 = JSON::parse(
+ "{"
+ "\"executor_name\":\"\","
+ "\"source\":\"\","
+ "\"statistics\":{"
+ "\"mem_limit_bytes\":2048"
+ "},"
+ "\"status\":{"
+ "\"container_id\":{"
+ "\"parent\":{\"value\":\"parent\"},"
+ "\"value\":\"child1\""
+ "},"
+ "\"cgroup_info\":{\"net_cls\":{\"classid\":42}},"
+ "\"network_infos\":[{"
+ "\"ip_addresses\":[{\"ip_address\":\"192.168.1.20\"}]"
+ "}]"
+ "}"
+ "}");
+
+ Try<JSON::Value> containerJson2 = JSON::parse(
+ "{"
+ "\"executor_name\":\"\","
+ "\"source\":\"\","
+ "\"statistics\":{"
+ "\"mem_limit_bytes\":2048"
+ "},"
+ "\"status\":{"
+ "\"container_id\":{"
+ "\"parent\":{\"value\":\"parent\"},"
+ "\"value\":\"child2\""
+ "},"
+ "\"cgroup_info\":{\"net_cls\":{\"classid\":42}},"
+ "\"network_infos\":[{"
+ "\"ip_addresses\":[{\"ip_address\":\"192.168.1.21\"}]"
+ "}]"
+ "}"
+ "}");
+
+ // Since containers are stored in a hashmap, there is no strict guarantee of
+ // their ordering when listed. For this reason, we test both possibilities.
+ if (array.values[0].contains(containerJson1.get())) {
+ ASSERT_TRUE(array.values[1].contains(containerJson2.get()));
+ } else {
+ ASSERT_TRUE(array.values[0].contains(containerJson2.get()));
+ ASSERT_TRUE(array.values[1].contains(containerJson1.get()));
+ }
+ }
- Try<JSON::Value> value = JSON::parse(response->body);
- ASSERT_SOME(value);
+ AWAIT_READY(containerId1);
+ AWAIT_READY(containerId2);
- Try<JSON::Value> expected = JSON::parse(
- "[{"
- "\"executor_id\":\"default\","
- "\"executor_name\":\"\","
- "\"source\":\"\","
- "\"statistics\":{"
- "\"mem_limit_bytes\":2048"
- "},"
- "\"status\":{"
+ // Will be called once during the second request.
+ EXPECT_CALL(containerizer, usage(_))
+ .WillOnce(Return(statistics1));
+
+ // Will be called once during the second request.
+ EXPECT_CALL(containerizer, status(_))
+ .WillOnce(Return(containerStatus1));
+
+ {
+ Future<Response> response = process::http::get(
+ slave.get()->pid,
+ "containers?container_id=" + containerId1->value(),
+ None(),
+ createBasicAuthHeaders(DEFAULT_CREDENTIAL));
+
+ Try<JSON::Value> value = JSON::parse<JSON::Value>(response->body);
+ ASSERT_SOME(value);
+
+ JSON::Array array = value->as<JSON::Array>();
+
+ EXPECT_TRUE(array.values.size() == 1);
+
+ Try<JSON::Value> expected = JSON::parse(
+ "[{"
+ "\"container_id\":\"" + containerId1->value() + "\","
+ "\"executor_name\":\"\","
+ "\"source\":\"\","
+ "\"statistics\":{"
+ "\"mem_limit_bytes\":2048"
+ "},"
+ "\"status\":{"
"\"container_id\":{"
"\"parent\":{\"value\":\"parent\"},"
- "\"value\":\"child\""
- "},"
- "\"cgroup_info\":{\"net_cls\":{\"classid\":42}},"
- "\"network_infos\":[{"
- "\"ip_addresses\":[{\"ip_address\":\"192.168.1.20\"}]"
- "}]"
- "}"
- "}]");
-
- ASSERT_SOME(expected);
- EXPECT_TRUE(value->contains(expected.get()));
+ "\"value\":\"child1\""
+ "},"
+ "\"cgroup_info\":{\"net_cls\":{\"classid\":42}},"
+ "\"network_infos\":[{"
+ "\"ip_addresses\":[{\"ip_address\":\"192.168.1.20\"}]"
+ "}]"
+ "}"
+ "}]");
+
+ ASSERT_SOME(expected);
+ EXPECT_TRUE(value->contains(expected.get()));
+ }
- EXPECT_CALL(exec, shutdown(_))
+ EXPECT_CALL(exec1, shutdown(_))
+ .Times(AtMost(1));
+ EXPECT_CALL(exec2, shutdown(_))
.Times(AtMost(1));
driver.stop();
driver.join();
-
- terminate(slave);
- wait(slave);
}