You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@mesos.apache.org by al...@apache.org on 2018/08/20 14:38:13 UTC
[mesos] 02/05: Added `LaunchNestedContainerSessionsInParallel` test.
This is an automated email from the ASF dual-hosted git repository.
alexr pushed a commit to branch 1.7.x
in repository https://gitbox.apache.org/repos/asf/mesos.git
commit f677c2f085c608b7d4322a3795c159981fea241e
Author: Andrei Budnik <ab...@mesosphere.com>
AuthorDate: Mon Aug 20 16:22:33 2018 +0200
Added `LaunchNestedContainerSessionsInParallel` test.
This patch adds a test which verifies that launching multiple
short-lived nested container sessions succeeds. This test
implicitly verifies that the agent correctly detects the `mnt`
namespace of a command executor's task. If the detection fails, the
containerizer launcher (aka `nanny`) process fails to enter the
`mnt` namespace, so it prints an error message to stderr for this
nested container.
This test is disabled until we fix MESOS-8545.
Review: https://reviews.apache.org/r/68256/
---
src/tests/api_tests.cpp | 149 ++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 149 insertions(+)
diff --git a/src/tests/api_tests.cpp b/src/tests/api_tests.cpp
index 9c9fa91..ee82350 100644
--- a/src/tests/api_tests.cpp
+++ b/src/tests/api_tests.cpp
@@ -6424,6 +6424,155 @@ TEST_P_TEMP_DISABLED_ON_WINDOWS(
}
+// This test launches multiple nested container sessions simultaneously for the
+// command executor. Each nested container prints a short message to stdout and
+// then terminates. The test verifies that the output of each nested container
+// session contains the expected message, i.e. no session's output is lost.
+//
+// TODO(abudnik): The test is flaky due to MESOS-8545 and hence disabled.
+TEST_P_TEMP_DISABLED_ON_WINDOWS(
+  AgentAPITest,
+  DISABLED_ROOT_CGROUPS_LaunchNestedContainerSessionsInParallel)
+{
+  const int numContainers = 10;
+
+  Try<Owned<cluster::Master>> master = StartMaster();
+  ASSERT_SOME(master);
+
+  slave::Flags flags = CreateSlaveFlags();
+
+  flags.isolation = "cgroups/all,filesystem/linux,namespaces/pid";
+
+  Fetcher fetcher(flags);
+
+  Try<MesosContainerizer*> _containerizer =
+    MesosContainerizer::create(flags, false, &fetcher);
+
+  ASSERT_SOME(_containerizer);
+
+  Owned<slave::Containerizer> containerizer(_containerizer.get());
+
+  Owned<MasterDetector> detector = master.get()->createDetector();
+
+  Try<Owned<cluster::Slave>> slave =
+    StartSlave(detector.get(), containerizer.get(), flags);
+
+  ASSERT_SOME(slave);
+
+  MockScheduler sched;
+  MesosSchedulerDriver driver(
+      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
+
+  EXPECT_CALL(sched, registered(&driver, _, _));
+
+  Future<vector<Offer>> offers;
+  EXPECT_CALL(sched, resourceOffers(_, _))
+    .WillOnce(FutureArg<1>(&offers));
+
+  driver.start();
+
+  AWAIT_READY(offers);
+  ASSERT_FALSE(offers->empty());
+
+  Future<TaskStatus> status;
+  EXPECT_CALL(sched, statusUpdate(_, _))
+    .WillOnce(FutureArg<1>(&status))
+    .WillRepeatedly(Return());
+
+  TaskInfo task = createTask(offers.get()[0], "sleep 1000");
+
+  driver.launchTasks(offers.get()[0].id(), {task});
+
+  AWAIT_READY(status);
+  ASSERT_EQ(TASK_STARTING, status->state());
+
+  Future<hashset<ContainerID>> containerIds = containerizer->containers();
+  AWAIT_READY(containerIds);
+  ASSERT_EQ(1u, containerIds->size());
+
+  v1::ContainerID containerId;
+  containerId.mutable_parent()->set_value(containerIds->begin()->value());
+
+  ContentType messageContentType = GetParam();
+
+  // Launch multiple nested container sessions, each running `echo echo`,
+  // which writes "echo\n" to stdout and then exits immediately.
+  vector<Option<http::Pipe::Reader>> outputs;
+
+  for (int i = 0; i < numContainers; i++) {
+    containerId.set_value(id::UUID::random().toString());
+
+    {
+      v1::agent::Call call;
+      call.set_type(v1::agent::Call::LAUNCH_NESTED_CONTAINER_SESSION);
+
+      call.mutable_launch_nested_container_session()->mutable_container_id()
+        ->CopyFrom(containerId);
+
+      call.mutable_launch_nested_container_session()->mutable_command()
+        ->CopyFrom(v1::createCommandInfo("echo echo"));
+
+      http::Headers headers = createBasicAuthHeaders(DEFAULT_CREDENTIAL);
+      headers["Accept"] = stringify(ContentType::RECORDIO);
+      headers[MESSAGE_ACCEPT] = stringify(messageContentType);
+
+      auto response = http::streaming::post(
+          slave.get()->pid,
+          "api/v1",
+          headers,
+          serialize(messageContentType, call),
+          stringify(messageContentType));
+
+      AWAIT_EXPECT_RESPONSE_STATUS_EQ(http::OK().status, response);
+    }
+
+    {
+      v1::agent::Call call;
+      call.set_type(v1::agent::Call::ATTACH_CONTAINER_OUTPUT);
+
+      call.mutable_attach_container_output()->mutable_container_id()
+        ->CopyFrom(containerId);
+
+      http::Headers headers = createBasicAuthHeaders(DEFAULT_CREDENTIAL);
+      headers["Accept"] = stringify(messageContentType);
+
+      auto response = http::streaming::post(
+          slave.get()->pid,
+          "api/v1",
+          headers,
+          serialize(messageContentType, call),
+          stringify(messageContentType));
+
+      AWAIT_EXPECT_RESPONSE_STATUS_EQ(http::OK().status, response);
+      ASSERT_SOME(response->reader);
+
+      outputs.emplace_back(response->reader.get());
+    }
+  }
+
+  foreach (Option<http::Pipe::Reader>& output, outputs) {
+    // Drain this session's stdout/stderr streamed by ATTACH_CONTAINER_OUTPUT.
+    ASSERT_SOME(output);
+
+    Future<tuple<string, string>> received =
+      getProcessIOData(messageContentType, output.get());
+
+    AWAIT_READY(received);
+
+    string stdoutReceived;
+    string stderrReceived;
+
+    tie(stdoutReceived, stderrReceived) = received.get();
+
+    // Expect "echo\n" on stdout and an empty stderr for every session.
+    ASSERT_EQ("echo\n", stdoutReceived + stderrReceived);
+  }
+
+  driver.stop();
+  driver.join();
+}
+
+
// This test verifies that attaching to the output of a container fails if the
// containerizer doesn't support the operation.
TEST_P(AgentAPITest, AttachContainerOutputFailure)