Posted to commits@mesos.apache.org by vi...@apache.org on 2016/04/25 23:32:31 UTC

[38/50] mesos git commit: Added unit test for framework/task history flags.

Added unit test for framework/task history flags.

This commit adds tests to verify that the master's max_completed_frameworks
and max_completed_tasks_per_framework flags work properly. Specifically, we
verify that the proper amount of history is maintained both when these flags
are set to 0 and when they are set to positive values less than or equal to
the total number of frameworks and tasks per framework actually launched.
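
The behavior exercised by these tests is a bounded completed-history: once
the configured limit is reached, the oldest completed entry is evicted. The
standalone sketch below is not part of this commit and is not the master's
actual implementation; it only illustrates the intended semantics of such a
limit, with a std::deque standing in for whatever structure the master uses.

    #include <cassert>
    #include <deque>
    #include <string>

    // Keep at most `limit` completed entries, evicting the oldest first.
    // A limit of 0 means no history is retained at all.
    void addCompleted(std::deque<std::string>& history,
                      const std::string& id,
                      size_t limit)
    {
      if (limit == 0) {
        return;
      }
      if (history.size() == limit) {
        history.pop_front();
      }
      history.push_back(id);
    }

    int main()
    {
      std::deque<std::string> history;

      // With a limit of 1, completing two frameworks leaves only the
      // most recent one in the history, mirroring what the tests below
      // assert against the master's /state endpoint.
      addCompleted(history, "framework-0", 1);
      addCompleted(history, "framework-1", 1);

      assert(history.size() == 1);
      assert(history.front() == "framework-1");

      return 0;
    }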

Review: https://reviews.apache.org/r/42212/


Project: http://git-wip-us.apache.org/repos/asf/mesos/repo
Commit: http://git-wip-us.apache.org/repos/asf/mesos/commit/38398e86
Tree: http://git-wip-us.apache.org/repos/asf/mesos/tree/38398e86
Diff: http://git-wip-us.apache.org/repos/asf/mesos/diff/38398e86

Branch: refs/heads/0.26.x
Commit: 38398e863ec13feb6dc9d09ea053be9e163fcf5b
Parents: 702c187
Author: Kevin Klues <kl...@gmail.com>
Authored: Thu Jan 14 22:57:11 2016 -0800
Committer: Michael Park <mp...@apache.org>
Committed: Fri Feb 26 20:59:07 2016 -0800

----------------------------------------------------------------------
 src/tests/master_tests.cpp | 168 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 168 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/mesos/blob/38398e86/src/tests/master_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/master_tests.cpp b/src/tests/master_tests.cpp
index be959cb..c3beb09 100644
--- a/src/tests/master_tests.cpp
+++ b/src/tests/master_tests.cpp
@@ -3950,6 +3950,174 @@ TEST_F(MasterTest, FrameworksEndpointOneFramework)
   Shutdown();
 }
 
+
+// Test the max_completed_frameworks flag for master.
+TEST_F(MasterTest, MaxCompletedFrameworksFlag)
+{
+  // In order to verify that the proper amount of history
+  // is maintained, we launch exactly 2 frameworks when
+  // 'max_completed_frameworks' is set to 0, 1, and 2. This
+  // covers the cases of maintaining no history, some history
+  // less than the total number of frameworks launched, and
+  // history equal to the total number of frameworks launched.
+  const size_t totalFrameworks = 2;
+  const size_t maxFrameworksArray[] = {0, 1, 2};
+
+  foreach (const size_t maxFrameworks, maxFrameworksArray) {
+    master::Flags masterFlags = CreateMasterFlags();
+    masterFlags.max_completed_frameworks = maxFrameworks;
+
+    Try<PID<Master>> master = StartMaster(masterFlags);
+    ASSERT_SOME(master);
+
+    Try<PID<Slave>> slave = StartSlave();
+    ASSERT_SOME(slave);
+
+    for (size_t i = 0; i < totalFrameworks; i++) {
+      MockScheduler sched;
+      MesosSchedulerDriver schedDriver(
+          &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);
+
+      // Ignore any incoming resource offers to the scheduler.
+      EXPECT_CALL(sched, resourceOffers(_, _))
+        .WillRepeatedly(Return());
+
+      Future<Nothing> schedRegistered;
+      EXPECT_CALL(sched, registered(_, _, _))
+        .WillOnce(FutureSatisfy(&schedRegistered));
+
+      schedDriver.start();
+
+      AWAIT_READY(schedRegistered);
+
+      schedDriver.stop();
+      schedDriver.join();
+    }
+
+    Future<process::http::Response> response =
+      process::http::get(master.get(), "state");
+    AWAIT_READY(response);
+
+    Try<JSON::Object> parse = JSON::parse<JSON::Object>(response->body);
+    ASSERT_SOME(parse);
+    JSON::Object state = parse.get();
+
+    // The number of completed frameworks should match the limit.
+    Result<JSON::Array> completedFrameworks =
+      state.values["completed_frameworks"].as<JSON::Array>();
+
+    EXPECT_EQ(maxFrameworks, completedFrameworks->values.size());
+
+    Stop(slave.get());
+    Stop(master.get());
+  }
+}
+
+
+// Test the max_completed_tasks_per_framework flag for master.
+TEST_F(MasterTest, MaxCompletedTasksPerFrameworkFlag)
+{
+  // We verify that the proper amount of history is maintained
+  // by launching a single framework with exactly 2 tasks. We
+  // do this when setting `max_completed_tasks_per_framework`
+  // to 0, 1, and 2. This covers the cases of maintaining no
+  // history, some history less than the total number of tasks
+  // launched, and history equal to the total number of tasks
+  // launched.
+  const size_t totalTasksPerFramework = 2;
+  const size_t maxTasksPerFrameworkArray[] = {0, 1, 2};
+
+  foreach (const size_t maxTasksPerFramework, maxTasksPerFrameworkArray) {
+    master::Flags masterFlags = CreateMasterFlags();
+    masterFlags.max_completed_tasks_per_framework = maxTasksPerFramework;
+
+    Try<PID<Master>> master = StartMaster(masterFlags);
+    ASSERT_SOME(master);
+
+    MockExecutor exec(DEFAULT_EXECUTOR_ID);
+    EXPECT_CALL(exec, registered(_, _, _, _));
+
+    Try<PID<Slave>> slave = StartSlave(&exec);
+    ASSERT_SOME(slave);
+
+    MockScheduler sched;
+    MesosSchedulerDriver schedDriver(
+        &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);
+
+    Future<Nothing> schedRegistered;
+    EXPECT_CALL(sched, registered(_, _, _))
+      .WillOnce(FutureSatisfy(&schedRegistered));
+
+    schedDriver.start();
+
+    AWAIT_READY(schedRegistered);
+
+    for (size_t i = 0; i < totalTasksPerFramework; i++) {
+      Future<vector<Offer>> offers;
+      EXPECT_CALL(sched, resourceOffers(&schedDriver, _))
+        .WillOnce(FutureArg<1>(&offers))
+        .WillRepeatedly(Return());
+
+      AWAIT_READY(offers);
+      EXPECT_NE(0u, offers->size());
+      Offer offer = offers.get()[0];
+
+      TaskInfo task;
+      task.set_name("");
+      task.mutable_task_id()->set_value(stringify(i));
+      task.mutable_slave_id()->MergeFrom(offer.slave_id());
+      task.mutable_resources()->MergeFrom(offer.resources());
+      task.mutable_executor()->MergeFrom(DEFAULT_EXECUTOR_INFO);
+
+      // Make sure the task passes through its TASK_FINISHED
+      // state properly. We force this state change through
+      // the launchTask() callback on our MockExecutor.
+      Future<TaskStatus> statusFinished;
+      EXPECT_CALL(exec, launchTask(_, _))
+        .WillOnce(SendStatusUpdateFromTask(TASK_FINISHED));
+      EXPECT_CALL(sched, statusUpdate(_, _))
+        .WillOnce(FutureArg<1>(&statusFinished));
+
+      schedDriver.launchTasks(offer.id(), {task});
+
+      AWAIT_READY(statusFinished);
+      EXPECT_EQ(TASK_FINISHED, statusFinished->state());
+    }
+
+    EXPECT_CALL(exec, shutdown(_))
+      .Times(AtMost(1));
+
+    schedDriver.stop();
+    schedDriver.join();
+
+    Future<process::http::Response> response =
+      process::http::get(master.get(), "state");
+    AWAIT_READY(response);
+
+    Try<JSON::Object> parse = JSON::parse<JSON::Object>(response->body);
+    ASSERT_SOME(parse);
+    JSON::Object state = parse.get();
+
+    // There should be only 1 completed framework.
+    Result<JSON::Array> completedFrameworks =
+      state.values["completed_frameworks"].as<JSON::Array>();
+
+    ASSERT_EQ(1u, completedFrameworks->values.size());
+
+    // The number of completed tasks in the completed framework
+    // should match the limit.
+    JSON::Object completedFramework =
+      completedFrameworks->values[0].as<JSON::Object>();
+    Result<JSON::Array> completedTasksPerFramework =
+      completedFramework.values["completed_tasks"].as<JSON::Array>();
+
+    EXPECT_EQ(maxTasksPerFramework, completedTasksPerFramework->values.size());
+
+    Stop(slave.get());
+    Stop(master.get());
+  }
+}
+
 } // namespace tests {
 } // namespace internal {
 } // namespace mesos {