Posted to commits@mesos.apache.org by ji...@apache.org on 2015/06/12 22:49:02 UTC

[2/2] mesos git commit: Added unit tests for fetching ResourceUsage in both QoS Controller and Resource Estimator.

Added unit tests for fetching ResourceUsage in both QoS Controller and
Resource Estimator.

Review: https://reviews.apache.org/r/35157


Project: http://git-wip-us.apache.org/repos/asf/mesos/repo
Commit: http://git-wip-us.apache.org/repos/asf/mesos/commit/b56f7d27
Tree: http://git-wip-us.apache.org/repos/asf/mesos/tree/b56f7d27
Diff: http://git-wip-us.apache.org/repos/asf/mesos/diff/b56f7d27

Branch: refs/heads/master
Commit: b56f7d27d2267914cde9a11bbeb67aa19aa7cc85
Parents: 37533b9
Author: Bartek Plotka <bw...@gmail.com>
Authored: Fri Jun 12 13:48:16 2015 -0700
Committer: Jie Yu <yu...@gmail.com>
Committed: Fri Jun 12 13:48:17 2015 -0700

----------------------------------------------------------------------
 include/mesos/type_utils.hpp         |   1 +
 src/common/type_utils.cpp            |   6 +
 src/tests/mesos.cpp                  |  31 +++++
 src/tests/mesos.hpp                  |  14 +++
 src/tests/oversubscription_tests.cpp | 191 +++++++++++++++++++++++++++++-
 5 files changed, 242 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
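
For context, the pattern these tests exercise is the "pull" model used by the
oversubscription interfaces: at initialize() the slave hands the Resource
Estimator (and the QoS Controller) a usage callback, and the component later
invokes that callback to fetch ResourceUsage from the ResourceMonitor on
demand. Below is a minimal, self-contained sketch of that callback-capture
pattern, assuming nothing beyond the standard library; FakeResourceUsage and
FakeResourceEstimator are illustrative stand-ins, not Mesos types.

    #include <functional>
    #include <iostream>
    #include <string>

    // Illustrative stand-in for mesos::ResourceUsage; not a Mesos type.
    struct FakeResourceUsage
    {
      std::string executorId;
      double cpusUserTimeSecs;
    };

    class FakeResourceEstimator
    {
    public:
      // Mirrors the shape of ResourceEstimator::initialize(): store the
      // callback so that usage can be pulled later, on demand.
      void initialize(const std::function<FakeResourceUsage()>& usage)
      {
        this->usage = usage;
      }

      // Pull current statistics through the stored callback, the same way
      // the tests below invoke usageCallback.get()().
      void printUsage() const
      {
        const FakeResourceUsage current = usage();
        std::cout << current.executorId << " used "
                  << current.cpusUserTimeSecs << "s of CPU" << std::endl;
      }

    private:
      std::function<FakeResourceUsage()> usage;
    };

    int main()
    {
      FakeResourceEstimator estimator;

      // In the slave the callback queries the ResourceMonitor; here it
      // simply returns canned statistics.
      estimator.initialize([]() {
        return FakeResourceUsage{"default", 4.0};
      });

      estimator.printUsage();
      return 0;
    }

The mocks below capture the slave's real callback the same way, via
FutureArg<0> on initialize(), and then invoke it to compare the returned
statistics against the values stubbed into the containerizer.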


http://git-wip-us.apache.org/repos/asf/mesos/blob/b56f7d27/include/mesos/type_utils.hpp
----------------------------------------------------------------------
diff --git a/include/mesos/type_utils.hpp b/include/mesos/type_utils.hpp
index 65668a6..552b360 100644
--- a/include/mesos/type_utils.hpp
+++ b/include/mesos/type_utils.hpp
@@ -52,6 +52,7 @@ bool operator == (const Credential& left, const Credential& right);
 bool operator == (const Environment& left, const Environment& right);
 bool operator == (const ExecutorInfo& left, const ExecutorInfo& right);
 bool operator == (const MasterInfo& left, const MasterInfo& right);
+bool operator == (const ResourceStatistics& left, const ResourceStatistics& right);
 bool operator == (const SlaveInfo& left, const SlaveInfo& right);
 bool operator == (const Volume& left, const Volume& right);
 

http://git-wip-us.apache.org/repos/asf/mesos/blob/b56f7d27/src/common/type_utils.cpp
----------------------------------------------------------------------
diff --git a/src/common/type_utils.cpp b/src/common/type_utils.cpp
index e92f6f3..57c9ebb 100644
--- a/src/common/type_utils.cpp
+++ b/src/common/type_utils.cpp
@@ -313,6 +313,12 @@ bool operator == (const MasterInfo& left, const MasterInfo& right)
 }
 
 
+bool operator == (const ResourceStatistics& left, const ResourceStatistics& right)
+{
+  return left.SerializeAsString() == right.SerializeAsString();
+}
+
+
 bool operator == (const SlaveInfo& left, const SlaveInfo& right)
 {
   return left.hostname() == right.hostname() &&

http://git-wip-us.apache.org/repos/asf/mesos/blob/b56f7d27/src/tests/mesos.cpp
----------------------------------------------------------------------
diff --git a/src/tests/mesos.cpp b/src/tests/mesos.cpp
index 5e574c5..509f9f2 100644
--- a/src/tests/mesos.cpp
+++ b/src/tests/mesos.cpp
@@ -358,6 +358,37 @@ Try<PID<slave::Slave>> MesosTest::StartSlave(
 }
 
 
+Try<PID<slave::Slave>> MesosTest::StartSlave(
+    slave::Containerizer* containerizer,
+    mesos::slave::ResourceEstimator* resourceEstimator,
+    const Option<slave::Flags>& flags)
+{
+  return cluster.slaves.start(
+      flags.isNone() ? CreateSlaveFlags() : flags.get(),
+      containerizer,
+      None(),
+      None(),
+      None(),
+      resourceEstimator);
+}
+
+
+Try<PID<slave::Slave>> MesosTest::StartSlave(
+    slave::Containerizer* containerizer,
+    mesos::slave::QoSController* qosController,
+    const Option<slave::Flags>& flags)
+{
+  return cluster.slaves.start(
+      flags.isNone() ? CreateSlaveFlags() : flags.get(),
+      containerizer,
+      None(),
+      None(),
+      None(),
+      None(),
+      qosController);
+}
+
+
 void MesosTest::Stop(const PID<master::Master>& pid)
 {
   cluster.masters.stop(pid);

http://git-wip-us.apache.org/repos/asf/mesos/blob/b56f7d27/src/tests/mesos.hpp
----------------------------------------------------------------------
diff --git a/src/tests/mesos.hpp b/src/tests/mesos.hpp
index b80987b..ecdf910 100644
--- a/src/tests/mesos.hpp
+++ b/src/tests/mesos.hpp
@@ -192,6 +192,20 @@ protected:
       mesos::slave::ResourceEstimator* resourceEstimator,
       const Option<slave::Flags>& flags = None());
 
+  // Starts a slave with the specified resource estimator,
+  // containerizer and flags.
+  virtual Try<process::PID<slave::Slave>> StartSlave(
+      slave::Containerizer* containerizer,
+      mesos::slave::ResourceEstimator* resourceEstimator,
+      const Option<slave::Flags>& flags = None());
+
+  // Starts a slave with the specified QoS Controller,
+  // containerizer and flags.
+  virtual Try<process::PID<slave::Slave>> StartSlave(
+      slave::Containerizer* containerizer,
+      mesos::slave::QoSController* qosController,
+      const Option<slave::Flags>& flags = None());
+
   // Stop the specified master.
   virtual void Stop(
       const process::PID<master::Master>& pid);
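
For reference, these overloads are exercised by the new tests in
oversubscription_tests.cpp further down; for example, the Resource Estimator
variant is used as follows (excerpted from the first new test):

    MockResourceEstimator resourceEstimator;

    Try<PID<Slave>> slave = StartSlave(
        &containerizer,
        &resourceEstimator,
        CreateSlaveFlags());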

http://git-wip-us.apache.org/repos/asf/mesos/blob/b56f7d27/src/tests/oversubscription_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/oversubscription_tests.cpp b/src/tests/oversubscription_tests.cpp
index e8ae053..d7fcaab 100644
--- a/src/tests/oversubscription_tests.cpp
+++ b/src/tests/oversubscription_tests.cpp
@@ -43,6 +43,7 @@
 #include "module/manager.hpp"
 
 #include "slave/flags.hpp"
+#include "slave/monitor.hpp"
 #include "slave/slave.hpp"
 
 #include "tests/flags.hpp"
@@ -54,6 +55,7 @@ using namespace process;
 
 using mesos::internal::master::Master;
 
+using mesos::internal::slave::ResourceMonitor;
 using mesos::internal::slave::Slave;
 
 using mesos::slave::QoSCorrection;
@@ -64,8 +66,10 @@ using std::vector;
 
 using testing::_;
 using testing::AtMost;
-using testing::Return;
+using testing::DoAll;
+using testing::Eq;
 using testing::Invoke;
+using testing::Return;
 
 namespace mesos {
 namespace internal {
@@ -135,12 +139,114 @@ protected:
     return resource;
   }
 
+  ResourceStatistics createResourceStatistics()
+  {
+    ResourceStatistics statistics;
+    statistics.set_cpus_nr_periods(100);
+    statistics.set_cpus_nr_throttled(2);
+    statistics.set_cpus_user_time_secs(4);
+    statistics.set_cpus_system_time_secs(1);
+    statistics.set_cpus_throttled_time_secs(0.5);
+    statistics.set_cpus_limit(1.0);
+    statistics.set_mem_file_bytes(0);
+    statistics.set_mem_anon_bytes(0);
+    statistics.set_mem_mapped_file_bytes(0);
+    statistics.set_mem_rss_bytes(1024);
+    statistics.set_mem_limit_bytes(2048);
+    statistics.set_timestamp(0);
+
+    return statistics;
+  }
+
 private:
   string originalLDLibraryPath;
   Modules modules;
 };
 
 
+// This test verifies that the ResourceEstimator is able to fetch
+// ResourceUsage statistics about a running executor from
+// the ResourceMonitor.
+TEST_F(OversubscriptionTest, FetchResourceUsageFromMonitor)
+{
+  Try<PID<Master>> master = StartMaster();
+  ASSERT_SOME(master);
+
+  MockExecutor exec(DEFAULT_EXECUTOR_ID);
+  TestContainerizer containerizer(&exec);
+
+  const ResourceStatistics statistics = createResourceStatistics();
+
+  // Make sure that the containerizer will report stub statistics.
+  EXPECT_CALL(containerizer, usage(_))
+    .WillOnce(Return(statistics));
+
+  MockResourceEstimator resourceEstimator;
+
+  Future<lambda::function<Future<ResourceUsage>()>> usageCallback;
+
+  // Catch the callback that is passed to the ResourceEstimator.
+  EXPECT_CALL(resourceEstimator, initialize(_))
+    .WillOnce(DoAll(FutureArg<0>(&usageCallback), Return(Nothing())));
+
+  Try<PID<Slave>> slave = StartSlave(
+      &containerizer,
+      &resourceEstimator,
+      CreateSlaveFlags());
+
+  ASSERT_SOME(slave);
+
+  MockScheduler sched;
+  MesosSchedulerDriver driver(
+      &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);
+
+  EXPECT_CALL(sched, registered(&driver, _, _));
+
+  Future<vector<Offer>> offers;
+  EXPECT_CALL(sched, resourceOffers(&driver, _))
+    .WillOnce(FutureArg<1>(&offers))
+    .WillRepeatedly(Return()); // Ignore subsequent offers.
+
+  driver.start();
+
+  AWAIT_READY(offers);
+  EXPECT_NE(0u, offers.get().size());
+
+  TaskInfo task = createTask(offers.get()[0], "sleep 10", DEFAULT_EXECUTOR_ID);
+
+  Future<TaskStatus> status;
+  EXPECT_CALL(sched, statusUpdate(&driver, _))
+    .WillOnce(FutureArg<1>(&status));
+
+  EXPECT_CALL(exec, registered(_, _, _, _));
+
+  EXPECT_CALL(exec, launchTask(_, _))
+    .WillOnce(SendStatusUpdateFromTask(TASK_RUNNING));
+
+  driver.launchTasks(offers.get()[0].id(), {task});
+
+  AWAIT_READY(status);
+  EXPECT_EQ(TASK_RUNNING, status.get().state());
+
+  AWAIT_READY(usageCallback);
+
+  Future<ResourceUsage> usage = usageCallback.get()();
+  AWAIT_READY(usage);
+
+  // Expect the same statistics as those from the mocked containerizer.
+  ASSERT_EQ(1u, usage.get().executors().size());
+  EXPECT_EQ(
+      usage.get().executors(0).executor_info().executor_id(),
+      DEFAULT_EXECUTOR_ID);
+  ASSERT_EQ(usage.get().executors(0).statistics(), statistics);
+
+  driver.stop();
+  driver.join();
+
+  Shutdown();
+}
+
+
 // This test verifies that slave will forward the estimation of the
 // oversubscribed resources to the master.
 TEST_F(OversubscriptionTest, ForwardUpdateSlaveMessage)
@@ -495,6 +601,89 @@ TEST_F(OversubscriptionTest, FixedResourceEstimator)
 }
 
 
+// This test verifies that the QoS Controller is able to fetch
+// ResourceUsage statistics about a running executor from
+// the ResourceMonitor.
+TEST_F(OversubscriptionTest, QoSFetchResourceUsageFromMonitor)
+{
+  Try<PID<Master>> master = StartMaster();
+  ASSERT_SOME(master);
+
+  MockExecutor exec(DEFAULT_EXECUTOR_ID);
+  TestContainerizer containerizer(&exec);
+
+  const ResourceStatistics statistics = createResourceStatistics();
+
+  // Make sure that the containerizer will report stub statistics.
+  EXPECT_CALL(containerizer, usage(_))
+    .WillOnce(Return(statistics));
+
+  MockQoSController controller;
+
+  Future<lambda::function<Future<ResourceUsage>()>> usageCallback;
+
+  // Catch the callback that is passed to the QoS Controller.
+  EXPECT_CALL(controller, initialize(_))
+    .WillOnce(DoAll(FutureArg<0>(&usageCallback), Return(Nothing())));
+
+  Try<PID<Slave>> slave = StartSlave(
+      &containerizer,
+      &controller,
+      CreateSlaveFlags());
+
+  ASSERT_SOME(slave);
+
+  MockScheduler sched;
+  MesosSchedulerDriver driver(
+      &sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);
+
+  EXPECT_CALL(sched, registered(&driver, _, _));
+
+  Future<vector<Offer>> offers;
+  EXPECT_CALL(sched, resourceOffers(&driver, _))
+    .WillOnce(FutureArg<1>(&offers))
+    .WillRepeatedly(Return()); // Ignore subsequent offers.
+
+  driver.start();
+
+  AWAIT_READY(offers);
+  EXPECT_NE(0u, offers.get().size());
+
+  TaskInfo task = createTask(offers.get()[0], "sleep 10", DEFAULT_EXECUTOR_ID);
+
+  Future<TaskStatus> status;
+  EXPECT_CALL(sched, statusUpdate(&driver, _))
+    .WillOnce(FutureArg<1>(&status));
+
+  EXPECT_CALL(exec, registered(_, _, _, _));
+
+  EXPECT_CALL(exec, launchTask(_, _))
+    .WillOnce(SendStatusUpdateFromTask(TASK_RUNNING));
+
+  driver.launchTasks(offers.get()[0].id(), {task});
+
+  AWAIT_READY(status);
+  EXPECT_EQ(TASK_RUNNING, status.get().state());
+
+  AWAIT_READY(usageCallback);
+
+  Future<ResourceUsage> usage = usageCallback.get()();
+  AWAIT_READY(usage);
+
+  // Expect the same statistics as those from the mocked containerizer.
+  ASSERT_EQ(1u, usage.get().executors().size());
+  EXPECT_EQ(
+      usage.get().executors(0).executor_info().executor_id(),
+      DEFAULT_EXECUTOR_ID);
+  ASSERT_EQ(usage.get().executors(0).statistics(), statistics);
+
+  driver.stop();
+  driver.join();
+
+  Shutdown();
+}
+
+
 // Tests interactions between QoS Controller and slave. The
 // TestQoSController's correction queue is filled and a mocked slave
 // is checked for receiving the given correction.