Posted to commits@mesos.apache.org by qi...@apache.org on 2020/05/05 08:12:52 UTC

[mesos] branch master updated (6bb60a4 -> 95b8064)

This is an automated email from the ASF dual-hosted git repository.

qianzhang pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/mesos.git.


    from 6bb60a4  Reverted the changes about `REASON_CONTAINER_MEMORY_REQUEST_EXCEEDED`.
     new 0a9d97f  Added `cpus_soft_limit` field to `ResourceStatistics` protobuf message.
     new ae956f9  Updated UCR's `usage()` method to support resource limits.
     new 97dc2b0  Updated Docker containerizer by not updating resources for command task.
     new 929932f  Updated Docker containerizer to set Docker container's resource limits.
     new 95b8064  Updated Docker containerizer's `usage()` to support resource limits.

The 5 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 include/mesos/mesos.proto                          |   9 +-
 include/mesos/v1/mesos.proto                       |   9 +-
 include/mesos/values.hpp                           |   1 +
 src/common/values.cpp                              |   6 +
 src/slave/containerizer/docker.cpp                 | 350 +++++++++++++++------
 src/slave/containerizer/docker.hpp                 |  20 +-
 src/slave/containerizer/mesos/containerizer.cpp    | 119 ++++++-
 src/slave/containerizer/mesos/containerizer.hpp    |   7 +-
 .../containerizer/docker_containerizer_tests.cpp   | 182 ++---------
 src/tests/slave_recovery_tests.cpp                 |   3 +-
 src/tests/slave_tests.cpp                          |   6 +-
 11 files changed, 425 insertions(+), 287 deletions(-)


[mesos] 03/05: Updated Docker containerizer by not updating resources for command task.

Posted by qi...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

qianzhang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/mesos.git

commit 97dc2b069965929105d6241c57f8cb6ee77a5e35
Author: Qian Zhang <zh...@gmail.com>
AuthorDate: Tue Apr 21 16:46:17 2020 +0800

    Updated Docker containerizer by not updating resources for command task.
    
    For a command task, its resources are set when it is launched as a Docker
    container by the Docker executor, and we do not need to update its
    resources afterward since we do not support task resizing. But for a
    custom executor launched as a Docker container by the Docker
    containerizer, we need to update its resources when it launches a new
    task or an existing task terminates.
    
    Review: https://reviews.apache.org/r/72401
---
 src/slave/containerizer/docker.cpp                 |  11 ++
 src/slave/containerizer/docker.hpp                 |   5 +-
 .../containerizer/docker_containerizer_tests.cpp   | 157 ---------------------
 3 files changed, 15 insertions(+), 158 deletions(-)

diff --git a/src/slave/containerizer/docker.cpp b/src/slave/containerizer/docker.cpp
index 492ac27..3aa6a99 100644
--- a/src/slave/containerizer/docker.cpp
+++ b/src/slave/containerizer/docker.cpp
@@ -1007,6 +1007,7 @@ Future<Nothing> DockerContainerizerProcess::_recover(
         Container* container = new Container(containerId);
         containers_[containerId] = container;
         container->state = Container::RUNNING;
+        container->generatedForCommandTask = executor.generatedForCommandTask;
         container->launchesExecutorContainer =
           executorContainers.contains(containerId);
 
@@ -1675,6 +1676,16 @@ Future<Nothing> DockerContainerizerProcess::update(
     return Nothing();
   }
 
+  if (container->generatedForCommandTask) {
+    LOG(INFO) << "Ignoring updating container " << containerId
+              << " because it is generated for a command task";
+
+    // Store the resources for usage().
+    container->resources = resourceRequests;
+
+    return Nothing();
+  }
+
   if (container->resources == resourceRequests && !force) {
     LOG(INFO) << "Ignoring updating container " << containerId
               << " because resources passed to update are identical to"
diff --git a/src/slave/containerizer/docker.hpp b/src/slave/containerizer/docker.hpp
index 09fc279..d3d5f3a 100644
--- a/src/slave/containerizer/docker.hpp
+++ b/src/slave/containerizer/docker.hpp
@@ -353,7 +353,8 @@ private:
         symlinked(symlinked),
         containerWorkDir(containerWorkDir),
         containerName(name(id)),
-        launchesExecutorContainer(launchesExecutorContainer)
+        launchesExecutorContainer(launchesExecutorContainer),
+        generatedForCommandTask(_containerConfig.has_task_info())
     {
       // NOTE: The task's resources are included in the executor's
       // resources in order to make sure when launching the executor
@@ -531,6 +532,8 @@ private:
     // Marks if this container launches an executor in a docker
     // container.
     bool launchesExecutorContainer;
+
+    bool generatedForCommandTask;
   };
 
   hashmap<ContainerID, Container*> containers_;
diff --git a/src/tests/containerizer/docker_containerizer_tests.cpp b/src/tests/containerizer/docker_containerizer_tests.cpp
index b069f51..42692dc 100644
--- a/src/tests/containerizer/docker_containerizer_tests.cpp
+++ b/src/tests/containerizer/docker_containerizer_tests.cpp
@@ -1086,163 +1086,6 @@ TEST_F(DockerContainerizerTest, ROOT_DOCKER_Usage)
 }
 
 
-#ifdef __linux__
-TEST_F(DockerContainerizerTest, ROOT_DOCKER_Update)
-{
-  Try<Owned<cluster::Master>> master = StartMaster();
-  ASSERT_SOME(master);
-
-  MockDocker* mockDocker =
-    new MockDocker(tests::flags.docker, tests::flags.docker_socket);
-
-  Shared<Docker> docker(mockDocker);
-
-  slave::Flags flags = CreateSlaveFlags();
-
-  Fetcher fetcher(flags);
-
-  Try<ContainerLogger*> logger =
-    ContainerLogger::create(flags.container_logger);
-
-  ASSERT_SOME(logger);
-
-  MockDockerContainerizer dockerContainerizer(
-      flags,
-      &fetcher,
-      Owned<ContainerLogger>(logger.get()),
-      docker);
-
-  Owned<MasterDetector> detector = master.get()->createDetector();
-
-  Try<Owned<cluster::Slave>> slave =
-    StartSlave(detector.get(), &dockerContainerizer, flags);
-  ASSERT_SOME(slave);
-
-  MockScheduler sched;
-  MesosSchedulerDriver driver(
-      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
-
-  Future<FrameworkID> frameworkId;
-  EXPECT_CALL(sched, registered(&driver, _, _))
-    .WillOnce(FutureArg<1>(&frameworkId));
-
-  Future<vector<Offer>> offers;
-  EXPECT_CALL(sched, resourceOffers(&driver, _))
-    .WillOnce(FutureArg<1>(&offers))
-    .WillRepeatedly(Return()); // Ignore subsequent offers.
-
-  driver.start();
-
-  AWAIT_READY(frameworkId);
-
-  AWAIT_READY(offers);
-  ASSERT_FALSE(offers->empty());
-
-  TaskInfo task = createTask(
-      offers->front().slave_id(),
-      offers->front().resources(),
-      SLEEP_COMMAND(1000));
-
-  // TODO(tnachen): Use local image to test if possible.
-  task.mutable_container()->CopyFrom(createDockerInfo("alpine"));
-
-  Future<ContainerID> containerId;
-  EXPECT_CALL(dockerContainerizer, launch(_, _, _, _))
-    .WillOnce(DoAll(FutureArg<0>(&containerId),
-                    Invoke(&dockerContainerizer,
-                           &MockDockerContainerizer::_launch)));
-
-  Future<TaskStatus> statusStarting;
-  Future<TaskStatus> statusRunning;
-  EXPECT_CALL(sched, statusUpdate(&driver, _))
-    .WillOnce(FutureArg<1>(&statusStarting))
-    .WillOnce(FutureArg<1>(&statusRunning))
-    .WillRepeatedly(DoDefault());
-
-  driver.launchTasks(offers.get()[0].id(), {task});
-
-  AWAIT_READY(containerId);
-
-  AWAIT_READY_FOR(statusStarting, Seconds(60));
-  EXPECT_EQ(TASK_STARTING, statusStarting->state());
-
-  AWAIT_READY_FOR(statusRunning, Seconds(60));
-  EXPECT_EQ(TASK_RUNNING, statusRunning->state());
-
-  ASSERT_TRUE(
-    exists(docker, containerId.get(), ContainerState::RUNNING));
-
-  string name = containerName(containerId.get());
-
-  Future<Docker::Container> inspect = docker->inspect(name);
-
-  AWAIT_READY(inspect);
-
-  Try<Resources> newResources = Resources::parse("cpus:1;mem:128");
-
-  ASSERT_SOME(newResources);
-
-  Future<Nothing> update =
-    dockerContainerizer.update(containerId.get(), newResources.get(), {});
-
-  AWAIT_READY(update);
-
-  Result<string> cpuHierarchy = cgroups::hierarchy("cpu");
-  Result<string> memoryHierarchy = cgroups::hierarchy("memory");
-
-  ASSERT_SOME(cpuHierarchy);
-  ASSERT_SOME(memoryHierarchy);
-
-  Option<pid_t> pid = inspect->pid;
-  ASSERT_SOME(pid);
-
-  Result<string> cpuCgroup = cgroups::cpu::cgroup(pid.get());
-  ASSERT_SOME(cpuCgroup);
-
-  Result<string> memoryCgroup = cgroups::memory::cgroup(pid.get());
-  ASSERT_SOME(memoryCgroup);
-
-  Try<uint64_t> cpu = cgroups::cpu::shares(
-      cpuHierarchy.get(),
-      cpuCgroup.get());
-
-  ASSERT_SOME(cpu);
-
-  Try<Bytes> mem = cgroups::memory::soft_limit_in_bytes(
-      memoryHierarchy.get(),
-      memoryCgroup.get());
-
-  ASSERT_SOME(mem);
-
-  EXPECT_EQ(1024u, cpu.get());
-  EXPECT_EQ(128u, mem->bytes() / Bytes::MEGABYTES);
-
-  newResources = Resources::parse("cpus:1;mem:144");
-
-  // Issue second update that uses the cached cgroups instead of inspect.
-  update = dockerContainerizer.update(containerId.get(), newResources.get(), {});
-
-  AWAIT_READY(update);
-
-  cpu = cgroups::cpu::shares(cpuHierarchy.get(), cpuCgroup.get());
-
-  ASSERT_SOME(cpu);
-
-  mem = cgroups::memory::soft_limit_in_bytes(
-      memoryHierarchy.get(),
-      memoryCgroup.get());
-
-  ASSERT_SOME(mem);
-
-  EXPECT_EQ(1024u, cpu.get());
-  EXPECT_EQ(144u, mem->bytes() / Bytes::MEGABYTES);
-
-  driver.stop();
-  driver.join();
-}
-#endif // __linux__
-
-
 TEST_F(DockerContainerizerTest, ROOT_DOCKER_Recover)
 {
   MockDocker* mockDocker =
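
A minimal standalone sketch of the guard the patch above introduces, using
simplified stand-in types rather than the actual Mesos classes: a container
generated for a command task only caches the requested resources (so usage()
can still report them) and skips any cgroup work, while a custom-executor
container falls through to the real update path.

    #include <iostream>
    #include <string>

    // Stand-in for the containerizer's per-container state; the real struct
    // lives in src/slave/containerizer/docker.hpp.
    struct Container
    {
      bool generatedForCommandTask;
      std::string resourceRequests;  // Stand-in for mesos::Resources.
    };

    // Returns true when the caller should go on and update the cgroups.
    bool update(Container& container, const std::string& requests, bool force)
    {
      if (container.generatedForCommandTask) {
        // Only remember the resources for usage(); never touch cgroups.
        container.resourceRequests = requests;
        return false;
      }

      if (container.resourceRequests == requests && !force) {
        return false;  // Nothing changed.
      }

      container.resourceRequests = requests;
      return true;
    }

    int main()
    {
      Container commandTask{true, "cpus:1;mem:128"};
      Container customExecutor{false, "cpus:1;mem:128"};

      std::cout << update(commandTask, "cpus:2;mem:256", false)     // 0
                << update(customExecutor, "cpus:2;mem:256", false)  // 1
                << std::endl;
    }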


[mesos] 04/05: Updated Docker containerizer to set Docker container's resource limits.

Posted by qi...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

qianzhang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/mesos.git

commit 929932fc2bd753f097a26caea5b3e7f7f3ac9118
Author: Qian Zhang <zh...@gmail.com>
AuthorDate: Thu Apr 30 09:29:49 2020 +0800

    Updated Docker containerizer to set Docker container's resource limits.
    
    This ensures that the resource limits of the Docker container in which
    a custom executor runs are correctly updated when a new task is launched
    or an existing task terminates. The `resources` field in the `Container`
    struct is also renamed to `resourceRequests`.
    
    Review: https://reviews.apache.org/r/72391
---
 include/mesos/values.hpp           |   1 +
 src/common/values.cpp              |   6 +
 src/slave/containerizer/docker.cpp | 242 ++++++++++++++++++++++++-------------
 src/slave/containerizer/docker.hpp |  15 ++-
 4 files changed, 174 insertions(+), 90 deletions(-)

diff --git a/include/mesos/values.hpp b/include/mesos/values.hpp
index 27f71d1..9288503 100644
--- a/include/mesos/values.hpp
+++ b/include/mesos/values.hpp
@@ -27,6 +27,7 @@ namespace mesos {
 
 std::ostream& operator<<(std::ostream& stream, const Value::Scalar& scalar);
 bool operator==(const Value::Scalar& left, const Value::Scalar& right);
+bool operator!=(const Value::Scalar& left, const Value::Scalar& right);
 bool operator<(const Value::Scalar& left, const Value::Scalar& right);
 bool operator<=(const Value::Scalar& left, const Value::Scalar& right);
 bool operator>(const Value::Scalar& left, const Value::Scalar& right);
diff --git a/src/common/values.cpp b/src/common/values.cpp
index 7520382..d7bc91b 100644
--- a/src/common/values.cpp
+++ b/src/common/values.cpp
@@ -99,6 +99,12 @@ bool operator==(const Value::Scalar& left, const Value::Scalar& right)
 }
 
 
+bool operator!=(const Value::Scalar& left, const Value::Scalar& right)
+{
+  return !(left == right);
+}
+
+
 bool operator<(const Value::Scalar& left, const Value::Scalar& right)
 {
   return convertToFixed(left.value()) < convertToFixed(right.value());
diff --git a/src/slave/containerizer/docker.cpp b/src/slave/containerizer/docker.cpp
index 3aa6a99..8aed025 100644
--- a/src/slave/containerizer/docker.cpp
+++ b/src/slave/containerizer/docker.cpp
@@ -541,7 +541,7 @@ Try<Nothing> DockerContainerizerProcess::updatePersistentVolumes(
         continue;
       }
 
-      if (_container->resources.contains(resource)) {
+      if (_container->resourceRequests.contains(resource)) {
         isVolumeInUse = true;
         break;
       }
@@ -612,7 +612,7 @@ Future<Nothing> DockerContainerizerProcess::mountPersistentVolumes(
   container->state = Container::MOUNTING;
 
   if (!container->containerConfig.has_task_info() &&
-      !container->resources.persistentVolumes().empty()) {
+      !container->resourceRequests.persistentVolumes().empty()) {
     LOG(ERROR) << "Persistent volumes found with container '" << containerId
                << "' but are not supported with custom executors";
     return Nothing();
@@ -622,7 +622,7 @@ Future<Nothing> DockerContainerizerProcess::mountPersistentVolumes(
       containerId,
       container->containerWorkDir,
       Resources(),
-      container->resources);
+      container->resourceRequests);
 
   if (updateVolumes.isError()) {
     return Failure(updateVolumes.error());
@@ -1333,7 +1333,10 @@ Future<Containerizer::LaunchResult> DockerContainerizerProcess::_launch(
       // --cpu-quota to the 'docker run' call in
       // launchExecutorContainer.
       return update(
-          containerId, containerConfig.executor_info().resources(), {}, true)
+          containerId,
+          containerConfig.executor_info().resources(),
+          containerConfig.limits(),
+          true)
         .then([=]() {
           return Future<Docker::Container>(dockerContainer);
         });
@@ -1384,7 +1387,7 @@ Future<Docker::Container> DockerContainerizerProcess::launchExecutorContainer(
         containerName,
         container->containerWorkDir,
         flags.sandbox_directory,
-        container->resources,
+        container->resourceRequests,
 #ifdef __linux__
         flags.cgroups_enable_cfs,
 #else
@@ -1392,8 +1395,8 @@ Future<Docker::Container> DockerContainerizerProcess::launchExecutorContainer(
 #endif
         container->environment,
         None(), // No extra devices.
-        flags.docker_mesos_image.isNone() ? flags.default_container_dns : None()
-    );
+        flags.docker_mesos_image.isNone() ? flags.default_container_dns : None(),
+        container->resourceLimits);
 
     if (runOptions.isError()) {
       return Failure(runOptions.error());
@@ -1516,7 +1519,7 @@ Future<pid_t> DockerContainerizerProcess::launchExecutorProcess(
   Future<Nothing> allocateGpus = Nothing();
 
 #ifdef __linux__
-  Option<double> gpus = Resources(container->resources).gpus();
+  Option<double> gpus = Resources(container->resourceRequests).gpus();
 
   if (gpus.isSome() && gpus.get() > 0) {
     // Make sure that the `gpus` resource is not fractional.
@@ -1677,19 +1680,23 @@ Future<Nothing> DockerContainerizerProcess::update(
   }
 
   if (container->generatedForCommandTask) {
+    // Store the resources for usage().
+    container->resourceRequests = resourceRequests;
+    container->resourceLimits = resourceLimits;
+
     LOG(INFO) << "Ignoring updating container " << containerId
               << " because it is generated for a command task";
 
-    // Store the resources for usage().
-    container->resources = resourceRequests;
-
     return Nothing();
   }
 
-  if (container->resources == resourceRequests && !force) {
+  if (container->resourceRequests == resourceRequests &&
+      container->resourceLimits == resourceLimits &&
+      !force) {
     LOG(INFO) << "Ignoring updating container " << containerId
               << " because resources passed to update are identical to"
               << " existing resources";
+
     return Nothing();
   }
 
@@ -1699,17 +1706,21 @@ Future<Nothing> DockerContainerizerProcess::update(
   // TODO(gyliu): Support updating GPU resources.
 
   // Store the resources for usage().
-  container->resources = resourceRequests;
+  container->resourceRequests = resourceRequests;
+  container->resourceLimits = resourceLimits;
 
 #ifdef __linux__
-  if (!resourceRequests.cpus().isSome() && !resourceRequests.mem().isSome()) {
+  if (!resourceRequests.cpus().isSome() &&
+      !resourceRequests.mem().isSome() &&
+      !resourceLimits.count("cpus") &&
+      !resourceLimits.count("mem")) {
     LOG(WARNING) << "Ignoring update as no supported resources are present";
     return Nothing();
   }
 
   // Skip inspecting the docker container if we already have the cgroups.
   if (container->cpuCgroup.isSome() && container->memoryCgroup.isSome()) {
-    return __update(containerId, resourceRequests);
+    return __update(containerId, resourceRequests, resourceLimits);
   }
 
   string containerName = containers_.at(containerId)->containerName;
@@ -1753,7 +1764,13 @@ Future<Nothing> DockerContainerizerProcess::update(
       });
 
   return inspectLoop
-    .then(defer(self(), &Self::_update, containerId, resourceRequests, lambda::_1));
+    .then(defer(
+        self(),
+        &Self::_update,
+        containerId,
+        resourceRequests,
+        resourceLimits,
+        lambda::_1));
 #else
   return Nothing();
 #endif // __linux__
@@ -1763,7 +1780,8 @@ Future<Nothing> DockerContainerizerProcess::update(
 #ifdef __linux__
 Future<Nothing> DockerContainerizerProcess::_update(
     const ContainerID& containerId,
-    const Resources& _resources,
+    const Resources& resourceRequests,
+    const google::protobuf::Map<string, Value::Scalar>& resourceLimits,
     const Docker::Container& container)
 {
   if (container.pid.isNone()) {
@@ -1832,13 +1850,14 @@ Future<Nothing> DockerContainerizerProcess::_update(
     return Nothing();
   }
 
-  return __update(containerId, _resources);
+  return __update(containerId, resourceRequests, resourceLimits);
 }
 
 
 Future<Nothing> DockerContainerizerProcess::__update(
     const ContainerID& containerId,
-    const Resources& _resources)
+    const Resources& resourceRequests,
+    const google::protobuf::Map<string, Value::Scalar>& resourceLimits)
 {
   CHECK(containers_.contains(containerId));
 
@@ -1849,7 +1868,7 @@ Future<Nothing> DockerContainerizerProcess::__update(
   // we make these static so we can reuse the result for subsequent
   // calls.
   static Result<string> cpuHierarchy = cgroups::hierarchy("cpu");
-  static Result<string> memoryHierarchy = cgroups::hierarchy("memory");
+  static Result<string> memHierarchy = cgroups::hierarchy("memory");
 
   if (cpuHierarchy.isError()) {
     return Failure("Failed to determine the cgroup hierarchy "
@@ -1857,111 +1876,164 @@ Future<Nothing> DockerContainerizerProcess::__update(
                    cpuHierarchy.error());
   }
 
-  if (memoryHierarchy.isError()) {
+  if (memHierarchy.isError()) {
     return Failure("Failed to determine the cgroup hierarchy "
                    "where the 'memory' subsystem is mounted: " +
-                   memoryHierarchy.error());
+                   memHierarchy.error());
   }
 
   Option<string> cpuCgroup = container->cpuCgroup;
-  Option<string> memoryCgroup = container->memoryCgroup;
-
-  // Update the CPU shares (if applicable).
-  if (cpuHierarchy.isSome() &&
-      cpuCgroup.isSome() &&
-      _resources.cpus().isSome()) {
-    double cpuShares = _resources.cpus().get();
+  Option<string> memCgroup = container->memoryCgroup;
 
-    uint64_t shares =
-      std::max((uint64_t) (CPU_SHARES_PER_CPU * cpuShares), MIN_CPU_SHARES);
+  Option<double> cpuRequest = resourceRequests.cpus();
+  Option<Bytes> memRequest = resourceRequests.mem();
 
-    Try<Nothing> write =
-      cgroups::cpu::shares(cpuHierarchy.get(), cpuCgroup.get(), shares);
-
-    if (write.isError()) {
-      return Failure("Failed to update 'cpu.shares': " + write.error());
+  Option<double> cpuLimit, memLimit;
+  foreach (auto&& limit, resourceLimits) {
+    if (limit.first == "cpus") {
+      cpuLimit = limit.second.value();
+    } else if (limit.first == "mem") {
+      memLimit = limit.second.value();
     }
+  }
 
-    LOG(INFO) << "Updated 'cpu.shares' to " << shares
-              << " at " << path::join(cpuHierarchy.get(), cpuCgroup.get())
-              << " for container " << containerId;
+  // Update the CPU shares and CFS quota (if applicable).
+  if (cpuHierarchy.isSome() && cpuCgroup.isSome()) {
+    if (cpuRequest.isSome()) {
+      uint64_t shares = std::max(
+          (uint64_t) (CPU_SHARES_PER_CPU * cpuRequest.get()), MIN_CPU_SHARES);
 
-    // Set cfs quota if enabled.
-    if (flags.cgroups_enable_cfs) {
-      write = cgroups::cpu::cfs_period_us(
-          cpuHierarchy.get(),
-          cpuCgroup.get(),
-          CPU_CFS_PERIOD);
+      Try<Nothing> write =
+        cgroups::cpu::shares(cpuHierarchy.get(), cpuCgroup.get(), shares);
 
       if (write.isError()) {
-        return Failure("Failed to update 'cpu.cfs_period_us': " +
-                       write.error());
+        return Failure("Failed to update 'cpu.shares': " + write.error());
       }
 
-      Duration quota = std::max(CPU_CFS_PERIOD * cpuShares, MIN_CPU_CFS_QUOTA);
+      LOG(INFO) << "Updated 'cpu.shares' to " << shares
+                << " at " << path::join(cpuHierarchy.get(), cpuCgroup.get())
+                << " for container " << containerId;
+    }
 
-      write = cgroups::cpu::cfs_quota_us(
+    // Set CFS quota to CPU limit (if any) or to CPU request (if the
+    // flag `--cgroups_enable_cfs` is true).
+    if (cpuLimit.isSome() || (flags.cgroups_enable_cfs && cpuRequest.isSome())) {
+      Try<Nothing> write = cgroups::cpu::cfs_period_us(
           cpuHierarchy.get(),
           cpuCgroup.get(),
-          quota);
+          CPU_CFS_PERIOD);
 
       if (write.isError()) {
-        return Failure("Failed to update 'cpu.cfs_quota_us': " + write.error());
+        return Failure(
+            "Failed to update 'cpu.cfs_period_us': " + write.error());
       }
 
-      LOG(INFO) << "Updated 'cpu.cfs_period_us' to " << CPU_CFS_PERIOD
-                << " and 'cpu.cfs_quota_us' to " << quota
-                << " (cpus " << cpuShares << ")"
-                << " for container " << containerId;
+      if (cpuLimit.isSome() && std::isinf(cpuLimit.get())) {
+        write = cgroups::write(
+            cpuHierarchy.get(), cpuCgroup.get(), "cpu.cfs_quota_us", "-1");
+
+        if (write.isError()) {
+          return Failure(
+              "Failed to update 'cpu.cfs_quota_us': " + write.error());
+        }
+
+        LOG(INFO) << "Updated 'cpu.cfs_period_us' to " << CPU_CFS_PERIOD
+                  << " and 'cpu.cfs_quota_us' to -1 at "
+                  << path::join(cpuHierarchy.get(), cpuCgroup.get())
+                  << " for container " << containerId;
+      } else {
+        const double& quota =
+          cpuLimit.isSome() ? cpuLimit.get() : cpuRequest.get();
+
+        Duration duration = std::max(CPU_CFS_PERIOD * quota, MIN_CPU_CFS_QUOTA);
+
+        write = cgroups::cpu::cfs_quota_us(
+            cpuHierarchy.get(), cpuCgroup.get(), duration);
+
+        if (write.isError()) {
+          return Failure(
+              "Failed to update 'cpu.cfs_quota_us': " + write.error());
+        }
+
+        LOG(INFO) << "Updated 'cpu.cfs_period_us' to " << CPU_CFS_PERIOD
+                  << " and 'cpu.cfs_quota_us' to " << duration << " (cpus "
+                  << quota << ") at "
+                  << path::join(cpuHierarchy.get(), cpuCgroup.get())
+                  << " for container " << containerId;
+      }
     }
   }
 
   // Update the memory limits (if applicable).
-  if (memoryHierarchy.isSome() &&
-      memoryCgroup.isSome() &&
-      _resources.mem().isSome()) {
+  if (memHierarchy.isSome() && memCgroup.isSome()) {
     // TODO(tnachen): investigate and handle OOM with docker.
-    Bytes mem = _resources.mem().get();
-    Bytes limit = std::max(mem, MIN_MEMORY);
+    if (memRequest.isSome()) {
+      Bytes softLimit = std::max(memRequest.get(), MIN_MEMORY);
 
-    // Always set the soft limit.
-    Try<Nothing> write =
-      cgroups::memory::soft_limit_in_bytes(
-          memoryHierarchy.get(), memoryCgroup.get(), limit);
+      // Always set the soft limit.
+      Try<Nothing> write = cgroups::memory::soft_limit_in_bytes(
+          memHierarchy.get(), memCgroup.get(), softLimit);
 
-    if (write.isError()) {
-      return Failure("Failed to set 'memory.soft_limit_in_bytes': " +
-                     write.error());
+      if (write.isError()) {
+        return Failure("Failed to set 'memory.soft_limit_in_bytes': " +
+                       write.error());
+      }
+
+      LOG(INFO) << "Updated 'memory.soft_limit_in_bytes' to " << softLimit
+                << " at " << path::join(memHierarchy.get(), memCgroup.get())
+                << " for container " << containerId;
     }
 
-    LOG(INFO) << "Updated 'memory.soft_limit_in_bytes' to " << limit
-              << " for container " << containerId;
+    // Read the existing hard limit.
+    Try<Bytes> currentHardLimit = cgroups::memory::limit_in_bytes(
+        memHierarchy.get(), memCgroup.get());
 
-    // Read the existing limit.
-    Try<Bytes> currentLimit =
-      cgroups::memory::limit_in_bytes(
-          memoryHierarchy.get(), memoryCgroup.get());
+    if (currentHardLimit.isError()) {
+      return Failure(
+          "Failed to read 'memory.limit_in_bytes': " +
+          currentHardLimit.error());
+    }
 
-    if (currentLimit.isError()) {
-      return Failure("Failed to read 'memory.limit_in_bytes': " +
-                     currentLimit.error());
+    bool isInfiniteLimit = false;
+    Option<Bytes> hardLimit = None();
+    if (memLimit.isSome()) {
+      if (std::isinf(memLimit.get())) {
+        isInfiniteLimit = true;
+      } else {
+        hardLimit = std::max(
+            Megabytes(static_cast<uint64_t>(memLimit.get())), MIN_MEMORY);
+      }
+    } else if (memRequest.isSome()) {
+      hardLimit = std::max(memRequest.get(), MIN_MEMORY);
     }
 
-    // Only update if new limit is higher.
+    // Only update if new limit is infinite or higher than current limit.
     // TODO(benh): Introduce a MemoryWatcherProcess which monitors the
     // discrepancy between usage and soft limit and introduces a
     // "manual oom" if necessary.
-    if (limit > currentLimit.get()) {
-      write = cgroups::memory::limit_in_bytes(
-          memoryHierarchy.get(), memoryCgroup.get(), limit);
+    if (isInfiniteLimit) {
+      Try<Nothing> write = cgroups::write(
+          memHierarchy.get(), memCgroup.get(), "memory.limit_in_bytes", "-1");
 
       if (write.isError()) {
-        return Failure("Failed to set 'memory.limit_in_bytes': " +
-                       write.error());
+        return Failure(
+            "Failed to update 'memory.limit_in_bytes': " + write.error());
+      }
+
+      LOG(INFO) << "Updated 'memory.limit_in_bytes' to -1 at "
+                << path::join(memHierarchy.get(), memCgroup.get())
+                << " for container " << containerId;
+    } else if (hardLimit.isSome() && hardLimit.get() > currentHardLimit.get()) {
+      Try<Nothing> write = cgroups::memory::limit_in_bytes(
+          memHierarchy.get(), memCgroup.get(), hardLimit.get());
+
+      if (write.isError()) {
+        return Failure(
+            "Failed to set 'memory.limit_in_bytes': " + write.error());
       }
 
-      LOG(INFO) << "Updated 'memory.limit_in_bytes' to " << limit << " at "
-                << path::join(memoryHierarchy.get(), memoryCgroup.get())
+      LOG(INFO) << "Updated 'memory.limit_in_bytes' to " << hardLimit.get()
+                << " at " << path::join(memHierarchy.get(), memCgroup.get())
                 << " for container " << containerId;
     }
   }
@@ -2011,7 +2083,7 @@ Future<ResourceStatistics> DockerContainerizerProcess::usage(
 #endif // __linux__
 
     // Set the resource allocations.
-    const Resources& resource = container->resources;
+    const Resources& resource = container->resourceRequests;
     const Option<Bytes> mem = resource.mem();
     if (mem.isSome()) {
       result.set_mem_limit_bytes(mem->bytes());
diff --git a/src/slave/containerizer/docker.hpp b/src/slave/containerizer/docker.hpp
index d3d5f3a..8bb51bb 100644
--- a/src/slave/containerizer/docker.hpp
+++ b/src/slave/containerizer/docker.hpp
@@ -256,12 +256,14 @@ private:
 #ifdef __linux__
   process::Future<Nothing> _update(
       const ContainerID& containerId,
-      const Resources& resources,
+      const Resources& resourceRequests,
+      const google::protobuf::Map<std::string, Value::Scalar>& resourceLimits,
       const Docker::Container& container);
 
   process::Future<Nothing> __update(
       const ContainerID& containerId,
-      const Resources& resources);
+      const Resources& resourceRequests,
+      const google::protobuf::Map<std::string, Value::Scalar>& resourceLimits);
 #endif // __linux__
 
   process::Future<Nothing> mountPersistentVolumes(
@@ -366,10 +368,12 @@ private:
       // perfect check because an executor might always have a subset
       // of it's resources that match a task, nevertheless, it's
       // better than nothing).
-      resources = containerConfig.resources();
+      resourceRequests = containerConfig.resources();
+      resourceLimits = containerConfig.limits();
 
       if (containerConfig.has_task_info()) {
-        CHECK(resources.contains(containerConfig.task_info().resources()));
+        CHECK(
+            resourceRequests.contains(containerConfig.task_info().resources()));
       }
 
       if (_command.isSome()) {
@@ -506,7 +510,8 @@ private:
     // the ResourceStatistics limits in usage(). Note that this is
     // different than just what we might get from TaskInfo::resources
     // or ExecutorInfo::resources because they can change dynamically.
-    Resources resources;
+    Resources resourceRequests;
+    google::protobuf::Map<std::string, Value::Scalar> resourceLimits;
 
     // The docker pull future is stored so we can discard when
     // destroy is called while docker is pulling the image.
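
The cgroup arithmetic in __update() above boils down to: cpu.shares follows
the request (the soft limit), cpu.cfs_quota_us follows the limit (the hard
limit), and an infinite limit is written as -1. The sketch below restates
that in isolation; the 1024-shares-per-CPU factor matches the expectations in
the test deleted in commit 03/05, while the 100ms CFS period and the omission
of the MIN_CPU_CFS_QUOTA clamp are simplifying assumptions of this example,
not values read from the Mesos headers.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <iostream>
    #include <limits>

    constexpr uint64_t CPU_SHARES_PER_CPU = 1024;
    constexpr uint64_t MIN_CPU_SHARES = 2;         // Kernel minimum.
    constexpr int64_t CPU_CFS_PERIOD_US = 100000;  // Assumed 100ms period.

    // cpu.shares is derived from the CPU request (the soft limit).
    uint64_t cpuShares(double cpusRequest)
    {
      return std::max(
          static_cast<uint64_t>(CPU_SHARES_PER_CPU * cpusRequest),
          MIN_CPU_SHARES);
    }

    // cpu.cfs_quota_us is derived from the CPU limit; -1 means "no ceiling",
    // which is what the patch writes for an infinite limit.
    int64_t cfsQuotaUs(double cpusLimit)
    {
      if (std::isinf(cpusLimit)) {
        return -1;
      }

      return static_cast<int64_t>(CPU_CFS_PERIOD_US * cpusLimit);
    }

    int main()
    {
      std::cout << cpuShares(1.0) << std::endl;   // 1024
      std::cout << cfsQuotaUs(2.0) << std::endl;  // 200000 (two full periods)
      std::cout << cfsQuotaUs(std::numeric_limits<double>::infinity())
                << std::endl;                     // -1
    }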


[mesos] 02/05: Updated UCR's `usage()` method to support resource limits.

Posted by qi...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

qianzhang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/mesos.git

commit ae956f9dbd30c712eb1310c2f7c98d11f6d5c993
Author: Qian Zhang <zh...@gmail.com>
AuthorDate: Fri Apr 17 20:53:43 2020 +0800

    Updated UCR's `usage()` method to support resource limits.
    
    Review: https://reviews.apache.org/r/72399
---
 src/slave/containerizer/mesos/containerizer.cpp | 119 ++++++++++++++++++++----
 src/slave/containerizer/mesos/containerizer.hpp |   7 +-
 src/tests/slave_recovery_tests.cpp              |   3 +-
 src/tests/slave_tests.cpp                       |   6 +-
 4 files changed, 112 insertions(+), 23 deletions(-)

diff --git a/src/slave/containerizer/mesos/containerizer.cpp b/src/slave/containerizer/mesos/containerizer.cpp
index 6aa4f3f..3c1840c 100644
--- a/src/slave/containerizer/mesos/containerizer.cpp
+++ b/src/slave/containerizer/mesos/containerizer.cpp
@@ -1450,7 +1450,8 @@ Future<Containerizer::LaunchResult> MesosContainerizerProcess::launch(
 
   Owned<Container> container(new Container());
   container->config = containerConfig;
-  container->resources = containerConfig.resources();
+  container->resourceRequests = containerConfig.resources();
+  container->resourceLimits = containerConfig.limits();
   container->directory = containerConfig.directory();
 
   // Maintain the 'children' list in the parent's 'Container' struct,
@@ -2432,7 +2433,8 @@ Future<Nothing> MesosContainerizerProcess::update(
 
   // NOTE: We update container's resources before isolators are updated
   // so that subsequent containerizer->update can be handled properly.
-  container->resources = resourceRequests;
+  container->resourceRequests = resourceRequests;
+  container->resourceLimits = resourceLimits;
 
   // Update each isolator.
   vector<Future<Nothing>> futures;
@@ -2454,12 +2456,11 @@ Future<Nothing> MesosContainerizerProcess::update(
 }
 
 
-// Resources are used to set the limit fields in the statistics but
-// are optional because they aren't known after recovery until/unless
-// update() is called.
 Future<ResourceStatistics> _usage(
     const ContainerID& containerId,
-    const Option<Resources>& resources,
+    const Option<Resources>& resourceRequests,
+    const Option<google::protobuf::Map<string, Value::Scalar>>& resourceLimits,
+    bool enableCfsQuota,
     const vector<Future<ResourceStatistics>>& statistics)
 {
   ResourceStatistics result;
@@ -2478,17 +2479,76 @@ Future<ResourceStatistics> _usage(
     }
   }
 
-  if (resources.isSome()) {
-    // Set the resource allocations.
-    Option<Bytes> mem = resources->mem();
-    if (mem.isSome()) {
-      result.set_mem_limit_bytes(mem->bytes());
+  Option<double> cpuRequest, cpuLimit, memLimit;
+  Option<Bytes> memRequest;
+
+  if (resourceRequests.isSome()) {
+    cpuRequest = resourceRequests->cpus();
+    memRequest = resourceRequests->mem();
+  }
+
+  if (resourceLimits.isSome()) {
+    foreach (auto&& limit, resourceLimits.get()) {
+      if (limit.first == "cpus") {
+        cpuLimit = limit.second.value();
+      } else if (limit.first == "mem") {
+        memLimit = limit.second.value();
+      }
+    }
+  }
+
+  if (cpuRequest.isSome()) {
+    result.set_cpus_soft_limit(cpuRequest.get());
+  }
+
+  if (cpuLimit.isSome()) {
+    // Get the total CPU numbers of this node, we will use it to set container's
+    // hard CPU limit if the CPU limit specified by framework is infinity.
+    static Option<long> totalCPUs;
+    if (totalCPUs.isNone()) {
+      Try<long> cpus = os::cpus();
+      if (cpus.isError()) {
+        return Failure(
+            "Failed to auto-detect the number of cpus: " + cpus.error());
+      }
+
+      totalCPUs = cpus.get();
     }
 
-    Option<double> cpus = resources->cpus();
-    if (cpus.isSome()) {
-      result.set_cpus_limit(cpus.get());
+    CHECK_SOME(totalCPUs);
+
+    result.set_cpus_limit(
+        std::isinf(cpuLimit.get()) ? totalCPUs.get() : cpuLimit.get());
+  } else if (enableCfsQuota && cpuRequest.isSome()) {
+    result.set_cpus_limit(cpuRequest.get());
+  }
+
+  if (memRequest.isSome()) {
+    result.set_mem_soft_limit_bytes(memRequest->bytes());
+  }
+
+  if (memLimit.isSome()) {
+    // Get the total memory of this node, we will use it to set container's hard
+    // memory limit if the memory limit specified by framework is infinity.
+    static Option<Bytes> totalMem;
+    if (totalMem.isNone()) {
+      Try<os::Memory> mem = os::memory();
+      if (mem.isError()) {
+        return Failure(
+            "Failed to auto-detect the size of main memory: " + mem.error());
+      }
+
+      totalMem = mem->total;
     }
+
+    CHECK_SOME(totalMem);
+
+    result.set_mem_limit_bytes(
+        std::isinf(memLimit.get())
+          ? totalMem->bytes()
+          : Megabytes(static_cast<uint64_t>(memLimit.get())).bytes());
+  } else if (memRequest.isSome()) {
+    result.set_mem_limit_bytes(memRequest->bytes());
   }
 
   return result;
@@ -2514,14 +2574,39 @@ Future<ResourceStatistics> MesosContainerizerProcess::usage(
     futures.push_back(isolator->usage(containerId));
   }
 
+  Option<Resources> resourceRequests;
+  Option<google::protobuf::Map<string, Value::Scalar>> resourceLimits;
+
+  // TODO(idownes): After recovery top-level container's resource requests and
+  // limits won't be known until after an update() because they aren't part of
+  // the SlaveState.
+  //
+  // For nested containers, we will get their resource requests and limits from
+  // their `ContainerConfig` since the `resourceRequests` and `resourceLimits`
+  // fields in the `Container` struct won't be recovered for nested containers
+  // after agent restart and update() won't be called for nested containers.
+  if (containerId.has_parent()) {
+    if (containers_.at(containerId)->config.isSome()) {
+      resourceRequests = containers_.at(containerId)->config->resources();
+      resourceLimits = containers_.at(containerId)->config->limits();
+    }
+  } else {
+    resourceRequests = containers_.at(containerId)->resourceRequests;
+    resourceLimits = containers_.at(containerId)->resourceLimits;
+  }
+
   // Use await() here so we can return partial usage statistics.
-  // TODO(idownes): After recovery resources won't be known until
-  // after an update() because they aren't part of the SlaveState.
   return await(futures)
     .then(lambda::bind(
           _usage,
           containerId,
-          containers_.at(containerId)->resources,
+          resourceRequests,
+          resourceLimits,
+#ifdef __linux__
+          flags.cgroups_enable_cfs,
+#else
+          false,
+#endif
           lambda::_1));
 }
 
diff --git a/src/slave/containerizer/mesos/containerizer.hpp b/src/slave/containerizer/mesos/containerizer.hpp
index 2ea033a..56e4c49 100644
--- a/src/slave/containerizer/mesos/containerizer.hpp
+++ b/src/slave/containerizer/mesos/containerizer.hpp
@@ -405,9 +405,10 @@ private:
     // calling cleanup after all isolators have finished isolating.
     process::Future<std::vector<Nothing>> isolation;
 
-    // We keep track of the resources for each container so we can set
-    // the ResourceStatistics limits in usage().
-    Resources resources;
+    // We keep track of the resource requests and limits for each container so
+    // we can set the ResourceStatistics limits in usage().
+    Resources resourceRequests;
+    google::protobuf::Map<std::string, Value::Scalar> resourceLimits;
 
     // The configuration for the container to be launched.
     // This can only be None if the underlying container is launched
diff --git a/src/tests/slave_recovery_tests.cpp b/src/tests/slave_recovery_tests.cpp
index 0efd3a6..da163a2 100644
--- a/src/tests/slave_recovery_tests.cpp
+++ b/src/tests/slave_recovery_tests.cpp
@@ -5332,7 +5332,8 @@ TEST_F(MesosContainerizerSlaveRecoveryTest, ResourceStatistics)
   AWAIT_READY(usage);
 
   // Check the resource limits are set.
-  EXPECT_TRUE(usage->has_cpus_limit());
+  EXPECT_TRUE(usage->has_cpus_soft_limit());
+  EXPECT_TRUE(usage->has_mem_soft_limit_bytes());
   EXPECT_TRUE(usage->has_mem_limit_bytes());
 
   // Destroy the container.
diff --git a/src/tests/slave_tests.cpp b/src/tests/slave_tests.cpp
index 6b264d0..5ad04b2 100644
--- a/src/tests/slave_tests.cpp
+++ b/src/tests/slave_tests.cpp
@@ -2417,11 +2417,13 @@ TEST_F(SlaveTest, StatisticsEndpointRunningExecutor)
   Try<JSON::Value> expected = JSON::parse(strings::format(
       "[{"
           "\"statistics\":{"
-              "\"cpus_limit\":%g,"
-              "\"mem_limit_bytes\":%lu"
+              "\"cpus_soft_limit\":%g,"
+              "\"mem_limit_bytes\":%lu,"
+              "\"mem_soft_limit_bytes\":%lu"
           "}"
       "}]",
       1 + slave::DEFAULT_EXECUTOR_CPUS,
+      (Megabytes(32) + slave::DEFAULT_EXECUTOR_MEM).bytes(),
       (Megabytes(32) + slave::DEFAULT_EXECUTOR_MEM).bytes()).get());
 
   ASSERT_SOME(expected);
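
The new _usage() helper above maps requests to the *_soft_limit fields and
limits to the hard-limit fields, substituting the node's total CPUs or memory
when a limit is infinite. The sketch below is a self-contained restatement of
that mapping with the node totals passed in explicitly instead of detected
via os::cpus()/os::memory(); the cgroups_enable_cfs fallback for cpus_limit
is left out for brevity.

    #include <cmath>
    #include <cstdint>
    #include <iostream>
    #include <optional>

    struct Limits
    {
      std::optional<double> cpusSoftLimit;
      std::optional<double> cpusLimit;
      std::optional<uint64_t> memSoftLimitBytes;
      std::optional<uint64_t> memLimitBytes;
    };

    // Requests -> soft limits, limits -> hard limits, infinite limits ->
    // node totals; the memory request doubles as the hard limit when no
    // memory limit was given.
    Limits fillLimits(
        std::optional<double> cpuRequest,
        std::optional<double> cpuLimit,           // In CPUs; may be +inf.
        std::optional<uint64_t> memRequestBytes,
        std::optional<double> memLimitMB,         // In MB; may be +inf.
        long totalCpus,
        uint64_t totalMemBytes)
    {
      Limits result;

      if (cpuRequest) {
        result.cpusSoftLimit = *cpuRequest;
      }

      if (cpuLimit) {
        result.cpusLimit = std::isinf(*cpuLimit) ? totalCpus : *cpuLimit;
      }

      if (memRequestBytes) {
        result.memSoftLimitBytes = *memRequestBytes;
      }

      if (memLimitMB) {
        result.memLimitBytes = std::isinf(*memLimitMB)
          ? totalMemBytes
          : static_cast<uint64_t>(*memLimitMB) * 1024 * 1024;
      } else if (memRequestBytes) {
        result.memLimitBytes = *memRequestBytes;
      }

      return result;
    }

    int main()
    {
      // cpus request 1, cpus limit 2, mem request 1GB, mem limit 2GB: the
      // same shape the updated ROOT_DOCKER_Usage test (commit 05/05) asserts.
      Limits limits = fillLimits(
          1.0,
          2.0,
          uint64_t(1024) * 1024 * 1024,
          2048.0,
          8,
          uint64_t(16) * 1024 * 1024 * 1024);

      std::cout << *limits.cpusSoftLimit << " " << *limits.cpusLimit << " "
                << *limits.memSoftLimitBytes << " " << *limits.memLimitBytes
                << std::endl;
    }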


[mesos] 05/05: Updated Docker containerizer's `usage()` to support resource limits.

Posted by qi...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

qianzhang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/mesos.git

commit 95b806474da6f63ec8d50904a4336f903c0c5d08
Author: Qian Zhang <zh...@gmail.com>
AuthorDate: Tue Apr 21 09:30:26 2020 +0800

    Updated Docker containerizer's `usage()` to support resource limits.
    
    Review: https://reviews.apache.org/r/72402
---
 src/slave/containerizer/docker.cpp                 | 105 +++++++++++++++++++--
 .../containerizer/docker_containerizer_tests.cpp   |  25 +++--
 2 files changed, 116 insertions(+), 14 deletions(-)

diff --git a/src/slave/containerizer/docker.cpp b/src/slave/containerizer/docker.cpp
index 8aed025..431f7c6 100644
--- a/src/slave/containerizer/docker.cpp
+++ b/src/slave/containerizer/docker.cpp
@@ -2082,16 +2082,105 @@ Future<ResourceStatistics> DockerContainerizerProcess::usage(
     result = cgroupStats.get();
 #endif // __linux__
 
-    // Set the resource allocations.
-    const Resources& resource = container->resourceRequests;
-    const Option<Bytes> mem = resource.mem();
-    if (mem.isSome()) {
-      result.set_mem_limit_bytes(mem->bytes());
+    Option<double> cpuRequest, cpuLimit, memLimit;
+    Option<Bytes> memRequest;
+
+    // For command tasks, we should subtract the default resources (0.1 cpus and
+    // 32MB memory) for command executor from the container's resource requests
+    // and limits, otherwise we would report wrong resource statistics.
+    if (container->resourceRequests.cpus().isSome()) {
+      if (container->generatedForCommandTask) {
+        cpuRequest =
+          container->resourceRequests.cpus().get() - DEFAULT_EXECUTOR_CPUS;
+      } else {
+        cpuRequest = container->resourceRequests.cpus();
+      }
+    }
+
+    if (container->resourceRequests.mem().isSome()) {
+      if (container->generatedForCommandTask) {
+        memRequest =
+          container->resourceRequests.mem().get() - DEFAULT_EXECUTOR_MEM;
+      } else {
+        memRequest = container->resourceRequests.mem();
+      }
+    }
+
+    foreach (auto&& limit, container->resourceLimits) {
+      if (limit.first == "cpus") {
+        if (container->generatedForCommandTask &&
+            !std::isinf(limit.second.value())) {
+          cpuLimit = limit.second.value() - DEFAULT_EXECUTOR_CPUS;
+        } else {
+          cpuLimit = limit.second.value();
+        }
+      } else if (limit.first == "mem") {
+        if (container->generatedForCommandTask &&
+            !std::isinf(limit.second.value())) {
+          memLimit = limit.second.value() -
+                     DEFAULT_EXECUTOR_MEM.bytes() / Bytes::MEGABYTES;
+        } else {
+          memLimit = limit.second.value();
+        }
+      }
+    }
+
+    if (cpuRequest.isSome()) {
+      result.set_cpus_soft_limit(cpuRequest.get());
+    }
+
+    if (cpuLimit.isSome()) {
+      // Get the total CPU numbers of this node, we will use
+      // it to set container's hard CPU limit if the CPU limit
+      // specified by framework is infinity.
+      static Option<long> totalCPUs;
+      if (totalCPUs.isNone()) {
+        Try<long> cpus = os::cpus();
+        if (cpus.isError()) {
+          return Failure(
+              "Failed to auto-detect the number of cpus: " + cpus.error());
+        }
+
+        totalCPUs = cpus.get();
+      }
+
+      CHECK_SOME(totalCPUs);
+
+      result.set_cpus_limit(
+          std::isinf(cpuLimit.get()) ? totalCPUs.get() : cpuLimit.get());
+#ifdef __linux__
+    } else if (flags.cgroups_enable_cfs && cpuRequest.isSome()) {
+      result.set_cpus_limit(cpuRequest.get());
+#endif
     }
 
-    const Option<double> cpus = resource.cpus();
-    if (cpus.isSome()) {
-      result.set_cpus_limit(cpus.get());
+    if (memLimit.isSome()) {
+      // Get the total memory of this node, we will use it to
+      // set container's hard memory limit if the memory limit
+      // specified by framework is infinity.
+      static Option<Bytes> totalMem;
+      if (totalMem.isNone()) {
+        Try<os::Memory> mem = os::memory();
+        if (mem.isError()) {
+          return Failure(
+              "Failed to auto-detect the size of main memory: " + mem.error());
+        }
+
+        totalMem = mem->total;
+      }
+
+      CHECK_SOME(totalMem);
+
+      result.set_mem_limit_bytes(
+          std::isinf(memLimit.get())
+            ? totalMem->bytes()
+            : Megabytes(static_cast<uint64_t>(memLimit.get())).bytes());
+
+      if (memRequest.isSome()) {
+        result.set_mem_soft_limit_bytes(memRequest->bytes());
+      }
+    } else if (memRequest.isSome()) {
+      result.set_mem_limit_bytes(memRequest->bytes());
     }
 
     return result;
diff --git a/src/tests/containerizer/docker_containerizer_tests.cpp b/src/tests/containerizer/docker_containerizer_tests.cpp
index 42692dc..fc3a651 100644
--- a/src/tests/containerizer/docker_containerizer_tests.cpp
+++ b/src/tests/containerizer/docker_containerizer_tests.cpp
@@ -946,7 +946,7 @@ TEST_F(DockerContainerizerTest, ROOT_DOCKER_Usage)
   ASSERT_SOME(master);
 
   slave::Flags flags = CreateSlaveFlags();
-  flags.resources = Option<string>("cpus:2;mem:1024");
+  flags.resources = Option<string>("cpus:1;mem:1024");
 
   MockDocker* mockDocker =
     new MockDocker(tests::flags.docker, tests::flags.docker_socket);
@@ -1000,10 +1000,22 @@ TEST_F(DockerContainerizerTest, ROOT_DOCKER_Usage)
   command.set_value("dd if=/dev/zero of=/dev/null");
 #endif // __WINDOWS__
 
+  Value::Scalar cpuLimit, memLimit;
+  cpuLimit.set_value(2);
+  memLimit.set_value(2048);
+
+  google::protobuf::Map<string, Value::Scalar> resourceLimits;
+  resourceLimits.insert({"cpus", cpuLimit});
+  resourceLimits.insert({"mem", memLimit});
+
   TaskInfo task = createTask(
       offers->front().slave_id(),
       offers->front().resources(),
-      command);
+      command,
+      None(),
+      "test-task",
+      id::UUID::random().toString(),
+      resourceLimits);
 
   // TODO(tnachen): Use local image to test if possible.
   task.mutable_container()->CopyFrom(createDockerInfo(DOCKER_TEST_IMAGE));
@@ -1056,10 +1068,11 @@ TEST_F(DockerContainerizerTest, ROOT_DOCKER_Usage)
     waited += Milliseconds(200);
   } while (waited < Seconds(3));
 
-  // Usage includes the executor resources.
-  EXPECT_EQ(2.0 + slave::DEFAULT_EXECUTOR_CPUS, statistics.cpus_limit());
-  EXPECT_EQ((Gigabytes(1) + slave::DEFAULT_EXECUTOR_MEM).bytes(),
-            statistics.mem_limit_bytes());
+  EXPECT_EQ(1, statistics.cpus_soft_limit());
+  EXPECT_EQ(2, statistics.cpus_limit());
+  EXPECT_EQ(Gigabytes(1).bytes(), statistics.mem_soft_limit_bytes());
+  EXPECT_EQ(Gigabytes(2).bytes(), statistics.mem_limit_bytes());
+
 #ifndef __WINDOWS__
   // These aren't provided by the Windows Container APIs, so skip them.
   EXPECT_LT(0, statistics.cpus_user_time_secs());
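
A quick numeric sketch of the command-task adjustment above: the container
also hosts the command executor, so its recorded requests and finite limits
include the executor's default 0.1 cpus and 32MB, and usage() subtracts them
again so the reported values match what the task itself asked for, which is
exactly what the updated ROOT_DOCKER_Usage test asserts (infinite limits are
passed through untouched).

    #include <iostream>

    // Default command-executor overhead stated in the patch's comment above.
    constexpr double DEFAULT_EXECUTOR_CPUS = 0.1;
    constexpr double DEFAULT_EXECUTOR_MEM_MB = 32.0;

    int main()
    {
      // The task asks for cpus:1, mem:1024 with limits cpus:2, mem:2048 (MB),
      // so the container (task + command executor) carries the overhead.
      double containerCpusRequest = 1.0 + DEFAULT_EXECUTOR_CPUS;       // 1.1
      double containerMemRequestMB = 1024.0 + DEFAULT_EXECUTOR_MEM_MB; // 1056
      double containerCpusLimit = 2.0 + DEFAULT_EXECUTOR_CPUS;         // 2.1
      double containerMemLimitMB = 2048.0 + DEFAULT_EXECUTOR_MEM_MB;   // 2080

      // usage() subtracts the overhead again before reporting.
      std::cout << containerCpusRequest - DEFAULT_EXECUTOR_CPUS << std::endl;
      std::cout << containerMemRequestMB - DEFAULT_EXECUTOR_MEM_MB << std::endl;
      std::cout << containerCpusLimit - DEFAULT_EXECUTOR_CPUS << std::endl;
      std::cout << containerMemLimitMB - DEFAULT_EXECUTOR_MEM_MB << std::endl;
      // Prints 1, 1024, 2, 2048: the values the test expects.
    }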


[mesos] 01/05: Added `cpus_soft_limit` field to `ResourceStatistics` protobuf message.

Posted by qi...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

qianzhang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/mesos.git

commit 0a9d97f801551d273b17d6c4515b4a8884c0be91
Author: Qian Zhang <zh...@gmail.com>
AuthorDate: Fri Apr 17 20:53:03 2020 +0800

    Added `cpus_soft_limit` field to `ResourceStatistics` protobuf message.
    
    Review: https://reviews.apache.org/r/72398
---
 include/mesos/mesos.proto    | 9 ++++++---
 include/mesos/v1/mesos.proto | 9 ++++++---
 2 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/include/mesos/mesos.proto b/include/mesos/mesos.proto
index 470343c..5f795f5 100644
--- a/include/mesos/mesos.proto
+++ b/include/mesos/mesos.proto
@@ -1759,9 +1759,12 @@ message ResourceStatistics {
   optional double cpus_user_time_secs = 2;
   optional double cpus_system_time_secs = 3;
 
-  // Number of CPUs allocated.
+  // Hard CPU limit.
   optional double cpus_limit = 4;
 
+  // Soft CPU limit.
+  optional double cpus_soft_limit = 45;
+
   // cpu.stat on process throttling (for contention issues).
   optional uint32 cpus_nr_periods = 7;
   optional uint32 cpus_nr_throttled = 8;
@@ -1779,10 +1782,10 @@ message ResourceStatistics {
   // Total memory + swap usage. This is set if swap is enabled.
   optional uint64 mem_total_memsw_bytes = 37;
 
-  // Hard memory limit for a container.
+  // Hard memory limit.
   optional uint64 mem_limit_bytes = 6;
 
-  // Soft memory limit for a container.
+  // Soft memory limit.
   optional uint64 mem_soft_limit_bytes = 38;
 
   // Broken out memory usage information: pagecache, rss (anonymous),
diff --git a/include/mesos/v1/mesos.proto b/include/mesos/v1/mesos.proto
index ecf717a..07d2f40 100644
--- a/include/mesos/v1/mesos.proto
+++ b/include/mesos/v1/mesos.proto
@@ -1719,9 +1719,12 @@ message ResourceStatistics {
   optional double cpus_user_time_secs = 2;
   optional double cpus_system_time_secs = 3;
 
-  // Number of CPUs allocated.
+  // Hard CPU limit.
   optional double cpus_limit = 4;
 
+  // Soft CPU limit.
+  optional double cpus_soft_limit = 45;
+
   // cpu.stat on process throttling (for contention issues).
   optional uint32 cpus_nr_periods = 7;
   optional uint32 cpus_nr_throttled = 8;
@@ -1739,10 +1742,10 @@ message ResourceStatistics {
   // Total memory + swap usage. This is set if swap is enabled.
   optional uint64 mem_total_memsw_bytes = 37;
 
-  // Hard memory limit for a container.
+  // Hard memory limit.
   optional uint64 mem_limit_bytes = 6;
 
-  // Soft memory limit for a container.
+  // Soft memory limit.
   optional uint64 mem_soft_limit_bytes = 38;
 
   // Broken out memory usage information: pagecache, rss (anonymous),
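
A minimal consumer-side sketch of the new field, assuming the standard C++
protobuf bindings generated from mesos.proto and a mesos/mesos.pb.h include
path (both assumptions of this example, not spelled out in the patch): it
shows how a monitoring tool could distinguish the guaranteed share
(cpus_soft_limit) from the throttling ceiling (cpus_limit) once both are
reported.

    #include <iostream>

    #include <mesos/mesos.pb.h>  // Assumed include path for generated code.

    int main()
    {
      mesos::ResourceStatistics statistics;
      statistics.set_cpus_soft_limit(1.0);  // Request: the guaranteed share.
      statistics.set_cpus_limit(2.0);       // Limit: the throttling ceiling.

      if (statistics.has_cpus_soft_limit() && statistics.has_cpus_limit()) {
        std::cout << "burst headroom: "
                  << statistics.cpus_limit() - statistics.cpus_soft_limit()
                  << " cpus" << std::endl;
      }
    }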