You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@mesos.apache.org by ji...@apache.org on 2015/10/26 19:40:36 UTC
[12/12] mesos git commit: Relocated MesosContainerizer specific files
to the correct location.
Relocated MesosContainerizer specific files to the correct location.
Review: https://reviews.apache.org/r/39360
Project: http://git-wip-us.apache.org/repos/asf/mesos/repo
Commit: http://git-wip-us.apache.org/repos/asf/mesos/commit/9a722d74
Tree: http://git-wip-us.apache.org/repos/asf/mesos/tree/9a722d74
Diff: http://git-wip-us.apache.org/repos/asf/mesos/diff/9a722d74
Branch: refs/heads/master
Commit: 9a722d742b2897ea3c6d5574273010c3b1546a58
Parents: 974906b
Author: Gilbert Song <gi...@mesosphere.io>
Authored: Mon Oct 26 10:52:02 2015 -0700
Committer: Jie Yu <yu...@gmail.com>
Committed: Mon Oct 26 10:52:07 2015 -0700
----------------------------------------------------------------------
src/Makefile.am | 118 +-
src/docker/docker.cpp | 4 +-
src/examples/test_isolator_module.cpp | 2 +-
src/slave/containerizer/docker.cpp | 2 +-
.../isolators/cgroups/constants.hpp | 44 -
.../isolators/cgroups/cpushare.cpp | 572 ---
.../isolators/cgroups/cpushare.hpp | 122 -
.../containerizer/isolators/cgroups/mem.cpp | 733 ----
.../containerizer/isolators/cgroups/mem.hpp | 141 -
.../isolators/cgroups/perf_event.cpp | 435 --
.../isolators/cgroups/perf_event.hpp | 127 -
.../isolators/filesystem/linux.cpp | 893 -----
.../isolators/filesystem/linux.hpp | 149 -
.../isolators/filesystem/posix.cpp | 294 --
.../isolators/filesystem/posix.hpp | 89 -
.../isolators/filesystem/shared.cpp | 268 --
.../isolators/filesystem/shared.hpp | 79 -
.../containerizer/isolators/namespaces/pid.cpp | 266 --
.../containerizer/isolators/namespaces/pid.hpp | 91 -
.../containerizer/isolators/network/helper.cpp | 35 -
.../isolators/network/port_mapping.cpp | 3792 ------------------
.../isolators/network/port_mapping.hpp | 403 --
src/slave/containerizer/isolators/posix.hpp | 213 -
.../containerizer/isolators/posix/disk.cpp | 525 ---
.../containerizer/isolators/posix/disk.hpp | 151 -
src/slave/containerizer/linux_launcher.cpp | 2 +-
src/slave/containerizer/mesos/containerizer.cpp | 23 +-
.../mesos/isolators/cgroups/constants.hpp | 44 +
.../mesos/isolators/cgroups/cpushare.cpp | 572 +++
.../mesos/isolators/cgroups/cpushare.hpp | 122 +
.../mesos/isolators/cgroups/mem.cpp | 733 ++++
.../mesos/isolators/cgroups/mem.hpp | 141 +
.../mesos/isolators/cgroups/perf_event.cpp | 435 ++
.../mesos/isolators/cgroups/perf_event.hpp | 127 +
.../mesos/isolators/filesystem/linux.cpp | 893 +++++
.../mesos/isolators/filesystem/linux.hpp | 149 +
.../mesos/isolators/filesystem/posix.cpp | 294 ++
.../mesos/isolators/filesystem/posix.hpp | 89 +
.../mesos/isolators/filesystem/shared.cpp | 268 ++
.../mesos/isolators/filesystem/shared.hpp | 79 +
.../mesos/isolators/namespaces/pid.cpp | 266 ++
.../mesos/isolators/namespaces/pid.hpp | 91 +
.../mesos/isolators/network/helper.cpp | 35 +
.../mesos/isolators/network/port_mapping.cpp | 3792 ++++++++++++++++++
.../mesos/isolators/network/port_mapping.hpp | 403 ++
.../containerizer/mesos/isolators/posix.hpp | 213 +
.../mesos/isolators/posix/disk.cpp | 525 +++
.../mesos/isolators/posix/disk.hpp | 151 +
.../mesos/provisioner/appc/paths.cpp | 85 +
.../mesos/provisioner/appc/paths.hpp | 83 +
.../mesos/provisioner/appc/spec.cpp | 104 +
.../mesos/provisioner/appc/spec.hpp | 54 +
.../mesos/provisioner/appc/store.cpp | 288 ++
.../mesos/provisioner/appc/store.hpp | 63 +
.../containerizer/mesos/provisioner/backend.cpp | 62 +
.../containerizer/mesos/provisioner/backend.hpp | 67 +
.../mesos/provisioner/backends/bind.cpp | 250 ++
.../mesos/provisioner/backends/bind.hpp | 75 +
.../mesos/provisioner/backends/copy.cpp | 203 +
.../mesos/provisioner/backends/copy.hpp | 69 +
.../mesos/provisioner/docker/local_puller.cpp | 355 ++
.../mesos/provisioner/docker/local_puller.hpp | 66 +
.../mesos/provisioner/docker/message.hpp | 116 +
.../mesos/provisioner/docker/message.proto | 100 +
.../provisioner/docker/metadata_manager.cpp | 232 ++
.../provisioner/docker/metadata_manager.hpp | 106 +
.../mesos/provisioner/docker/paths.cpp | 115 +
.../mesos/provisioner/docker/paths.hpp | 98 +
.../mesos/provisioner/docker/puller.cpp | 46 +
.../mesos/provisioner/docker/puller.hpp | 68 +
.../provisioner/docker/registry_client.cpp | 641 +++
.../provisioner/docker/registry_client.hpp | 164 +
.../mesos/provisioner/docker/spec.cpp | 91 +
.../mesos/provisioner/docker/spec.hpp | 50 +
.../mesos/provisioner/docker/store.cpp | 277 ++
.../mesos/provisioner/docker/store.hpp | 68 +
.../mesos/provisioner/docker/token_manager.cpp | 362 ++
.../mesos/provisioner/docker/token_manager.hpp | 179 +
.../containerizer/mesos/provisioner/paths.cpp | 192 +
.../containerizer/mesos/provisioner/paths.hpp | 80 +
.../mesos/provisioner/provisioner.cpp | 444 ++
.../mesos/provisioner/provisioner.hpp | 94 +
.../containerizer/mesos/provisioner/store.cpp | 79 +
.../containerizer/mesos/provisioner/store.hpp | 75 +
.../containerizer/provisioner/appc/paths.cpp | 85 -
.../containerizer/provisioner/appc/paths.hpp | 83 -
.../containerizer/provisioner/appc/spec.cpp | 104 -
.../containerizer/provisioner/appc/spec.hpp | 54 -
.../containerizer/provisioner/appc/store.cpp | 288 --
.../containerizer/provisioner/appc/store.hpp | 63 -
src/slave/containerizer/provisioner/backend.cpp | 62 -
src/slave/containerizer/provisioner/backend.hpp | 67 -
.../containerizer/provisioner/backends/bind.cpp | 250 --
.../containerizer/provisioner/backends/bind.hpp | 75 -
.../containerizer/provisioner/backends/copy.cpp | 203 -
.../containerizer/provisioner/backends/copy.hpp | 69 -
.../provisioner/docker/local_puller.cpp | 355 --
.../provisioner/docker/local_puller.hpp | 66 -
.../provisioner/docker/message.hpp | 116 -
.../provisioner/docker/message.proto | 100 -
.../provisioner/docker/metadata_manager.cpp | 232 --
.../provisioner/docker/metadata_manager.hpp | 106 -
.../containerizer/provisioner/docker/paths.cpp | 115 -
.../containerizer/provisioner/docker/paths.hpp | 98 -
.../containerizer/provisioner/docker/puller.cpp | 46 -
.../containerizer/provisioner/docker/puller.hpp | 68 -
.../provisioner/docker/registry_client.cpp | 641 ---
.../provisioner/docker/registry_client.hpp | 164 -
.../containerizer/provisioner/docker/spec.cpp | 91 -
.../containerizer/provisioner/docker/spec.hpp | 50 -
.../containerizer/provisioner/docker/store.cpp | 277 --
.../containerizer/provisioner/docker/store.hpp | 68 -
.../provisioner/docker/token_manager.cpp | 362 --
.../provisioner/docker/token_manager.hpp | 179 -
src/slave/containerizer/provisioner/paths.cpp | 192 -
src/slave/containerizer/provisioner/paths.hpp | 80 -
.../containerizer/provisioner/provisioner.cpp | 444 --
.../containerizer/provisioner/provisioner.hpp | 94 -
src/slave/containerizer/provisioner/store.cpp | 79 -
src/slave/containerizer/provisioner/store.hpp | 75 -
.../containerizer/filesystem_isolator_tests.cpp | 4 +-
src/tests/containerizer/isolator_tests.cpp | 12 +-
src/tests/containerizer/port_mapping_tests.cpp | 2 +-
src/tests/containerizer/provisioner.hpp | 2 +-
.../containerizer/provisioner_appc_tests.cpp | 8 +-
.../containerizer/provisioner_backend_tests.cpp | 4 +-
.../containerizer/provisioner_docker_tests.cpp | 12 +-
src/tests/disk_quota_tests.cpp | 2 +-
128 files changed, 15021 insertions(+), 15022 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/mesos/blob/9a722d74/src/Makefile.am
----------------------------------------------------------------------
diff --git a/src/Makefile.am b/src/Makefile.am
index 98cbafc..5675b91 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -275,8 +275,8 @@ BUILT_SOURCES += $(STATE_PROTOS)
CLEANFILES += $(STATE_PROTOS)
DOCKER_PROVISIONER_PROTOS = \
- slave/containerizer/provisioner/docker/message.pb.cc \
- slave/containerizer/provisioner/docker/message.pb.h
+ slave/containerizer/mesos/provisioner/docker/message.pb.cc \
+ slave/containerizer/mesos/provisioner/docker/message.pb.h
BUILT_SOURCES += $(DOCKER_PROVISIONER_PROTOS)
CLEANFILES += $(DOCKER_PROVISIONER_PROTOS)
@@ -541,28 +541,28 @@ libmesos_no_3rdparty_la_SOURCES = \
slave/containerizer/external_containerizer.cpp \
slave/containerizer/fetcher.cpp \
slave/containerizer/isolator.cpp \
- slave/containerizer/isolators/filesystem/posix.cpp \
- slave/containerizer/isolators/posix/disk.cpp \
slave/containerizer/launcher.cpp \
slave/containerizer/mesos/containerizer.cpp \
+ slave/containerizer/mesos/isolators/filesystem/posix.cpp \
+ slave/containerizer/mesos/isolators/posix/disk.cpp \
slave/containerizer/mesos/launch.cpp \
- slave/containerizer/provisioner/paths.cpp \
- slave/containerizer/provisioner/provisioner.cpp \
- slave/containerizer/provisioner/store.cpp \
- slave/containerizer/provisioner/appc/paths.cpp \
- slave/containerizer/provisioner/appc/spec.cpp \
- slave/containerizer/provisioner/appc/store.cpp \
- slave/containerizer/provisioner/backend.cpp \
- slave/containerizer/provisioner/backends/copy.cpp \
- slave/containerizer/provisioner/docker/local_puller.cpp \
- slave/containerizer/provisioner/docker/message.proto \
- slave/containerizer/provisioner/docker/metadata_manager.cpp \
- slave/containerizer/provisioner/docker/paths.cpp \
- slave/containerizer/provisioner/docker/puller.cpp \
- slave/containerizer/provisioner/docker/registry_client.cpp \
- slave/containerizer/provisioner/docker/spec.cpp \
- slave/containerizer/provisioner/docker/store.cpp \
- slave/containerizer/provisioner/docker/token_manager.cpp \
+ slave/containerizer/mesos/provisioner/paths.cpp \
+ slave/containerizer/mesos/provisioner/provisioner.cpp \
+ slave/containerizer/mesos/provisioner/store.cpp \
+ slave/containerizer/mesos/provisioner/appc/paths.cpp \
+ slave/containerizer/mesos/provisioner/appc/spec.cpp \
+ slave/containerizer/mesos/provisioner/appc/store.cpp \
+ slave/containerizer/mesos/provisioner/backend.cpp \
+ slave/containerizer/mesos/provisioner/backends/copy.cpp \
+ slave/containerizer/mesos/provisioner/docker/local_puller.cpp \
+ slave/containerizer/mesos/provisioner/docker/message.proto \
+ slave/containerizer/mesos/provisioner/docker/metadata_manager.cpp \
+ slave/containerizer/mesos/provisioner/docker/paths.cpp \
+ slave/containerizer/mesos/provisioner/docker/puller.cpp \
+ slave/containerizer/mesos/provisioner/docker/registry_client.cpp \
+ slave/containerizer/mesos/provisioner/docker/spec.cpp \
+ slave/containerizer/mesos/provisioner/docker/store.cpp \
+ slave/containerizer/mesos/provisioner/docker/token_manager.cpp \
slave/resource_estimators/noop.cpp \
usage/usage.cpp \
v1/attributes.cpp \
@@ -730,14 +730,14 @@ if OS_LINUX
libmesos_no_3rdparty_la_SOURCES += linux/fs.cpp
libmesos_no_3rdparty_la_SOURCES += linux/perf.cpp
libmesos_no_3rdparty_la_SOURCES += linux/systemd.cpp
- libmesos_no_3rdparty_la_SOURCES += slave/containerizer/isolators/cgroups/cpushare.cpp
- libmesos_no_3rdparty_la_SOURCES += slave/containerizer/isolators/cgroups/mem.cpp
- libmesos_no_3rdparty_la_SOURCES += slave/containerizer/isolators/cgroups/perf_event.cpp
- libmesos_no_3rdparty_la_SOURCES += slave/containerizer/isolators/namespaces/pid.cpp
- libmesos_no_3rdparty_la_SOURCES += slave/containerizer/isolators/filesystem/linux.cpp
- libmesos_no_3rdparty_la_SOURCES += slave/containerizer/isolators/filesystem/shared.cpp
libmesos_no_3rdparty_la_SOURCES += slave/containerizer/linux_launcher.cpp
- libmesos_no_3rdparty_la_SOURCES += slave/containerizer/provisioner/backends/bind.cpp
+ libmesos_no_3rdparty_la_SOURCES += slave/containerizer/mesos/isolators/cgroups/cpushare.cpp
+ libmesos_no_3rdparty_la_SOURCES += slave/containerizer/mesos/isolators/cgroups/mem.cpp
+ libmesos_no_3rdparty_la_SOURCES += slave/containerizer/mesos/isolators/cgroups/perf_event.cpp
+ libmesos_no_3rdparty_la_SOURCES += slave/containerizer/mesos/isolators/namespaces/pid.cpp
+ libmesos_no_3rdparty_la_SOURCES += slave/containerizer/mesos/isolators/filesystem/linux.cpp
+ libmesos_no_3rdparty_la_SOURCES += slave/containerizer/mesos/isolators/filesystem/shared.cpp
+ libmesos_no_3rdparty_la_SOURCES += slave/containerizer/mesos/provisioner/backends/bind.cpp
else
EXTRA_DIST += linux/cgroups.cpp
EXTRA_DIST += linux/fs.cpp
@@ -781,8 +781,8 @@ if WITH_NETWORK_ISOLATOR
linux/routing/queueing/statistics.hpp
libmesos_no_3rdparty_la_SOURCES += \
- slave/containerizer/isolators/network/port_mapping.cpp \
- slave/containerizer/isolators/network/port_mapping.hpp
+ slave/containerizer/mesos/isolators/network/port_mapping.cpp \
+ slave/containerizer/mesos/isolators/network/port_mapping.hpp
endif
libmesos_no_3rdparty_la_SOURCES += \
@@ -852,36 +852,36 @@ libmesos_no_3rdparty_la_SOURCES += \
slave/containerizer/isolator.hpp \
slave/containerizer/launcher.hpp \
slave/containerizer/linux_launcher.hpp \
- slave/containerizer/provisioner/paths.hpp \
- slave/containerizer/provisioner/provisioner.hpp \
- slave/containerizer/provisioner/store.hpp \
- slave/containerizer/provisioner/appc/paths.hpp \
- slave/containerizer/provisioner/appc/spec.hpp \
- slave/containerizer/provisioner/appc/store.hpp \
- slave/containerizer/provisioner/backend.hpp \
- slave/containerizer/provisioner/backends/bind.hpp \
- slave/containerizer/provisioner/backends/copy.hpp \
- slave/containerizer/provisioner/docker/local_puller.hpp \
- slave/containerizer/provisioner/docker/message.hpp \
- slave/containerizer/provisioner/docker/metadata_manager.hpp \
- slave/containerizer/provisioner/docker/paths.hpp \
- slave/containerizer/provisioner/docker/puller.hpp \
- slave/containerizer/provisioner/docker/registry_client.hpp \
- slave/containerizer/provisioner/docker/spec.hpp \
- slave/containerizer/provisioner/docker/store.hpp \
- slave/containerizer/provisioner/docker/token_manager.hpp \
- slave/containerizer/isolators/posix.hpp \
- slave/containerizer/isolators/posix/disk.hpp \
- slave/containerizer/isolators/cgroups/constants.hpp \
- slave/containerizer/isolators/cgroups/cpushare.hpp \
- slave/containerizer/isolators/cgroups/mem.hpp \
- slave/containerizer/isolators/cgroups/perf_event.hpp \
- slave/containerizer/isolators/namespaces/pid.hpp \
- slave/containerizer/isolators/filesystem/linux.hpp \
- slave/containerizer/isolators/filesystem/posix.hpp \
- slave/containerizer/isolators/filesystem/shared.hpp \
slave/containerizer/mesos/containerizer.hpp \
+ slave/containerizer/mesos/isolators/posix.hpp \
+ slave/containerizer/mesos/isolators/posix/disk.hpp \
+ slave/containerizer/mesos/isolators/cgroups/constants.hpp \
+ slave/containerizer/mesos/isolators/cgroups/cpushare.hpp \
+ slave/containerizer/mesos/isolators/cgroups/mem.hpp \
+ slave/containerizer/mesos/isolators/cgroups/perf_event.hpp \
+ slave/containerizer/mesos/isolators/namespaces/pid.hpp \
+ slave/containerizer/mesos/isolators/filesystem/linux.hpp \
+ slave/containerizer/mesos/isolators/filesystem/posix.hpp \
+ slave/containerizer/mesos/isolators/filesystem/shared.hpp \
slave/containerizer/mesos/launch.hpp \
+ slave/containerizer/mesos/provisioner/paths.hpp \
+ slave/containerizer/mesos/provisioner/provisioner.hpp \
+ slave/containerizer/mesos/provisioner/store.hpp \
+ slave/containerizer/mesos/provisioner/appc/paths.hpp \
+ slave/containerizer/mesos/provisioner/appc/spec.hpp \
+ slave/containerizer/mesos/provisioner/appc/store.hpp \
+ slave/containerizer/mesos/provisioner/backend.hpp \
+ slave/containerizer/mesos/provisioner/backends/bind.hpp \
+ slave/containerizer/mesos/provisioner/backends/copy.hpp \
+ slave/containerizer/mesos/provisioner/docker/local_puller.hpp \
+ slave/containerizer/mesos/provisioner/docker/message.hpp \
+ slave/containerizer/mesos/provisioner/docker/metadata_manager.hpp \
+ slave/containerizer/mesos/provisioner/docker/paths.hpp \
+ slave/containerizer/mesos/provisioner/docker/puller.hpp \
+ slave/containerizer/mesos/provisioner/docker/registry_client.hpp \
+ slave/containerizer/mesos/provisioner/docker/spec.hpp \
+ slave/containerizer/mesos/provisioner/docker/store.hpp \
+ slave/containerizer/mesos/provisioner/docker/token_manager.hpp \
slave/qos_controllers/noop.hpp \
slave/resource_estimators/noop.hpp \
tests/cluster.hpp \
@@ -1120,7 +1120,7 @@ mesos_containerizer_LDADD = libmesos.la $(LDADD)
if WITH_NETWORK_ISOLATOR
pkglibexec_PROGRAMS += mesos-network-helper
-mesos_network_helper_SOURCES = slave/containerizer/isolators/network/helper.cpp
+mesos_network_helper_SOURCES = slave/containerizer/mesos/isolators/network/helper.cpp
mesos_network_helper_CPPFLAGS = $(MESOS_CPPFLAGS)
mesos_network_helper_LDADD = libmesos.la $(LDADD)
endif
http://git-wip-us.apache.org/repos/asf/mesos/blob/9a722d74/src/docker/docker.cpp
----------------------------------------------------------------------
diff --git a/src/docker/docker.cpp b/src/docker/docker.cpp
index 56d63dc..4ebca66 100755
--- a/src/docker/docker.cpp
+++ b/src/docker/docker.cpp
@@ -38,8 +38,8 @@
#include "linux/cgroups.hpp"
#endif // __linux__
-#include "slave/containerizer/isolators/cgroups/cpushare.hpp"
-#include "slave/containerizer/isolators/cgroups/mem.hpp"
+#include "slave/containerizer/mesos/isolators/cgroups/cpushare.hpp"
+#include "slave/containerizer/mesos/isolators/cgroups/mem.hpp"
#include "slave/constants.hpp"
http://git-wip-us.apache.org/repos/asf/mesos/blob/9a722d74/src/examples/test_isolator_module.cpp
----------------------------------------------------------------------
diff --git a/src/examples/test_isolator_module.cpp b/src/examples/test_isolator_module.cpp
index 577dfca..8123603 100644
--- a/src/examples/test_isolator_module.cpp
+++ b/src/examples/test_isolator_module.cpp
@@ -25,7 +25,7 @@
#include <stout/try.hpp>
-#include "slave/containerizer/isolators/posix.hpp"
+#include "slave/containerizer/mesos/isolators/posix.hpp"
#include "slave/flags.hpp"
using namespace mesos;
http://git-wip-us.apache.org/repos/asf/mesos/blob/9a722d74/src/slave/containerizer/docker.cpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/docker.cpp b/src/slave/containerizer/docker.cpp
index 7022958..276b6e0 100644
--- a/src/slave/containerizer/docker.cpp
+++ b/src/slave/containerizer/docker.cpp
@@ -48,7 +48,7 @@
#include "slave/containerizer/docker.hpp"
#include "slave/containerizer/fetcher.hpp"
-#include "slave/containerizer/isolators/cgroups/constants.hpp"
+#include "slave/containerizer/mesos/isolators/cgroups/constants.hpp"
#include "usage/usage.hpp"
http://git-wip-us.apache.org/repos/asf/mesos/blob/9a722d74/src/slave/containerizer/isolators/cgroups/constants.hpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/isolators/cgroups/constants.hpp b/src/slave/containerizer/isolators/cgroups/constants.hpp
deleted file mode 100644
index db9dde0..0000000
--- a/src/slave/containerizer/isolators/cgroups/constants.hpp
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __CGROUPS_ISOLATOR_CONSTANTS_HPP__
-#define __CGROUPS_ISOLATOR_CONSTANTS_HPP__
-
-#include <stout/bytes.hpp>
-#include <stout/duration.hpp>
-
-namespace mesos {
-namespace internal {
-namespace slave {
-
-// CPU subsystem constants.
-const uint64_t CPU_SHARES_PER_CPU = 1024;
-const uint64_t CPU_SHARES_PER_CPU_REVOCABLE = 10;
-const uint64_t MIN_CPU_SHARES = 2; // Linux constant.
-const Duration CPU_CFS_PERIOD = Milliseconds(100); // Linux default.
-const Duration MIN_CPU_CFS_QUOTA = Milliseconds(1);
-
-
-// Memory subsystem constants.
-const Bytes MIN_MEMORY = Megabytes(32);
-
-} // namespace slave {
-} // namespace internal {
-} // namespace mesos {
-
-#endif // __CGROUPS_ISOLATOR_CONSTANTS_HPP__
http://git-wip-us.apache.org/repos/asf/mesos/blob/9a722d74/src/slave/containerizer/isolators/cgroups/cpushare.cpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/isolators/cgroups/cpushare.cpp b/src/slave/containerizer/isolators/cgroups/cpushare.cpp
deleted file mode 100644
index ba748c6..0000000
--- a/src/slave/containerizer/isolators/cgroups/cpushare.cpp
+++ /dev/null
@@ -1,572 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <stdint.h>
-
-#include <mesos/type_utils.hpp>
-#include <mesos/values.hpp>
-
-#include <process/collect.hpp>
-#include <process/defer.hpp>
-#include <process/pid.hpp>
-
-#include <stout/bytes.hpp>
-#include <stout/check.hpp>
-#include <stout/error.hpp>
-#include <stout/foreach.hpp>
-#include <stout/hashset.hpp>
-#include <stout/nothing.hpp>
-#include <stout/os.hpp>
-#include <stout/path.hpp>
-#include <stout/stringify.hpp>
-#include <stout/try.hpp>
-
-#include "linux/cgroups.hpp"
-
-#include "slave/containerizer/isolators/cgroups/cpushare.hpp"
-
-using namespace process;
-
-using std::list;
-using std::set;
-using std::string;
-using std::vector;
-
-using mesos::slave::ContainerLimitation;
-using mesos::slave::ContainerPrepareInfo;
-using mesos::slave::ContainerState;
-using mesos::slave::Isolator;
-
-namespace mesos {
-namespace internal {
-namespace slave {
-
-CgroupsCpushareIsolatorProcess::CgroupsCpushareIsolatorProcess(
- const Flags& _flags,
- const hashmap<string, string>& _hierarchies,
- const vector<string>& _subsystems)
- : flags(_flags),
- hierarchies(_hierarchies),
- subsystems(_subsystems) {}
-
-
-CgroupsCpushareIsolatorProcess::~CgroupsCpushareIsolatorProcess() {}
-
-
-Try<Isolator*> CgroupsCpushareIsolatorProcess::create(const Flags& flags)
-{
- Try<string> hierarchyCpu = cgroups::prepare(
- flags.cgroups_hierarchy,
- "cpu",
- flags.cgroups_root);
-
- if (hierarchyCpu.isError()) {
- return Error(
- "Failed to prepare hierarchy for cpu subsystem: " +
- hierarchyCpu.error());
- }
-
- Try<string> hierarchyCpuacct = cgroups::prepare(
- flags.cgroups_hierarchy,
- "cpuacct",
- flags.cgroups_root);
-
- if (hierarchyCpuacct.isError()) {
- return Error(
- "Failed to prepare hierarchy for cpuacct subsystem: " +
- hierarchyCpuacct.error());
- }
-
- hashmap<string, string> hierarchies;
- vector<string> subsystems;
-
- hierarchies["cpu"] = hierarchyCpu.get();
- hierarchies["cpuacct"] = hierarchyCpuacct.get();
-
- if (hierarchyCpu.get() == hierarchyCpuacct.get()) {
- // Subsystem cpu and cpuacct are co-mounted (e.g., systemd).
- hierarchies["cpu,cpuacct"] = hierarchyCpu.get();
- subsystems.push_back("cpu,cpuacct");
-
- // Ensure that no other subsystem is attached to the hierarchy.
- Try<set<string>> _subsystems = cgroups::subsystems(hierarchyCpu.get());
- if (_subsystems.isError()) {
- return Error(
- "Failed to get the list of attached subsystems for hierarchy " +
- hierarchyCpu.get());
- } else if (_subsystems.get().size() != 2) {
- return Error(
- "Unexpected subsystems found attached to the hierarchy " +
- hierarchyCpu.get());
- }
- } else {
- // Subsystem cpu and cpuacct are mounted separately.
- subsystems.push_back("cpu");
- subsystems.push_back("cpuacct");
-
- // Ensure that no other subsystem is attached to each of the
- // hierarchy.
- Try<set<string>> _subsystems = cgroups::subsystems(hierarchyCpu.get());
- if (_subsystems.isError()) {
- return Error(
- "Failed to get the list of attached subsystems for hierarchy " +
- hierarchyCpu.get());
- } else if (_subsystems.get().size() != 1) {
- return Error(
- "Unexpected subsystems found attached to the hierarchy " +
- hierarchyCpu.get());
- }
-
- _subsystems = cgroups::subsystems(hierarchyCpuacct.get());
- if (_subsystems.isError()) {
- return Error(
- "Failed to get the list of attached subsystems for hierarchy " +
- hierarchyCpuacct.get());
- } else if (_subsystems.get().size() != 1) {
- return Error(
- "Unexpected subsystems found attached to the hierarchy " +
- hierarchyCpuacct.get());
- }
- }
-
- if (flags.cgroups_enable_cfs) {
- Try<bool> exists = cgroups::exists(
- hierarchies["cpu"],
- flags.cgroups_root,
- "cpu.cfs_quota_us");
-
- if (exists.isError() || !exists.get()) {
- return Error(
- "Failed to find 'cpu.cfs_quota_us'. Your kernel "
- "might be too old to use the CFS cgroups feature.");
- }
- }
-
- process::Owned<MesosIsolatorProcess> process(
- new CgroupsCpushareIsolatorProcess(flags, hierarchies, subsystems));
-
- return new MesosIsolator(process);
-}
-
-
-Future<Nothing> CgroupsCpushareIsolatorProcess::recover(
- const list<ContainerState>& states,
- const hashset<ContainerID>& orphans)
-{
- foreach (const ContainerState& state, states) {
- const ContainerID& containerId = state.container_id();
- const string cgroup = path::join(flags.cgroups_root, containerId.value());
-
- Try<bool> exists = cgroups::exists(hierarchies["cpu"], cgroup);
- if (exists.isError()) {
- foreachvalue (Info* info, infos) {
- delete info;
- }
- infos.clear();
- return Failure(
- "Failed to check cgroup for container " + stringify(containerId));
- }
-
- if (!exists.get()) {
- // This may occur if the executor has exited and the isolator
- // has destroyed the cgroup but the slave dies before noticing
- // this. This will be detected when the containerizer tries to
- // monitor the executor's pid.
- LOG(WARNING) << "Couldn't find cgroup for container " << containerId;
- continue;
- }
-
- infos[containerId] = new Info(containerId, cgroup);
- }
-
- // Remove orphan cgroups.
- foreach (const string& subsystem, subsystems) {
- Try<vector<string>> cgroups = cgroups::get(
- hierarchies[subsystem],
- flags.cgroups_root);
-
- if (cgroups.isError()) {
- foreachvalue (Info* info, infos) {
- delete info;
- }
- infos.clear();
- return Failure(cgroups.error());
- }
-
- foreach (const string& cgroup, cgroups.get()) {
- // Ignore the slave cgroup (see the --slave_subsystems flag).
- // TODO(idownes): Remove this when the cgroups layout is
- // updated, see MESOS-1185.
- if (cgroup == path::join(flags.cgroups_root, "slave")) {
- continue;
- }
-
- ContainerID containerId;
- containerId.set_value(Path(cgroup).basename());
-
- if (infos.contains(containerId)) {
- continue;
- }
-
- // Known orphan cgroups will be destroyed by the containerizer
- // using the normal cleanup path. See MESOS-2367 for details.
- if (orphans.contains(containerId)) {
- infos[containerId] = new Info(containerId, cgroup);
- continue;
- }
-
- LOG(INFO) << "Removing unknown orphaned cgroup '"
- << path::join(subsystem, cgroup) << "'";
-
- // We don't wait on the destroy as we don't want to block recovery.
- cgroups::destroy(
- hierarchies[subsystem],
- cgroup,
- cgroups::DESTROY_TIMEOUT);
- }
- }
-
- return Nothing();
-}
-
-
-Future<Option<ContainerPrepareInfo>> CgroupsCpushareIsolatorProcess::prepare(
- const ContainerID& containerId,
- const ExecutorInfo& executorInfo,
- const string& directory,
- const Option<string>& user)
-{
- if (infos.contains(containerId)) {
- return Failure("Container has already been prepared");
- }
-
- // TODO(bmahler): Don't insert into 'infos' unless we create the
- // cgroup successfully. It's safe for now because 'cleanup' gets
- // called if we return a Failure, but cleanup will fail because the
- // cgroup does not exist when cgroups::destroy is called.
- Info* info = new Info(
- containerId, path::join(flags.cgroups_root, containerId.value()));
-
- infos[containerId] = info;
-
- foreach (const string& subsystem, subsystems) {
- Try<bool> exists = cgroups::exists(hierarchies[subsystem], info->cgroup);
- if (exists.isError()) {
- return Failure("Failed to prepare isolator: " + exists.error());
- } else if (exists.get()) {
- return Failure("Failed to prepare isolator: cgroup already exists");
- }
-
- Try<Nothing> create = cgroups::create(hierarchies[subsystem], info->cgroup);
- if (create.isError()) {
- return Failure("Failed to prepare isolator: " + create.error());
- }
-
- // Chown the cgroup so the executor can create nested cgroups. Do
- // not recurse so the control files are still owned by the slave
- // user and thus cannot be changed by the executor.
- if (user.isSome()) {
- Try<Nothing> chown = os::chown(
- user.get(),
- path::join(hierarchies[subsystem], info->cgroup),
- false);
- if (chown.isError()) {
- return Failure("Failed to prepare isolator: " + chown.error());
- }
- }
- }
-
- return update(containerId, executorInfo.resources())
- .then([]() -> Future<Option<ContainerPrepareInfo>> {
- return None();
- });
-}
-
-
-Future<Nothing> CgroupsCpushareIsolatorProcess::isolate(
- const ContainerID& containerId,
- pid_t pid)
-{
- if (!infos.contains(containerId)) {
- return Failure("Unknown container");
- }
-
- Info* info = CHECK_NOTNULL(infos[containerId]);
-
- CHECK_NONE(info->pid);
- info->pid = pid;
-
- foreach (const string& subsystem, subsystems) {
- Try<Nothing> assign = cgroups::assign(
- hierarchies[subsystem],
- info->cgroup,
- pid);
-
- if (assign.isError()) {
- LOG(ERROR) << "Failed to assign container '" << info->containerId
- << " to its own cgroup '"
- << path::join(hierarchies[subsystem], info->cgroup)
- << "' : " << assign.error();
-
- return Failure("Failed to isolate container: " + assign.error());
- }
- }
-
- return Nothing();
-}
-
-
-Future<ContainerLimitation> CgroupsCpushareIsolatorProcess::watch(
- const ContainerID& containerId)
-{
- if (!infos.contains(containerId)) {
- return Failure("Unknown container");
- }
-
- CHECK_NOTNULL(infos[containerId]);
-
- return infos[containerId]->limitation.future();
-}
-
-
-Future<Nothing> CgroupsCpushareIsolatorProcess::update(
- const ContainerID& containerId,
- const Resources& resources)
-{
- if (resources.cpus().isNone()) {
- return Failure("No cpus resource given");
- }
-
- if (!infos.contains(containerId)) {
- return Failure("Unknown container");
- }
-
- const Option<string>& hierarchy = hierarchies.get("cpu");
- if (hierarchy.isNone()) {
- return Failure("No 'cpu' hierarchy");
- }
-
- Info* info = CHECK_NOTNULL(infos[containerId]);
- info->resources = resources;
-
- double cpus = resources.cpus().get();
-
- // Always set cpu.shares.
- uint64_t shares;
-
- if (flags.revocable_cpu_low_priority &&
- resources.revocable().cpus().isSome()) {
- shares = std::max(
- (uint64_t) (CPU_SHARES_PER_CPU_REVOCABLE * cpus),
- MIN_CPU_SHARES);
- } else {
- shares = std::max(
- (uint64_t) (CPU_SHARES_PER_CPU * cpus),
- MIN_CPU_SHARES);
- }
-
- Try<Nothing> write = cgroups::cpu::shares(
- hierarchy.get(),
- info->cgroup,
- shares);
-
- if (write.isError()) {
- return Failure("Failed to update 'cpu.shares': " + write.error());
- }
-
- LOG(INFO) << "Updated 'cpu.shares' to " << shares
- << " (cpus " << cpus << ")"
- << " for container " << containerId;
-
- // Set cfs quota if enabled.
- if (flags.cgroups_enable_cfs) {
- write = cgroups::cpu::cfs_period_us(
- hierarchy.get(),
- info->cgroup,
- CPU_CFS_PERIOD);
-
- if (write.isError()) {
- return Failure("Failed to update 'cpu.cfs_period_us': " + write.error());
- }
-
- Duration quota = std::max(CPU_CFS_PERIOD * cpus, MIN_CPU_CFS_QUOTA);
-
- write = cgroups::cpu::cfs_quota_us(hierarchy.get(), info->cgroup, quota);
- if (write.isError()) {
- return Failure("Failed to update 'cpu.cfs_quota_us': " + write.error());
- }
-
- LOG(INFO) << "Updated 'cpu.cfs_period_us' to " << CPU_CFS_PERIOD
- << " and 'cpu.cfs_quota_us' to " << quota
- << " (cpus " << cpus << ")"
- << " for container " << containerId;
- }
-
- return Nothing();
-}
-
-
-Future<ResourceStatistics> CgroupsCpushareIsolatorProcess::usage(
- const ContainerID& containerId)
-{
- if (!infos.contains(containerId)) {
- return Failure("Unknown container");
- }
-
- Info* info = CHECK_NOTNULL(infos[containerId]);
-
- ResourceStatistics result;
-
- // TODO(chzhcn): Getting the number of processes and threads is
- // available as long as any cgroup subsystem is used so this best
- // not be tied to a specific cgroup isolator. A better place is
- // probably Linux Launcher, which uses the cgroup freezer subsystem.
- // That requires some change for it to adopt the new semantics of
- // reporting subsystem-independent cgroup usage.
- // Note: The complexity of this operation is linear to the number of
- // processes and threads in a container: the kernel has to allocate
- // memory to contain the list of pids or tids; the userspace has to
- // parse the cgroup files to get the size. If this proves to be a
- // performance bottleneck, some kind of rate limiting mechanism
- // needs to be employed.
- if (flags.cgroups_cpu_enable_pids_and_tids_count) {
- Try<std::set<pid_t>> pids =
- cgroups::processes(hierarchies["cpuacct"], info->cgroup);
- if (pids.isError()) {
- return Failure("Failed to get number of processes: " + pids.error());
- }
-
- result.set_processes(pids.get().size());
-
- Try<std::set<pid_t>> tids =
- cgroups::threads(hierarchies["cpuacct"], info->cgroup);
- if (tids.isError()) {
- return Failure("Failed to get number of threads: " + tids.error());
- }
-
- result.set_threads(tids.get().size());
- }
-
- // Get the number of clock ticks, used for cpu accounting.
- static long ticks = sysconf(_SC_CLK_TCK);
-
- PCHECK(ticks > 0) << "Failed to get sysconf(_SC_CLK_TCK)";
-
- // Add the cpuacct.stat information.
- Try<hashmap<string, uint64_t>> stat = cgroups::stat(
- hierarchies["cpuacct"],
- info->cgroup,
- "cpuacct.stat");
-
- if (stat.isError()) {
- return Failure("Failed to read cpuacct.stat: " + stat.error());
- }
-
- // TODO(bmahler): Add namespacing to cgroups to enforce the expected
- // structure, e.g., cgroups::cpuacct::stat.
- Option<uint64_t> user = stat.get().get("user");
- Option<uint64_t> system = stat.get().get("system");
-
- if (user.isSome() && system.isSome()) {
- result.set_cpus_user_time_secs((double) user.get() / (double) ticks);
- result.set_cpus_system_time_secs((double) system.get() / (double) ticks);
- }
-
- // Add the cpu.stat information only if CFS is enabled.
- if (flags.cgroups_enable_cfs) {
- stat = cgroups::stat(hierarchies["cpu"], info->cgroup, "cpu.stat");
- if (stat.isError()) {
- return Failure("Failed to read cpu.stat: " + stat.error());
- }
-
- Option<uint64_t> nr_periods = stat.get().get("nr_periods");
- if (nr_periods.isSome()) {
- result.set_cpus_nr_periods(nr_periods.get());
- }
-
- Option<uint64_t> nr_throttled = stat.get().get("nr_throttled");
- if (nr_throttled.isSome()) {
- result.set_cpus_nr_throttled(nr_throttled.get());
- }
-
- Option<uint64_t> throttled_time = stat.get().get("throttled_time");
- if (throttled_time.isSome()) {
- result.set_cpus_throttled_time_secs(
- Nanoseconds(throttled_time.get()).secs());
- }
- }
-
- return result;
-}
-
-
-Future<Nothing> CgroupsCpushareIsolatorProcess::cleanup(
- const ContainerID& containerId)
-{
- // Multiple calls may occur during test clean up.
- if (!infos.contains(containerId)) {
- VLOG(1) << "Ignoring cleanup request for unknown container: "
- << containerId;
-
- return Nothing();
- }
-
- Info* info = CHECK_NOTNULL(infos[containerId]);
-
- list<Future<Nothing>> futures;
- foreach (const string& subsystem, subsystems) {
- futures.push_back(cgroups::destroy(
- hierarchies[subsystem],
- info->cgroup,
- cgroups::DESTROY_TIMEOUT));
- }
-
- return collect(futures)
- .onAny(defer(PID<CgroupsCpushareIsolatorProcess>(this),
- &CgroupsCpushareIsolatorProcess::_cleanup,
- containerId,
- lambda::_1))
- .then([]() { return Nothing(); });
-}
-
-
-Future<list<Nothing>> CgroupsCpushareIsolatorProcess::_cleanup(
- const ContainerID& containerId,
- const Future<list<Nothing>>& future)
-{
- if (!infos.contains(containerId)) {
- return Failure("Unknown container");
- }
-
- CHECK_NOTNULL(infos[containerId]);
-
- if (!future.isReady()) {
- return Failure(
- "Failed to clean up container " + stringify(containerId) +
- " : " + (future.isFailed() ? future.failure() : "discarded"));
- }
-
- delete infos[containerId];
- infos.erase(containerId);
-
- return future;
-}
-
-} // namespace slave {
-} // namespace internal {
-} // namespace mesos {
http://git-wip-us.apache.org/repos/asf/mesos/blob/9a722d74/src/slave/containerizer/isolators/cgroups/cpushare.hpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/isolators/cgroups/cpushare.hpp b/src/slave/containerizer/isolators/cgroups/cpushare.hpp
deleted file mode 100644
index 54b83a7..0000000
--- a/src/slave/containerizer/isolators/cgroups/cpushare.hpp
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __CPUSHARE_ISOLATOR_HPP__
-#define __CPUSHARE_ISOLATOR_HPP__
-
-#include <sys/types.h>
-
-#include <string>
-#include <vector>
-
-#include <process/future.hpp>
-
-#include <stout/hashmap.hpp>
-#include <stout/option.hpp>
-
-#include "slave/flags.hpp"
-
-#include "slave/containerizer/isolator.hpp"
-
-#include "slave/containerizer/isolators/cgroups/constants.hpp"
-
-namespace mesos {
-namespace internal {
-namespace slave {
-
-// Use the Linux cpu cgroup controller for cpu isolation which uses the
-// Completely Fair Scheduler (CFS).
-// - cpushare implements proportionally weighted scheduling.
-// - cfs implements hard quota based scheduling.
-class CgroupsCpushareIsolatorProcess : public MesosIsolatorProcess
-{
-public:
- static Try<mesos::slave::Isolator*> create(const Flags& flags);
-
- virtual ~CgroupsCpushareIsolatorProcess();
-
- virtual process::Future<Nothing> recover(
- const std::list<mesos::slave::ContainerState>& states,
- const hashset<ContainerID>& orphans);
-
- virtual process::Future<Option<mesos::slave::ContainerPrepareInfo>> prepare(
- const ContainerID& containerId,
- const ExecutorInfo& executorInfo,
- const std::string& directory,
- const Option<std::string>& user);
-
- virtual process::Future<Nothing> isolate(
- const ContainerID& containerId,
- pid_t pid);
-
- virtual process::Future<mesos::slave::ContainerLimitation> watch(
- const ContainerID& containerId);
-
- virtual process::Future<Nothing> update(
- const ContainerID& containerId,
- const Resources& resources);
-
- virtual process::Future<ResourceStatistics> usage(
- const ContainerID& containerId);
-
- virtual process::Future<Nothing> cleanup(
- const ContainerID& containerId);
-
-private:
- CgroupsCpushareIsolatorProcess(
- const Flags& flags,
- const hashmap<std::string, std::string>& hierarchies,
- const std::vector<std::string>& subsystems);
-
- virtual process::Future<std::list<Nothing>> _cleanup(
- const ContainerID& containerId,
- const process::Future<std::list<Nothing>>& future);
-
- struct Info
- {
- Info(const ContainerID& _containerId, const std::string& _cgroup)
- : containerId(_containerId), cgroup(_cgroup) {}
-
- const ContainerID containerId;
- const std::string cgroup;
- Option<pid_t> pid;
- Option<Resources> resources;
-
- process::Promise<mesos::slave::ContainerLimitation> limitation;
- };
-
- const Flags flags;
-
- // Map from subsystem to hierarchy.
- hashmap<std::string, std::string> hierarchies;
-
- // Subsystems used for this isolator. Typically, there are two
- // elements in the vector: 'cpu' and 'cpuacct'. If cpu and cpuacct
- // systems are co-mounted (e.g., systems using systemd), then there
- // will be only one element in the vector which is 'cpu,cpuacct'.
- std::vector<std::string> subsystems;
-
- // TODO(bmahler): Use Owned<Info>.
- hashmap<ContainerID, Info*> infos;
-};
-
-} // namespace slave {
-} // namespace internal {
-} // namespace mesos {
-
-#endif // __CPUSHARE_ISOLATOR_HPP__
http://git-wip-us.apache.org/repos/asf/mesos/blob/9a722d74/src/slave/containerizer/isolators/cgroups/mem.cpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/isolators/cgroups/mem.cpp b/src/slave/containerizer/isolators/cgroups/mem.cpp
deleted file mode 100644
index 55fa6f4..0000000
--- a/src/slave/containerizer/isolators/cgroups/mem.cpp
+++ /dev/null
@@ -1,733 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <stdint.h>
-
-#include <list>
-#include <vector>
-
-#include <mesos/type_utils.hpp>
-#include <mesos/values.hpp>
-
-#include <process/collect.hpp>
-#include <process/defer.hpp>
-#include <process/pid.hpp>
-
-#include <stout/bytes.hpp>
-#include <stout/check.hpp>
-#include <stout/error.hpp>
-#include <stout/foreach.hpp>
-#include <stout/hashset.hpp>
-#include <stout/lambda.hpp>
-#include <stout/os.hpp>
-#include <stout/path.hpp>
-#include <stout/stringify.hpp>
-#include <stout/try.hpp>
-
-#include "common/protobuf_utils.hpp"
-
-#include "slave/containerizer/isolators/cgroups/constants.hpp"
-#include "slave/containerizer/isolators/cgroups/mem.hpp"
-
-using namespace process;
-
-using cgroups::memory::pressure::Level;
-using cgroups::memory::pressure::Counter;
-
-using std::list;
-using std::ostringstream;
-using std::set;
-using std::string;
-using std::vector;
-
-using mesos::slave::ContainerLimitation;
-using mesos::slave::ContainerPrepareInfo;
-using mesos::slave::ContainerState;
-using mesos::slave::Isolator;
-
-namespace mesos {
-namespace internal {
-namespace slave {
-
-static const vector<Level> levels()
-{
- return {Level::LOW, Level::MEDIUM, Level::CRITICAL};
-}
-
-
-CgroupsMemIsolatorProcess::CgroupsMemIsolatorProcess(
- const Flags& _flags,
- const string& _hierarchy,
- const bool _limitSwap)
- : flags(_flags),
- hierarchy(_hierarchy),
- limitSwap(_limitSwap) {}
-
-
-CgroupsMemIsolatorProcess::~CgroupsMemIsolatorProcess() {}
-
-
-Try<Isolator*> CgroupsMemIsolatorProcess::create(const Flags& flags)
-{
- Try<string> hierarchy = cgroups::prepare(
- flags.cgroups_hierarchy,
- "memory",
- flags.cgroups_root);
-
- if (hierarchy.isError()) {
- return Error("Failed to create memory cgroup: " + hierarchy.error());
- }
-
- // Ensure that no other subsystem is attached to the hierarchy.
- Try<set<string>> subsystems = cgroups::subsystems(hierarchy.get());
- if (subsystems.isError()) {
- return Error(
- "Failed to get the list of attached subsystems for hierarchy " +
- hierarchy.get());
- } else if (subsystems.get().size() != 1) {
- return Error(
- "Unexpected subsystems found attached to the hierarchy " +
- hierarchy.get());
- }
-
- // Make sure the kernel OOM-killer is enabled.
- // The Mesos OOM handler, as implemented, is not capable of handling
- // the oom condition by itself safely given the limitations Linux
- // imposes on this code path.
- Try<Nothing> enable = cgroups::memory::oom::killer::enable(
- hierarchy.get(), flags.cgroups_root);
-
- if (enable.isError()) {
- return Error(enable.error());
- }
-
- // Test if memory pressure listening is enabled. We test that on the
- // root cgroup. We rely on 'Counter::create' to test if memory
- // pressure listening is enabled or not. The created counters will
- // be destroyed immediately.
- foreach (Level level, levels()) {
- Try<Owned<Counter>> counter = Counter::create(
- hierarchy.get(),
- flags.cgroups_root,
- level);
-
- if (counter.isError()) {
- return Error("Failed to listen on " + stringify(level) +
- " memory events: " + counter.error());
- }
- }
-
- // Determine whether to limit swap or not.
- bool limitSwap = false;
-
- if (flags.cgroups_limit_swap) {
- Result<Bytes> check = cgroups::memory::memsw_limit_in_bytes(
- hierarchy.get(), flags.cgroups_root);
-
- if (check.isError()) {
- return Error(
- "Failed to read 'memory.memsw.limit_in_bytes': " +
- check.error());
- } else if (check.isNone()) {
- return Error("'memory.memsw.limit_in_bytes' is not available");
- }
-
- limitSwap = true;
- }
-
- process::Owned<MesosIsolatorProcess> process(
- new CgroupsMemIsolatorProcess(flags, hierarchy.get(), limitSwap));
-
- return new MesosIsolator(process);
-}
-
-
-Future<Nothing> CgroupsMemIsolatorProcess::recover(
- const list<ContainerState>& states,
- const hashset<ContainerID>& orphans)
-{
- foreach (const ContainerState& state, states) {
- const ContainerID& containerId = state.container_id();
- const string cgroup = path::join(flags.cgroups_root, containerId.value());
-
- Try<bool> exists = cgroups::exists(hierarchy, cgroup);
- if (exists.isError()) {
- foreachvalue (Info* info, infos) {
- delete info;
- }
- infos.clear();
- return Failure("Failed to check cgroup for container '" +
- stringify(containerId) + "'");
- }
-
- if (!exists.get()) {
- VLOG(1) << "Couldn't find cgroup for container " << containerId;
- // This may occur if the executor has exited and the isolator
- // has destroyed the cgroup but the slave dies before noticing
- // this. This will be detected when the containerizer tries to
- // monitor the executor's pid.
- continue;
- }
-
- infos[containerId] = new Info(containerId, cgroup);
-
- oomListen(containerId);
- pressureListen(containerId);
- }
-
- // Remove orphan cgroups.
- Try<vector<string>> cgroups = cgroups::get(hierarchy, flags.cgroups_root);
- if (cgroups.isError()) {
- foreachvalue (Info* info, infos) {
- delete info;
- }
- infos.clear();
- return Failure(cgroups.error());
- }
-
- foreach (const string& cgroup, cgroups.get()) {
- // Ignore the slave cgroup (see the --slave_subsystems flag).
- // TODO(idownes): Remove this when the cgroups layout is updated,
- // see MESOS-1185.
- if (cgroup == path::join(flags.cgroups_root, "slave")) {
- continue;
- }
-
- ContainerID containerId;
- containerId.set_value(Path(cgroup).basename());
-
- if (infos.contains(containerId)) {
- continue;
- }
-
- // Known orphan cgroups will be destroyed by the containerizer
- // using the normal cleanup path. See MESOS-2367 for details.
- if (orphans.contains(containerId)) {
- infos[containerId] = new Info(containerId, cgroup);
- continue;
- }
-
- LOG(INFO) << "Removing unknown orphaned cgroup '" << cgroup << "'";
-
- // We don't wait on the destroy as we don't want to block recovery.
- cgroups::destroy(hierarchy, cgroup, cgroups::DESTROY_TIMEOUT);
- }
-
- return Nothing();
-}
-
-
-Future<Option<ContainerPrepareInfo>> CgroupsMemIsolatorProcess::prepare(
- const ContainerID& containerId,
- const ExecutorInfo& executorInfo,
- const string& directory,
- const Option<string>& user)
-{
- if (infos.contains(containerId)) {
- return Failure("Container has already been prepared");
- }
-
- // TODO(bmahler): Don't insert into 'infos' unless we create the
- // cgroup successfully. It's safe for now because 'cleanup' gets
- // called if we return a Failure, but cleanup will fail because the
- // cgroup does not exist when cgroups::destroy is called.
- Info* info = new Info(
- containerId, path::join(flags.cgroups_root, containerId.value()));
-
- infos[containerId] = info;
-
- // Create a cgroup for this container.
- Try<bool> exists = cgroups::exists(hierarchy, info->cgroup);
-
- if (exists.isError()) {
- return Failure("Failed to prepare isolator: " + exists.error());
- } else if (exists.get()) {
- return Failure("Failed to prepare isolator: cgroup already exists");
- }
-
- Try<Nothing> create = cgroups::create(hierarchy, info->cgroup);
- if (create.isError()) {
- return Failure("Failed to prepare isolator: " + create.error());
- }
-
- // Chown the cgroup so the executor can create nested cgroups. Do
- // not recurse so the control files are still owned by the slave
- // user and thus cannot be changed by the executor.
- if (user.isSome()) {
- Try<Nothing> chown = os::chown(
- user.get(),
- path::join(hierarchy, info->cgroup),
- false);
- if (chown.isError()) {
- return Failure("Failed to prepare isolator: " + chown.error());
- }
- }
-
- oomListen(containerId);
- pressureListen(containerId);
-
- return update(containerId, executorInfo.resources())
- .then([]() -> Future<Option<ContainerPrepareInfo>> {
- return None();
- });
-}
-
-
-Future<Nothing> CgroupsMemIsolatorProcess::isolate(
- const ContainerID& containerId,
- pid_t pid)
-{
- if (!infos.contains(containerId)) {
- return Failure("Unknown container");
- }
-
- Info* info = CHECK_NOTNULL(infos[containerId]);
-
- CHECK_NONE(info->pid);
- info->pid = pid;
-
- Try<Nothing> assign = cgroups::assign(hierarchy, info->cgroup, pid);
- if (assign.isError()) {
- return Failure("Failed to assign container '" +
- stringify(info->containerId) + "' to its own cgroup '" +
- path::join(hierarchy, info->cgroup) +
- "' : " + assign.error());
- }
-
- return Nothing();
-}
-
-
-Future<ContainerLimitation> CgroupsMemIsolatorProcess::watch(
- const ContainerID& containerId)
-{
- if (!infos.contains(containerId)) {
- return Failure("Unknown container");
- }
-
- CHECK_NOTNULL(infos[containerId]);
-
- return infos[containerId]->limitation.future();
-}
-
-
-Future<Nothing> CgroupsMemIsolatorProcess::update(
- const ContainerID& containerId,
- const Resources& resources)
-{
- if (resources.mem().isNone()) {
- return Failure("No memory resource given");
- }
-
- if (!infos.contains(containerId)) {
- return Failure("Unknown container");
- }
-
- Info* info = CHECK_NOTNULL(infos[containerId]);
-
- // New limit.
- Bytes mem = resources.mem().get();
- Bytes limit = std::max(mem, MIN_MEMORY);
-
- // Always set the soft limit.
- Try<Nothing> write =
- cgroups::memory::soft_limit_in_bytes(hierarchy, info->cgroup, limit);
-
- if (write.isError()) {
- return Failure(
- "Failed to set 'memory.soft_limit_in_bytes': " + write.error());
- }
-
- LOG(INFO) << "Updated 'memory.soft_limit_in_bytes' to " << limit
- << " for container " << containerId;
-
- // Read the existing limit.
- Try<Bytes> currentLimit =
- cgroups::memory::limit_in_bytes(hierarchy, info->cgroup);
-
- // NOTE: If limitSwap is (has been) used then both limit_in_bytes
- // and memsw.limit_in_bytes will always be set to the same value.
- if (currentLimit.isError()) {
- return Failure(
- "Failed to read 'memory.limit_in_bytes': " + currentLimit.error());
- }
-
- // Determine whether to set the hard limit. If this is the first
- // time (info->pid.isNone()), or we're raising the existing limit,
- // then we can update the hard limit safely. Otherwise, if we need
- // to decrease 'memory.limit_in_bytes' we may induce an OOM if too
- // much memory is in use. As a result, we only update the soft limit
- // when the memory reservation is being reduced. This is probably
- // okay if the machine has available resources.
- // TODO(benh): Introduce a MemoryWatcherProcess which monitors the
- // discrepancy between usage and soft limit and introduces a "manual
- // oom" if necessary.
- if (info->pid.isNone() || limit > currentLimit.get()) {
- // We always set limit_in_bytes first and optionally set
- // memsw.limit_in_bytes if limitSwap is true.
- Try<Nothing> write = cgroups::memory::limit_in_bytes(
- hierarchy, info->cgroup, limit);
-
- if (write.isError()) {
- return Failure(
- "Failed to set 'memory.limit_in_bytes': " + write.error());
- }
-
- LOG(INFO) << "Updated 'memory.limit_in_bytes' to " << limit
- << " for container " << containerId;
-
- if (limitSwap) {
- Try<bool> write = cgroups::memory::memsw_limit_in_bytes(
- hierarchy, info->cgroup, limit);
-
- if (write.isError()) {
- return Failure(
- "Failed to set 'memory.memsw.limit_in_bytes': " + write.error());
- }
-
- LOG(INFO) << "Updated 'memory.memsw.limit_in_bytes' to " << limit
- << " for container " << containerId;
- }
- }
-
- return Nothing();
-}
-
-
-Future<ResourceStatistics> CgroupsMemIsolatorProcess::usage(
- const ContainerID& containerId)
-{
- if (!infos.contains(containerId)) {
- return Failure("Unknown container");
- }
-
- Info* info = CHECK_NOTNULL(infos[containerId]);
-
- ResourceStatistics result;
-
- // The rss from memory.stat is wrong in two dimensions:
- // 1. It does not include child cgroups.
- // 2. It does not include any file backed pages.
- Try<Bytes> usage = cgroups::memory::usage_in_bytes(hierarchy, info->cgroup);
- if (usage.isError()) {
- return Failure("Failed to parse memory.usage_in_bytes: " + usage.error());
- }
-
- result.set_mem_total_bytes(usage.get().bytes());
-
- if (limitSwap) {
- Try<Bytes> usage =
- cgroups::memory::memsw_usage_in_bytes(hierarchy, info->cgroup);
- if (usage.isError()) {
- return Failure(
- "Failed to parse memory.memsw.usage_in_bytes: " + usage.error());
- }
-
- result.set_mem_total_memsw_bytes(usage.get().bytes());
- }
-
- // TODO(bmahler): Add namespacing to cgroups to enforce the expected
- // structure, e.g, cgroups::memory::stat.
- Try<hashmap<string, uint64_t>> stat =
- cgroups::stat(hierarchy, info->cgroup, "memory.stat");
- if (stat.isError()) {
- return Failure("Failed to read memory.stat: " + stat.error());
- }
-
- Option<uint64_t> total_cache = stat.get().get("total_cache");
- if (total_cache.isSome()) {
- // TODO(chzhcn): mem_file_bytes is deprecated in 0.23.0 and will
- // be removed in 0.24.0.
- result.set_mem_file_bytes(total_cache.get());
-
- result.set_mem_cache_bytes(total_cache.get());
- }
-
- Option<uint64_t> total_rss = stat.get().get("total_rss");
- if (total_rss.isSome()) {
- // TODO(chzhcn): mem_anon_bytes is deprecated in 0.23.0 and will
- // be removed in 0.24.0.
- result.set_mem_anon_bytes(total_rss.get());
-
- result.set_mem_rss_bytes(total_rss.get());
- }
-
- Option<uint64_t> total_mapped_file = stat.get().get("total_mapped_file");
- if (total_mapped_file.isSome()) {
- result.set_mem_mapped_file_bytes(total_mapped_file.get());
- }
-
- Option<uint64_t> total_swap = stat.get().get("total_swap");
- if (total_swap.isSome()) {
- result.set_mem_swap_bytes(total_swap.get());
- }
-
- Option<uint64_t> total_unevictable = stat.get().get("total_unevictable");
- if (total_unevictable.isSome()) {
- result.set_mem_unevictable_bytes(total_unevictable.get());
- }
-
- // Get pressure counter readings.
- list<Level> levels;
- list<Future<uint64_t>> values;
- foreachpair (Level level,
- const Owned<Counter>& counter,
- info->pressureCounters) {
- levels.push_back(level);
- values.push_back(counter->value());
- }
-
- return await(values)
- .then(defer(PID<CgroupsMemIsolatorProcess>(this),
- &CgroupsMemIsolatorProcess::_usage,
- containerId,
- result,
- levels,
- lambda::_1));
-}
-
-
-Future<ResourceStatistics> CgroupsMemIsolatorProcess::_usage(
- const ContainerID& containerId,
- ResourceStatistics result,
- const list<Level>& levels,
- const list<Future<uint64_t>>& values)
-{
- if (!infos.contains(containerId)) {
- return Failure("Unknown container");
- }
-
- list<Level>::const_iterator iterator = levels.begin();
- foreach (const Future<uint64_t>& value, values) {
- if (value.isReady()) {
- switch (*iterator) {
- case Level::LOW:
- result.set_mem_low_pressure_counter(value.get());
- break;
- case Level::MEDIUM:
- result.set_mem_medium_pressure_counter(value.get());
- break;
- case Level::CRITICAL:
- result.set_mem_critical_pressure_counter(value.get());
- break;
- }
- } else {
- LOG(ERROR) << "Failed to listen on " << stringify(*iterator)
- << " pressure events for container " << containerId << ": "
- << (value.isFailed() ? value.failure() : "discarded");
- }
-
- ++iterator;
- }
-
- return result;
-}
-
-
-Future<Nothing> CgroupsMemIsolatorProcess::cleanup(
- const ContainerID& containerId)
-{
- // Multiple calls may occur during test clean up.
- if (!infos.contains(containerId)) {
- VLOG(1) << "Ignoring cleanup request for unknown container: "
- << containerId;
- return Nothing();
- }
-
- Info* info = CHECK_NOTNULL(infos[containerId]);
-
- if (info->oomNotifier.isPending()) {
- info->oomNotifier.discard();
- }
-
- return cgroups::destroy(hierarchy, info->cgroup, cgroups::DESTROY_TIMEOUT)
- .onAny(defer(PID<CgroupsMemIsolatorProcess>(this),
- &CgroupsMemIsolatorProcess::_cleanup,
- containerId,
- lambda::_1));
-}
-
-
-Future<Nothing> CgroupsMemIsolatorProcess::_cleanup(
- const ContainerID& containerId,
- const Future<Nothing>& future)
-{
- if (!infos.contains(containerId)) {
- return Failure("Unknown container");
- }
-
- CHECK_NOTNULL(infos[containerId]);
-
- if (!future.isReady()) {
- return Failure("Failed to clean up container " + stringify(containerId) +
- " : " + (future.isFailed() ? future.failure()
- : "discarded"));
- }
-
- delete infos[containerId];
- infos.erase(containerId);
-
- return Nothing();
-}
-
-
-void CgroupsMemIsolatorProcess::oomListen(
- const ContainerID& containerId)
-{
- CHECK(infos.contains(containerId));
- Info* info = CHECK_NOTNULL(infos[containerId]);
-
- info->oomNotifier = cgroups::memory::oom::listen(hierarchy, info->cgroup);
-
- // If the listening fails immediately, something very wrong
- // happened. Therefore, we report a fatal error here.
- if (info->oomNotifier.isFailed()) {
- LOG(FATAL) << "Failed to listen for OOM events for container "
- << containerId << ": "
- << info->oomNotifier.failure();
- }
-
- LOG(INFO) << "Started listening for OOM events for container "
- << containerId;
-
- info->oomNotifier.onReady(defer(
- PID<CgroupsMemIsolatorProcess>(this),
- &CgroupsMemIsolatorProcess::oomWaited,
- containerId,
- lambda::_1));
-}
-
-
-void CgroupsMemIsolatorProcess::oomWaited(
- const ContainerID& containerId,
- const Future<Nothing>& future)
-{
- if (future.isDiscarded()) {
- LOG(INFO) << "Discarded OOM notifier for container "
- << containerId;
- } else if (future.isFailed()) {
- LOG(ERROR) << "Listening on OOM events failed for container "
- << containerId << ": " << future.failure();
- } else {
- // Out-of-memory event happened, call the handler.
- LOG(INFO) << "OOM notifier is triggered for container " << containerId;
- oom(containerId);
- }
-}
-
-
-void CgroupsMemIsolatorProcess::oom(const ContainerID& containerId)
-{
- if (!infos.contains(containerId)) {
- // It is likely that process exited is executed before this
- // function (e.g. The kill and OOM events happen at the same
- // time, and the process exit event arrives first.) Therefore, we
- // should not report a fatal error here.
- LOG(INFO) << "OOM detected for an already terminated executor";
- return;
- }
-
- Info* info = CHECK_NOTNULL(infos[containerId]);
-
- LOG(INFO) << "OOM detected for container " << containerId;
-
- // Construct a "message" string to describe why the isolator
- // destroyed the executor's cgroup (in order to assist in
- // debugging).
- ostringstream message;
- message << "Memory limit exceeded: ";
-
- // Output the requested memory limit.
- // NOTE: If limitSwap is (has been) used then both limit_in_bytes
- // and memsw.limit_in_bytes will always be set to the same value.
- Try<Bytes> limit = cgroups::memory::limit_in_bytes(hierarchy, info->cgroup);
-
- if (limit.isError()) {
- LOG(ERROR) << "Failed to read 'memory.limit_in_bytes': "
- << limit.error();
- } else {
- message << "Requested: " << limit.get() << " ";
- }
-
- // Output the maximum memory usage.
- Try<Bytes> usage = cgroups::memory::max_usage_in_bytes(
- hierarchy, info->cgroup);
-
- if (usage.isError()) {
- LOG(ERROR) << "Failed to read 'memory.max_usage_in_bytes': "
- << usage.error();
- } else {
- message << "Maximum Used: " << usage.get() << "\n";
- }
-
- // Output 'memory.stat' of the cgroup to help with debugging.
- // NOTE: With Kernel OOM-killer enabled these stats may not reflect
- // memory state at time of OOM.
- Try<string> read = cgroups::read(hierarchy, info->cgroup, "memory.stat");
- if (read.isError()) {
- LOG(ERROR) << "Failed to read 'memory.stat': " << read.error();
- } else {
- message << "\nMEMORY STATISTICS: \n" << read.get() << "\n";
- }
-
- LOG(INFO) << strings::trim(message.str()); // Trim the extra '\n' at the end.
-
- // TODO(jieyu): This is not accurate if the memory resource is from
- // a non-star role or spans roles (e.g., "*" and "role"). Ideally,
- // we should save the resources passed in and report it here.
- Resources mem = Resources::parse(
- "mem",
- stringify(usage.isSome() ? usage.get().megabytes() : 0),
- "*").get();
-
- info->limitation.set(
- protobuf::slave::createContainerLimitation(
- mem,
- message.str(),
- TaskStatus::REASON_CONTAINER_LIMITATION_MEMORY));
-}
-
-
-void CgroupsMemIsolatorProcess::pressureListen(
- const ContainerID& containerId)
-{
- CHECK(infos.contains(containerId));
- Info* info = CHECK_NOTNULL(infos[containerId]);
-
- foreach (Level level, levels()) {
- Try<Owned<Counter>> counter = Counter::create(
- hierarchy,
- info->cgroup,
- level);
-
- if (counter.isError()) {
- LOG(ERROR) << "Failed to listen on " << level << " memory pressure "
- << "events for container " << containerId << ": "
- << counter.error();
- } else {
- info->pressureCounters[level] = counter.get();
-
- LOG(INFO) << "Started listening on " << level << " memory pressure "
- << "events for container " << containerId;
- }
- }
-}
-
-} // namespace slave {
-} // namespace internal {
-} // namespace mesos {
http://git-wip-us.apache.org/repos/asf/mesos/blob/9a722d74/src/slave/containerizer/isolators/cgroups/mem.hpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/isolators/cgroups/mem.hpp b/src/slave/containerizer/isolators/cgroups/mem.hpp
deleted file mode 100644
index 47f73c3..0000000
--- a/src/slave/containerizer/isolators/cgroups/mem.hpp
+++ /dev/null
@@ -1,141 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __MEM_ISOLATOR_HPP__
-#define __MEM_ISOLATOR_HPP__
-
-#include <sys/types.h>
-
-#include <process/future.hpp>
-#include <process/owned.hpp>
-
-#include <stout/hashmap.hpp>
-#include <stout/nothing.hpp>
-#include <stout/option.hpp>
-
-#include "linux/cgroups.hpp"
-
-#include "slave/flags.hpp"
-
-#include "slave/containerizer/isolator.hpp"
-
-namespace mesos {
-namespace internal {
-namespace slave {
-
-class CgroupsMemIsolatorProcess : public MesosIsolatorProcess
-{
-public:
- static Try<mesos::slave::Isolator*> create(const Flags& flags);
-
- virtual ~CgroupsMemIsolatorProcess();
-
- virtual process::Future<Nothing> recover(
- const std::list<mesos::slave::ContainerState>& states,
- const hashset<ContainerID>& orphans);
-
- virtual process::Future<Option<mesos::slave::ContainerPrepareInfo>> prepare(
- const ContainerID& containerId,
- const ExecutorInfo& executorInfo,
- const std::string& directory,
- const Option<std::string>& user);
-
- virtual process::Future<Nothing> isolate(
- const ContainerID& containerId,
- pid_t pid);
-
- virtual process::Future<mesos::slave::ContainerLimitation> watch(
- const ContainerID& containerId);
-
- virtual process::Future<Nothing> update(
- const ContainerID& containerId,
- const Resources& resources);
-
- virtual process::Future<ResourceStatistics> usage(
- const ContainerID& containerId);
-
- virtual process::Future<Nothing> cleanup(
- const ContainerID& containerId);
-
-private:
- CgroupsMemIsolatorProcess(
- const Flags& flags,
- const std::string& hierarchy,
- bool limitSwap);
-
- process::Future<ResourceStatistics> _usage(
- const ContainerID& containerId,
- ResourceStatistics result,
- const std::list<cgroups::memory::pressure::Level>& levels,
- const std::list<process::Future<uint64_t>>& values);
-
- process::Future<Nothing> _cleanup(
- const ContainerID& containerId,
- const process::Future<Nothing>& future);
-
- struct Info
- {
- Info(const ContainerID& _containerId, const std::string& _cgroup)
- : containerId(_containerId), cgroup(_cgroup) {}
-
- const ContainerID containerId;
- const std::string cgroup;
- Option<pid_t> pid;
-
- process::Promise<mesos::slave::ContainerLimitation> limitation;
-
- // Used to cancel the OOM listening.
- process::Future<Nothing> oomNotifier;
-
- hashmap<cgroups::memory::pressure::Level,
- process::Owned<cgroups::memory::pressure::Counter>>
- pressureCounters;
- };
-
- // Start listening on OOM events. This function will create an
- // eventfd and start polling on it.
- void oomListen(const ContainerID& containerId);
-
- // This function is invoked when the polling on eventfd has a
- // result.
- void oomWaited(
- const ContainerID& containerId,
- const process::Future<Nothing>& future);
-
- // This function is invoked when the OOM event happens.
- void oom(const ContainerID& containerId);
-
- // Start listening on memory pressure events.
- void pressureListen(const ContainerID& containerId);
-
- const Flags flags;
-
- // The path to the cgroups subsystem hierarchy root.
- const std::string hierarchy;
-
- const bool limitSwap;
-
- // TODO(bmahler): Use Owned<Info>.
- hashmap<ContainerID, Info*> infos;
-};
-
-} // namespace slave {
-} // namespace internal {
-} // namespace mesos {
-
-#endif // __MEM_ISOLATOR_HPP__
http://git-wip-us.apache.org/repos/asf/mesos/blob/9a722d74/src/slave/containerizer/isolators/cgroups/perf_event.cpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/isolators/cgroups/perf_event.cpp b/src/slave/containerizer/isolators/cgroups/perf_event.cpp
deleted file mode 100644
index 03035df..0000000
--- a/src/slave/containerizer/isolators/cgroups/perf_event.cpp
+++ /dev/null
@@ -1,435 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <stdint.h>
-
-#include <vector>
-
-#include <google/protobuf/descriptor.h>
-#include <google/protobuf/message.h>
-
-#include <process/collect.hpp>
-#include <process/defer.hpp>
-#include <process/delay.hpp>
-#include <process/io.hpp>
-#include <process/pid.hpp>
-#include <process/reap.hpp>
-#include <process/subprocess.hpp>
-
-#include <stout/bytes.hpp>
-#include <stout/check.hpp>
-#include <stout/error.hpp>
-#include <stout/foreach.hpp>
-#include <stout/hashset.hpp>
-#include <stout/lambda.hpp>
-#include <stout/os.hpp>
-#include <stout/path.hpp>
-#include <stout/stringify.hpp>
-#include <stout/try.hpp>
-
-#include "linux/cgroups.hpp"
-#include "linux/perf.hpp"
-
-#include "slave/containerizer/isolators/cgroups/perf_event.hpp"
-
-using mesos::slave::ContainerLimitation;
-using mesos::slave::ContainerPrepareInfo;
-using mesos::slave::ContainerState;
-using mesos::slave::Isolator;
-
-using std::list;
-using std::set;
-using std::string;
-using std::vector;
-
-using process::Clock;
-using process::Failure;
-using process::Future;
-using process::PID;
-using process::Time;
-
-namespace mesos {
-namespace internal {
-namespace slave {
-
-Try<Isolator*> CgroupsPerfEventIsolatorProcess::create(const Flags& flags)
-{
- LOG(INFO) << "Creating PerfEvent isolator";
-
- if (!perf::supported()) {
- return Error("Perf is not supported");
- }
-
- if (flags.perf_duration > flags.perf_interval) {
- return Error("Sampling perf for duration (" +
- stringify(flags.perf_duration) +
- ") > interval (" +
- stringify(flags.perf_interval) +
- ") is not supported.");
- }
-
- if (!flags.perf_events.isSome()) {
- return Error("No perf events specified.");
- }
-
- set<string> events;
- foreach (const string& event,
- strings::tokenize(flags.perf_events.get(), ",")) {
- events.insert(event);
- }
-
- if (!perf::valid(events)) {
- return Error("Failed to create PerfEvent isolator, invalid events: " +
- stringify(events));
- }
-
- Try<string> hierarchy = cgroups::prepare(
- flags.cgroups_hierarchy,
- "perf_event",
- flags.cgroups_root);
-
- if (hierarchy.isError()) {
- return Error("Failed to create perf_event cgroup: " + hierarchy.error());
- }
-
- LOG(INFO) << "PerfEvent isolator will profile for " << flags.perf_duration
- << " every " << flags.perf_interval
- << " for events: " << stringify(events);
-
- process::Owned<MesosIsolatorProcess> process(
- new CgroupsPerfEventIsolatorProcess(flags, hierarchy.get(), events));
-
- return new MesosIsolator(process);
-}
-
-
-CgroupsPerfEventIsolatorProcess::~CgroupsPerfEventIsolatorProcess() {}
-
-
-void CgroupsPerfEventIsolatorProcess::initialize()
-{
- // Start sampling.
- sample();
-}
-
-
-Future<Nothing> CgroupsPerfEventIsolatorProcess::recover(
- const list<ContainerState>& states,
- const hashset<ContainerID>& orphans)
-{
- foreach (const ContainerState& state, states) {
- const ContainerID& containerId = state.container_id();
- const string cgroup = path::join(flags.cgroups_root, containerId.value());
-
- Try<bool> exists = cgroups::exists(hierarchy, cgroup);
- if (exists.isError()) {
- foreachvalue (Info* info, infos) {
- delete info;
- }
-
- infos.clear();
- return Failure("Failed to check cgroup " + cgroup +
- " for container '" + stringify(containerId) + "'");
- }
-
- if (!exists.get()) {
- // This may occur if the executor is exiting and the isolator has
- // destroyed the cgroup but the slave dies before noticing this. This
- // will be detected when the containerizer tries to monitor the
- // executor's pid.
- // NOTE: This could also occur if this isolator is now enabled for a
- // container that was started without this isolator. For this
- // particular isolator it is acceptable to continue running this
- // container without a perf_event cgroup because we don't ever
- // query it and the destroy will succeed immediately.
- VLOG(1) << "Couldn't find perf event cgroup for container " << containerId
- << ", perf statistics will not be available";
- continue;
- }
-
- infos[containerId] = new Info(containerId, cgroup);
- }
-
- // Remove orphan cgroups.
- Try<vector<string>> cgroups = cgroups::get(hierarchy, flags.cgroups_root);
- if (cgroups.isError()) {
- foreachvalue (Info* info, infos) {
- delete info;
- }
- infos.clear();
- return Failure(cgroups.error());
- }
-
- foreach (const string& cgroup, cgroups.get()) {
- // Ignore the slave cgroup (see the --slave_subsystems flag).
- // TODO(idownes): Remove this when the cgroups layout is updated,
- // see MESOS-1185.
- if (cgroup == path::join(flags.cgroups_root, "slave")) {
- continue;
- }
-
- ContainerID containerId;
- containerId.set_value(Path(cgroup).basename());
-
- if (infos.contains(containerId)) {
- continue;
- }
-
- // Known orphan cgroups will be destroyed by the containerizer
- // using the normal cleanup path. See details in MESOS-2367.
- if (orphans.contains(containerId)) {
- infos[containerId] = new Info(containerId, cgroup);
- continue;
- }
-
- LOG(INFO) << "Removing unknown orphaned cgroup '" << cgroup << "'";
-
- // We don't wait on the destroy as we don't want to block recovery.
- cgroups::destroy(hierarchy, cgroup, cgroups::DESTROY_TIMEOUT);
- }
-
- return Nothing();
-}
-
-
-Future<Option<ContainerPrepareInfo>> CgroupsPerfEventIsolatorProcess::prepare(
- const ContainerID& containerId,
- const ExecutorInfo& executorInfo,
- const string& directory,
- const Option<string>& user)
-{
- if (infos.contains(containerId)) {
- return Failure("Container has already been prepared");
- }
-
- LOG(INFO) << "Preparing perf event cgroup for " << containerId;
-
- Info* info = new Info(
- containerId,
- path::join(flags.cgroups_root, containerId.value()));
-
- infos[containerId] = CHECK_NOTNULL(info);
-
- // Create a cgroup for this container.
- Try<bool> exists = cgroups::exists(hierarchy, info->cgroup);
-
- if (exists.isError()) {
- return Failure("Failed to prepare isolator: " + exists.error());
- }
-
- if (exists.get()) {
- return Failure("Failed to prepare isolator: cgroup already exists");
- }
-
- if (!exists.get()) {
- Try<Nothing> create = cgroups::create(hierarchy, info->cgroup);
- if (create.isError()) {
- return Failure("Failed to prepare isolator: " + create.error());
- }
- }
-
- // Chown the cgroup so the executor can create nested cgroups. Do
- // not recurse so the control files are still owned by the slave
- // user and thus cannot be changed by the executor.
- if (user.isSome()) {
- Try<Nothing> chown = os::chown(
- user.get(),
- path::join(hierarchy, info->cgroup),
- false);
- if (chown.isError()) {
- return Failure("Failed to prepare isolator: " + chown.error());
- }
- }
-
- return None();
-}
-
-
-Future<Nothing> CgroupsPerfEventIsolatorProcess::isolate(
- const ContainerID& containerId,
- pid_t pid)
-{
- if (!infos.contains(containerId)) {
- return Failure("Unknown container");
- }
-
- Info* info = CHECK_NOTNULL(infos[containerId]);
-
- Try<Nothing> assign = cgroups::assign(hierarchy, info->cgroup, pid);
- if (assign.isError()) {
- return Failure("Failed to assign container '" +
- stringify(info->containerId) + "' to its own cgroup '" +
- path::join(hierarchy, info->cgroup) +
- "' : " + assign.error());
- }
-
- return Nothing();
-}
-
-
-Future<ContainerLimitation> CgroupsPerfEventIsolatorProcess::watch(
- const ContainerID& containerId)
-{
- // No resources are limited.
- return Future<ContainerLimitation>();
-}
-
-
-Future<Nothing> CgroupsPerfEventIsolatorProcess::update(
- const ContainerID& containerId,
- const Resources& resources)
-{
- // Nothing to update.
- return Nothing();
-}
-
-
-Future<ResourceStatistics> CgroupsPerfEventIsolatorProcess::usage(
- const ContainerID& containerId)
-{
- if (!infos.contains(containerId)) {
- // Return an empty ResourceStatistics, i.e., without
- // PerfStatistics, if we don't know about this container.
- return ResourceStatistics();
- }
-
- CHECK_NOTNULL(infos[containerId]);
-
- ResourceStatistics statistics;
- statistics.mutable_perf()->CopyFrom(infos[containerId]->statistics);
-
- return statistics;
-}
-
-
-Future<Nothing> CgroupsPerfEventIsolatorProcess::cleanup(
- const ContainerID& containerId)
-{
- // Tolerate clean up attempts for unknown containers which may arise from
- // repeated clean up attempts (during test cleanup).
- if (!infos.contains(containerId)) {
- VLOG(1) << "Ignoring cleanup request for unknown container: "
- << containerId;
- return Nothing();
- }
-
- Info* info = CHECK_NOTNULL(infos[containerId]);
-
- info->destroying = true;
-
- return cgroups::destroy(hierarchy, info->cgroup)
- .then(defer(PID<CgroupsPerfEventIsolatorProcess>(this),
- &CgroupsPerfEventIsolatorProcess::_cleanup,
- containerId));
-}
-
-
-Future<Nothing> CgroupsPerfEventIsolatorProcess::_cleanup(
- const ContainerID& containerId)
-{
- if (!infos.contains(containerId)) {
- return Nothing();
- }
-
- delete infos[containerId];
- infos.erase(containerId);
-
- return Nothing();
-}
-
-
-Future<hashmap<string, PerfStatistics>> discardSample(
- Future<hashmap<string, PerfStatistics>> future,
- const Duration& duration,
- const Duration& timeout)
-{
- LOG(ERROR) << "Perf sample of " << stringify(duration)
- << " failed to complete within " << stringify(timeout)
- << "; sampling will be halted";
-
- future.discard();
-
- return future;
-}
-
-
-void CgroupsPerfEventIsolatorProcess::sample()
-{
- // Collect a perf sample for all cgroups that are not being
- // destroyed. Since destroyal is asynchronous, 'perf stat' may
- // fail if the cgroup is destroyed before running perf.
- set<string> cgroups;
-
- foreachvalue (Info* info, infos) {
- CHECK_NOTNULL(info);
-
- if (!info->destroying) {
- cgroups.insert(info->cgroup);
- }
- }
-
- // The discard timeout includes an allowance of twice the
- // reaper interval to ensure we see the perf process exit.
- Duration timeout = flags.perf_duration + process::MAX_REAP_INTERVAL() * 2;
-
- perf::sample(events, cgroups, flags.perf_duration)
- .after(timeout,
- lambda::bind(&discardSample,
- lambda::_1,
- flags.perf_duration,
- timeout))
- .onAny(defer(PID<CgroupsPerfEventIsolatorProcess>(this),
- &CgroupsPerfEventIsolatorProcess::_sample,
- Clock::now() + flags.perf_interval,
- lambda::_1));
-}
-
-
-void CgroupsPerfEventIsolatorProcess::_sample(
- const Time& next,
- const Future<hashmap<string, PerfStatistics>>& statistics)
-{
- if (!statistics.isReady()) {
- // In case the failure is transient or this is due to a timeout,
- // we continue sampling. Note that since sampling is done on an
- // interval, it should be ok if this is a non-transient failure.
- LOG(ERROR) << "Failed to get perf sample: "
- << (statistics.isFailed()
- ? statistics.failure()
- : "discarded due to timeout");
- } else {
- // Store the latest statistics, note that cgroups added in the
- // interim will be picked up by the next sample.
- foreachvalue (Info* info, infos) {
- CHECK_NOTNULL(info);
-
- if (statistics->contains(info->cgroup)) {
- info->statistics = statistics->get(info->cgroup).get();
- }
- }
- }
-
- // Schedule sample for the next time.
- delay(next - Clock::now(),
- PID<CgroupsPerfEventIsolatorProcess>(this),
- &CgroupsPerfEventIsolatorProcess::sample);
-}
-
-} // namespace slave {
-} // namespace internal {
-} // namespace mesos {