Posted to commits@mesos.apache.org by ji...@apache.org on 2015/03/21 00:16:29 UTC

mesos git commit: Fixed right angle brackets in containerizer code.

Repository: mesos
Updated Branches:
  refs/heads/master f8a1afd31 -> a12242bd6


Fixed right angle brackets in containerizer code.

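For context, this commit addresses the classic C++03 parsing quirk: two consecutive right angle brackets closing nested template arguments were lexed as the right-shift operator, so a space was required (e.g. Future<Option<int> >). C++11 treats the trailing >> in that position as two closing brackets, which is the form the diff below switches to throughout the containerizer code. Below is a minimal standalone sketch of the difference; it is illustrative only (std::vector stands in for the Mesos template types) and is not part of the commit:

    #include <vector>

    int main()
    {
      // C++03: the closing '>>' would have been parsed as the
      // right-shift operator, so the space was mandatory.
      std::vector<std::vector<int> > old_style;

      // C++11 and later: the closing '>>' is parsed as two closing
      // brackets, so the space can be dropped -- the style this
      // commit adopts.
      std::vector<std::vector<int>> new_style;

      (void)old_style;
      (void)new_style;
      return 0;
    }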

Project: http://git-wip-us.apache.org/repos/asf/mesos/repo
Commit: http://git-wip-us.apache.org/repos/asf/mesos/commit/a12242bd
Tree: http://git-wip-us.apache.org/repos/asf/mesos/tree/a12242bd
Diff: http://git-wip-us.apache.org/repos/asf/mesos/diff/a12242bd

Branch: refs/heads/master
Commit: a12242bd63fe9c5664ca342c5fda96029887d803
Parents: f8a1afd
Author: Jie Yu <yu...@gmail.com>
Authored: Fri Mar 20 16:16:06 2015 -0700
Committer: Jie Yu <yu...@gmail.com>
Committed: Fri Mar 20 16:16:06 2015 -0700

----------------------------------------------------------------------
 src/slave/containerizer/composing.cpp           | 10 ++--
 src/slave/containerizer/composing.hpp           |  2 +-
 src/slave/containerizer/containerizer.hpp       |  2 +-
 src/slave/containerizer/docker.cpp              |  6 +--
 .../containerizer/external_containerizer.cpp    | 48 ++++++++++----------
 .../containerizer/external_containerizer.hpp    | 32 ++++++-------
 src/slave/containerizer/isolator.cpp            |  2 +-
 .../isolators/cgroups/cpushare.cpp              | 18 ++++----
 .../containerizer/isolators/cgroups/mem.cpp     | 10 ++--
 .../isolators/cgroups/perf_event.cpp            | 10 ++--
 .../isolators/filesystem/shared.cpp             |  2 +-
 .../isolators/filesystem/shared.hpp             |  2 +-
 .../containerizer/isolators/namespaces/pid.cpp  |  4 +-
 .../containerizer/isolators/namespaces/pid.hpp  |  2 +-
 .../isolators/network/port_mapping.cpp          | 30 ++++++------
 .../isolators/network/port_mapping.hpp          |  6 +--
 src/slave/containerizer/isolators/posix.hpp     |  6 +--
 src/slave/containerizer/launcher.cpp            | 12 ++---
 src/slave/containerizer/launcher.hpp            |  8 ++--
 src/slave/containerizer/linux_launcher.cpp      | 14 +++---
 src/slave/containerizer/linux_launcher.hpp      |  4 +-
 21 files changed, 115 insertions(+), 115 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/mesos/blob/a12242bd/src/slave/containerizer/composing.cpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/composing.cpp b/src/slave/containerizer/composing.cpp
index a6ae817..a6a170c 100644
--- a/src/slave/containerizer/composing.cpp
+++ b/src/slave/containerizer/composing.cpp
@@ -87,7 +87,7 @@ public:
 
   void destroy(const ContainerID& containerId);
 
-  Future<hashset<ContainerID> > containers();
+  Future<hashset<ContainerID>> containers();
 
 private:
   // Continuations.
@@ -234,7 +234,7 @@ void ComposingContainerizer::destroy(const ContainerID& containerId)
 }
 
 
-Future<hashset<ContainerID> > ComposingContainerizer::containers()
+Future<hashset<ContainerID>> ComposingContainerizer::containers()
 {
   return dispatch(process, &ComposingContainerizerProcess::containers);
 }
@@ -259,7 +259,7 @@ Future<Nothing> ComposingContainerizerProcess::recover(
     const Option<state::SlaveState>& state)
 {
   // Recover each containerizer in parallel.
-  list<Future<Nothing> > futures;
+  list<Future<Nothing>> futures;
   foreach (Containerizer* containerizer, containerizers_) {
     futures.push_back(containerizer->recover(state));
   }
@@ -272,7 +272,7 @@ Future<Nothing> ComposingContainerizerProcess::recover(
 Future<Nothing> ComposingContainerizerProcess::_recover()
 {
   // Now collect all the running containers in order to multiplex.
-  list<Future<Nothing> > futures;
+  list<Future<Nothing>> futures;
   foreach (Containerizer* containerizer, containerizers_) {
     Future<Nothing> future = containerizer->containers()
       .then(defer(self(), &Self::__recover, containerizer, lambda::_1));
@@ -544,7 +544,7 @@ void ComposingContainerizerProcess::destroy(const ContainerID& containerId)
 }
 
 
-Future<hashset<ContainerID> > ComposingContainerizerProcess::containers()
+Future<hashset<ContainerID>> ComposingContainerizerProcess::containers()
 {
   return containers_.keys();
 }

http://git-wip-us.apache.org/repos/asf/mesos/blob/a12242bd/src/slave/containerizer/composing.hpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/composing.hpp b/src/slave/containerizer/composing.hpp
index f1e60b0..198ea5c 100644
--- a/src/slave/containerizer/composing.hpp
+++ b/src/slave/containerizer/composing.hpp
@@ -87,7 +87,7 @@ public:
 
   virtual void destroy(const ContainerID& containerId);
 
-  virtual process::Future<hashset<ContainerID> > containers();
+  virtual process::Future<hashset<ContainerID>> containers();
 
 private:
   ComposingContainerizerProcess* process;

http://git-wip-us.apache.org/repos/asf/mesos/blob/a12242bd/src/slave/containerizer/containerizer.hpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/containerizer.hpp b/src/slave/containerizer/containerizer.hpp
index 129e60f..56c088a 100644
--- a/src/slave/containerizer/containerizer.hpp
+++ b/src/slave/containerizer/containerizer.hpp
@@ -125,7 +125,7 @@ public:
   // termination and manual destruction is not necessary. See wait().
   virtual void destroy(const ContainerID& containerId) = 0;
 
-  virtual process::Future<hashset<ContainerID> > containers() = 0;
+  virtual process::Future<hashset<ContainerID>> containers() = 0;
 };
 
 

http://git-wip-us.apache.org/repos/asf/mesos/blob/a12242bd/src/slave/containerizer/docker.cpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/docker.cpp b/src/slave/containerizer/docker.cpp
index 5f4b4ce..e83b912 100644
--- a/src/slave/containerizer/docker.cpp
+++ b/src/slave/containerizer/docker.cpp
@@ -375,7 +375,7 @@ void DockerContainerizer::destroy(const ContainerID& containerId)
 }
 
 
-Future<hashset<ContainerID> > DockerContainerizer::containers()
+Future<hashset<ContainerID>> DockerContainerizer::containers()
 {
   return dispatch(process.get(), &DockerContainerizerProcess::containers);
 }
@@ -1292,7 +1292,7 @@ void DockerContainerizerProcess::__destroy(
 void DockerContainerizerProcess::___destroy(
     const ContainerID& containerId,
     bool killed,
-    const Future<Option<int> >& status)
+    const Future<Option<int>>& status)
 {
   CHECK(containers_.contains(containerId));
 
@@ -1318,7 +1318,7 @@ void DockerContainerizerProcess::___destroy(
 }
 
 
-Future<hashset<ContainerID> > DockerContainerizerProcess::containers()
+Future<hashset<ContainerID>> DockerContainerizerProcess::containers()
 {
   return containers_.keys();
 }

http://git-wip-us.apache.org/repos/asf/mesos/blob/a12242bd/src/slave/containerizer/external_containerizer.cpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/external_containerizer.cpp b/src/slave/containerizer/external_containerizer.cpp
index cdbb180..1bbd61c 100644
--- a/src/slave/containerizer/external_containerizer.cpp
+++ b/src/slave/containerizer/external_containerizer.cpp
@@ -82,7 +82,7 @@ Try<ExternalContainerizer*> ExternalContainerizer::create(const Flags& flags)
 
 // Validate the invocation result.
 static Option<Error> validate(
-    const Future<Option<int> >& future)
+    const Future<Option<int>>& future)
 {
   if (!future.isReady()) {
     return Error("Status not ready");
@@ -107,7 +107,7 @@ static Option<Error> validate(
 // message.
 template<typename T>
 static Try<T> result(
-    const Future<tuple<Future<Result<T> >, Future<Option<int> > > >& future)
+    const Future<tuple<Future<Result<T>>, Future<Option<int>>>>& future)
 {
   if (!future.isReady()) {
     return Error("Could not receive any result");
@@ -118,7 +118,7 @@ static Try<T> result(
     return error.get();
   }
 
-  process::Future<Result<T> > result = tuples::get<0>(future.get());
+  Future<Result<T>> result = tuples::get<0>(future.get());
   if (result.isFailed()) {
     return Error("Could not receive any result: " + result.failure());
   }
@@ -240,7 +240,7 @@ void ExternalContainerizer::destroy(const ContainerID& containerId)
 }
 
 
-Future<hashset<ContainerID> > ExternalContainerizer::containers()
+Future<hashset<ContainerID>> ExternalContainerizer::containers()
 {
   return dispatch(process.get(),
                   &ExternalContainerizerProcess::containers);
@@ -274,7 +274,7 @@ Future<Nothing> ExternalContainerizerProcess::recover(
 
 Future<Nothing> ExternalContainerizerProcess::_recover(
     const Option<state::SlaveState>& state,
-    const Future<Option<int> >& future)
+    const Future<Option<int>>& future)
 {
   VLOG(1) << "Recover validation callback triggered";
 
@@ -393,7 +393,7 @@ Future<Nothing> ExternalContainerizerProcess::__recover(
     return Nothing();
   }
 
-  list<Future<containerizer::Termination> > futures;
+  list<Future<containerizer::Termination>> futures;
 
   // Enforce a 'destroy' on all orphaned containers.
   foreach (const ContainerID& containerId, orphaned) {
@@ -541,7 +541,7 @@ Future<bool> ExternalContainerizerProcess::launch(
 
 Future<bool> ExternalContainerizerProcess::_launch(
     const ContainerID& containerId,
-    const Future<Option<int> >& future)
+    const Future<Option<int>>& future)
 {
   VLOG(1) << "Launch validation callback triggered on container '"
           << containerId << "'";
@@ -635,7 +635,7 @@ Future<containerizer::Termination> ExternalContainerizerProcess::_wait(
   Result<containerizer::Termination>(*read)(int, bool, bool) =
     &::protobuf::read<containerizer::Termination>;
 
-  Future<Result<containerizer::Termination> > future = async(
+  Future<Result<containerizer::Termination>> future = async(
       read, invoked.get().out().get(), false, false);
 
   // Await both, a protobuf Message from the subprocess as well as
@@ -654,8 +654,8 @@ Future<containerizer::Termination> ExternalContainerizerProcess::_wait(
 void ExternalContainerizerProcess::__wait(
     const ContainerID& containerId,
     const Future<tuple<
-        Future<Result<containerizer::Termination> >,
-        Future<Option<int> > > >& future)
+        Future<Result<containerizer::Termination>>,
+        Future<Option<int>>>>& future)
 {
   VLOG(1) << "Wait callback triggered on container '" << containerId << "'";
 
@@ -669,7 +669,7 @@ void ExternalContainerizerProcess::__wait(
   // the result validation below will return an error due to a non 0
   // exit status.
   if (actives[containerId]->destroying && future.isReady()) {
-    Future<Option<int> > statusFuture = tuples::get<1>(future.get());
+    Future<Option<int>> statusFuture = tuples::get<1>(future.get());
     if (statusFuture.isReady()) {
       Option<int> status = statusFuture.get();
       if (status.isSome()) {
@@ -765,7 +765,7 @@ Future<Nothing> ExternalContainerizerProcess::_update(
 
 Future<Nothing> ExternalContainerizerProcess::__update(
     const ContainerID& containerId,
-    const Future<Option<int> >& future)
+    const Future<Option<int>>& future)
 {
   VLOG(1) << "Update callback triggered on container '" << containerId << "'";
 
@@ -826,7 +826,7 @@ Future<ResourceStatistics> ExternalContainerizerProcess::_usage(
   Result<ResourceStatistics>(*read)(int, bool, bool) =
     &::protobuf::read<ResourceStatistics>;
 
-  Future<Result<ResourceStatistics> > future = async(
+  Future<Result<ResourceStatistics>> future = async(
       read, invoked.get().out().get(), false, false);
 
   // Await both, a protobuf Message from the subprocess as well as
@@ -843,8 +843,8 @@ Future<ResourceStatistics> ExternalContainerizerProcess::_usage(
 Future<ResourceStatistics> ExternalContainerizerProcess::__usage(
     const ContainerID& containerId,
     const Future<tuple<
-        Future<Result<ResourceStatistics> >,
-        Future<Option<int> > > >& future)
+        Future<Result<ResourceStatistics>>,
+        Future<Option<int>>>>& future)
 {
   VLOG(1) << "Usage callback triggered on container '" << containerId << "'";
 
@@ -930,7 +930,7 @@ void ExternalContainerizerProcess::_destroy(const ContainerID& containerId)
 
 void ExternalContainerizerProcess::__destroy(
     const ContainerID& containerId,
-    const Future<Option<int> >& future)
+    const Future<Option<int>>& future)
 {
   VLOG(1) << "Destroy callback triggered on container '" << containerId << "'";
 
@@ -951,7 +951,7 @@ void ExternalContainerizerProcess::__destroy(
 }
 
 
-Future<hashset<ContainerID> > ExternalContainerizerProcess::containers()
+Future<hashset<ContainerID>> ExternalContainerizerProcess::containers()
 {
   VLOG(1) << "Containers triggered";
 
@@ -964,7 +964,7 @@ Future<hashset<ContainerID> > ExternalContainerizerProcess::containers()
   Result<containerizer::Containers>(*read)(int, bool, bool) =
     &::protobuf::read<containerizer::Containers>;
 
-  Future<Result<containerizer::Containers> > future = async(
+  Future<Result<containerizer::Containers>> future = async(
       read, invoked.get().out().get(), false, false);
 
   // Await both, a protobuf Message from the subprocess as well as
@@ -977,10 +977,10 @@ Future<hashset<ContainerID> > ExternalContainerizerProcess::containers()
 }
 
 
-Future<hashset<ContainerID> > ExternalContainerizerProcess::_containers(
+Future<hashset<ContainerID>> ExternalContainerizerProcess::_containers(
     const Future<tuple<
-        Future<Result<containerizer::Containers> >,
-        Future<Option<int> > > >& future)
+        Future<Result<containerizer::Containers>>,
+        Future<Option<int>>>>& future)
 {
   VLOG(1) << "Containers callback triggered";
 
@@ -1037,7 +1037,7 @@ void ExternalContainerizerProcess::unwait(const ContainerID& containerId)
 
   // TODO(tillt): Add graceful termination as soon as we have an
   // accepted way to do that in place.
-  Try<list<os::ProcessTree> > trees =
+  Try<list<os::ProcessTree>> trees =
     os::killtree(pid.get(), SIGKILL, true, true);
 
   if (trees.isError()) {
@@ -1086,7 +1086,7 @@ static int setup(const string& directory)
 Try<Subprocess> ExternalContainerizerProcess::invoke(
     const string& command,
     const Option<Sandbox>& sandbox,
-    const Option<map<string, string> >& commandEnvironment)
+    const Option<map<string, string>>& commandEnvironment)
 {
   CHECK_SOME(flags.containerizer_path) << "containerizer_path not set";
 
@@ -1200,7 +1200,7 @@ Try<Subprocess> ExternalContainerizerProcess::invoke(
     const string& command,
     const google::protobuf::Message& message,
     const Option<Sandbox>& sandbox,
-    const Option<map<string, string> >& commandEnvironment)
+    const Option<map<string, string>>& commandEnvironment)
 {
   Try<Subprocess> external = invoke(command, sandbox, commandEnvironment);
   if (external.isError()) {

http://git-wip-us.apache.org/repos/asf/mesos/blob/a12242bd/src/slave/containerizer/external_containerizer.hpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/external_containerizer.hpp b/src/slave/containerizer/external_containerizer.hpp
index 856d1ea..726a97f 100644
--- a/src/slave/containerizer/external_containerizer.hpp
+++ b/src/slave/containerizer/external_containerizer.hpp
@@ -115,7 +115,7 @@ public:
 
   virtual void destroy(const ContainerID& containerId);
 
-  virtual process::Future<hashset<ContainerID> > containers();
+  virtual process::Future<hashset<ContainerID>> containers();
 
 private:
   process::Owned<ExternalContainerizerProcess> process;
@@ -159,7 +159,7 @@ public:
   void destroy(const ContainerID& containerId);
 
   // Get all active container-id's.
-  process::Future<hashset<ContainerID> > containers();
+  process::Future<hashset<ContainerID>> containers();
 
 private:
   // Startup flags.
@@ -210,11 +210,11 @@ private:
   };
 
   // Stores all active containers.
-  hashmap<ContainerID, process::Owned<Container> > actives;
+  hashmap<ContainerID, process::Owned<Container>> actives;
 
   process::Future<Nothing> _recover(
       const Option<state::SlaveState>& state,
-      const process::Future<Option<int> >& future);
+      const process::Future<Option<int>>& future);
 
   process::Future<Nothing> __recover(
       const Option<state::SlaveState>& state,
@@ -224,7 +224,7 @@ private:
 
   process::Future<bool> _launch(
       const ContainerID& containerId,
-      const process::Future<Option<int> >& future);
+      const process::Future<Option<int>>& future);
 
   void __launch(
       const ContainerID& containerId,
@@ -236,8 +236,8 @@ private:
   void __wait(
       const ContainerID& containerId,
       const process::Future<tuples::tuple<
-          process::Future<Result<containerizer::Termination> >,
-          process::Future<Option<int> > > >& future);
+          process::Future<Result<containerizer::Termination>>,
+          process::Future<Option<int>>>>& future);
 
   process::Future<Nothing> _update(
       const ContainerID& containerId,
@@ -245,7 +245,7 @@ private:
 
   process::Future<Nothing> __update(
       const ContainerID& containerId,
-      const process::Future<Option<int> >& future);
+      const process::Future<Option<int>>& future);
 
   process::Future<ResourceStatistics> _usage(
       const ContainerID& containerId);
@@ -253,19 +253,19 @@ private:
   process::Future<ResourceStatistics> __usage(
       const ContainerID& containerId,
       const process::Future<tuples::tuple<
-          process::Future<Result<ResourceStatistics> >,
-          process::Future<Option<int> > > >& future);
+          process::Future<Result<ResourceStatistics>>,
+          process::Future<Option<int>>>>& future);
 
   void _destroy(const ContainerID& containerId);
 
   void __destroy(
       const ContainerID& containerId,
-      const process::Future<Option<int> >& future);
+      const process::Future<Option<int>>& future);
 
-  process::Future<hashset<ContainerID> > _containers(
+  process::Future<hashset<ContainerID>> _containers(
       const process::Future<tuples::tuple<
-          process::Future<Result<containerizer::Containers> >,
-          process::Future<Option<int> > > >& future);
+          process::Future<Result<containerizer::Containers>>,
+          process::Future<Option<int>>>>& future);
 
   // Abort a possibly pending "wait" in the external containerizer
   // process.
@@ -279,7 +279,7 @@ private:
   Try<process::Subprocess> invoke(
       const std::string& command,
       const Option<Sandbox>& sandbox = None(),
-      const Option<std::map<std::string, std::string> >& environment = None());
+      const Option<std::map<std::string, std::string>>& environment = None());
 
   // Invoke the external containerizer with the given command and
   // a protobuf message to be piped into its stdin.
@@ -290,7 +290,7 @@ private:
       const std::string& command,
       const google::protobuf::Message& message,
       const Option<Sandbox>& sandbox = None(),
-      const Option<std::map<std::string, std::string> >& environment = None());
+      const Option<std::map<std::string, std::string>>& environment = None());
 };
 
 

http://git-wip-us.apache.org/repos/asf/mesos/blob/a12242bd/src/slave/containerizer/isolator.cpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/isolator.cpp b/src/slave/containerizer/isolator.cpp
index ee294a2..a6ad1d5 100644
--- a/src/slave/containerizer/isolator.cpp
+++ b/src/slave/containerizer/isolator.cpp
@@ -49,7 +49,7 @@ Future<Nothing> Isolator::recover(const list<ExecutorRunState>& state)
 }
 
 
-Future<Option<CommandInfo> > Isolator::prepare(
+Future<Option<CommandInfo>> Isolator::prepare(
     const ContainerID& containerId,
     const ExecutorInfo& executorInfo,
     const string& directory,

http://git-wip-us.apache.org/repos/asf/mesos/blob/a12242bd/src/slave/containerizer/isolators/cgroups/cpushare.cpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/isolators/cgroups/cpushare.cpp b/src/slave/containerizer/isolators/cgroups/cpushare.cpp
index c4a5aec..41b2597 100644
--- a/src/slave/containerizer/isolators/cgroups/cpushare.cpp
+++ b/src/slave/containerizer/isolators/cgroups/cpushare.cpp
@@ -62,7 +62,7 @@ using mesos::slave::Limitation;
 
 
 template<class T>
-static Future<Option<T> > none() { return None(); }
+static Future<Option<T>> none() { return None(); }
 
 CgroupsCpushareIsolatorProcess::CgroupsCpushareIsolatorProcess(
     const Flags& _flags,
@@ -112,7 +112,7 @@ Try<Isolator*> CgroupsCpushareIsolatorProcess::create(const Flags& flags)
     subsystems.push_back("cpu,cpuacct");
 
     // Ensure that no other subsystem is attached to the hierarchy.
-    Try<set<string> > _subsystems = cgroups::subsystems(hierarchyCpu.get());
+    Try<set<string>> _subsystems = cgroups::subsystems(hierarchyCpu.get());
     if (_subsystems.isError()) {
       return Error(
           "Failed to get the list of attached subsystems for hierarchy " +
@@ -129,7 +129,7 @@ Try<Isolator*> CgroupsCpushareIsolatorProcess::create(const Flags& flags)
 
     // Ensure that no other subsystem is attached to each of the
     // hierarchy.
-    Try<set<string> > _subsystems = cgroups::subsystems(hierarchyCpu.get());
+    Try<set<string>> _subsystems = cgroups::subsystems(hierarchyCpu.get());
     if (_subsystems.isError()) {
       return Error(
           "Failed to get the list of attached subsystems for hierarchy " +
@@ -206,7 +206,7 @@ Future<Nothing> CgroupsCpushareIsolatorProcess::recover(
 
   // Remove orphans.
   foreach (const string& subsystem, subsystems) {
-    Try<vector<string> > orphans = cgroups::get(
+    Try<vector<string>> orphans = cgroups::get(
         hierarchies[subsystem],
         flags.cgroups_root);
 
@@ -243,7 +243,7 @@ Future<Nothing> CgroupsCpushareIsolatorProcess::recover(
 }
 
 
-Future<Option<CommandInfo> > CgroupsCpushareIsolatorProcess::prepare(
+Future<Option<CommandInfo>> CgroupsCpushareIsolatorProcess::prepare(
     const ContainerID& containerId,
     const ExecutorInfo& executorInfo,
     const string& directory,
@@ -453,7 +453,7 @@ Future<ResourceStatistics> CgroupsCpushareIsolatorProcess::usage(
   PCHECK(ticks > 0) << "Failed to get sysconf(_SC_CLK_TCK)";
 
   // Add the cpuacct.stat information.
-  Try<hashmap<string, uint64_t> > stat = cgroups::stat(
+  Try<hashmap<string, uint64_t>> stat = cgroups::stat(
       hierarchies["cpuacct"],
       info->cgroup,
       "cpuacct.stat");
@@ -520,7 +520,7 @@ Future<Nothing> CgroupsCpushareIsolatorProcess::cleanup(
 
   Info* info = CHECK_NOTNULL(infos[containerId]);
 
-  list<Future<Nothing> > futures;
+  list<Future<Nothing>> futures;
   foreach (const string& subsystem, subsystems) {
     futures.push_back(cgroups::destroy(
         hierarchies[subsystem],
@@ -537,9 +537,9 @@ Future<Nothing> CgroupsCpushareIsolatorProcess::cleanup(
 }
 
 
-Future<list<Nothing> > CgroupsCpushareIsolatorProcess::_cleanup(
+Future<list<Nothing>> CgroupsCpushareIsolatorProcess::_cleanup(
     const ContainerID& containerId,
-    const Future<list<Nothing> >& future)
+    const Future<list<Nothing>>& future)
 {
   if (!infos.contains(containerId)) {
     return Failure("Unknown container");

http://git-wip-us.apache.org/repos/asf/mesos/blob/a12242bd/src/slave/containerizer/isolators/cgroups/mem.cpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/isolators/cgroups/mem.cpp b/src/slave/containerizer/isolators/cgroups/mem.cpp
index 6299ca4..eaeb301 100644
--- a/src/slave/containerizer/isolators/cgroups/mem.cpp
+++ b/src/slave/containerizer/isolators/cgroups/mem.cpp
@@ -62,7 +62,7 @@ using mesos::slave::Limitation;
 
 
 template<class T>
-static Future<Option<T> > none() { return None(); }
+static Future<Option<T>> none() { return None(); }
 
 CgroupsMemIsolatorProcess::CgroupsMemIsolatorProcess(
     const Flags& _flags,
@@ -88,7 +88,7 @@ Try<Isolator*> CgroupsMemIsolatorProcess::create(const Flags& flags)
   }
 
   // Ensure that no other subsystem is attached to the hierarchy.
-  Try<set<string> > subsystems = cgroups::subsystems(hierarchy.get());
+  Try<set<string>> subsystems = cgroups::subsystems(hierarchy.get());
   if (subsystems.isError()) {
     return Error(
         "Failed to get the list of attached subsystems for hierarchy " +
@@ -169,7 +169,7 @@ Future<Nothing> CgroupsMemIsolatorProcess::recover(
     oomListen(containerId);
   }
 
-  Try<vector<string> > orphans = cgroups::get(
+  Try<vector<string>> orphans = cgroups::get(
       hierarchy, flags.cgroups_root);
   if (orphans.isError()) {
     foreachvalue (Info* info, infos) {
@@ -198,7 +198,7 @@ Future<Nothing> CgroupsMemIsolatorProcess::recover(
 }
 
 
-Future<Option<CommandInfo> > CgroupsMemIsolatorProcess::prepare(
+Future<Option<CommandInfo>> CgroupsMemIsolatorProcess::prepare(
     const ContainerID& containerId,
     const ExecutorInfo& executorInfo,
     const string& directory,
@@ -395,7 +395,7 @@ Future<ResourceStatistics> CgroupsMemIsolatorProcess::usage(
   // structure, e.g, cgroups::memory::stat.
   result.set_mem_rss_bytes(usage.get().bytes());
 
-  Try<hashmap<string, uint64_t> > stat =
+  Try<hashmap<string, uint64_t>> stat =
     cgroups::stat(hierarchy, info->cgroup, "memory.stat");
 
   if (stat.isError()) {

http://git-wip-us.apache.org/repos/asf/mesos/blob/a12242bd/src/slave/containerizer/isolators/cgroups/perf_event.cpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/isolators/cgroups/perf_event.cpp b/src/slave/containerizer/isolators/cgroups/perf_event.cpp
index c6e3055..4dfccc5 100644
--- a/src/slave/containerizer/isolators/cgroups/perf_event.cpp
+++ b/src/slave/containerizer/isolators/cgroups/perf_event.cpp
@@ -181,7 +181,7 @@ Future<Nothing> CgroupsPerfEventIsolatorProcess::recover(
     cgroups.insert(cgroup);
   }
 
-  Try<vector<string> > orphans = cgroups::get(hierarchy, flags.cgroups_root);
+  Try<vector<string>> orphans = cgroups::get(hierarchy, flags.cgroups_root);
   if (orphans.isError()) {
     foreachvalue (Info* info, infos) {
       delete info;
@@ -208,7 +208,7 @@ Future<Nothing> CgroupsPerfEventIsolatorProcess::recover(
 }
 
 
-Future<Option<CommandInfo> > CgroupsPerfEventIsolatorProcess::prepare(
+Future<Option<CommandInfo>> CgroupsPerfEventIsolatorProcess::prepare(
     const ContainerID& containerId,
     const ExecutorInfo& executorInfo,
     const string& directory,
@@ -354,8 +354,8 @@ Future<Nothing> CgroupsPerfEventIsolatorProcess::_cleanup(
 }
 
 
-Future<hashmap<string, PerfStatistics> > discardSample(
-    Future<hashmap<string, PerfStatistics> > future,
+Future<hashmap<string, PerfStatistics>> discardSample(
+    Future<hashmap<string, PerfStatistics>> future,
     const Duration& duration,
     const Duration& timeout)
 {
@@ -414,7 +414,7 @@ void CgroupsPerfEventIsolatorProcess::sample()
 
 void CgroupsPerfEventIsolatorProcess::_sample(
     const Time& next,
-    const Future<hashmap<string, PerfStatistics> >& statistics)
+    const Future<hashmap<string, PerfStatistics>>& statistics)
 {
   if (!statistics.isReady()) {
     // Failure can occur for many reasons but all are unexpected and

http://git-wip-us.apache.org/repos/asf/mesos/blob/a12242bd/src/slave/containerizer/isolators/filesystem/shared.cpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/isolators/filesystem/shared.cpp b/src/slave/containerizer/isolators/filesystem/shared.cpp
index f6a8fa1..d5abea2 100644
--- a/src/slave/containerizer/isolators/filesystem/shared.cpp
+++ b/src/slave/containerizer/isolators/filesystem/shared.cpp
@@ -71,7 +71,7 @@ Future<Nothing> SharedFilesystemIsolatorProcess::recover(
 }
 
 
-Future<Option<CommandInfo> > SharedFilesystemIsolatorProcess::prepare(
+Future<Option<CommandInfo>> SharedFilesystemIsolatorProcess::prepare(
     const ContainerID& containerId,
     const ExecutorInfo& executorInfo,
     const string& directory,

http://git-wip-us.apache.org/repos/asf/mesos/blob/a12242bd/src/slave/containerizer/isolators/filesystem/shared.hpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/isolators/filesystem/shared.hpp b/src/slave/containerizer/isolators/filesystem/shared.hpp
index 99c8952..764a45c 100644
--- a/src/slave/containerizer/isolators/filesystem/shared.hpp
+++ b/src/slave/containerizer/isolators/filesystem/shared.hpp
@@ -42,7 +42,7 @@ public:
   virtual process::Future<Nothing> recover(
       const std::list<mesos::slave::ExecutorRunState>& states);
 
-  virtual process::Future<Option<CommandInfo> > prepare(
+  virtual process::Future<Option<CommandInfo>> prepare(
       const ContainerID& containerId,
       const ExecutorInfo& executorInfo,
       const std::string& directory,

http://git-wip-us.apache.org/repos/asf/mesos/blob/a12242bd/src/slave/containerizer/isolators/namespaces/pid.cpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/isolators/namespaces/pid.cpp b/src/slave/containerizer/isolators/namespaces/pid.cpp
index 44b18c6..eb35ae6 100644
--- a/src/slave/containerizer/isolators/namespaces/pid.cpp
+++ b/src/slave/containerizer/isolators/namespaces/pid.cpp
@@ -133,7 +133,7 @@ Future<Nothing> NamespacesPidIsolatorProcess::recover(
   }
 
   // Clean up any orphaned bind mounts and empty files.
-  Try<list<string> > entries = os::ls(BIND_MOUNT_ROOT);
+  Try<list<string>> entries = os::ls(BIND_MOUNT_ROOT);
   if (entries.isError()) {
     return Failure("Failed to list existing containers in '" +
                    BIND_MOUNT_ROOT + "': " + entries.error());
@@ -152,7 +152,7 @@ Future<Nothing> NamespacesPidIsolatorProcess::recover(
 }
 
 
-Future<Option<CommandInfo> > NamespacesPidIsolatorProcess::prepare(
+Future<Option<CommandInfo>> NamespacesPidIsolatorProcess::prepare(
     const ContainerID& containerId,
     const ExecutorInfo& executorInfo,
     const string& directory,

http://git-wip-us.apache.org/repos/asf/mesos/blob/a12242bd/src/slave/containerizer/isolators/namespaces/pid.hpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/isolators/namespaces/pid.hpp b/src/slave/containerizer/isolators/namespaces/pid.hpp
index e6eb2d9..6a7be80 100644
--- a/src/slave/containerizer/isolators/namespaces/pid.hpp
+++ b/src/slave/containerizer/isolators/namespaces/pid.hpp
@@ -59,7 +59,7 @@ public:
   virtual process::Future<Nothing> recover(
       const std::list<mesos::slave::ExecutorRunState>& states);
 
-  virtual process::Future<Option<CommandInfo> > prepare(
+  virtual process::Future<Option<CommandInfo>> prepare(
       const ContainerID& containerId,
       const ExecutorInfo& executorInfo,
       const std::string& directory,

http://git-wip-us.apache.org/repos/asf/mesos/blob/a12242bd/src/slave/containerizer/isolators/network/port_mapping.cpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/isolators/network/port_mapping.cpp b/src/slave/containerizer/isolators/network/port_mapping.cpp
index e5e6fa6..7b4aac8 100644
--- a/src/slave/containerizer/isolators/network/port_mapping.cpp
+++ b/src/slave/containerizer/isolators/network/port_mapping.cpp
@@ -269,7 +269,7 @@ JSON::Object json(const Iterable& ranges)
 }
 
 
-static Try<vector<PortRange> > parse(const JSON::Object& object)
+static Try<vector<PortRange>> parse(const JSON::Object& object)
 {
   Try<Value::Ranges> parsing = protobuf::parse<Value::Ranges>(object);
   if (parsing.isError()) {
@@ -424,11 +424,11 @@ int PortMappingUpdate::execute()
     return 1;
   }
 
-  Option<vector<PortRange> > portsToAdd;
-  Option<vector<PortRange> > portsToRemove;
+  Option<vector<PortRange>> portsToAdd;
+  Option<vector<PortRange>> portsToRemove;
 
   if (flags.ports_to_add.isSome()) {
-    Try<vector<PortRange> > parsing = parse(flags.ports_to_add.get());
+    Try<vector<PortRange>> parsing = parse(flags.ports_to_add.get());
     if (parsing.isError()) {
       cerr << "Parsing 'ports_to_add' failed: " << parsing.error() << endl;
       return 1;
@@ -437,7 +437,7 @@ int PortMappingUpdate::execute()
   }
 
   if (flags.ports_to_remove.isSome()) {
-    Try<vector<PortRange> > parsing = parse(flags.ports_to_remove.get());
+    Try<vector<PortRange>> parsing = parse(flags.ports_to_remove.get());
     if (parsing.isError()) {
       cerr << "Parsing 'ports_to_remove' failed: " << parsing.error() << endl;
       return 1;
@@ -609,7 +609,7 @@ int PortMappingStatistics::execute()
 
     // NOTE: If the underlying library uses the older version of
     // kernel API, the family argument passed in may not be honored.
-    Try<vector<diagnosis::socket::Info> > infos =
+    Try<vector<diagnosis::socket::Info>> infos =
       diagnosis::socket::infos(AF_INET, diagnosis::socket::state::ALL);
 
     if (infos.isError()) {
@@ -1331,7 +1331,7 @@ Future<Nothing> PortMappingIsolatorProcess::recover(
     const list<ExecutorRunState>& states)
 {
   // Extract pids from virtual device names.
-  Try<set<string> > links = net::links();
+  Try<set<string>> links = net::links();
   if (links.isError()) {
     return Failure("Failed to get all the links: " + links.error());
   }
@@ -1445,7 +1445,7 @@ PortMappingIsolatorProcess::_recover(pid_t pid)
   // sure that we add filters to veth before adding filters to host
   // eth0 and host lo. Also, we need to make sure we remove filters
   // from host eth0 and host lo before removing filters from veth.
-  Result<vector<ip::Classifier> > classifiers =
+  Result<vector<ip::Classifier>> classifiers =
     ip::classifiers(veth(pid), ingress::HANDLE);
 
   if (classifiers.isError()) {
@@ -1522,7 +1522,7 @@ PortMappingIsolatorProcess::_recover(pid_t pid)
 }
 
 
-Future<Option<CommandInfo> > PortMappingIsolatorProcess::prepare(
+Future<Option<CommandInfo>> PortMappingIsolatorProcess::prepare(
     const ContainerID& containerId,
     const ExecutorInfo& executorInfo,
     const string& directory,
@@ -1566,7 +1566,7 @@ Future<Option<CommandInfo> > PortMappingIsolatorProcess::prepare(
   }
 
   // Allocate the ephemeral ports used by this container.
-  Try<Interval<uint16_t> > ephemeralPorts = ephemeralPortsAllocator->allocate();
+  Try<Interval<uint16_t>> ephemeralPorts = ephemeralPortsAllocator->allocate();
   if (ephemeralPorts.isError()) {
     return Failure(
         "Failed to allocate ephemeral ports: " + ephemeralPorts.error());
@@ -1850,7 +1850,7 @@ Future<Limitation> PortMappingIsolatorProcess::watch(
 
 void PortMappingIsolatorProcess::_update(
     const ContainerID& containerId,
-    const Future<Option<int> >& status)
+    const Future<Option<int>>& status)
 {
   if (!status.isReady()) {
     ++metrics.updating_container_ip_filters_errors;
@@ -1933,7 +1933,7 @@ Future<Nothing> PortMappingIsolatorProcess::update(
             << containerId << " from " << info->nonEphemeralPorts
             << " to " << nonEphemeralPorts;
 
-  Result<vector<ip::Classifier> > classifiers =
+  Result<vector<ip::Classifier>> classifiers =
     ip::classifiers(veth(pid), ingress::HANDLE);
 
   if (classifiers.isError()) {
@@ -2058,7 +2058,7 @@ Future<ResourceStatistics> PortMappingIsolatorProcess::usage(
     return result;
   }
 
-  Result<hashmap<string, uint64_t> > stat =
+  Result<hashmap<string, uint64_t>> stat =
     link::statistics(veth(info->pid.get()));
 
   if (stat.isError()) {
@@ -2882,13 +2882,13 @@ uint32_t EphemeralPortsAllocator::nextMultipleOf(uint32_t x, uint32_t m)
 }
 
 
-Try<Interval<uint16_t> > EphemeralPortsAllocator::allocate()
+Try<Interval<uint16_t>> EphemeralPortsAllocator::allocate()
 {
   if (portsPerContainer_ == 0) {
     return Error("Number of ephemeral ports per container is zero");
   }
 
-  Option<Interval<uint16_t> > allocated;
+  Option<Interval<uint16_t>> allocated;
 
   foreach (const Interval<uint16_t>& interval, free) {
     uint16_t upper = interval.upper();

http://git-wip-us.apache.org/repos/asf/mesos/blob/a12242bd/src/slave/containerizer/isolators/network/port_mapping.hpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/isolators/network/port_mapping.hpp b/src/slave/containerizer/isolators/network/port_mapping.hpp
index 0f9ad4a..3aae7e2 100644
--- a/src/slave/containerizer/isolators/network/port_mapping.hpp
+++ b/src/slave/containerizer/isolators/network/port_mapping.hpp
@@ -76,7 +76,7 @@ public:
   // will automatically find one port range with the given container
   // size. Returns error if the allocation cannot be fulfilled (e.g.,
   // exhausting available ephemeral ports).
-  Try<Interval<uint16_t> > allocate();
+  Try<Interval<uint16_t>> allocate();
 
   // Mark the specified ephemeral port range as allocated.
   void allocate(const Interval<uint16_t>& ports);
@@ -133,7 +133,7 @@ public:
   virtual process::Future<Nothing> recover(
       const std::list<mesos::slave::ExecutorRunState>& states);
 
-  virtual process::Future<Option<CommandInfo> > prepare(
+  virtual process::Future<Option<CommandInfo>> prepare(
       const ContainerID& containerId,
       const ExecutorInfo& executorInfo,
       const std::string& directory,
@@ -251,7 +251,7 @@ private:
 
   void _update(
       const ContainerID& containerId,
-      const process::Future<Option<int> >& status);
+      const process::Future<Option<int>>& status);
 
   process::Future<ResourceStatistics> _usage(
       const ResourceStatistics& result,

http://git-wip-us.apache.org/repos/asf/mesos/blob/a12242bd/src/slave/containerizer/isolators/posix.hpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/isolators/posix.hpp b/src/slave/containerizer/isolators/posix.hpp
index 1e6a396..fc31cec 100644
--- a/src/slave/containerizer/isolators/posix.hpp
+++ b/src/slave/containerizer/isolators/posix.hpp
@@ -53,7 +53,7 @@ public:
 
       pids.put(run.id, run.pid);
 
-      process::Owned<process::Promise<mesos::slave::Limitation> > promise(
+      process::Owned<process::Promise<mesos::slave::Limitation>> promise(
           new process::Promise<mesos::slave::Limitation>());
       promises.put(run.id, promise);
     }
@@ -61,7 +61,7 @@ public:
     return Nothing();
   }
 
-  virtual process::Future<Option<CommandInfo> > prepare(
+  virtual process::Future<Option<CommandInfo>> prepare(
       const ContainerID& containerId,
       const ExecutorInfo& executorInfo,
       const std::string& directory,
@@ -72,7 +72,7 @@ public:
                               " has already been prepared");
     }
 
-    process::Owned<process::Promise<mesos::slave::Limitation> > promise(
+    process::Owned<process::Promise<mesos::slave::Limitation>> promise(
         new process::Promise<mesos::slave::Limitation>());
     promises.put(containerId, promise);
 

http://git-wip-us.apache.org/repos/asf/mesos/blob/a12242bd/src/slave/containerizer/launcher.cpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/launcher.cpp b/src/slave/containerizer/launcher.cpp
index 44fcb43..eb798fa 100644
--- a/src/slave/containerizer/launcher.cpp
+++ b/src/slave/containerizer/launcher.cpp
@@ -75,7 +75,7 @@ Future<Nothing> PosixLauncher::recover(const list<ExecutorRunState>& states)
 
 
 // The setup function in child before the exec.
-static int childSetup(const Option<lambda::function<int()> >& setup)
+static int childSetup(const Option<lambda::function<int()>>& setup)
 {
   // POSIX guarantees a forked child's pid does not match any existing
   // process group id so only a single setsid() is required and the
@@ -105,8 +105,8 @@ Try<pid_t> PosixLauncher::fork(
     const Subprocess::IO& out,
     const Subprocess::IO& err,
     const Option<flags::FlagsBase>& flags,
-    const Option<map<string, string> >& environment,
-    const Option<lambda::function<int()> >& setup)
+    const Option<map<string, string>>& environment,
+    const Option<lambda::function<int()>>& setup)
 {
   if (pids.contains(containerId)) {
     return Error("Process has already been forked for container " +
@@ -138,7 +138,7 @@ Try<pid_t> PosixLauncher::fork(
 
 
 // Forward declaration.
-Future<Nothing> _destroy(const Future<Option<int> >& future);
+Future<Nothing> _destroy(const Future<Option<int>>& future);
 
 
 Future<Nothing> PosixLauncher::destroy(const ContainerID& containerId)
@@ -150,7 +150,7 @@ Future<Nothing> PosixLauncher::destroy(const ContainerID& containerId)
   pid_t pid = pids.get(containerId).get();
 
   // Kill all processes in the session and process group.
-  Try<list<os::ProcessTree> > trees =
+  Try<list<os::ProcessTree>> trees =
     os::killtree(pid, SIGKILL, true, true);
 
   pids.erase(containerId);
@@ -162,7 +162,7 @@ Future<Nothing> PosixLauncher::destroy(const ContainerID& containerId)
 }
 
 
-Future<Nothing> _destroy(const Future<Option<int> >& future)
+Future<Nothing> _destroy(const Future<Option<int>>& future)
 {
   if (future.isReady()) {
     return Nothing();

http://git-wip-us.apache.org/repos/asf/mesos/blob/a12242bd/src/slave/containerizer/launcher.hpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/launcher.hpp b/src/slave/containerizer/launcher.hpp
index 3205b5c..95a7f76 100644
--- a/src/slave/containerizer/launcher.hpp
+++ b/src/slave/containerizer/launcher.hpp
@@ -63,8 +63,8 @@ public:
       const process::Subprocess::IO& out,
       const process::Subprocess::IO& err,
       const Option<flags::FlagsBase>& flags,
-      const Option<std::map<std::string, std::string> >& environment,
-      const Option<lambda::function<int()> >& setup) = 0;
+      const Option<std::map<std::string, std::string>>& environment,
+      const Option<lambda::function<int()>>& setup) = 0;
 
   // Kill all processes in the containerized context.
   virtual process::Future<Nothing> destroy(const ContainerID& containerId) = 0;
@@ -93,8 +93,8 @@ public:
       const process::Subprocess::IO& out,
       const process::Subprocess::IO& err,
       const Option<flags::FlagsBase>& flags,
-      const Option<std::map<std::string, std::string> >& environment,
-      const Option<lambda::function<int()> >& setup);
+      const Option<std::map<std::string, std::string>>& environment,
+      const Option<lambda::function<int()>>& setup);
 
   virtual process::Future<Nothing> destroy(const ContainerID& containerId);
 

http://git-wip-us.apache.org/repos/asf/mesos/blob/a12242bd/src/slave/containerizer/linux_launcher.cpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/linux_launcher.cpp b/src/slave/containerizer/linux_launcher.cpp
index d357955..b176ac1 100644
--- a/src/slave/containerizer/linux_launcher.cpp
+++ b/src/slave/containerizer/linux_launcher.cpp
@@ -80,7 +80,7 @@ Try<Launcher*> LinuxLauncher::create(const Flags& flags)
   }
 
   // Ensure that no other subsystem is attached to the hierarchy.
-  Try<set<string> > subsystems = cgroups::subsystems(hierarchy.get());
+  Try<set<string>> subsystems = cgroups::subsystems(hierarchy.get());
   if (subsystems.isError()) {
     return Error(
         "Failed to get the list of attached subsystems for hierarchy " +
@@ -119,7 +119,7 @@ Try<Launcher*> LinuxLauncher::create(const Flags& flags)
 }
 
 
-Future<Nothing> _recover(const Future<list<Nothing> >& futures)
+Future<Nothing> _recover(const Future<list<Nothing>>& futures)
 {
   return Nothing();
 }
@@ -167,12 +167,12 @@ Future<Nothing> LinuxLauncher::recover(
     cgroups.insert(cgroup(containerId));
   }
 
-  Try<vector<string> > orphans = cgroups::get(hierarchy, flags.cgroups_root);
+  Try<vector<string>> orphans = cgroups::get(hierarchy, flags.cgroups_root);
   if (orphans.isError()) {
     return Failure(orphans.error());
   }
 
-  list<Future<Nothing> > futures;
+  list<Future<Nothing>> futures;
 
   foreach (const string& orphan, orphans.get()) {
     if (!cgroups.contains(orphan)) {
@@ -222,7 +222,7 @@ static pid_t clone(const lambda::function<int()>& func, int namespaces)
 
 static int childSetup(
     int pipes[2],
-    const Option<lambda::function<int()> >& setup)
+    const Option<lambda::function<int()>>& setup)
 {
   // In child.
   while (::close(pipes[1]) == -1 && errno == EINTR);
@@ -268,8 +268,8 @@ Try<pid_t> LinuxLauncher::fork(
     const process::Subprocess::IO& out,
     const process::Subprocess::IO& err,
     const Option<flags::FlagsBase>& flags,
-    const Option<map<string, string> >& environment,
-    const Option<lambda::function<int()> >& setup)
+    const Option<map<string, string>>& environment,
+    const Option<lambda::function<int()>>& setup)
 {
   // Create a freezer cgroup for this container if necessary.
   Try<bool> exists = cgroups::exists(hierarchy, cgroup(containerId));

http://git-wip-us.apache.org/repos/asf/mesos/blob/a12242bd/src/slave/containerizer/linux_launcher.hpp
----------------------------------------------------------------------
diff --git a/src/slave/containerizer/linux_launcher.hpp b/src/slave/containerizer/linux_launcher.hpp
index 52540f3..60082c7 100644
--- a/src/slave/containerizer/linux_launcher.hpp
+++ b/src/slave/containerizer/linux_launcher.hpp
@@ -45,8 +45,8 @@ public:
       const process::Subprocess::IO& out,
       const process::Subprocess::IO& err,
       const Option<flags::FlagsBase>& flags,
-      const Option<std::map<std::string, std::string> >& environment,
-      const Option<lambda::function<int()> >& setup);
+      const Option<std::map<std::string, std::string>>& environment,
+      const Option<lambda::function<int()>>& setup);
 
   virtual process::Future<Nothing> destroy(const ContainerID& containerId);