Posted to commits@mesos.apache.org by be...@apache.org on 2015/04/26 21:43:40 UTC

[07/10] mesos git commit: Switched from 'memory::' to 'std::' in Mesos.

Switched from 'memory::' to 'std::' in Mesos.

Review: https://reviews.apache.org/r/33566
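
The change is mechanical throughout: drop the <stout/memory.hpp> include in
favour of the standard <memory> header and qualify shared_ptr with std::
instead of memory::. A minimal before/after sketch of the pattern (the Task
struct below is a trivial illustrative stand-in, not mesos' Task):

    // Before: the memory::shared_ptr alias from <stout/memory.hpp>.
    //   #include <stout/memory.hpp>
    //   memory::shared_ptr<Task> task(new Task(t));

    // After: the C++11 standard library type.
    #include <memory>

    struct Task { int id; };  // illustrative stand-in only

    int main()
    {
      std::shared_ptr<Task> task(new Task{42});
      std::shared_ptr<Task> copy = task;  // shared ownership, unchanged semantics
      return (copy.use_count() == 2 && copy->id == 42) ? 0 : 1;
    }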


Project: http://git-wip-us.apache.org/repos/asf/mesos/repo
Commit: http://git-wip-us.apache.org/repos/asf/mesos/commit/9d6ffb9d
Tree: http://git-wip-us.apache.org/repos/asf/mesos/tree/9d6ffb9d
Diff: http://git-wip-us.apache.org/repos/asf/mesos/diff/9d6ffb9d

Branch: refs/heads/master
Commit: 9d6ffb9d3e9aed4a73c495ca2c6585e220961836
Parents: b043004
Author: Michael Park <mc...@gmail.com>
Authored: Sun Apr 26 12:25:10 2015 -0700
Committer: Benjamin Hindman <be...@gmail.com>
Committed: Sun Apr 26 12:25:17 2015 -0700

----------------------------------------------------------------------
 src/linux/routing/internal.hpp |   4 +-
 src/local/local.cpp            |   5 +-
 src/master/http.cpp            |  10 +--
 src/master/main.cpp            |   5 +-
 src/master/master.cpp          |   7 +-
 src/master/master.hpp          |  14 ++--
 src/slave/http.cpp             |   3 +-
 src/slave/slave.cpp            |  18 ++--
 src/slave/slave.hpp            |  19 +++--
 src/tests/cluster.hpp          |  18 ++--
 src/tests/master_tests.cpp     | 161 ++++++++++++++++++------------------
 src/tests/mesos.cpp            |  35 ++++----
 src/tests/mesos.hpp            |   3 +-
 src/tests/scheduler_tests.cpp  |   2 +-
 src/tests/slave_tests.cpp      |  93 +++++++++++----------
 15 files changed, 197 insertions(+), 200 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/mesos/blob/9d6ffb9d/src/linux/routing/internal.hpp
----------------------------------------------------------------------
diff --git a/src/linux/routing/internal.hpp b/src/linux/routing/internal.hpp
index aa60796..7e636cd 100644
--- a/src/linux/routing/internal.hpp
+++ b/src/linux/routing/internal.hpp
@@ -30,10 +30,10 @@
 #include <netlink/route/link.h>
 #include <netlink/route/qdisc.h>
 
+#include <memory>
 #include <string>
 
 #include <stout/error.hpp>
-#include <stout/memory.hpp>
 #include <stout/try.hpp>
 
 #include "linux/routing/utils.hpp"
@@ -79,7 +79,7 @@ private:
     T* object;
   };
 
-  memory::shared_ptr<Data> data;
+  std::shared_ptr<Data> data;
 };
 
 

http://git-wip-us.apache.org/repos/asf/mesos/blob/9d6ffb9d/src/local/local.cpp
----------------------------------------------------------------------
diff --git a/src/local/local.cpp b/src/local/local.cpp
index ab58a4a..dda25ab 100644
--- a/src/local/local.cpp
+++ b/src/local/local.cpp
@@ -17,6 +17,7 @@
  */
 
 #include <map>
+#include <memory>
 #include <set>
 #include <sstream>
 #include <string>
@@ -33,7 +34,6 @@
 #include <stout/duration.hpp>
 #include <stout/exit.hpp>
 #include <stout/foreach.hpp>
-#include <stout/memory.hpp>
 #include <stout/os.hpp>
 #include <stout/path.hpp>
 #include <stout/try.hpp>
@@ -71,8 +71,6 @@
 #include "state/protobuf.hpp"
 #include "state/storage.hpp"
 
-using memory::shared_ptr;
-
 using namespace mesos::internal;
 using namespace mesos::internal::log;
 
@@ -100,6 +98,7 @@ using process::UPID;
 
 using std::map;
 using std::set;
+using std::shared_ptr;
 using std::string;
 using std::stringstream;
 using std::vector;

http://git-wip-us.apache.org/repos/asf/mesos/blob/9d6ffb9d/src/master/http.cpp
----------------------------------------------------------------------
diff --git a/src/master/http.cpp b/src/master/http.cpp
index 00c22c4..fb44825 100644
--- a/src/master/http.cpp
+++ b/src/master/http.cpp
@@ -18,6 +18,7 @@
 
 #include <iomanip>
 #include <map>
+#include <memory>
 #include <sstream>
 #include <string>
 #include <utility>
@@ -35,7 +36,6 @@
 #include <stout/foreach.hpp>
 #include <stout/json.hpp>
 #include <stout/lambda.hpp>
-#include <stout/memory.hpp>
 #include <stout/net.hpp>
 #include <stout/numify.hpp>
 #include <stout/os.hpp>
@@ -160,7 +160,7 @@ JSON::Object model(const Framework& framework)
     JSON::Array array;
     array.values.reserve(framework.completedTasks.size()); // MESOS-2353.
 
-    foreach (const memory::shared_ptr<Task>& task, framework.completedTasks) {
+    foreach (const std::shared_ptr<Task>& task, framework.completedTasks) {
       array.values.push_back(model(*task));
     }
 
@@ -500,7 +500,7 @@ Future<Response> Master::Http::state(const Request& request)
     JSON::Array array;
     array.values.reserve(master->frameworks.completed.size()); // MESOS-2353.
 
-    foreach (const memory::shared_ptr<Framework>& framework,
+    foreach (const std::shared_ptr<Framework>& framework,
              master->frameworks.completed) {
       array.values.push_back(model(*framework));
     }
@@ -747,7 +747,7 @@ Future<Response> Master::Http::tasks(const Request& request)
   foreachvalue (Framework* framework, master->frameworks.registered) {
     frameworks.push_back(framework);
   }
-  foreach (const memory::shared_ptr<Framework>& framework,
+  foreach (const std::shared_ptr<Framework>& framework,
            master->frameworks.completed) {
     frameworks.push_back(framework.get());
   }
@@ -759,7 +759,7 @@ Future<Response> Master::Http::tasks(const Request& request)
       CHECK_NOTNULL(task);
       tasks.push_back(task);
     }
-    foreach (const memory::shared_ptr<Task>& task, framework->completedTasks) {
+    foreach (const std::shared_ptr<Task>& task, framework->completedTasks) {
       tasks.push_back(task.get());
     }
   }

http://git-wip-us.apache.org/repos/asf/mesos/blob/9d6ffb9d/src/master/main.cpp
----------------------------------------------------------------------
diff --git a/src/master/main.cpp b/src/master/main.cpp
index d141c76..18f8c31 100644
--- a/src/master/main.cpp
+++ b/src/master/main.cpp
@@ -18,6 +18,7 @@
 
 #include <stdint.h>
 
+#include <memory>
 #include <set>
 #include <string>
 #include <vector>
@@ -36,7 +37,6 @@
 #include <stout/duration.hpp>
 #include <stout/exit.hpp>
 #include <stout/flags.hpp>
-#include <stout/memory.hpp>
 #include <stout/nothing.hpp>
 #include <stout/option.hpp>
 #include <stout/os.hpp>
@@ -78,8 +78,6 @@ using namespace mesos::internal::log;
 using namespace mesos::internal::master;
 using namespace zookeeper;
 
-using memory::shared_ptr;
-
 using mesos::MasterInfo;
 
 using mesos::modules::Anonymous;
@@ -93,6 +91,7 @@ using std::cerr;
 using std::cout;
 using std::endl;
 using std::set;
+using std::shared_ptr;
 using std::string;
 using std::vector;
 

http://git-wip-us.apache.org/repos/asf/mesos/blob/9d6ffb9d/src/master/master.cpp
----------------------------------------------------------------------
diff --git a/src/master/master.cpp b/src/master/master.cpp
index ce9d263..d42a6f3 100644
--- a/src/master/master.cpp
+++ b/src/master/master.cpp
@@ -23,6 +23,7 @@
 #include <fstream>
 #include <iomanip>
 #include <list>
+#include <memory>
 #include <sstream>
 
 #include <mesos/module.hpp>
@@ -49,7 +50,6 @@
 #include <stout/error.hpp>
 #include <stout/ip.hpp>
 #include <stout/lambda.hpp>
-#include <stout/memory.hpp>
 #include <stout/multihashmap.hpp>
 #include <stout/net.hpp>
 #include <stout/nothing.hpp>
@@ -84,6 +84,7 @@
 #include "watcher/whitelist_watcher.hpp"
 
 using std::list;
+using std::shared_ptr;
 using std::string;
 using std::vector;
 
@@ -106,8 +107,6 @@ using process::UPID;
 
 using process::metrics::Counter;
 
-using memory::shared_ptr;
-
 namespace mesos {
 namespace internal {
 namespace master {
@@ -1452,7 +1451,7 @@ void Master::detected(const Future<Option<MasterInfo>>& _leader)
 }
 
 
-// Helper to convert authorization result to Future<Option<Error> >.
+// Helper to convert authorization result to Future<Option<Error>>.
 static Future<Option<Error>> _authorize(const string& message, bool authorized)
 {
   if (authorized) {

http://git-wip-us.apache.org/repos/asf/mesos/blob/9d6ffb9d/src/master/master.hpp
----------------------------------------------------------------------
diff --git a/src/master/master.hpp b/src/master/master.hpp
index bf1661a..49ee050 100644
--- a/src/master/master.hpp
+++ b/src/master/master.hpp
@@ -22,6 +22,7 @@
 #include <stdint.h>
 
 #include <list>
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -47,7 +48,6 @@
 #include <stout/foreach.hpp>
 #include <stout/hashmap.hpp>
 #include <stout/hashset.hpp>
-#include <stout/memory.hpp>
 #include <stout/multihashmap.hpp>
 #include <stout/option.hpp>
 
@@ -102,7 +102,7 @@ public:
          MasterContender* contender,
          MasterDetector* detector,
          const Option<Authorizer*>& authorizer,
-         const Option<memory::shared_ptr<process::RateLimiter>>&
+         const Option<std::shared_ptr<process::RateLimiter>>&
            slaveRemovalLimiter,
          const Flags& flags = Flags());
 
@@ -607,7 +607,7 @@ private:
     // health checks.
     // NOTE: Using a 'shared_ptr' here is OK because 'RateLimiter' is
     // a wrapper around libprocess process which is thread safe.
-    Option<memory::shared_ptr<process::RateLimiter>> limiter;
+    Option<std::shared_ptr<process::RateLimiter>> limiter;
 
     bool transitioning(const Option<SlaveID>& slaveId)
     {
@@ -628,7 +628,7 @@ private:
     Frameworks() : completed(MAX_COMPLETED_FRAMEWORKS) {}
 
     hashmap<FrameworkID, Framework*> registered;
-    boost::circular_buffer<memory::shared_ptr<Framework>> completed;
+    boost::circular_buffer<std::shared_ptr<Framework>> completed;
 
     // Principals of frameworks keyed by PID.
     // NOTE: Multiple PIDs can map to the same principal. The
@@ -678,7 +678,7 @@ private:
   // thread safe.
   // TODO(dhamon): This does not need to be a shared_ptr. Metrics contains
   // copyable metric types only.
-  memory::shared_ptr<Metrics> metrics;
+  std::shared_ptr<Metrics> metrics;
 
   // Gauge handlers.
   double _uptime_secs()
@@ -1043,7 +1043,7 @@ struct Framework
   void addCompletedTask(const Task& task)
   {
     // TODO(adam-mesos): Check if completed task already exists.
-    completedTasks.push_back(memory::shared_ptr<Task>(new Task(task)));
+    completedTasks.push_back(std::shared_ptr<Task>(new Task(task)));
   }
 
   void removeTask(Task* task)
@@ -1208,7 +1208,7 @@ struct Framework
   // NOTE: We use a shared pointer for Task because clang doesn't like
   // Boost's implementation of circular_buffer with Task (Boost
   // attempts to do some memset's which are unsafe).
-  boost::circular_buffer<memory::shared_ptr<Task>> completedTasks;
+  boost::circular_buffer<std::shared_ptr<Task>> completedTasks;
 
   hashset<Offer*> offers; // Active offers for framework.
 

http://git-wip-us.apache.org/repos/asf/mesos/blob/9d6ffb9d/src/slave/http.cpp
----------------------------------------------------------------------
diff --git a/src/slave/http.cpp b/src/slave/http.cpp
index 914e7e5..f678aab 100644
--- a/src/slave/http.cpp
+++ b/src/slave/http.cpp
@@ -17,6 +17,7 @@
  */
 
 #include <map>
+#include <memory>
 #include <sstream>
 #include <string>
 #include <vector>
@@ -183,7 +184,7 @@ JSON::Object model(const Executor& executor)
   object.values["queued_tasks"] = queued;
 
   JSON::Array completed;
-  foreach (const memory::shared_ptr<Task>& task, executor.completedTasks) {
+  foreach (const std::shared_ptr<Task>& task, executor.completedTasks) {
     completed.values.push_back(model(*task));
   }
 

http://git-wip-us.apache.org/repos/asf/mesos/blob/9d6ffb9d/src/slave/slave.cpp
----------------------------------------------------------------------
diff --git a/src/slave/slave.cpp b/src/slave/slave.cpp
index e531283..c5b4847 100644
--- a/src/slave/slave.cpp
+++ b/src/slave/slave.cpp
@@ -24,6 +24,7 @@
 #include <iomanip>
 #include <list>
 #include <map>
+#include <memory>
 #include <set>
 #include <sstream>
 #include <string>
@@ -233,7 +234,7 @@ void Slave::initialize()
 
       // Exit if there are processes running inside the cgroup - this
       // indicates a prior slave (or child process) is still running.
-      Try<set<pid_t> > processes = cgroups::processes(hierarchy.get(), cgroup);
+      Try<set<pid_t>> processes = cgroups::processes(hierarchy.get(), cgroup);
       if (processes.isError()) {
         EXIT(1) << "Failed to check for existing threads in cgroup " << cgroup
                 << " for subsystem " << subsystem
@@ -598,7 +599,7 @@ Nothing Slave::detachFile(const string& path)
 }
 
 
-void Slave::detected(const Future<Option<MasterInfo> >& _master)
+void Slave::detected(const Future<Option<MasterInfo>>& _master)
 {
   CHECK(state == DISCONNECTED ||
         state == RUNNING ||
@@ -1071,8 +1072,7 @@ void Slave::doReliableRegistration(Duration maxBackoff)
           VLOG(2) << "Reregistering terminated task " << task->task_id();
           completedFramework_->add_tasks()->CopyFrom(*task);
         }
-        foreach (const memory::shared_ptr<Task>& task,
-                 executor->completedTasks) {
+        foreach (const std::shared_ptr<Task>& task, executor->completedTasks) {
           VLOG(2) << "Reregistering completed task " << task->task_id();
           completedFramework_->add_tasks()->CopyFrom(*task);
         }
@@ -1185,7 +1185,7 @@ void Slave::runTask(
     // executors to this framework and remove it from that list.
     // TODO(brenden): Consider using stout/cache.hpp instead of boost
     // circular_buffer.
-    for (boost::circular_buffer<Owned<Framework> >::iterator i =
+    for (boost::circular_buffer<Owned<Framework>>::iterator i =
         completedFrameworks.begin(); i != completedFrameworks.end(); ++i) {
       if ((*i)->id() == frameworkId) {
         framework->completedExecutors = (*i)->completedExecutors;
@@ -2642,7 +2642,7 @@ void Slave::statusUpdate(StatusUpdate update, const UPID& pid)
 
 
 void Slave::_statusUpdate(
-    const Option<Future<Nothing> >& future,
+    const Option<Future<Nothing>>& future,
     const StatusUpdate& update,
     const UPID& pid,
     const ExecutorID& executorId,
@@ -2909,7 +2909,7 @@ void Slave::ping(const UPID& from, bool connected)
 }
 
 
-void Slave::pingTimeout(Future<Option<MasterInfo> > future)
+void Slave::pingTimeout(Future<Option<MasterInfo>> future)
 {
   // It's possible that a new ping arrived since the timeout fired
   // and we were unable to cancel this timeout. If this occurs, don't
@@ -3906,7 +3906,7 @@ void Slave::__recover(const Future<Nothing>& future)
   // in the recovery code, to recover all slaves instead of only
   // the latest slave.
   const string& directory = path::join(flags.work_dir, "slaves");
-  Try<list<string> > entries = os::ls(directory);
+  Try<list<string>> entries = os::ls(directory);
   if (entries.isSome()) {
     foreach (const string& entry, entries.get()) {
       string path = path::join(directory, entry);
@@ -4648,7 +4648,7 @@ void Executor::completeTask(const TaskID& taskId)
     << "Failed to find terminated task " << taskId;
 
   Task* task = terminatedTasks[taskId];
-  completedTasks.push_back(memory::shared_ptr<Task>(task));
+  completedTasks.push_back(std::shared_ptr<Task>(task));
   terminatedTasks.erase(taskId);
 }
 

http://git-wip-us.apache.org/repos/asf/mesos/blob/9d6ffb9d/src/slave/slave.hpp
----------------------------------------------------------------------
diff --git a/src/slave/slave.hpp b/src/slave/slave.hpp
index 1b8c512..654a869 100644
--- a/src/slave/slave.hpp
+++ b/src/slave/slave.hpp
@@ -22,6 +22,7 @@
 #include <stdint.h>
 
 #include <list>
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -192,7 +193,7 @@ public:
   // Continue handling the status update after optionally updating the
   // container's resources.
   void _statusUpdate(
-      const Option<Future<Nothing> >& future,
+      const Option<Future<Nothing>>& future,
       const StatusUpdate& update,
       const UPID& pid,
       const ExecutorID& executorId,
@@ -247,7 +248,7 @@ public:
 
   // Invoked whenever the detector detects a change in masters.
   // Made public for testing purposes.
-  void detected(const process::Future<Option<MasterInfo> >& pid);
+  void detected(const process::Future<Option<MasterInfo>>& pid);
 
   enum State {
     RECOVERING,   // Slave is doing recovery.
@@ -284,7 +285,7 @@ public:
 
   // Triggers a re-detection of the master when the slave does
   // not receive a ping.
-  void pingTimeout(process::Future<Option<MasterInfo> > future);
+  void pingTimeout(process::Future<Option<MasterInfo>> future);
 
   void authenticate();
 
@@ -436,7 +437,7 @@ private:
 
   hashmap<FrameworkID, Framework*> frameworks;
 
-  boost::circular_buffer<process::Owned<Framework> > completedFrameworks;
+  boost::circular_buffer<process::Owned<Framework>> completedFrameworks;
 
   MasterDetector* detector;
 
@@ -458,7 +459,7 @@ private:
   StatusUpdateManager* statusUpdateManager;
 
   // Master detection future.
-  process::Future<Option<MasterInfo> > detection;
+  process::Future<Option<MasterInfo>> detection;
 
   // Timer for triggering re-detection when no ping is received from
   // the master.
@@ -482,7 +483,7 @@ private:
   Authenticatee* authenticatee;
 
   // Indicates if an authentication attempt is in progress.
-  Option<Future<bool> > authenticating;
+  Option<Future<bool>> authenticating;
 
   // Indicates if the authentication is successful.
   bool authenticated;
@@ -566,7 +567,7 @@ struct Executor
   // NOTE: We use a shared pointer for Task because clang doesn't like
   // Boost's implementation of circular_buffer with Task (Boost
   // attempts to do some memset's which are unsafe).
-  boost::circular_buffer<memory::shared_ptr<Task> > completedTasks;
+  boost::circular_buffer<std::shared_ptr<Task>> completedTasks;
 
 private:
   Executor(const Executor&);              // No copying.
@@ -611,13 +612,13 @@ struct Framework
   UPID pid;
 
   // Executors with pending tasks.
-  hashmap<ExecutorID, hashmap<TaskID, TaskInfo> > pending;
+  hashmap<ExecutorID, hashmap<TaskID, TaskInfo>> pending;
 
   // Current running executors.
   hashmap<ExecutorID, Executor*> executors;
 
   // Up to MAX_COMPLETED_EXECUTORS_PER_FRAMEWORK completed executors.
-  boost::circular_buffer<process::Owned<Executor> > completedExecutors;
+  boost::circular_buffer<process::Owned<Executor>> completedExecutors;
 private:
   Framework(const Framework&);              // No copying.
   Framework& operator = (const Framework&); // No assigning.

http://git-wip-us.apache.org/repos/asf/mesos/blob/9d6ffb9d/src/tests/cluster.hpp
----------------------------------------------------------------------
diff --git a/src/tests/cluster.hpp b/src/tests/cluster.hpp
index 26f4af2..9506166 100644
--- a/src/tests/cluster.hpp
+++ b/src/tests/cluster.hpp
@@ -20,6 +20,7 @@
 #define __TESTS_CLUSTER_HPP__
 
 #include <map>
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -40,7 +41,6 @@
 #include <stout/error.hpp>
 #include <stout/foreach.hpp>
 #include <stout/none.hpp>
-#include <stout/memory.hpp>
 #include <stout/nothing.hpp>
 #include <stout/option.hpp>
 #include <stout/path.hpp>
@@ -104,11 +104,11 @@ public:
     void shutdown();
 
     // Start a new master with the provided flags and injections.
-    Try<process::PID<master::Master> > start(
+    Try<process::PID<master::Master>> start(
         const master::Flags& flags = master::Flags(),
         const Option<mesos::master::allocator::Allocator*>& allocator = None(),
         const Option<Authorizer*>& authorizer = None(),
-        const Option<memory::shared_ptr<process::RateLimiter> >&
+        const Option<std::shared_ptr<process::RateLimiter>>&
           slaveRemovalLimiter = None());
 
     // Stops and cleans up a master at the specified PID.
@@ -145,7 +145,7 @@ public:
 
       process::Owned<Authorizer> authorizer;
 
-      Option<memory::shared_ptr<process::RateLimiter>> slaveRemovalLimiter;
+      Option<std::shared_ptr<process::RateLimiter>> slaveRemovalLimiter;
 
       master::Master* master;
     };
@@ -164,7 +164,7 @@ public:
     void shutdown();
 
     // Start a new slave with the provided flags and injections.
-    Try<process::PID<slave::Slave> > start(
+    Try<process::PID<slave::Slave>> start(
         const slave::Flags& flags = slave::Flags(),
         const Option<slave::Containerizer*>& containerizer = None(),
         const Option<MasterDetector*>& detector = None(),
@@ -253,11 +253,11 @@ inline void Cluster::Masters::shutdown()
 }
 
 
-inline Try<process::PID<master::Master> > Cluster::Masters::start(
+inline Try<process::PID<master::Master>> Cluster::Masters::start(
     const master::Flags& flags,
     const Option<mesos::master::allocator::Allocator*>& allocator,
     const Option<Authorizer*>& authorizer,
-    const Option<memory::shared_ptr<process::RateLimiter>>& slaveRemovalLimiter)
+    const Option<std::shared_ptr<process::RateLimiter>>& slaveRemovalLimiter)
 {
   // Disallow multiple masters when not using ZooKeeper.
   if (!masters.empty() && url.isNone()) {
@@ -383,7 +383,7 @@ inline Try<process::PID<master::Master> > Cluster::Masters::start(
               << ": " << duration.error();
     }
 
-    master.slaveRemovalLimiter = memory::shared_ptr<process::RateLimiter>(
+    master.slaveRemovalLimiter = std::shared_ptr<process::RateLimiter>(
         new process::RateLimiter(permits.get(), duration.get()));
   }
 
@@ -525,7 +525,7 @@ inline void Cluster::Slaves::shutdown()
 }
 
 
-inline Try<process::PID<slave::Slave> > Cluster::Slaves::start(
+inline Try<process::PID<slave::Slave>> Cluster::Slaves::start(
     const slave::Flags& flags,
     const Option<slave::Containerizer*>& containerizer,
     const Option<MasterDetector*>& detector,

http://git-wip-us.apache.org/repos/asf/mesos/blob/9d6ffb9d/src/tests/master_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/master_tests.cpp b/src/tests/master_tests.cpp
index 8405105..bdfccb2 100644
--- a/src/tests/master_tests.cpp
+++ b/src/tests/master_tests.cpp
@@ -20,6 +20,7 @@
 
 #include <gmock/gmock.h>
 
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -38,7 +39,6 @@
 #include <process/metrics/metrics.hpp>
 
 #include <stout/json.hpp>
-#include <stout/memory.hpp>
 #include <stout/net.hpp>
 #include <stout/option.hpp>
 #include <stout/os.hpp>
@@ -62,8 +62,6 @@
 #include "tests/mesos.hpp"
 #include "tests/utils.hpp"
 
-using memory::shared_ptr;
-
 using mesos::internal::master::Master;
 
 using mesos::internal::master::allocator::MesosAllocatorProcess;
@@ -78,6 +76,7 @@ using process::Future;
 using process::PID;
 using process::Promise;
 
+using std::shared_ptr;
 using std::string;
 using std::vector;
 
@@ -104,14 +103,14 @@ class MasterTest : public MesosTest {};
 
 TEST_F(MasterTest, TaskRunning)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   MockExecutor exec(DEFAULT_EXECUTOR_ID);
 
   TestContainerizer containerizer(&exec);
 
-  Try<PID<Slave> > slave = StartSlave(&containerizer);
+  Try<PID<Slave>> slave = StartSlave(&containerizer);
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -121,7 +120,7 @@ TEST_F(MasterTest, TaskRunning)
   EXPECT_CALL(sched, registered(&driver, _, _))
     .Times(1);
 
-  Future<vector<Offer> > offers;
+  Future<vector<Offer>> offers;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(FutureArg<1>(&offers))
     .WillRepeatedly(Return()); // Ignore subsequent offers.
@@ -292,12 +291,12 @@ TEST_F(MasterTest, ShutdownFrameworkWhileTaskRunning)
 
 TEST_F(MasterTest, KillTask)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   MockExecutor exec(DEFAULT_EXECUTOR_ID);
 
-  Try<PID<Slave> > slave = StartSlave(&exec);
+  Try<PID<Slave>> slave = StartSlave(&exec);
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -307,7 +306,7 @@ TEST_F(MasterTest, KillTask)
   EXPECT_CALL(sched, registered(&driver, _, _))
     .Times(1);
 
-  Future<vector<Offer> > offers;
+  Future<vector<Offer>> offers;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(FutureArg<1>(&offers))
     .WillRepeatedly(Return()); // Ignore subsequent offers.
@@ -370,12 +369,12 @@ TEST_F(MasterTest, KillTask)
 // TASK_LOST when there are no slaves in transitionary states.
 TEST_F(MasterTest, KillUnknownTask)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   MockExecutor exec(DEFAULT_EXECUTOR_ID);
 
-  Try<PID<Slave> > slave = StartSlave(&exec);
+  Try<PID<Slave>> slave = StartSlave(&exec);
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -385,7 +384,7 @@ TEST_F(MasterTest, KillUnknownTask)
   EXPECT_CALL(sched, registered(&driver, _, _))
     .Times(1);
 
-  Future<vector<Offer> > offers;
+  Future<vector<Offer>> offers;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(FutureArg<1>(&offers))
     .WillRepeatedly(Return()); // Ignore subsequent offers.
@@ -446,7 +445,7 @@ TEST_F(MasterTest, KillUnknownTask)
 
 TEST_F(MasterTest, KillUnknownTaskSlaveInTransition)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   StandaloneMasterDetector detector(master.get());
@@ -459,7 +458,7 @@ TEST_F(MasterTest, KillUnknownTaskSlaveInTransition)
   // Reuse slaveFlags so both StartSlave() use the same work_dir.
   slave::Flags slaveFlags = CreateSlaveFlags();
 
-  Try<PID<Slave> > slave = StartSlave(&exec, slaveFlags);
+  Try<PID<Slave>> slave = StartSlave(&exec, slaveFlags);
   ASSERT_SOME(slave);
 
   // Wait for slave registration.
@@ -473,7 +472,7 @@ TEST_F(MasterTest, KillUnknownTaskSlaveInTransition)
   EXPECT_CALL(sched, registered(&driver, _, _))
     .WillOnce(FutureArg<1>(&frameworkId));
 
-  Future<vector<Offer> > offers;
+  Future<vector<Offer>> offers;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(FutureArg<1>(&offers))
     .WillRepeatedly(Return()); // Ignore subsequent offers.
@@ -573,12 +572,12 @@ TEST_F(MasterTest, KillUnknownTaskSlaveInTransition)
 
 TEST_F(MasterTest, StatusUpdateAck)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   MockExecutor exec(DEFAULT_EXECUTOR_ID);
 
-  Try<PID<Slave> > slave = StartSlave(&exec);
+  Try<PID<Slave>> slave = StartSlave(&exec);
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -588,7 +587,7 @@ TEST_F(MasterTest, StatusUpdateAck)
   EXPECT_CALL(sched, registered(&driver, _, _))
     .Times(1);
 
-  Future<vector<Offer> > offers;
+  Future<vector<Offer>> offers;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(FutureArg<1>(&offers))
     .WillRepeatedly(Return()); // Ignore subsequent offers.
@@ -641,7 +640,7 @@ TEST_F(MasterTest, StatusUpdateAck)
 
 TEST_F(MasterTest, RecoverResources)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   MockExecutor exec(DEFAULT_EXECUTOR_ID);
@@ -652,7 +651,7 @@ TEST_F(MasterTest, RecoverResources)
   flags.resources = Option<string>(
       "cpus:2;mem:1024;disk:1024;ports:[1-10, 20-30]");
 
-  Try<PID<Slave> > slave = StartSlave(&containerizer, flags);
+  Try<PID<Slave>> slave = StartSlave(&containerizer, flags);
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -662,7 +661,7 @@ TEST_F(MasterTest, RecoverResources)
   EXPECT_CALL(sched, registered(&driver, _, _))
     .Times(1);
 
-  Future<vector<Offer> > offers;
+  Future<vector<Offer>> offers;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(FutureArg<1>(&offers));
 
@@ -761,12 +760,12 @@ TEST_F(MasterTest, RecoverResources)
 
 TEST_F(MasterTest, FrameworkMessage)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   MockExecutor exec(DEFAULT_EXECUTOR_ID);
 
-  Try<PID<Slave> > slave = StartSlave(&exec);
+  Try<PID<Slave>> slave = StartSlave(&exec);
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -776,7 +775,7 @@ TEST_F(MasterTest, FrameworkMessage)
   EXPECT_CALL(sched, registered(&schedDriver, _, _))
     .Times(1);
 
-  Future<vector<Offer> > offers;
+  Future<vector<Offer>> offers;
   EXPECT_CALL(sched, resourceOffers(&schedDriver, _))
     .WillOnce(FutureArg<1>(&offers))
     .WillRepeatedly(Return()); // Ignore subsequent offers.
@@ -843,7 +842,7 @@ TEST_F(MasterTest, FrameworkMessage)
 
 TEST_F(MasterTest, MultipleExecutors)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   ExecutorInfo executor1; // Bug in gcc 4.1.*, must assign on next line.
@@ -861,7 +860,7 @@ TEST_F(MasterTest, MultipleExecutors)
 
   TestContainerizer containerizer(execs);
 
-  Try<PID<Slave> > slave = StartSlave(&containerizer);
+  Try<PID<Slave>> slave = StartSlave(&containerizer);
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -871,7 +870,7 @@ TEST_F(MasterTest, MultipleExecutors)
   EXPECT_CALL(sched, registered(&driver, _, _))
     .Times(1);
 
-  Future<vector<Offer> > offers;
+  Future<vector<Offer>> offers;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(FutureArg<1>(&offers))
     .WillRepeatedly(Return()); // Ignore subsequent offers.
@@ -951,10 +950,10 @@ TEST_F(MasterTest, MultipleExecutors)
 
 TEST_F(MasterTest, MasterInfo)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
-  Try<PID<Slave> > slave = StartSlave();
+  Try<PID<Slave>> slave = StartSlave();
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -983,12 +982,12 @@ TEST_F(MasterTest, MasterInfo)
 
 TEST_F(MasterTest, MasterInfoOnReElection)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   StandaloneMasterDetector detector(master.get());
 
-  Try<PID<Slave> > slave = StartSlave(&detector);
+  Try<PID<Slave>> slave = StartSlave(&detector);
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -1069,12 +1068,12 @@ TEST_F(WhitelistTest, WhitelistSlave)
   master::Flags flags = CreateMasterFlags();
   flags.whitelist = path;
 
-  Try<PID<Master> > master = StartMaster(flags);
+  Try<PID<Master>> master = StartMaster(flags);
   ASSERT_SOME(master);
 
   slave::Flags slaveFlags = CreateSlaveFlags();
   slaveFlags.hostname = hostname.get();
-  Try<PID<Slave> > slave = StartSlave(slaveFlags);
+  Try<PID<Slave>> slave = StartSlave(slaveFlags);
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -1084,7 +1083,7 @@ TEST_F(WhitelistTest, WhitelistSlave)
   EXPECT_CALL(sched, registered(&driver, _, _))
     .Times(1);
 
-  Future<vector<Offer> > offers;
+  Future<vector<Offer>> offers;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(FutureArg<1>(&offers));
 
@@ -1101,12 +1100,12 @@ TEST_F(WhitelistTest, WhitelistSlave)
 
 TEST_F(MasterTest, MasterLost)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   StandaloneMasterDetector detector(master.get());
 
-  Try<PID<Slave> > slave = StartSlave();
+  Try<PID<Slave>> slave = StartSlave();
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -1149,7 +1148,7 @@ TEST_F(MasterTest, MasterLost)
 // all slave resources and a single task should be able to run on these.
 TEST_F(MasterTest, LaunchCombinedOfferTest)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   MockExecutor exec(DEFAULT_EXECUTOR_ID);
@@ -1163,7 +1162,7 @@ TEST_F(MasterTest, LaunchCombinedOfferTest)
   slave::Flags flags = CreateSlaveFlags();
   flags.resources = Option<string>(stringify(fullSlave));
 
-  Try<PID<Slave> > slave = StartSlave(&containerizer, flags);
+  Try<PID<Slave>> slave = StartSlave(&containerizer, flags);
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -1173,7 +1172,7 @@ TEST_F(MasterTest, LaunchCombinedOfferTest)
   EXPECT_CALL(sched, registered(&driver, _, _));
 
   // Get 1st offer and use half of the slave resources to get subsequent offer.
-  Future<vector<Offer> > offers1;
+  Future<vector<Offer>> offers1;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(FutureArg<1>(&offers1));
 
@@ -1203,7 +1202,7 @@ TEST_F(MasterTest, LaunchCombinedOfferTest)
   EXPECT_CALL(sched, statusUpdate(&driver, _))
     .WillOnce(FutureArg<1>(&status1));
 
-  Future<vector<Offer> > offers2;
+  Future<vector<Offer>> offers2;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(FutureArg<1>(&offers2));
 
@@ -1231,7 +1230,7 @@ TEST_F(MasterTest, LaunchCombinedOfferTest)
   EXPECT_CALL(sched, statusUpdate(&driver, _))
     .WillOnce(FutureArg<1>(&status2));
 
-  Future<vector<Offer> > offers3;
+  Future<vector<Offer>> offers3;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(FutureArg<1>(&offers3))
     .WillRepeatedly(Return()); // Ignore subsequent offers.
@@ -1289,7 +1288,7 @@ TEST_F(MasterTest, LaunchCombinedOfferTest)
 // Test ensures offers for launchTasks cannot span multiple slaves.
 TEST_F(MasterTest, LaunchAcrossSlavesTest)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   MockExecutor exec(DEFAULT_EXECUTOR_ID);
@@ -1302,7 +1301,7 @@ TEST_F(MasterTest, LaunchAcrossSlavesTest)
   slave::Flags flags = CreateSlaveFlags();
   flags.resources = Option<string>(stringify(fullSlave));
 
-  Try<PID<Slave> > slave1 = StartSlave(&containerizer, flags);
+  Try<PID<Slave>> slave1 = StartSlave(&containerizer, flags);
   ASSERT_SOME(slave1);
 
   MockScheduler sched;
@@ -1311,7 +1310,7 @@ TEST_F(MasterTest, LaunchAcrossSlavesTest)
 
   EXPECT_CALL(sched, registered(&driver, _, _));
 
-  Future<vector<Offer> > offers1;
+  Future<vector<Offer>> offers1;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(FutureArg<1>(&offers1));
 
@@ -1324,7 +1323,7 @@ TEST_F(MasterTest, LaunchAcrossSlavesTest)
   EXPECT_EQ(Megabytes(1024), resources1.mem().get());
 
   // Test that offers cannot span multiple slaves.
-  Future<vector<Offer> > offers2;
+  Future<vector<Offer>> offers2;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(FutureArg<1>(&offers2))
     .WillRepeatedly(Return()); // Ignore subsequent offers.
@@ -1398,7 +1397,7 @@ TEST_F(MasterTest, LaunchAcrossSlavesTest)
 // for launchTasks.
 TEST_F(MasterTest, LaunchDuplicateOfferTest)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   MockExecutor exec(DEFAULT_EXECUTOR_ID);
@@ -1410,7 +1409,7 @@ TEST_F(MasterTest, LaunchDuplicateOfferTest)
   slave::Flags flags = CreateSlaveFlags();
   flags.resources = Option<string>(stringify(fullSlave));
 
-  Try<PID<Slave> > slave = StartSlave(&containerizer, flags);
+  Try<PID<Slave>> slave = StartSlave(&containerizer, flags);
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -1421,7 +1420,7 @@ TEST_F(MasterTest, LaunchDuplicateOfferTest)
 
   // Test that same offers cannot be used more than once.
   // Kill 2nd task and get offer for full slave.
-  Future<vector<Offer> > offers;
+  Future<vector<Offer>> offers;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(FutureArg<1>(&offers))
     .WillRepeatedly(Return()); // Ignore subsequent offers.
@@ -1674,7 +1673,7 @@ TEST_F(MasterTest, RecoveredSlaveDoesNotReregister)
 {
   // Step 1: Start a master.
   master::Flags masterFlags = CreateMasterFlags();
-  Try<PID<Master> > master = StartMaster(masterFlags);
+  Try<PID<Master>> master = StartMaster(masterFlags);
   ASSERT_SOME(master);
 
   // Step 2: Start a slave.
@@ -1684,7 +1683,7 @@ TEST_F(MasterTest, RecoveredSlaveDoesNotReregister)
   // Reuse slaveFlags so both StartSlave() use the same work_dir.
   slave::Flags slaveFlags = this->CreateSlaveFlags();
 
-  Try<PID<Slave> > slave = StartSlave(slaveFlags);
+  Try<PID<Slave>> slave = StartSlave(slaveFlags);
   ASSERT_SOME(slave);
 
   AWAIT_READY(slaveRegisteredMessage);
@@ -1750,7 +1749,7 @@ TEST_F(MasterTest, NonStrictRegistryWriteOnly)
   master::Flags masterFlags = CreateMasterFlags();
   masterFlags.registry_strict = false;
 
-  Try<PID<Master> > master = StartMaster(masterFlags);
+  Try<PID<Master>> master = StartMaster(masterFlags);
   ASSERT_SOME(master);
 
   // Step 2: Start a slave.
@@ -1760,7 +1759,7 @@ TEST_F(MasterTest, NonStrictRegistryWriteOnly)
   // Reuse slaveFlags so both StartSlave() use the same work_dir.
   slave::Flags slaveFlags = this->CreateSlaveFlags();
 
-  Try<PID<Slave> > slave = StartSlave(slaveFlags);
+  Try<PID<Slave>> slave = StartSlave(slaveFlags);
   ASSERT_SOME(slave);
 
   AWAIT_READY(slaveRegisteredMessage);
@@ -1827,14 +1826,14 @@ TEST_F(MasterTest, RateLimitRecoveredSlaveRemoval)
 {
   // Start a master.
   master::Flags masterFlags = CreateMasterFlags();
-  Try<PID<Master> > master = StartMaster(masterFlags);
+  Try<PID<Master>> master = StartMaster(masterFlags);
   ASSERT_SOME(master);
 
   Future<SlaveRegisteredMessage> slaveRegisteredMessage =
     FUTURE_PROTOBUF(SlaveRegisteredMessage(), master.get(), _);
 
   // Start a slave.
-  Try<PID<Slave> > slave = StartSlave();
+  Try<PID<Slave>> slave = StartSlave();
   ASSERT_SOME(slave);
 
   AWAIT_READY(slaveRegisteredMessage);
@@ -1902,7 +1901,7 @@ TEST_F(MasterTest, CancelRecoveredSlaveRemoval)
 {
   // Start a master.
   master::Flags masterFlags = CreateMasterFlags();
-  Try<PID<Master> > master = StartMaster(masterFlags);
+  Try<PID<Master>> master = StartMaster(masterFlags);
   ASSERT_SOME(master);
 
   Future<SlaveRegisteredMessage> slaveRegisteredMessage =
@@ -1911,7 +1910,7 @@ TEST_F(MasterTest, CancelRecoveredSlaveRemoval)
   // Reuse slaveFlags so both StartSlave() use the same work_dir.
   slave::Flags slaveFlags = CreateSlaveFlags();
 
-  Try<PID<Slave> > slave = StartSlave(slaveFlags);
+  Try<PID<Slave>> slave = StartSlave(slaveFlags);
   ASSERT_SOME(slave);
 
   AWAIT_READY(slaveRegisteredMessage);
@@ -1994,7 +1993,7 @@ TEST_F(MasterTest, RecoveredSlaveReregisters)
 {
   // Step 1: Start a master.
   master::Flags masterFlags = CreateMasterFlags();
-  Try<PID<Master> > master = StartMaster(masterFlags);
+  Try<PID<Master>> master = StartMaster(masterFlags);
   ASSERT_SOME(master);
 
   // Step 2: Start a slave.
@@ -2004,7 +2003,7 @@ TEST_F(MasterTest, RecoveredSlaveReregisters)
   // Reuse slaveFlags so both StartSlave() use the same work_dir.
   slave::Flags slaveFlags = this->CreateSlaveFlags();
 
-  Try<PID<Slave> > slave = StartSlave(slaveFlags);
+  Try<PID<Slave>> slave = StartSlave(slaveFlags);
   ASSERT_SOME(slave);
 
   AWAIT_READY(slaveRegisteredMessage);
@@ -2150,14 +2149,14 @@ TEST_F(MasterZooKeeperTest, LostZooKeeperCluster)
 TEST_F(MasterTest, OrphanTasks)
 {
   // Start a master.
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   MockExecutor exec(DEFAULT_EXECUTOR_ID);
   StandaloneMasterDetector detector (master.get());
 
   // Start a slave.
-  Try<PID<Slave> > slave = StartSlave(&exec, &detector);
+  Try<PID<Slave>> slave = StartSlave(&exec, &detector);
   ASSERT_SOME(slave);
 
   // Create a task on the slave.
@@ -2318,7 +2317,7 @@ TEST_F(MasterTest, OrphanTasks)
 // resource from offers so that frameworks cannot see it.
 TEST_F(MasterTest, IgnoreEphemeralPortsResource)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   string resourcesWithoutEphemeralPorts =
@@ -2330,7 +2329,7 @@ TEST_F(MasterTest, IgnoreEphemeralPortsResource)
   slave::Flags flags = CreateSlaveFlags();
   flags.resources = resourcesWithEphemeralPorts;
 
-  Try<PID<Slave> > slave = StartSlave(flags);
+  Try<PID<Slave>> slave = StartSlave(flags);
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -2339,7 +2338,7 @@ TEST_F(MasterTest, IgnoreEphemeralPortsResource)
 
   EXPECT_CALL(sched, registered(&driver, _, _));
 
-  Future<vector<Offer> > offers;
+  Future<vector<Offer>> offers;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(FutureArg<1>(&offers));
 
@@ -2365,14 +2364,14 @@ TEST_F(MasterTest, MaxExecutorsPerSlave)
   master::Flags flags = CreateMasterFlags();
   flags.max_executors_per_slave = 0;
 
-  Try<PID<Master> > master = StartMaster(flags);
+  Try<PID<Master>> master = StartMaster(flags);
   ASSERT_SOME(master);
 
   MockExecutor exec(DEFAULT_EXECUTOR_ID);
 
   TestContainerizer containerizer(&exec);
 
-  Try<PID<Slave> > slave = StartSlave(&containerizer);
+  Try<PID<Slave>> slave = StartSlave(&containerizer);
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -2383,7 +2382,7 @@ TEST_F(MasterTest, MaxExecutorsPerSlave)
   EXPECT_CALL(sched, registered(&driver, _, _))
     .WillOnce(FutureArg<2>(&masterInfo));
 
-  Future<vector<Offer> > offers;
+  Future<vector<Offer>> offers;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .Times(0);
 
@@ -2407,10 +2406,10 @@ TEST_F(MasterTest, OfferTimeout)
 {
   master::Flags masterFlags = MesosTest::CreateMasterFlags();
   masterFlags.offer_timeout = Seconds(30);
-  Try<PID<Master> > master = StartMaster(masterFlags);
+  Try<PID<Master>> master = StartMaster(masterFlags);
   ASSERT_SOME(master);
 
-  Try<PID<Slave> > slave = StartSlave();
+  Try<PID<Slave>> slave = StartSlave();
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -2421,8 +2420,8 @@ TEST_F(MasterTest, OfferTimeout)
   EXPECT_CALL(sched, registered(&driver, _, _))
     .WillOnce(FutureSatisfy(&registered));
 
-  Future<vector<Offer> > offers1;
-  Future<vector<Offer> > offers2;
+  Future<vector<Offer>> offers1;
+  Future<vector<Offer>> offers2;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(FutureArg<1>(&offers1))
     .WillOnce(FutureArg<1>(&offers2));
@@ -2470,14 +2469,14 @@ TEST_F(MasterTest, OfferNotRescindedOnceUsed)
 {
   master::Flags masterFlags = MesosTest::CreateMasterFlags();
   masterFlags.offer_timeout = Seconds(30);
-  Try<PID<Master> > master = StartMaster(masterFlags);
+  Try<PID<Master>> master = StartMaster(masterFlags);
   ASSERT_SOME(master);
 
   MockExecutor exec(DEFAULT_EXECUTOR_ID);
 
   TestContainerizer containerizer(&exec);
 
-  Try<PID<Slave> > slave = StartSlave(&containerizer);
+  Try<PID<Slave>> slave = StartSlave(&containerizer);
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -2531,14 +2530,14 @@ TEST_F(MasterTest, OfferNotRescindedOnceDeclined)
 {
   master::Flags masterFlags = MesosTest::CreateMasterFlags();
   masterFlags.offer_timeout = Seconds(30);
-  Try<PID<Master> > master = StartMaster(masterFlags);
+  Try<PID<Master>> master = StartMaster(masterFlags);
   ASSERT_SOME(master);
 
   MockExecutor exec(DEFAULT_EXECUTOR_ID);
 
   TestContainerizer containerizer(&exec);
 
-  Try<PID<Slave> > slave = StartSlave(&containerizer);
+  Try<PID<Slave>> slave = StartSlave(&containerizer);
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -2583,7 +2582,7 @@ TEST_F(MasterTest, OfferNotRescindedOnceDeclined)
 TEST_F(MasterTest, UnacknowledgedTerminalTask)
 {
   master::Flags masterFlags = CreateMasterFlags();
-  Try<PID<Master> > master = StartMaster(masterFlags);
+  Try<PID<Master>> master = StartMaster(masterFlags);
   ASSERT_SOME(master);
 
   MockExecutor exec(DEFAULT_EXECUTOR_ID);
@@ -2592,7 +2591,7 @@ TEST_F(MasterTest, UnacknowledgedTerminalTask)
 
   slave::Flags slaveFlags = CreateSlaveFlags();
   slaveFlags.resources = "cpus:1;mem:64";
-  Try<PID<Slave> > slave = StartSlave(&containerizer, slaveFlags);
+  Try<PID<Slave>> slave = StartSlave(&containerizer, slaveFlags);
   ASSERT_SOME(slave);
 
   // Launch a framework and get a task into a terminal state.
@@ -2604,8 +2603,8 @@ TEST_F(MasterTest, UnacknowledgedTerminalTask)
   EXPECT_CALL(sched, registered(&driver, _, _))
     .WillOnce(FutureArg<1>(&frameworkId));
 
-  Future<vector<Offer> > offers1;
-  Future<vector<Offer> > offers2;
+  Future<vector<Offer>> offers1;
+  Future<vector<Offer>> offers2;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(DoAll(FutureArg<1>(&offers1),
                     LaunchTasks(DEFAULT_EXECUTOR_INFO, 1, 1, 64, "*")))
@@ -2662,7 +2661,7 @@ TEST_F(MasterTest, UnacknowledgedTerminalTask)
 // latest state set).
 TEST_F(MasterTest, ReleaseResourcesForTerminalTaskWithPendingUpdates)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   MockExecutor exec(DEFAULT_EXECUTOR_ID);
@@ -2671,7 +2670,7 @@ TEST_F(MasterTest, ReleaseResourcesForTerminalTaskWithPendingUpdates)
 
   slave::Flags slaveFlags = CreateSlaveFlags();
   slaveFlags.resources = "cpus:1;mem:64";
-  Try<PID<Slave> > slave = StartSlave(&containerizer, slaveFlags);
+  Try<PID<Slave>> slave = StartSlave(&containerizer, slaveFlags);
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -2831,7 +2830,7 @@ TEST_F(MasterTest, StateEndpoint)
 // state.json endpoint, if provided by the framework.
 TEST_F(MasterTest, FrameworkWebUIUrl)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   FrameworkInfo framework = DEFAULT_FRAMEWORK_INFO;

http://git-wip-us.apache.org/repos/asf/mesos/blob/9d6ffb9d/src/tests/mesos.cpp
----------------------------------------------------------------------
diff --git a/src/tests/mesos.cpp b/src/tests/mesos.cpp
index 1dde4fe..bc082e8 100644
--- a/src/tests/mesos.cpp
+++ b/src/tests/mesos.cpp
@@ -16,12 +16,12 @@
  * limitations under the License.
  */
 
+#include <memory>
 #include <string>
 
 #include <stout/check.hpp>
 #include <stout/foreach.hpp>
 #include <stout/json.hpp>
-#include <stout/memory.hpp>
 #include <stout/os.hpp>
 #include <stout/path.hpp>
 #include <stout/result.hpp>
@@ -48,8 +48,7 @@
 #include "tests/flags.hpp"
 #include "tests/mesos.hpp"
 
-using memory::shared_ptr;
-
+using std::shared_ptr;
 using std::string;
 using testing::_;
 using testing::Invoke;
@@ -180,7 +179,7 @@ slave::Flags MesosTest::CreateSlaveFlags()
 }
 
 
-Try<PID<master::Master> > MesosTest::StartMaster(
+Try<PID<master::Master>> MesosTest::StartMaster(
     const Option<master::Flags>& flags)
 {
   return cluster.masters.start(
@@ -188,7 +187,7 @@ Try<PID<master::Master> > MesosTest::StartMaster(
 }
 
 
-Try<PID<master::Master> > MesosTest::StartMaster(
+Try<PID<master::Master>> MesosTest::StartMaster(
     mesos::master::allocator::Allocator* allocator,
     const Option<master::Flags>& flags)
 {
@@ -198,7 +197,7 @@ Try<PID<master::Master> > MesosTest::StartMaster(
 }
 
 
-Try<PID<master::Master> > MesosTest::StartMaster(
+Try<PID<master::Master>> MesosTest::StartMaster(
     Authorizer* authorizer,
     const Option<master::Flags>& flags)
 {
@@ -221,7 +220,7 @@ Try<PID<master::Master>> MesosTest::StartMaster(
 }
 
 
-Try<PID<slave::Slave> > MesosTest::StartSlave(
+Try<PID<slave::Slave>> MesosTest::StartSlave(
     const Option<slave::Flags>& flags)
 {
   return cluster.slaves.start(
@@ -229,13 +228,13 @@ Try<PID<slave::Slave> > MesosTest::StartSlave(
 }
 
 
-Try<PID<slave::Slave> > MesosTest::StartSlave(
+Try<PID<slave::Slave>> MesosTest::StartSlave(
     MockExecutor* executor,
     const Option<slave::Flags>& flags)
 {
   slave::Containerizer* containerizer = new TestContainerizer(executor);
 
-  Try<PID<slave::Slave> > pid = StartSlave(containerizer, flags);
+  Try<PID<slave::Slave>> pid = StartSlave(containerizer, flags);
 
   if (pid.isError()) {
     delete containerizer;
@@ -248,7 +247,7 @@ Try<PID<slave::Slave> > MesosTest::StartSlave(
 }
 
 
-Try<PID<slave::Slave> > MesosTest::StartSlave(
+Try<PID<slave::Slave>> MesosTest::StartSlave(
     slave::Containerizer* containerizer,
     const Option<slave::Flags>& flags)
 {
@@ -258,7 +257,7 @@ Try<PID<slave::Slave> > MesosTest::StartSlave(
 }
 
 
-Try<PID<slave::Slave> > MesosTest::StartSlave(
+Try<PID<slave::Slave>> MesosTest::StartSlave(
     slave::Containerizer* containerizer,
     MasterDetector* detector,
     const Option<slave::Flags>& flags)
@@ -270,7 +269,7 @@ Try<PID<slave::Slave> > MesosTest::StartSlave(
 }
 
 
-Try<PID<slave::Slave> > MesosTest::StartSlave(
+Try<PID<slave::Slave>> MesosTest::StartSlave(
     MasterDetector* detector,
     const Option<slave::Flags>& flags)
 {
@@ -281,7 +280,7 @@ Try<PID<slave::Slave> > MesosTest::StartSlave(
 }
 
 
-Try<PID<slave::Slave> > MesosTest::StartSlave(
+Try<PID<slave::Slave>> MesosTest::StartSlave(
     MasterDetector* detector,
     slave::GarbageCollector* gc,
     const Option<slave::Flags>& flags)
@@ -294,14 +293,14 @@ Try<PID<slave::Slave> > MesosTest::StartSlave(
 }
 
 
-Try<PID<slave::Slave> > MesosTest::StartSlave(
+Try<PID<slave::Slave>> MesosTest::StartSlave(
     MockExecutor* executor,
     MasterDetector* detector,
     const Option<slave::Flags>& flags)
 {
   slave::Containerizer* containerizer = new TestContainerizer(executor);
 
-  Try<PID<slave::Slave> > pid = cluster.slaves.start(
+  Try<PID<slave::Slave>> pid = cluster.slaves.start(
       flags.isNone() ? CreateSlaveFlags() : flags.get(),
           containerizer,
       detector);
@@ -494,7 +493,7 @@ void ContainerizerTest<slave::MesosContainerizer>::SetUpTestCase()
 
   if (cgroups::enabled() && user.get() == "root") {
     // Clean up any testing hierarchies.
-    Try<std::set<string> > hierarchies = cgroups::hierarchies();
+    Try<std::set<string>> hierarchies = cgroups::hierarchies();
     ASSERT_SOME(hierarchies);
     foreach (const string& hierarchy, hierarchies.get()) {
       if (strings::startsWith(hierarchy, TEST_CGROUPS_HIERARCHY)) {
@@ -512,7 +511,7 @@ void ContainerizerTest<slave::MesosContainerizer>::TearDownTestCase()
 
   if (cgroups::enabled() && user.get() == "root") {
     // Clean up any testing hierarchies.
-    Try<std::set<string> > hierarchies = cgroups::hierarchies();
+    Try<std::set<string>> hierarchies = cgroups::hierarchies();
     ASSERT_SOME(hierarchies);
     foreach (const string& hierarchy, hierarchies.get()) {
       if (strings::startsWith(hierarchy, TEST_CGROUPS_HIERARCHY)) {
@@ -604,7 +603,7 @@ void ContainerizerTest<slave::MesosContainerizer>::TearDown()
     foreach (const string& subsystem, subsystems) {
       string hierarchy = path::join(baseHierarchy, subsystem);
 
-      Try<std::vector<string> > cgroups = cgroups::get(hierarchy);
+      Try<std::vector<string>> cgroups = cgroups::get(hierarchy);
       CHECK_SOME(cgroups);
 
       foreach (const string& cgroup, cgroups.get()) {

http://git-wip-us.apache.org/repos/asf/mesos/blob/9d6ffb9d/src/tests/mesos.hpp
----------------------------------------------------------------------
diff --git a/src/tests/mesos.hpp b/src/tests/mesos.hpp
index 4294e28..19db712 100644
--- a/src/tests/mesos.hpp
+++ b/src/tests/mesos.hpp
@@ -20,6 +20,7 @@
 #define __TESTS_MESOS_HPP__
 
 #include <map>
+#include <memory>
 #include <set>
 #include <string>
 #include <vector>
@@ -113,7 +114,7 @@ protected:
 
   // Starts a master with a slave removal rate limiter and flags.
   virtual Try<process::PID<master::Master> > StartMaster(
-      const memory::shared_ptr<MockRateLimiter>& slaveRemovalLimiter,
+      const std::shared_ptr<MockRateLimiter>& slaveRemovalLimiter,
       const Option<master::Flags>& flags = None());
 
   // TODO(bmahler): Consider adding a builder style interface, e.g.

http://git-wip-us.apache.org/repos/asf/mesos/blob/9d6ffb9d/src/tests/scheduler_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/scheduler_tests.cpp b/src/tests/scheduler_tests.cpp
index 9a2f306..cbe6c91 100644
--- a/src/tests/scheduler_tests.cpp
+++ b/src/tests/scheduler_tests.cpp
@@ -18,6 +18,7 @@
 
 #include <gmock/gmock.h>
 
+#include <memory>
 #include <string>
 #include <queue>
 #include <vector>
@@ -39,7 +40,6 @@
 
 #include <stout/json.hpp>
 #include <stout/lambda.hpp>
-#include <stout/memory.hpp>
 #include <stout/try.hpp>
 #include <stout/uuid.hpp>
 

http://git-wip-us.apache.org/repos/asf/mesos/blob/9d6ffb9d/src/tests/slave_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/slave_tests.cpp b/src/tests/slave_tests.cpp
index b826000..04e79ec 100644
--- a/src/tests/slave_tests.cpp
+++ b/src/tests/slave_tests.cpp
@@ -22,6 +22,7 @@
 
 #include <algorithm>
 #include <map>
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -34,7 +35,6 @@
 #include <process/pid.hpp>
 #include <process/subprocess.hpp>
 
-#include <stout/memory.hpp>
 #include <stout/option.hpp>
 #include <stout/os.hpp>
 #include <stout/try.hpp>
@@ -59,8 +59,6 @@
 #include "tests/limiter.hpp"
 #include "tests/mesos.hpp"
 
-using memory::shared_ptr;
-
 using namespace mesos::internal::slave;
 
 using mesos::internal::master::Master;
@@ -72,6 +70,7 @@ using process::Promise;
 using process::UPID;
 
 using std::map;
+using std::shared_ptr;
 using std::string;
 using std::vector;
 
@@ -100,7 +99,7 @@ class SlaveTest : public MesosTest {};
 
 TEST_F(SlaveTest, ShutdownUnregisteredExecutor)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   // Need flags for 'executor_registration_timeout'.
@@ -114,7 +113,7 @@ TEST_F(SlaveTest, ShutdownUnregisteredExecutor)
     MesosContainerizer::create(flags, false, &fetcher);
   CHECK_SOME(containerizer);
 
-  Try<PID<Slave> > slave = StartSlave(containerizer.get());
+  Try<PID<Slave>> slave = StartSlave(containerizer.get());
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -124,7 +123,7 @@ TEST_F(SlaveTest, ShutdownUnregisteredExecutor)
   EXPECT_CALL(sched, registered(&driver, _, _))
     .Times(1);
 
-  Future<vector<Offer> > offers;
+  Future<vector<Offer>> offers;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(FutureArg<1>(&offers))
     .WillRepeatedly(Return()); // Ignore subsequent offers.
@@ -196,14 +195,14 @@ TEST_F(SlaveTest, ShutdownUnregisteredExecutor)
 // registering with slave, it is properly cleaned up.
 TEST_F(SlaveTest, RemoveUnregisteredTerminatedExecutor)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   MockExecutor exec(DEFAULT_EXECUTOR_ID);
 
   TestContainerizer containerizer(&exec);
 
-  Try<PID<Slave> > slave = StartSlave(&containerizer);
+  Try<PID<Slave>> slave = StartSlave(&containerizer);
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -213,7 +212,7 @@ TEST_F(SlaveTest, RemoveUnregisteredTerminatedExecutor)
   EXPECT_CALL(sched, registered(&driver, _, _))
     .Times(1);
 
-  Future<vector<Offer> > offers;
+  Future<vector<Offer>> offers;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(FutureArg<1>(&offers))
     .WillRepeatedly(Return()); // Ignore subsequent offers.
@@ -274,12 +273,12 @@ TEST_F(SlaveTest, RemoveUnregisteredTerminatedExecutor)
 // command to use via the --override argument.
 TEST_F(SlaveTest, CommandExecutorWithOverride)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   TestContainerizer containerizer;
 
-  Try<PID<Slave> > slave = StartSlave(&containerizer);
+  Try<PID<Slave>> slave = StartSlave(&containerizer);
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -289,7 +288,7 @@ TEST_F(SlaveTest, CommandExecutorWithOverride)
   EXPECT_CALL(sched, registered(&driver, _, _))
     .Times(1);
 
-  Future<vector<Offer> > offers;
+  Future<vector<Offer>> offers;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(FutureArg<1>(&offers))
     .WillRepeatedly(Return()); // Ignore subsequent offers.
@@ -412,7 +411,7 @@ TEST_F(SlaveTest, CommandExecutorWithOverride)
 // This assumes the ability to execute '/bin/echo --author'.
 TEST_F(SlaveTest, ComamndTaskWithArguments)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   // Need flags for 'executor_registration_timeout'.
@@ -425,7 +424,7 @@ TEST_F(SlaveTest, ComamndTaskWithArguments)
     MesosContainerizer::create(flags, false, &fetcher);
   CHECK_SOME(containerizer);
 
-  Try<PID<Slave> > slave = StartSlave(containerizer.get());
+  Try<PID<Slave>> slave = StartSlave(containerizer.get());
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -435,7 +434,7 @@ TEST_F(SlaveTest, ComamndTaskWithArguments)
   EXPECT_CALL(sched, registered(&driver, _, _))
     .Times(1);
 
-  Future<vector<Offer> > offers;
+  Future<vector<Offer>> offers;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(FutureArg<1>(&offers))
     .WillRepeatedly(Return()); // Ignore subsequent offers.
@@ -540,7 +539,7 @@ TEST_F(SlaveTest, GetExecutorInfo)
 // slave user (in this case, root).
 TEST_F(SlaveTest, ROOT_RunTaskWithCommandInfoWithoutUser)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   // Need flags for 'executor_registration_timeout'.
@@ -553,7 +552,7 @@ TEST_F(SlaveTest, ROOT_RunTaskWithCommandInfoWithoutUser)
     MesosContainerizer::create(flags, false, &fetcher);
   CHECK_SOME(containerizer);
 
-  Try<PID<Slave> > slave = StartSlave(containerizer.get());
+  Try<PID<Slave>> slave = StartSlave(containerizer.get());
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -563,7 +562,7 @@ TEST_F(SlaveTest, ROOT_RunTaskWithCommandInfoWithoutUser)
   EXPECT_CALL(sched, registered(&driver, _, _))
     .Times(1);
 
-  Future<vector<Offer> > offers;
+  Future<vector<Offer>> offers;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(FutureArg<1>(&offers))
     .WillRepeatedly(Return()); // Ignore subsequent offers.
@@ -640,7 +639,7 @@ TEST_F(SlaveTest, DISABLED_ROOT_RunTaskWithCommandInfoWithUser)
     return;
   }
 
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   // Need flags for 'executor_registration_timeout'.
@@ -653,7 +652,7 @@ TEST_F(SlaveTest, DISABLED_ROOT_RunTaskWithCommandInfoWithUser)
     MesosContainerizer::create(flags, false, &fetcher);
   CHECK_SOME(containerizer);
 
-  Try<PID<Slave> > slave = StartSlave(containerizer.get());
+  Try<PID<Slave>> slave = StartSlave(containerizer.get());
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -663,7 +662,7 @@ TEST_F(SlaveTest, DISABLED_ROOT_RunTaskWithCommandInfoWithUser)
   EXPECT_CALL(sched, registered(&driver, _, _))
     .Times(1);
 
-  Future<vector<Offer> > offers;
+  Future<vector<Offer>> offers;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(FutureArg<1>(&offers))
     .WillRepeatedly(Return()); // Ignore subsequent offers.
@@ -724,12 +723,12 @@ TEST_F(SlaveTest, DISABLED_ROOT_RunTaskWithCommandInfoWithUser)
 // non-leading master is ignored.
 TEST_F(SlaveTest, IgnoreNonLeaderStatusUpdateAcknowledgement)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   MockExecutor exec(DEFAULT_EXECUTOR_ID);
 
-  Try<PID<Slave> > slave = StartSlave(&exec);
+  Try<PID<Slave>> slave = StartSlave(&exec);
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -739,7 +738,7 @@ TEST_F(SlaveTest, IgnoreNonLeaderStatusUpdateAcknowledgement)
   EXPECT_CALL(sched, registered(&schedDriver, _, _))
     .Times(1);
 
-  Future<vector<Offer> > offers;
+  Future<vector<Offer>> offers;
   EXPECT_CALL(sched, resourceOffers(&schedDriver, _))
     .WillOnce(FutureArg<1>(&offers))
     .WillRepeatedly(Return()); // Ignore subsequent offers.
@@ -887,7 +886,7 @@ TEST_F(SlaveTest, MetricsInMetricsEndpoint)
 
 TEST_F(SlaveTest, StateEndpoint)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   slave::Flags flags = this->CreateSlaveFlags();
@@ -899,7 +898,7 @@ TEST_F(SlaveTest, StateEndpoint)
   // Capture the start time deterministically.
   Clock::pause();
 
-  Try<PID<Slave> > slave = StartSlave(flags);
+  Try<PID<Slave>> slave = StartSlave(flags);
   ASSERT_SOME(slave);
 
   Future<http::Response> response = http::get(slave.get(), "state.json");
@@ -981,7 +980,7 @@ TEST_F(SlaveTest, StateEndpoint)
 TEST_F(SlaveTest, TerminatingSlaveDoesNotReregister)
 {
   // Start a master.
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   // Create a MockExecutor to enable us to catch
@@ -1000,7 +999,7 @@ TEST_F(SlaveTest, TerminatingSlaveDoesNotReregister)
   flags.executor_shutdown_grace_period = slave::REGISTER_RETRY_INTERVAL_MAX * 2;
 
   // Start a slave.
-  Try<PID<Slave> > slave = StartSlave(&exec, &detector, flags);
+  Try<PID<Slave>> slave = StartSlave(&exec, &detector, flags);
   ASSERT_SOME(slave);
 
   // Create a task on the slave.
@@ -1081,7 +1080,7 @@ TEST_F(SlaveTest, TerminatingSlaveDoesNotReregister)
 TEST_F(SlaveTest, TerminalTaskContainerizerUpdateFails)
 {
   // Start a master.
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   MockExecutor exec(DEFAULT_EXECUTOR_ID);
@@ -1090,7 +1089,7 @@ TEST_F(SlaveTest, TerminalTaskContainerizerUpdateFails)
   TestContainerizer containerizer(&exec);
 
   // Start a slave.
-  Try<PID<Slave> > slave = StartSlave(&containerizer);
+  Try<PID<Slave>> slave = StartSlave(&containerizer);
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -1099,7 +1098,7 @@ TEST_F(SlaveTest, TerminalTaskContainerizerUpdateFails)
 
   EXPECT_CALL(sched, registered(_, _, _));
 
-  Future<vector<Offer> > offers;
+  Future<vector<Offer>> offers;
 
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(FutureArg<1>(&offers))
@@ -1178,7 +1177,7 @@ TEST_F(SlaveTest, TerminalTaskContainerizerUpdateFails)
 TEST_F(SlaveTest, ContainerUpdatedBeforeTaskReachesExecutor)
 {
   // Start a master.
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   MockExecutor exec(DEFAULT_EXECUTOR_ID);
@@ -1187,7 +1186,7 @@ TEST_F(SlaveTest, ContainerUpdatedBeforeTaskReachesExecutor)
   TestContainerizer containerizer(&exec);
 
   // Start a slave.
-  Try<PID<Slave> > slave = StartSlave(&containerizer);
+  Try<PID<Slave>> slave = StartSlave(&containerizer);
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -1235,7 +1234,7 @@ TEST_F(SlaveTest, ContainerUpdatedBeforeTaskReachesExecutor)
 TEST_F(SlaveTest, TaskLaunchContainerizerUpdateFails)
 {
   // Start a master.
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   MockExecutor exec(DEFAULT_EXECUTOR_ID);
@@ -1243,7 +1242,7 @@ TEST_F(SlaveTest, TaskLaunchContainerizerUpdateFails)
   TestContainerizer containerizer(&exec);
 
   // Start a slave.
-  Try<PID<Slave> > slave = StartSlave(&containerizer);
+  Try<PID<Slave>> slave = StartSlave(&containerizer);
   ASSERT_SOME(slave);
 
   MockScheduler sched;
@@ -1289,7 +1288,7 @@ TEST_F(SlaveTest, TaskLaunchContainerizerUpdateFails)
 TEST_F(SlaveTest, PingTimeoutNoPings)
 {
   // Start a master.
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   // Block all pings to the slave.
@@ -1299,7 +1298,7 @@ TEST_F(SlaveTest, PingTimeoutNoPings)
     FUTURE_PROTOBUF(SlaveRegisteredMessage(), _, _);
 
   // Start a slave.
-  Try<PID<Slave> > slave = StartSlave();
+  Try<PID<Slave>> slave = StartSlave();
   ASSERT_SOME(slave);
 
   AWAIT_READY(slaveRegisteredMessage);
@@ -1328,14 +1327,14 @@ TEST_F(SlaveTest, PingTimeoutNoPings)
 TEST_F(SlaveTest, PingTimeoutSomePings)
 {
   // Start a master.
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   Future<SlaveRegisteredMessage> slaveRegisteredMessage =
     FUTURE_PROTOBUF(SlaveRegisteredMessage(), _, _);
 
   // Start a slave.
-  Try<PID<Slave> > slave = StartSlave();
+  Try<PID<Slave>> slave = StartSlave();
   ASSERT_SOME(slave);
 
   AWAIT_READY(slaveRegisteredMessage);
@@ -1373,7 +1372,7 @@ TEST_F(SlaveTest, RateLimitSlaveShutdown)
 {
   // Start a master.
   shared_ptr<MockRateLimiter> slaveRemovalLimiter(new MockRateLimiter());
-  Try<PID<Master> > master = StartMaster(slaveRemovalLimiter);
+  Try<PID<Master>> master = StartMaster(slaveRemovalLimiter);
   ASSERT_SOME(master);
 
   // Set these expectations up before we spawn the slave so that we
@@ -1387,7 +1386,7 @@ TEST_F(SlaveTest, RateLimitSlaveShutdown)
     FUTURE_PROTOBUF(SlaveRegisteredMessage(), _, _);
 
   // Start a slave.
-  Try<PID<Slave> > slave = StartSlave();
+  Try<PID<Slave>> slave = StartSlave();
   ASSERT_SOME(slave);
 
   AWAIT_READY(slaveRegisteredMessage);
@@ -1436,7 +1435,7 @@ TEST_F(SlaveTest, CancelSlaveShutdown)
 {
   // Start a master.
   shared_ptr<MockRateLimiter> slaveRemovalLimiter(new MockRateLimiter());
-  Try<PID<Master> > master = StartMaster(slaveRemovalLimiter);
+  Try<PID<Master>> master = StartMaster(slaveRemovalLimiter);
   ASSERT_SOME(master);
 
   // Set these expectations up before we spawn the slave so that we
@@ -1453,7 +1452,7 @@ TEST_F(SlaveTest, CancelSlaveShutdown)
     FUTURE_PROTOBUF(SlaveRegisteredMessage(), _, _);
 
   // Start a slave.
-  Try<PID<Slave> > slave = StartSlave();
+  Try<PID<Slave>> slave = StartSlave();
   ASSERT_SOME(slave);
 
   AWAIT_READY(slaveRegisteredMessage);
@@ -1511,7 +1510,7 @@ TEST_F(SlaveTest, CancelSlaveShutdown)
 // called. See MESOS-1945.
 TEST_F(SlaveTest, KillTaskBetweenRunTaskParts)
 {
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   MockExecutor exec(DEFAULT_EXECUTOR_ID);
@@ -1530,7 +1529,7 @@ TEST_F(SlaveTest, KillTaskBetweenRunTaskParts)
   EXPECT_CALL(sched, registered(&driver, _, _))
     .Times(1);
 
-  Future<vector<Offer> > offers;
+  Future<vector<Offer>> offers;
   EXPECT_CALL(sched, resourceOffers(&driver, _))
     .WillOnce(FutureArg<1>(&offers))
     .WillRepeatedly(Return()); // Ignore subsequent offers.
@@ -1622,7 +1621,7 @@ TEST_F(SlaveTest, KillTaskBetweenRunTaskParts)
 TEST_F(SlaveTest, ReregisterWithStatusUpdateTaskState)
 {
   // Start a master.
-  Try<PID<Master> > master = StartMaster();
+  Try<PID<Master>> master = StartMaster();
   ASSERT_SOME(master);
 
   MockExecutor exec(DEFAULT_EXECUTOR_ID);
@@ -1632,7 +1631,7 @@ TEST_F(SlaveTest, ReregisterWithStatusUpdateTaskState)
   StandaloneMasterDetector detector(master.get());
 
   // Start a slave.
-  Try<PID<Slave> > slave = StartSlave(&exec, &detector);
+  Try<PID<Slave>> slave = StartSlave(&exec, &detector);
   ASSERT_SOME(slave);
 
   MockScheduler sched;
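
(Note on the hunks above: each one applies the same mechanical change — nested template argument lists are closed with '>>' rather than '> >', which C++11 parses correctly; it is the same standard that supplies the std::shared_ptr used in place of the stout 'memory::' backport elsewhere in this commit. A minimal standalone sketch of the resulting style; the Widget type below is invented purely for illustration and is not part of the patch:)

#include <memory>
#include <vector>

struct Widget {};

int main()
{
  // In C++11 '>>' closes nested template argument lists, so the old
  // 'std::vector<std::shared_ptr<Widget> >' spelling is unnecessary.
  std::vector<std::shared_ptr<Widget>> widgets;

  // std::shared_ptr/std::make_shared come from <memory>, replacing the
  // 'memory::' backport.
  widgets.push_back(std::make_shared<Widget>());

  return widgets.empty() ? 1 : 0;
}
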