Posted to commits@mesos.apache.org by ya...@apache.org on 2016/12/01 21:54:30 UTC

mesos git commit: Added additional unit tests for shared resources.

Repository: mesos
Updated Branches:
  refs/heads/master 9622bc9eb -> 1f571b347


Added additional unit tests for shared resources.

Review: https://reviews.apache.org/r/52295/


Project: http://git-wip-us.apache.org/repos/asf/mesos/repo
Commit: http://git-wip-us.apache.org/repos/asf/mesos/commit/1f571b34
Tree: http://git-wip-us.apache.org/repos/asf/mesos/tree/1f571b34
Diff: http://git-wip-us.apache.org/repos/asf/mesos/diff/1f571b34

Branch: refs/heads/master
Commit: 1f571b347ba3a9afae8001826a39d5ed83bb531e
Parents: 9622bc9
Author: Anindya Sinha <an...@apple.com>
Authored: Thu Dec 1 11:18:05 2016 -0800
Committer: Jiang Yan Xu <xu...@apple.com>
Committed: Thu Dec 1 13:48:58 2016 -0800

----------------------------------------------------------------------
 src/tests/hierarchical_allocator_tests.cpp | 101 +++++++
 src/tests/master_validation_tests.cpp      |  33 +++
 src/tests/persistent_volume_tests.cpp      | 130 ++++++++-
 src/tests/sorter_tests.cpp                 | 357 ++++++++++++++++++------
 4 files changed, 530 insertions(+), 91 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/mesos/blob/1f571b34/src/tests/hierarchical_allocator_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/hierarchical_allocator_tests.cpp b/src/tests/hierarchical_allocator_tests.cpp
index 3b0eb75..f9dcdbf 100644
--- a/src/tests/hierarchical_allocator_tests.cpp
+++ b/src/tests/hierarchical_allocator_tests.cpp
@@ -1453,6 +1453,7 @@ TEST_F(HierarchicalAllocatorTest, UpdateAvailableFail)
   AWAIT_EXPECT_FAILED(update);
 }
 
+
 // This test ensures that when oversubscribed resources are updated
 // subsequent allocations properly account for that.
 TEST_F(HierarchicalAllocatorTest, UpdateSlave)
@@ -3528,6 +3529,106 @@ TEST_F(HierarchicalAllocatorTest, ReviveOffers)
 }
 
 
+class HierarchicalAllocatorTestWithParam
+  : public HierarchicalAllocatorTestBase,
+    public WithParamInterface<bool> {};
+
+
+// The HierarchicalAllocatorTestWithParam tests are parameterized by a
+// flag which indicates whether quota is involved (true) or not (false).
+// TODO(anindya_sinha): Move over more allocator tests that make sense to run
+// both when the role is quota'ed and when it is not.
+INSTANTIATE_TEST_CASE_P(
+    QuotaSwitch,
+    HierarchicalAllocatorTestWithParam,
+    ::testing::Bool());
+
+
+// Tests that shared resources are offered to only one framework at a time.
+// Note that shared resources remain offerable even while they are in use.
+TEST_P(HierarchicalAllocatorTestWithParam, AllocateSharedResources)
+{
+  Clock::pause();
+
+  initialize();
+
+  // Create 2 frameworks which have opted in for SHARED_RESOURCES.
+  FrameworkInfo framework1 = createFrameworkInfo(
+      "role1",
+      {FrameworkInfo::Capability::SHARED_RESOURCES});
+  allocator->addFramework(framework1.id(), framework1, {});
+
+  FrameworkInfo framework2 = createFrameworkInfo(
+      "role1",
+      {FrameworkInfo::Capability::SHARED_RESOURCES});
+  allocator->addFramework(framework2.id(), framework2, {});
+
+  if (GetParam()) {
+    // Assign a quota.
+    const Quota quota = createQuota("role1", "cpus:8;mem:2048;disk:4096");
+    allocator->setQuota("role1", quota);
+  }
+
+  SlaveInfo slave = createSlaveInfo("cpus:4;mem:1024;disk(role1):2048");
+  allocator->addSlave(slave.id(), slave, None(), slave.resources(), {});
+
+  // Initially, all the resources are allocated to `framework1`.
+  Future<Allocation> allocation = allocations.get();
+  AWAIT_READY(allocation);
+  EXPECT_EQ(framework1.id(), allocation.get().frameworkId);
+  EXPECT_EQ(1u, allocation.get().resources.size());
+  EXPECT_TRUE(allocation.get().resources.contains(slave.id()));
+  EXPECT_EQ(slave.resources(), Resources::sum(allocation.get().resources));
+
+  // Create a shared volume.
+  Resource volume = createDiskResource(
+      "5", "role1", "id1", None(), None(), true);
+  Offer::Operation create = CREATE(volume);
+
+  // Launch a task using the shared volume.
+  TaskInfo task = createTask(
+      slave.id(),
+      Resources::parse("cpus:1;mem:5").get() + volume,
+      "echo abc > path1/file");
+  Offer::Operation launch = LAUNCH({task});
+
+  // Ensure the CREATE operation can be applied.
+  Try<Resources> updated =
+    Resources::sum(allocation.get().resources).apply(create);
+
+  ASSERT_SOME(updated);
+
+  // Update the allocation in the allocator with a CREATE and a LAUNCH
+  // (with one task using the created shared volume) operation.
+  allocator->updateAllocation(
+      framework1.id(),
+      slave.id(),
+      Resources::sum(allocation.get().resources),
+      {create, launch});
+
+  // Now recover the resources, and expect the next allocation to contain
+  // the updated resources. Note that the volume is not recovered as it is
+  // used by the task (but it is still offerable because it is shared).
+  allocator->recoverResources(
+      framework1.id(),
+      slave.id(),
+      updated.get() - task.resources(),
+      None());
+
+  // The offer to `framework2` should contain the shared volume.
+  Clock::advance(flags.allocation_interval);
+
+  allocation = allocations.get();
+  AWAIT_READY(allocation);
+  EXPECT_EQ(framework2.id(), allocation.get().frameworkId);
+  EXPECT_EQ(1u, allocation.get().resources.size());
+  EXPECT_TRUE(allocation.get().resources.contains(slave.id()));
+  EXPECT_EQ(
+      allocation.get().resources.get(slave.id()).get().shared(),
+      Resources(volume));
+}
+
+
 class HierarchicalAllocator_BENCHMARK_Test
   : public HierarchicalAllocatorTestBase,
     public WithParamInterface<std::tr1::tuple<size_t, size_t>> {};

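For context on what `createDiskResource(..., true)` in the test above produces,
here is a minimal sketch of building a shared persistent volume directly
against the protobuf API. The size, role, and persistence id are taken from
the diff; the generated protobuf accessors shown are an illustrative
assumption rather than the helper's actual implementation:

    // Sketch: a 5 MB disk resource reserved for "role1", with persistence
    // id "id1" and the `shared` field set, which is what marks the volume
    // as shareable across tasks and offers.
    Resource volume = Resources::parse("disk", "5", "role1").get();
    volume.mutable_disk()->mutable_persistence()->set_id("id1");
    volume.mutable_shared(); // Presence of SharedInfo makes it shared.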
http://git-wip-us.apache.org/repos/asf/mesos/blob/1f571b34/src/tests/master_validation_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/master_validation_tests.cpp b/src/tests/master_validation_tests.cpp
index f893067..5c44967 100644
--- a/src/tests/master_validation_tests.cpp
+++ b/src/tests/master_validation_tests.cpp
@@ -741,6 +741,39 @@ TEST_F(DestroyOperationValidationTest, SharedPersistentVolumeInUse)
 }
 
 
+// This test verifies that a DESTROY of a shared persistent volume is
+// rejected while the volume is assigned to a pending task, and is valid
+// once no pending task uses it.
+TEST_F(DestroyOperationValidationTest, SharedPersistentVolumeInPendingTasks)
+{
+  Resource cpus = Resources::parse("cpus", "1", "*").get();
+  Resource mem = Resources::parse("mem", "5", "*").get();
+  Resource sharedDisk = createDiskResource(
+      "50", "role1", "1", "path1", None(), true); // Shared.
+
+  SlaveID slaveId;
+  slaveId.set_value("S1");
+
+  // Add a task using the shared volume as a pending task.
+  TaskInfo task = createTask(slaveId, sharedDisk, "sleep 1000");
+
+  hashmap<FrameworkID, hashmap<TaskID, TaskInfo>> pendingTasks;
+  FrameworkID frameworkId1;
+  frameworkId1.set_value("id1");
+  pendingTasks[frameworkId1] = {{task.task_id(), task}};
+
+  // Destroy `sharedDisk` which is assigned to `task`.
+  Offer::Operation::Destroy destroy;
+  destroy.add_volumes()->CopyFrom(sharedDisk);
+
+  EXPECT_SOME(operation::validate(destroy, sharedDisk, {}, pendingTasks));
+
+  // Remove all pending tasks.
+  pendingTasks.clear();
+
+  EXPECT_NONE(operation::validate(destroy, sharedDisk, {}, pendingTasks));
+}
+
+
 TEST_F(DestroyOperationValidationTest, UnknownPersistentVolume)
 {
   Resource volume = Resources::parse("disk", "128", "role1").get();

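A note on the assertions above: `operation::validate` returns an
`Option<Error>`, so `EXPECT_SOME` asserts that the DESTROY is rejected while
`EXPECT_NONE` asserts that it passes validation. A minimal sketch of that
pattern, reusing the names from the test:

    // SOME means the operation is invalid; NONE means it may proceed.
    Option<Error> error =
      operation::validate(destroy, sharedDisk, {}, pendingTasks);

    if (error.isSome()) {
      // Rejected: the shared volume is still assigned to a pending task.
    } else {
      // Valid: no pending task references the volume anymore.
    }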
http://git-wip-us.apache.org/repos/asf/mesos/blob/1f571b34/src/tests/persistent_volume_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/persistent_volume_tests.cpp b/src/tests/persistent_volume_tests.cpp
index 8651b2c..b7c3137 100644
--- a/src/tests/persistent_volume_tests.cpp
+++ b/src/tests/persistent_volume_tests.cpp
@@ -945,10 +945,10 @@ TEST_P(PersistentVolumeTest, SharedPersistentVolumeMultipleTasks)
       frameworkInfo.principal(),
       true); // Shared.
 
+  // Create 2 tasks which write distinct files in the shared volume.
   Try<Resources> taskResources1 = Resources::parse(
       "cpus:1;mem:128;disk(" + string(DEFAULT_TEST_ROLE) + "):32");
 
-  // Create 2 tasks which write distinct files in the shared volume.
   TaskInfo task1 = createTask(
       offer.slave_id(),
       taskResources1.get() + volume,
@@ -1052,7 +1052,7 @@ TEST_P(PersistentVolumeTest, SharedPersistentVolumeRescindOnDestroy)
       "path1",
       None(),
       frameworkInfo1.principal(),
-      true);  // Shared volume.
+      true); // Shared volume.
 
   // Create a task which uses a portion of the offered resources, so that
   // the remaining resources can be offered to framework2. It's not important
@@ -1151,6 +1151,132 @@ TEST_P(PersistentVolumeTest, SharedPersistentVolumeRescindOnDestroy)
 }
 
 
+// This test verifies that the master recovers after a failover and
+// re-offers the shared persistent volume when tasks using the same
+// volume are still running.
+TEST_P(PersistentVolumeTest, SharedPersistentVolumeMasterFailover)
+{
+  Try<Owned<cluster::Master>> master = StartMaster();
+  ASSERT_SOME(master);
+
+  StandaloneMasterDetector detector(master.get()->pid);
+
+  slave::Flags slaveFlags = CreateSlaveFlags();
+  slaveFlags.resources = getSlaveResources();
+
+  Try<Owned<cluster::Slave>> slave = StartSlave(&detector, slaveFlags);
+  ASSERT_SOME(slave);
+
+  // Create the framework with SHARED_RESOURCES capability.
+  FrameworkInfo frameworkInfo = DEFAULT_FRAMEWORK_INFO;
+  frameworkInfo.set_role(DEFAULT_TEST_ROLE);
+  frameworkInfo.add_capabilities()->set_type(
+      FrameworkInfo::Capability::SHARED_RESOURCES);
+
+  MockScheduler sched;
+  TestingMesosSchedulerDriver driver(&sched, &detector, frameworkInfo);
+
+  EXPECT_CALL(sched, registered(&driver, _, _));
+
+  Future<vector<Offer>> offers1;
+  EXPECT_CALL(sched, resourceOffers(&driver, _))
+    .WillOnce(FutureArg<1>(&offers1))
+    .WillRepeatedly(Return()); // Ignore subsequent offers.
+
+  driver.start();
+
+  AWAIT_READY(offers1);
+  EXPECT_FALSE(offers1.get().empty());
+
+  Offer offer1 = offers1.get()[0];
+
+  Resource volume = createPersistentVolume(
+      getDiskResource(Megabytes(2048)),
+      "id1",
+      "path1",
+      None(),
+      frameworkInfo.principal(),
+      true); // Shared volume.
+
+  Try<Resources> taskResources = Resources::parse("cpus:0.5;mem:512");
+
+  TaskInfo task1 = createTask(
+      offer1.slave_id(),
+      taskResources.get() + volume,
+      "sleep 1000");
+
+  TaskInfo task2 = createTask(
+      offer1.slave_id(),
+      taskResources.get() + volume,
+      "sleep 1000");
+
+  // We should receive a TASK_RUNNING for each of the tasks.
+  Future<TaskStatus> status1;
+  Future<TaskStatus> status2;
+
+  EXPECT_CALL(sched, statusUpdate(&driver, _))
+    .WillOnce(FutureArg<1>(&status1))
+    .WillOnce(FutureArg<1>(&status2));
+
+  Future<CheckpointResourcesMessage> checkpointResources =
+    FUTURE_PROTOBUF(CheckpointResourcesMessage(), _, slave.get()->pid);
+
+  driver.acceptOffers(
+      {offer1.id()},
+      {CREATE(volume),
+       LAUNCH({task1, task2})});
+
+  AWAIT_READY(checkpointResources);
+  AWAIT_READY(status1);
+  EXPECT_EQ(TASK_RUNNING, status1.get().state());
+
+  AWAIT_READY(status2);
+  EXPECT_EQ(TASK_RUNNING, status2.get().state());
+
+  // This is to make sure the CheckpointResourcesMessage is processed.
+  Clock::pause();
+  Clock::settle();
+  Clock::resume();
+
+  EXPECT_CALL(sched, disconnected(&driver));
+
+  // Simulate a master failover by restarting the master.
+  master->reset();
+
+  EXPECT_CALL(sched, registered(&driver, _, _));
+
+  Future<SlaveReregisteredMessage> slaveReregistered =
+    FUTURE_PROTOBUF(SlaveReregisteredMessage(), _, _);
+
+  Future<vector<Offer>> offers2;
+  EXPECT_CALL(sched, resourceOffers(&driver, _))
+    .WillOnce(FutureArg<1>(&offers2))
+    .WillRepeatedly(Return()); // Ignore subsequent offers.
+
+  master = StartMaster();
+  ASSERT_SOME(master);
+
+  // Simulate a new master detected event on the slave so that the
+  // slave re-registers with the new master.
+  detector.appoint(master.get()->pid);
+
+  AWAIT_READY(slaveReregistered);
+
+  AWAIT_READY(offers2);
+  EXPECT_FALSE(offers2.get().empty());
+
+  Offer offer2 = offers2.get()[0];
+
+  // Verify the offer from the failed-over master.
+  EXPECT_TRUE(Resources(offer2.resources()).contains(
+      Resources::parse("cpus:1;mem:1024").get()));
+  EXPECT_TRUE(Resources(offer2.resources()).contains(volume));
+
+  driver.stop();
+  driver.join();
+}
+
+
 // This test verifies that persistent volumes are recovered properly
 // after the slave restarts. The idea is to launch a command which
 // keeps testing if the persistent volume exists, and fails if it does

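The failover mechanics above hinge on `StandaloneMasterDetector`: resetting
the master disconnects the scheduler driver, and `detector.appoint()` pushes
the new master's PID to everyone sharing the detector (here, both the driver
and the slave). A condensed sketch of the flow, using only calls that appear
in the test:

    master->reset();                      // Stop the old master; the driver
                                          // observes disconnected().
    master = StartMaster();               // Bring up the replacement master.
    detector.appoint(master.get()->pid);  // The slave and driver re-register;
                                          // checkpointed shared volumes are
                                          // then re-offered.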
http://git-wip-us.apache.org/repos/asf/mesos/blob/1f571b34/src/tests/sorter_tests.cpp
----------------------------------------------------------------------
diff --git a/src/tests/sorter_tests.cpp b/src/tests/sorter_tests.cpp
index 9f48abe..a57a4fa 100644
--- a/src/tests/sorter_tests.cpp
+++ b/src/tests/sorter_tests.cpp
@@ -486,6 +486,274 @@ TEST(SorterTest, RevocableResources)
 }
 
 
+
+// This test verifies that shared resources are properly accounted for in
+// the DRF sorter.
+TEST(SorterTest, SharedResources)
+{
+  DRFSorter sorter;
+
+  SlaveID slaveId;
+  slaveId.set_value("agentId");
+
+  Resource sharedDisk = createDiskResource(
+      "100", "role1", "id1", "path1", None(), true);
+
+  // Set totalResources to a disk total of 1000, of which 100 is shared.
+  Resources totalResources = Resources::parse(
+      "cpus:100;mem:100;disk(role1):900").get();
+  totalResources += sharedDisk;
+
+  sorter.add(slaveId, totalResources);
+
+  // Verify sort() works when shared resources are in the allocations.
+  sorter.add("a");
+  Resources aResources = Resources::parse("cpus:5;mem:5").get();
+  aResources += sharedDisk;
+  sorter.allocated("a", slaveId, aResources);
+
+  sorter.add("b");
+  Resources bResources = Resources::parse("cpus:6;mem:6").get();
+  sorter.allocated("b", slaveId, bResources);
+
+  // Shares: a = .1 (dominant: disk), b = .06 (dominant: cpus).
+  EXPECT_EQ(vector<string>({"b", "a"}), sorter.sort());
+
+  sorter.add("c");
+  Resources cResources = Resources::parse("cpus:1;mem:1").get();
+  cResources += sharedDisk;
+  sorter.allocated("c", slaveId, cResources);
+
+  // 'a' and 'c' share the same persistent volume, which is the
+  // dominant resource for both of these clients.
+  // Shares: a = .1 (dominant: disk), b = .06 (dominant: cpus),
+  //         c = .1 (dominant: disk).
+  EXPECT_EQ(vector<string>({"b", "a", "c"}), sorter.sort());
+
+  sorter.remove("a");
+  Resources bUnallocated = Resources::parse("cpus:4;mem:4").get();
+  sorter.unallocated("b", slaveId, bUnallocated);
+
+  // Shares: b = .02 (dominant: cpus), c = .1 (dominant: disk).
+  EXPECT_EQ(vector<string>({"b", "c"}), sorter.sort());
+
+  sorter.add("d");
+  Resources dResources = Resources::parse("cpus:1;mem:5").get();
+  dResources += sharedDisk;
+  sorter.allocated("d", slaveId, dResources);
+
+  // Shares: b = .02 (dominant: cpus), c = .1 (dominant: disk),
+  //         d = .1 (dominant: disk).
+  EXPECT_EQ(vector<string>({"b", "c", "d"}), sorter.sort());
+
+  // Verify other basic allocator methods work when shared resources
+  // are in the allocations.
+  Resources removedResources = Resources::parse("cpus:50;mem:0").get();
+  sorter.remove(slaveId, removedResources);
+
+  // Total resources is now:
+  // cpus:50;mem:100;disk(role1):900;disk(role1)[id1]:100
+
+  // Shares: b = .04 (dominant: cpus), c = .1 (dominant: disk),
+  //         d = .1 (dominant: disk).
+  EXPECT_EQ(vector<string>({"b", "c", "d"}), sorter.sort());
+
+  Resources addedResources = Resources::parse("cpus:0;mem:100").get();
+  sorter.add(slaveId, addedResources);
+
+  // Total resources is now:
+  // cpus:50;mem:200;disk(role1):900;disk(role1)[id1]:100
+
+  // Shares: b = .04 (dominant: cpus), c = .1 (dominant: disk),
+  //         d = .1 (dominant: disk).
+  EXPECT_EQ(vector<string>({"b", "c", "d"}), sorter.sort());
+
+  EXPECT_TRUE(sorter.contains("b"));
+
+  EXPECT_FALSE(sorter.contains("a"));
+
+  EXPECT_EQ(3, sorter.count());
+}
+
+
+// This test verifies that shared resources can make clients
+// indistinguishable, given their high likelihood of becoming the
+// dominant resource.
+TEST(SorterTest, SameDominantSharedResourcesAcrossClients)
+{
+  DRFSorter sorter;
+
+  SlaveID slaveId;
+  slaveId.set_value("agentId");
+
+  Resource sharedDisk = createDiskResource(
+      "900", "role1", "id1", "path1", None(), true);
+
+  // Set totalResources to a disk total of 1000, of which 900 is shared.
+  Resources totalResources = Resources::parse(
+      "cpus:100;mem:100;disk(role1):100").get();
+  totalResources += sharedDisk;
+
+  sorter.add(slaveId, totalResources);
+
+  // Add 2 clients each with the same shared disk, but with varying
+  // cpus and mem.
+  sorter.add("b");
+  Resources bResources = Resources::parse("cpus:5;mem:20").get();
+  bResources += sharedDisk;
+  sorter.allocated("b", slaveId, bResources);
+
+  sorter.add("c");
+  Resources cResources = Resources::parse("cpus:10;mem:6").get();
+  cResources += sharedDisk;
+  sorter.allocated("c", slaveId, cResources);
+
+  // Shares: b = .9 (dominant: disk), c = .9 (dominant: disk).
+  EXPECT_EQ(vector<string>({"b", "c"}), sorter.sort());
+
+  // Add 3rd client with the same shared resource.
+  sorter.add("a");
+  Resources aResources = Resources::parse("cpus:50;mem:40").get();
+  aResources += sharedDisk;
+  sorter.allocated("a", slaveId, aResources);
+
+  // Shares: a = .9 (dominant: disk), b = .9 (dominant: disk),
+  //         c = .9 (dominant: disk).
+  EXPECT_EQ(vector<string>({"a", "b", "c"}), sorter.sort());
+}
+
+
+// This test verifies that allocating the same shared resource to the
+// same client does not alter its fair share.
+TEST(SorterTest, SameSharedResourcesSameClient)
+{
+  DRFSorter sorter;
+
+  SlaveID slaveId;
+  slaveId.set_value("agentId");
+
+  Resource sharedDisk = createDiskResource(
+      "50", "role1", "id1", "path1", None(), true);
+
+  // Set totalResources to a disk total of 1000, of which 50 is shared.
+  Resources totalResources = Resources::parse(
+      "cpus:100;mem:100;disk(role1):950").get();
+  totalResources += sharedDisk;
+
+  sorter.add(slaveId, totalResources);
+
+  // Verify sort() works when shared resources are in the allocations.
+  sorter.add("a");
+  Resources aResources = Resources::parse("cpus:2;mem:2").get();
+  aResources += sharedDisk;
+  sorter.allocated("a", slaveId, aResources);
+
+  sorter.add("b");
+  Resources bResources = Resources::parse("cpus:6;mem:6").get();
+  sorter.allocated("b", slaveId, bResources);
+
+  // Shares: a = .05 (dominant: disk), b = .06 (dominant: cpus).
+  EXPECT_EQ(vector<string>({"a", "b"}), sorter.sort());
+
+  // Update a's share to allocate 3 more copies of the shared disk.
+  // Verify fair share does not change when additional copies of same
+  // shared resource are added to a specific client.
+  Resources additionalAShared = Resources(sharedDisk) + sharedDisk + sharedDisk;
+  sorter.allocated("a", slaveId, additionalAShared);
+
+  // Shares: a = .05 (dominant: disk), b = .06 (dominant: cpus).
+  EXPECT_EQ(vector<string>({"a", "b"}), sorter.sort());
+}
+
+
+// This test verifies that shared resources are unallocated when all
+// the copies are unallocated.
+TEST(SorterTest, SharedResourcesUnallocated)
+{
+  DRFSorter sorter;
+
+  SlaveID slaveId;
+  slaveId.set_value("agentId");
+
+  Resource sharedDisk = createDiskResource(
+      "100", "role1", "id1", "path1", None(), true);
+
+  // Set totalResources to a disk total of 1000, of which 100 is shared.
+  Resources totalResources = Resources::parse(
+      "cpus:100;mem:100;disk(role1):900").get();
+  totalResources += sharedDisk;
+
+  sorter.add(slaveId, totalResources);
+
+  // Allocate 3 copies of shared resources to client 'a', but allocate no
+  // shared resource to client 'b'.
+  sorter.add("a");
+  Resources aResources = Resources::parse("cpus:2;mem:2").get();
+  aResources += sharedDisk;
+  aResources += sharedDisk;
+  aResources += sharedDisk;
+  sorter.allocated("a", slaveId, aResources);
+
+  sorter.add("b");
+  Resources bResources = Resources::parse("cpus:6;mem:6").get();
+  sorter.allocated("b", slaveId, bResources);
+
+  // Shares: a = .1 (dominant: disk), b = .06 (dominant: cpus).
+  EXPECT_EQ(vector<string>({"b", "a"}), sorter.sort());
+
+  // Unallocate 1 copy of shared resource from client 'a', which should
+  // result in no change in its dominant share.
+  sorter.unallocated("a", slaveId, sharedDisk);
+
+  // Shares: a = .1 (dominant: disk), b = .06 (dominant: cpus).
+  EXPECT_EQ(vector<string>({"b", "a"}), sorter.sort());
+
+  // Unallocate remaining copies of shared resource from client 'a',
+  // which would affect the fair share.
+  sorter.unallocated("a", slaveId, Resources(sharedDisk) + sharedDisk);
+
+  // Shares: a = .02 (dominant: cpus), b = .06 (dominant: cpus).
+  EXPECT_EQ(vector<string>({"a", "b"}), sorter.sort());
+}
+
+
+// This test verifies that shared resources are removed from the sorter
+// only when all instances of the same shared resource are removed.
+TEST(SorterTest, RemoveSharedResources)
+{
+  DRFSorter sorter;
+
+  SlaveID slaveId;
+  slaveId.set_value("agentId");
+
+  Resource sharedDisk = createDiskResource(
+      "100", "role1", "id1", "path1", None(), true);
+
+  sorter.add(
+      slaveId, Resources::parse("cpus:100;mem:100;disk(role1):900").get());
+
+  Resources quantity1 = sorter.totalScalarQuantities();
+
+  sorter.add(slaveId, sharedDisk);
+  Resources quantity2 = sorter.totalScalarQuantities();
+
+  EXPECT_EQ(Resources::parse("disk(role1):100").get(), quantity2 - quantity1);
+
+  sorter.add(slaveId, sharedDisk);
+  Resources quantity3 = sorter.totalScalarQuantities();
+
+  EXPECT_NE(quantity1, quantity3);
+  EXPECT_EQ(quantity2, quantity3);
+
+  // The quantity of the shared disk is removed when the last copy is removed.
+  sorter.remove(slaveId, sharedDisk);
+  EXPECT_EQ(sorter.totalScalarQuantities(), quantity3);
+
+  sorter.remove(slaveId, sharedDisk);
+  EXPECT_EQ(sorter.totalScalarQuantities(), quantity1);
+}
+
+
 class Sorter_BENCHMARK_Test
   : public ::testing::Test,
     public ::testing::WithParamInterface<std::tr1::tuple<size_t, size_t>> {};
@@ -634,95 +902,6 @@ TEST_P(Sorter_BENCHMARK_Test, FullSort)
        << watch.elapsed() << endl;
 }
 
-
-// This test verifies that shared resources are properly accounted for in
-// the DRF sorter.
-TEST(SorterTest, SharedResources)
-{
-  DRFSorter sorter;
-
-  SlaveID slaveId;
-  slaveId.set_value("agentId");
-
-  Resource sharedDisk = createDiskResource(
-      "100", "role1", "id1", "path1", None(), true);
-
-  // Set totalResources to have disk of 1000, with disk 100 being shared.
-  Resources totalResources = Resources::parse(
-      "cpus:100;mem:100;disk(role1):900").get();
-  totalResources += sharedDisk;
-
-  sorter.add(slaveId, totalResources);
-
-  // Verify sort() works when shared resources are in the allocations.
-  sorter.add("a");
-  Resources aResources = Resources::parse("cpus:5;mem:5").get();
-  aResources += sharedDisk;
-  sorter.allocated("a", slaveId, aResources);
-
-  sorter.add("b");
-  Resources bResources = Resources::parse("cpus:6;mem:6").get();
-  sorter.allocated("b", slaveId, bResources);
-
-  // Shares: a = .1 (dominant: disk), b = .06 (dominant: cpus).
-  EXPECT_EQ(vector<string>({"b", "a"}), sorter.sort());
-
-  sorter.add("c");
-  Resources cResources = Resources::parse("cpus:1;mem:1").get();
-  cResources += sharedDisk;
-  sorter.allocated("c", slaveId, cResources);
-
-  // 'a' and 'c' share the same persistent volume which is the
-  // dominant resource for both of these clients.
-  // Shares: a = .1 (dominant: disk), b = .06 (dominant: cpus),
-  //         c = .1 (dominant: disk).
-  EXPECT_EQ(vector<string>({"b", "a", "c"}), sorter.sort());
-
-  sorter.remove("a");
-  Resources bUnallocated = Resources::parse("cpus:4;mem:4").get();
-  sorter.unallocated("b", slaveId, bUnallocated);
-
-  // Shares: b = .02 (dominant: cpus), c = .1 (dominant: disk).
-  EXPECT_EQ(vector<string>({"b", "c"}), sorter.sort());
-
-  sorter.add("d");
-  Resources dResources = Resources::parse("cpus:1;mem:5").get();
-  dResources += sharedDisk;
-  sorter.allocated("d", slaveId, dResources);
-
-  // Shares: b = .02 (dominant: cpus), c = .1 (dominant: disk),
-  //         d = .1 (dominant: disk).
-  EXPECT_EQ(vector<string>({"b", "c", "d"}), sorter.sort());
-
-  // Verify other basic allocator methods work when shared resources
-  // are in the allocations.
-  Resources removedResources = Resources::parse("cpus:50;mem:0").get();
-  sorter.remove(slaveId, removedResources);
-
-  // Total resources is now:
-  // cpus:50;mem:100;disk(role1):900;disk(role1)[id1]:100
-
-  // Shares: b = .04 (dominant: cpus), c = .1 (dominant: disk),
-  //         d = .1 (dominant: disk).
-  EXPECT_EQ(vector<string>({"b", "c", "d"}), sorter.sort());
-
-  Resources addedResources = Resources::parse("cpus:0;mem:100").get();
-  sorter.add(slaveId, addedResources);
-
-  // Total resources is now:
-  // cpus:50;mem:200;disk(role1):900;disk(role1)[id1]:100
-
-  // Shares: b = .04 (dominant: cpus), c = .1 (dominant: disk),
-  //         d = .1 (dominant: disk).
-  EXPECT_EQ(vector<string>({"b", "c", "d"}), sorter.sort());
-
-  EXPECT_TRUE(sorter.contains("b"));
-
-  EXPECT_FALSE(sorter.contains("a"));
-
-  EXPECT_EQ(3, sorter.count());
-}
-
 } // namespace tests {
 } // namespace internal {
 } // namespace mesos {
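For reference, the "Shares:" comments in the sorter tests follow the usual
DRF arithmetic, with the one twist these tests exercise: duplicate copies of
the same shared resource count only once toward a client's allocation. A
standalone sketch of the computation for client 'a' in
SorterTest.SharedResources (plain arithmetic, not the sorter API):

    #include <algorithm>

    // Totals: cpus:100, mem:100, disk:1000 (900 regular + 100 shared).
    // Client 'a' holds cpus:5, mem:5, and the shared 100-disk volume.
    double cpusShare = 5.0 / 100.0;     // 0.05
    double memShare = 5.0 / 100.0;      // 0.05
    double diskShare = 100.0 / 1000.0;  // 0.1 -- counted once, no matter how
                                        // many copies 'a' has allocated.
    double dominantShare =
      std::max({cpusShare, memShare, diskShare}); // 0.1 (dominant: disk)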