Posted to commits@quickstep.apache.org by ji...@apache.org on 2017/03/16 19:13:19 UTC

incubator-quickstep git commit: Updates

Repository: incubator-quickstep
Updated Branches:
  refs/heads/LIP-time-decomposition 261b545e2 -> b19bb53ea


Updates


Project: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/commit/b19bb53e
Tree: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/tree/b19bb53e
Diff: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/diff/b19bb53e

Branch: refs/heads/LIP-time-decomposition
Commit: b19bb53ea7b247f19ba3cda5cd413883ed73e687
Parents: 261b545
Author: Jianqiao Zhu <ji...@cs.wisc.edu>
Authored: Thu Mar 16 14:13:07 2017 -0500
Committer: Jianqiao Zhu <ji...@cs.wisc.edu>
Committed: Thu Mar 16 14:13:07 2017 -0500

----------------------------------------------------------------------
 relational_operators/HashJoinOperator.cpp     |  25 ++-
 storage/CountedReference.hpp                  |   4 +-
 storage/InsertDestination.cpp                 |  27 +++
 storage/InsertDestination.hpp                 |   5 +
 storage/SplitRowStoreTupleStorageSubBlock.cpp | 197 ++++++++++++++++++++-
 storage/StorageManager.cpp                    | 194 +++++++++-----------
 utility/ShardedLockManager.hpp                |  59 ------
 7 files changed, 323 insertions(+), 188 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/b19bb53e/relational_operators/HashJoinOperator.cpp
----------------------------------------------------------------------
diff --git a/relational_operators/HashJoinOperator.cpp b/relational_operators/HashJoinOperator.cpp
index 3f6eaab..643602e 100644
--- a/relational_operators/HashJoinOperator.cpp
+++ b/relational_operators/HashJoinOperator.cpp
@@ -471,20 +471,19 @@ void HashInnerJoinWorkOrder::execute() {
         base_accessor->createSharedTupleIdSequenceAdapterVirtual(*existence_map));
   }
 
-  auto *container = simple_profiler.getContainer();
-  auto *event_hash = container->getEventLine("ProbeHash");
-  event_hash->emplace_back();
 
-  if (probe_accessor->getImplementationType() == ValueAccessor::Implementation::kSplitRowStore) {
-    executeWithCopyElision(probe_accessor.get());
-  } else {
+//  if (probe_accessor->getImplementationType() == ValueAccessor::Implementation::kSplitRowStore) {
+//    executeWithCopyElision(probe_accessor.get());
+//  } else {
     executeWithoutCopyElision(probe_accessor.get());
-  }
+//  }
 
-  event_hash->back().endEvent();
 }
 
 void HashInnerJoinWorkOrder::executeWithoutCopyElision(ValueAccessor *probe_accessor) {
+  auto *container = simple_profiler.getContainer();
+  auto *event_hash = container->getEventLine("ProbeHash");
+  event_hash->emplace_back();
   VectorsOfPairsJoinedTuplesCollector collector;
   if (join_key_attributes_.size() == 1) {
     hash_table_.getAllFromValueAccessor(
@@ -499,10 +498,14 @@ void HashInnerJoinWorkOrder::executeWithoutCopyElision(ValueAccessor *probe_acce
         any_join_key_attributes_nullable_,
         &collector);
   }
+  event_hash->back().endEvent();
 
   const relation_id build_relation_id = build_relation_.getID();
   const relation_id probe_relation_id = probe_relation_.getID();
 
+  auto *materialize_line = container->getEventLine("materialize");
+  materialize_line->emplace_back();
+  MutableBlockReference output_block;
   for (std::pair<const block_id, VectorOfTupleIdPair>
            &build_block_entry : *collector.getJoinedTuples()) {
     BlockReference build_block =
@@ -549,8 +552,12 @@ void HashInnerJoinWorkOrder::executeWithoutCopyElision(ValueAccessor *probe_acce
                                                                   build_block_entry.second));
     }
 
-    output_destination_->bulkInsertTuples(&temp_result);
+//    output_destination_->bulkInsertTuples(&temp_result);
+    output_destination_->bulkInsertTuples(&temp_result, &output_block);
   }
+
+  output_destination_->returnBlock(&output_block);
+  materialize_line->back().endEvent();
 }
 
 void HashInnerJoinWorkOrder::executeWithCopyElision(ValueAccessor *probe_accessor) {
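
Note on the hunk above: the "ProbeHash" timer now brackets only the hash-table probe inside executeWithoutCopyElision(), and a separate "materialize" event line brackets the output-block materialization loop. Below is a minimal, self-contained sketch of that two-phase event-line pattern; Event and Profiler are std::chrono-based stand-ins for illustration, not Quickstep's actual profiler types.

    // Illustrative std::chrono-based stand-in for the event lines used above;
    // not Quickstep's profiler API.
    #include <chrono>
    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    struct Event {
      std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
      std::chrono::steady_clock::time_point end;
      void endEvent() { end = std::chrono::steady_clock::now(); }
    };

    struct Profiler {
      std::map<std::string, std::vector<Event>> lines;
      std::vector<Event>* getEventLine(const std::string &name) { return &lines[name]; }
    };

    int main() {
      Profiler simple_profiler;

      // Phase 1: time the hash-table probe on its own event line.
      auto *event_hash = simple_profiler.getEventLine("ProbeHash");
      event_hash->emplace_back();        // start time captured on construction
      // ... probe the join hash table here ...
      event_hash->back().endEvent();

      // Phase 2: time materialization of joined tuples separately.
      auto *materialize_line = simple_profiler.getEventLine("materialize");
      materialize_line->emplace_back();
      // ... bulk-insert joined tuples into output blocks here ...
      materialize_line->back().endEvent();

      for (const auto &line : simple_profiler.lines) {
        for (const Event &event : line.second) {
          std::cout << line.first << ": "
                    << std::chrono::duration_cast<std::chrono::nanoseconds>(
                           event.end - event.start).count()
                    << " ns\n";
        }
      }
      return 0;
    }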

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/b19bb53e/storage/CountedReference.hpp
----------------------------------------------------------------------
diff --git a/storage/CountedReference.hpp b/storage/CountedReference.hpp
index 2d9cec3..49d0f73 100644
--- a/storage/CountedReference.hpp
+++ b/storage/CountedReference.hpp
@@ -66,7 +66,7 @@ class CountedReference {
    **/
   CountedReference(T *block, EvictionPolicy *eviction_policy)
       : block_(block), eviction_policy_(eviction_policy) {
-    eviction_policy_->blockReferenced(block_->getID());
+//    eviction_policy_->blockReferenced(block_->getID());
 #ifdef QUICKSTEP_DEBUG
     block_->ref();
 #endif
@@ -111,7 +111,7 @@ class CountedReference {
 #ifdef QUICKSTEP_DEBUG
         block_->unref();
 #endif
-        eviction_policy_->blockUnreferenced(block_->getID());
+//        eviction_policy_->blockUnreferenced(block_->getID());
       }
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/b19bb53e/storage/InsertDestination.cpp
----------------------------------------------------------------------
diff --git a/storage/InsertDestination.cpp b/storage/InsertDestination.cpp
index 75e1217..fa9382e 100644
--- a/storage/InsertDestination.cpp
+++ b/storage/InsertDestination.cpp
@@ -216,6 +216,33 @@ void InsertDestination::bulkInsertTuples(ValueAccessor *accessor, bool always_ma
   });
 }
 
+void InsertDestination::bulkInsertTuples(ValueAccessor *accessor,
+                                         MutableBlockReference *output_block) {
+  InvokeOnAnyValueAccessor(
+      accessor,
+      [&](auto *accessor) -> void {  // NOLINT(build/c++11)
+    accessor->beginIteration();
+    while (!accessor->iterationFinished()) {
+      // FIXME(chasseur): Deal with TupleTooLargeForBlock exception.
+      if (!output_block->valid()) {
+        *output_block = this->getBlockForInsertion();
+      }
+      if ((*output_block)->bulkInsertTuples(accessor) == 0 ||
+          !accessor->iterationFinished()) {
+        // output_block is full.
+        this->returnBlock(std::move(*output_block), true);
+        *output_block = this->getBlockForInsertion();
+      }
+    }
+  });
+}
+
+void InsertDestination::returnBlock(MutableBlockReference *output_block) {
+  if (output_block->valid()) {
+    this->returnBlock(std::move(*output_block), false);
+  }
+}
+
 void InsertDestination::bulkInsertTuplesWithRemappedAttributes(
     const std::vector<attribute_id> &attribute_map,
     ValueAccessor *accessor,
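
The new two-argument bulkInsertTuples() lets a caller such as HashInnerJoinWorkOrder hold one MutableBlockReference across many insert calls, so successive batches keep filling the same partially full block, and a single returnBlock() call at the end hands back the last partial block. Below is a self-contained sketch of that caller-held-block control flow; Block and Destination are illustrative stand-ins, not Quickstep's InsertDestination/MutableBlockReference.

    #include <cstddef>
    #include <memory>
    #include <utility>
    #include <vector>

    struct Block {
      std::size_t capacity;
      std::vector<int> tuples;

      // Inserts as many tuples from 'batch' as fit; returns how many were
      // inserted (0 means the block was already full).
      std::size_t bulkInsert(const std::vector<int> &batch, std::size_t *consumed) {
        std::size_t inserted = 0;
        while (*consumed < batch.size() && tuples.size() < capacity) {
          tuples.push_back(batch[(*consumed)++]);
          ++inserted;
        }
        return inserted;
      }
    };

    class Destination {
     public:
      std::unique_ptr<Block> getBlockForInsertion() {
        return std::unique_ptr<Block>(new Block{4, {}});
      }

      void returnBlock(std::unique_ptr<Block> block, bool /* full */) {
        finished_blocks_.push_back(std::move(block));
      }

      // Analogue of the new bulkInsertTuples(accessor, output_block): the
      // caller owns 'output_block' and keeps it across calls, so consecutive
      // batches reuse the same block instead of grabbing a fresh one per call.
      void bulkInsert(const std::vector<int> &batch,
                      std::unique_ptr<Block> *output_block) {
        std::size_t consumed = 0;
        while (consumed < batch.size()) {
          if (!*output_block) {
            *output_block = getBlockForInsertion();
          }
          if ((*output_block)->bulkInsert(batch, &consumed) == 0 ||
              consumed < batch.size()) {
            // The block filled up before the batch ran out: hand it back full
            // and continue with a fresh block.
            returnBlock(std::move(*output_block), true);
            *output_block = getBlockForInsertion();
          }
        }
      }

     private:
      std::vector<std::unique_ptr<Block>> finished_blocks_;
    };

    int main() {
      Destination destination;
      std::unique_ptr<Block> output_block;               // invalid until first use
      destination.bulkInsert({1, 2, 3}, &output_block);
      destination.bulkInsert({4, 5, 6}, &output_block);  // reuses the same block
      if (output_block) {                                // analogue of returnBlock(&output_block)
        destination.returnBlock(std::move(output_block), false);
      }
      return 0;
    }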

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/b19bb53e/storage/InsertDestination.hpp
----------------------------------------------------------------------
diff --git a/storage/InsertDestination.hpp b/storage/InsertDestination.hpp
index e9335ce..69df8a1 100644
--- a/storage/InsertDestination.hpp
+++ b/storage/InsertDestination.hpp
@@ -147,6 +147,11 @@ class InsertDestination : public InsertDestinationInterface {
 
   void bulkInsertTuples(ValueAccessor *accessor, bool always_mark_full = false) override;
 
+  void bulkInsertTuples(ValueAccessor *accessor,
+                        MutableBlockReference *output_block);
+
+  void returnBlock(MutableBlockReference *block);
+
   void bulkInsertTuplesWithRemappedAttributes(
       const std::vector<attribute_id> &attribute_map,
       ValueAccessor *accessor,

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/b19bb53e/storage/SplitRowStoreTupleStorageSubBlock.cpp
----------------------------------------------------------------------
diff --git a/storage/SplitRowStoreTupleStorageSubBlock.cpp b/storage/SplitRowStoreTupleStorageSubBlock.cpp
index ad583eb..a581e49 100644
--- a/storage/SplitRowStoreTupleStorageSubBlock.cpp
+++ b/storage/SplitRowStoreTupleStorageSubBlock.cpp
@@ -219,13 +219,196 @@ TupleStorageSubBlock::InsertResult SplitRowStoreTupleStorageSubBlock::insertTupl
 }
 
 tuple_id SplitRowStoreTupleStorageSubBlock::bulkInsertTuples(ValueAccessor *accessor) {
-  std::vector<attribute_id> simple_remap;
-  for (attribute_id attr_id = 0;
-      attr_id < static_cast<attribute_id>(relation_.size());
-      ++attr_id) {
-    simple_remap.push_back(attr_id);
-  }
-  return bulkInsertDispatcher(simple_remap, accessor, kCatalogMaxID, true);
+  const tuple_id original_num_tuples = header_->num_tuples;
+  tuple_id pos = 0;
+
+  InvokeOnAnyValueAccessor(
+      accessor,
+      [&](auto *accessor) -> void {  // NOLINT(build/c++11)
+    if (relation_.hasNullableAttributes()) {
+      if (relation_.isVariableLength()) {
+        while (accessor->next()) {
+          // If packed, insert at the end of the slot array, otherwise find the
+          // first hole.
+          pos = this->isPacked() ? header_->num_tuples
+                                 : occupancy_bitmap_->firstZero(pos);
+          const std::size_t tuple_variable_bytes
+              = CalculateVariableSize<decltype(*accessor), true>(relation_, *accessor);
+          if (!this->spaceToInsert(pos, tuple_variable_bytes)) {
+            accessor->previous();
+            break;
+          }
+          // Allocate variable-length storage.
+          header_->variable_length_bytes_allocated += tuple_variable_bytes;
+
+          // Find the slot and locate its sub-structures.
+          void *tuple_slot = static_cast<char*>(tuple_storage_) + pos * tuple_slot_bytes_;
+          BitVector<true> tuple_null_bitmap(tuple_slot,
+                                            relation_.numNullableAttributes());
+          tuple_null_bitmap.clear();
+          char *fixed_length_attr_storage = static_cast<char*>(tuple_slot) + per_tuple_null_bitmap_bytes_;
+          std::uint32_t *variable_length_info_array = reinterpret_cast<std::uint32_t*>(
+              fixed_length_attr_storage + relation_.getFixedByteLength());
+          // Start writing variable-length data at the beginning of the newly
+          // allocated range.
+          std::uint32_t current_variable_position
+              = tuple_storage_bytes_ - header_->variable_length_bytes_allocated;
+
+          attribute_id accessor_attr_id = 0;
+          for (CatalogRelationSchema::const_iterator attr_it = relation_.begin();
+               attr_it != relation_.end();
+               ++attr_it, ++accessor_attr_id) {
+            const int nullable_idx = relation_.getNullableAttributeIndex(attr_it->getID());
+            const int variable_idx = relation_.getVariableLengthAttributeIndex(attr_it->getID());
+            TypedValue attr_value(accessor->getTypedValue(accessor_attr_id));
+            if ((nullable_idx != -1) && (attr_value.isNull())) {
+              // Set null bit and move on.
+              tuple_null_bitmap.setBit(nullable_idx, true);
+              continue;
+            }
+            if (variable_idx != -1) {
+              // Write offset and size into the slot, then copy the actual
+              // value into the variable-length storage region.
+              const std::size_t attr_size = attr_value.getDataSize();
+              variable_length_info_array[variable_idx << 1] = current_variable_position;
+              variable_length_info_array[(variable_idx << 1) + 1] = attr_size;
+              attr_value.copyInto(static_cast<char*>(tuple_storage_) + current_variable_position);
+              current_variable_position += attr_size;
+            } else {
+              // Copy fixed-length value directly into the slot.
+              attr_value.copyInto(fixed_length_attr_storage
+                                  + relation_.getFixedLengthAttributeOffset(attr_it->getID()));
+            }
+          }
+          // Update occupancy bitmap and header.
+          occupancy_bitmap_->setBit(pos, true);
+          ++(header_->num_tuples);
+          if (pos > header_->max_tid) {
+            header_->max_tid = pos;
+          }
+        }
+      } else {
+        // Same as above, but skip variable-length checks.
+        while (accessor->next()) {
+          pos = this->isPacked() ? header_->num_tuples
+                                 : occupancy_bitmap_->firstZero(pos);
+          if (!this->spaceToInsert(pos, 0)) {
+            accessor->previous();
+            break;
+          }
+          void *tuple_slot = static_cast<char*>(tuple_storage_) + pos * tuple_slot_bytes_;
+          BitVector<true> tuple_null_bitmap(tuple_slot,
+                                            relation_.numNullableAttributes());
+          tuple_null_bitmap.clear();
+          char *fixed_length_attr_storage = static_cast<char*>(tuple_slot) + per_tuple_null_bitmap_bytes_;
+
+          attribute_id accessor_attr_id = 0;
+          for (CatalogRelationSchema::const_iterator attr_it = relation_.begin();
+               attr_it != relation_.end();
+               ++attr_it, ++accessor_attr_id) {
+            const int nullable_idx = relation_.getNullableAttributeIndex(attr_it->getID());
+            if (nullable_idx != -1) {
+              const void *attr_value = accessor->template getUntypedValue<true>(accessor_attr_id);
+              if (attr_value == nullptr) {
+                tuple_null_bitmap.setBit(nullable_idx, true);
+              } else {
+                std::memcpy(fixed_length_attr_storage
+                                + relation_.getFixedLengthAttributeOffset(attr_it->getID()),
+                            attr_value,
+                            attr_it->getType().maximumByteLength());
+              }
+            } else {
+              const void *attr_value = accessor->template getUntypedValue<false>(accessor_attr_id);
+              std::memcpy(fixed_length_attr_storage
+                              + relation_.getFixedLengthAttributeOffset(attr_it->getID()),
+                          attr_value,
+                          attr_it->getType().maximumByteLength());
+            }
+          }
+          occupancy_bitmap_->setBit(pos, true);
+          ++(header_->num_tuples);
+          if (pos > header_->max_tid) {
+            header_->max_tid = pos;
+          }
+        }
+      }
+    } else {
+      if (relation_.isVariableLength()) {
+        // Same as most general case above, but skip null checks.
+        while (accessor->next()) {
+          pos = this->isPacked() ? header_->num_tuples
+                                 : occupancy_bitmap_->firstZero(pos);
+          const std::size_t tuple_variable_bytes
+              = CalculateVariableSize<decltype(*accessor), false>(relation_, *accessor);
+          if (!this->spaceToInsert(pos, tuple_variable_bytes)) {
+            accessor->previous();
+            break;
+          }
+          header_->variable_length_bytes_allocated += tuple_variable_bytes;
+
+          void *tuple_slot = static_cast<char*>(tuple_storage_) + pos * tuple_slot_bytes_;
+          char *fixed_length_attr_storage = static_cast<char*>(tuple_slot) + per_tuple_null_bitmap_bytes_;
+          std::uint32_t *variable_length_info_array = reinterpret_cast<std::uint32_t*>(
+              fixed_length_attr_storage + relation_.getFixedByteLength());
+          std::uint32_t current_variable_position
+              = tuple_storage_bytes_ - header_->variable_length_bytes_allocated;
+
+          attribute_id accessor_attr_id = 0;
+          for (CatalogRelationSchema::const_iterator attr_it = relation_.begin();
+               attr_it != relation_.end();
+               ++attr_it, ++accessor_attr_id) {
+            const int variable_idx = relation_.getVariableLengthAttributeIndex(attr_it->getID());
+            TypedValue attr_value(accessor->getTypedValue(accessor_attr_id));
+            if (variable_idx != -1) {
+              const std::size_t attr_size = attr_value.getDataSize();
+              variable_length_info_array[variable_idx << 1] = current_variable_position;
+              variable_length_info_array[(variable_idx << 1) + 1] = attr_size;
+              attr_value.copyInto(static_cast<char*>(tuple_storage_) + current_variable_position);
+              current_variable_position += attr_size;
+            } else {
+              attr_value.copyInto(fixed_length_attr_storage
+                                  + relation_.getFixedLengthAttributeOffset(attr_it->getID()));
+            }
+          }
+          occupancy_bitmap_->setBit(pos, true);
+          ++(header_->num_tuples);
+          if (pos > header_->max_tid) {
+            header_->max_tid = pos;
+          }
+        }
+      } else {
+        // Simplest case: skip both null and variable-length checks.
+        while (accessor->next()) {
+          pos = this->isPacked() ? header_->num_tuples
+                                 : occupancy_bitmap_->firstZero(pos);
+          if (!this->spaceToInsert(pos, 0)) {
+            accessor->previous();
+            break;
+          }
+          void *tuple_slot = static_cast<char*>(tuple_storage_) + pos * tuple_slot_bytes_;
+          char *fixed_length_attr_storage = static_cast<char*>(tuple_slot) + per_tuple_null_bitmap_bytes_;
+
+          attribute_id accessor_attr_id = 0;
+          for (CatalogRelationSchema::const_iterator attr_it = relation_.begin();
+               attr_it != relation_.end();
+               ++attr_it, ++accessor_attr_id) {
+            const void *attr_value = accessor->template getUntypedValue<false>(accessor_attr_id);
+            std::memcpy(fixed_length_attr_storage
+                            + relation_.getFixedLengthAttributeOffset(attr_it->getID()),
+                        attr_value,
+                        attr_it->getType().maximumByteLength());
+          }
+          occupancy_bitmap_->setBit(pos, true);
+          ++(header_->num_tuples);
+          if (pos > header_->max_tid) {
+            header_->max_tid = pos;
+          }
+        }
+      }
+    }
+  });
+
+  return header_->num_tuples - original_num_tuples;
 }
 
 tuple_id SplitRowStoreTupleStorageSubBlock::bulkInsertPartialTuples(
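
The reinstated bulkInsertTuples() above writes each tuple into a fixed-size slot (per-tuple null bitmap, then fixed-length values, then one (offset, size) pair per variable-length attribute), while variable-length payloads grow backward from the end of the tuple storage region. The following self-contained sketch walks one tuple through that layout; the attribute sizes and the null-bitmap padding are made-up illustrations, and the real offsets come from the CatalogRelationSchema.

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <iostream>
    #include <vector>

    int main() {
      // Pretend relation: a null bitmap (padded to 4 bytes here), two
      // fixed-length 4-byte attributes, one variable-length attribute.
      const std::size_t per_tuple_null_bitmap_bytes = 4;
      const std::size_t fixed_byte_length = 2 * sizeof(std::int32_t);
      const std::size_t num_variable_attrs = 1;
      // Each variable-length attribute keeps an (offset, size) pair in the slot.
      const std::size_t tuple_slot_bytes =
          per_tuple_null_bitmap_bytes + fixed_byte_length +
          num_variable_attrs * 2 * sizeof(std::uint32_t);

      const std::size_t tuple_storage_bytes = 256;
      std::vector<char> tuple_storage(tuple_storage_bytes, 0);
      std::size_t variable_length_bytes_allocated = 0;

      // Slots grow forward from the front of the block; variable-length
      // payloads grow backward from the end, as in the loops above.
      const std::size_t pos = 0;  // slot index chosen by isPacked()/firstZero()
      char *tuple_slot = tuple_storage.data() + pos * tuple_slot_bytes;
      char *fixed_storage = tuple_slot + per_tuple_null_bitmap_bytes;

      // Fixed-length values are copied straight into the slot.
      const std::int32_t a = 7, b = 42;
      std::memcpy(fixed_storage, &a, sizeof(a));
      std::memcpy(fixed_storage + sizeof(a), &b, sizeof(b));

      // Variable-length value: allocate from the back, record (offset, size)
      // in the slot's info array, then copy the payload.
      const char payload[] = "variable-length payload";
      variable_length_bytes_allocated += sizeof(payload);
      const std::uint32_t offset = static_cast<std::uint32_t>(
          tuple_storage_bytes - variable_length_bytes_allocated);
      const std::uint32_t info[2] = {offset,
                                     static_cast<std::uint32_t>(sizeof(payload))};
      std::memcpy(fixed_storage + fixed_byte_length, info, sizeof(info));
      std::memcpy(tuple_storage.data() + offset, payload, sizeof(payload));

      std::cout << "slot bytes: " << tuple_slot_bytes
                << ", payload written at offset " << offset << "\n";
      return 0;
    }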

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/b19bb53e/storage/StorageManager.cpp
----------------------------------------------------------------------
diff --git a/storage/StorageManager.cpp b/storage/StorageManager.cpp
index c70eafa..2379742 100644
--- a/storage/StorageManager.cpp
+++ b/storage/StorageManager.cpp
@@ -815,140 +815,112 @@ MutableBlockReference StorageManager::getBlockInternal(
       ret = MutableBlockReference(static_cast<StorageBlock*>(it->second.block), eviction_policy_.get());
     }
   }
-  // To be safe, release the block's shard after 'eviction_lock' destructs.
-  lock_manager_.release(block);
 
   if (ret.valid()) {
     return ret;
   }
 
-  // Note that there is no way for the block to be evicted between the call to
-  // loadBlock and the call to EvictionPolicy::blockReferenced from
-  // MutableBlockReference's constructor; this is because EvictionPolicy
-  // doesn't know about the block until blockReferenced is called, so
-  // chooseBlockToEvict shouldn't return the block.
-  do {
-    SpinSharedMutexExclusiveLock<false> io_lock(*lock_manager_.get(block));
-    {
-      // Check one more time if the block got loaded in memory by someone else.
-      SpinSharedMutexSharedLock<false> read_lock(blocks_shared_mutex_);
-      std::unordered_map<block_id, BlockHandle>::iterator it = blocks_.find(block);
-      if (it != blocks_.end()) {
-        DEBUG_ASSERT(!it->second.block->isBlob());
-        ret = MutableBlockReference(static_cast<StorageBlock*>(it->second.block), eviction_policy_.get());
-        break;
-      }
+  SpinSharedMutexExclusiveLock<false> io_lock(*lock_manager_.get(block));
+  {
+    // Check one more time if the block got loaded in memory by someone else.
+    SpinSharedMutexSharedLock<false> read_lock(blocks_shared_mutex_);
+    std::unordered_map<block_id, BlockHandle>::iterator it = blocks_.find(block);
+    if (it != blocks_.end()) {
+      DEBUG_ASSERT(!it->second.block->isBlob());
+      return MutableBlockReference(static_cast<StorageBlock*>(it->second.block), eviction_policy_.get());
     }
-    // No other thread loaded the block before us.
-    ret = MutableBlockReference(loadBlock(block, relation, numa_node), eviction_policy_.get());
-  } while (false);
-  // To be safe, release the block's shard after 'io_lock' destructs.
-  lock_manager_.release(block);
-
-  return ret;
+  }
+  // No other thread loaded the block before us.
+  return MutableBlockReference(loadBlock(block, relation, numa_node), eviction_policy_.get());
 }
 
 MutableBlobReference StorageManager::getBlobInternal(const block_id blob,
                                                      const int numa_node) {
-  MutableBlobReference ret;
   {
     SpinSharedMutexSharedLock<false> eviction_lock(*lock_manager_.get(blob));
     SpinSharedMutexSharedLock<false> read_lock(blocks_shared_mutex_);
     std::unordered_map<block_id, BlockHandle>::iterator it = blocks_.find(blob);
     if (it != blocks_.end()) {
       DEBUG_ASSERT(it->second.block->isBlob());
-      ret = MutableBlobReference(static_cast<StorageBlob*>(it->second.block), eviction_policy_.get());
+      return MutableBlobReference(
+          static_cast<StorageBlob*>(it->second.block), eviction_policy_.get());
     }
   }
-  // To be safe, release the blob's shard after 'eviction_lock' destructs.
-  lock_manager_.release(blob);
 
-  if (ret.valid()) {
-    return ret;
-  }
-
-  do {
-    SpinSharedMutexExclusiveLock<false> io_lock(*lock_manager_.get(blob));
-    // Note that there is no way for the block to be evicted between the call to
-    // loadBlob and the call to EvictionPolicy::blockReferenced from
-    // MutableBlobReference's constructor; this is because EvictionPolicy
-    // doesn't know about the blob until blockReferenced is called, so
-    // chooseBlockToEvict shouldn't return the blob.
-    {
-      SpinSharedMutexSharedLock<false> read_lock(blocks_shared_mutex_);
-      std::unordered_map<block_id, BlockHandle>::iterator it = blocks_.find(blob);
-      if (it != blocks_.end()) {
-        DEBUG_ASSERT(it->second.block->isBlob());
-        ret = MutableBlobReference(static_cast<StorageBlob*>(it->second.block), eviction_policy_.get());
-        break;
-      }
+  SpinSharedMutexExclusiveLock<false> io_lock(*lock_manager_.get(blob));
+  {
+    SpinSharedMutexSharedLock<false> read_lock(blocks_shared_mutex_);
+    std::unordered_map<block_id, BlockHandle>::iterator it = blocks_.find(blob);
+    if (it != blocks_.end()) {
+      DEBUG_ASSERT(it->second.block->isBlob());
+      return MutableBlobReference(
+          static_cast<StorageBlob*>(it->second.block), eviction_policy_.get());
     }
-    // No other thread loaded the blob before us.
-    ret = MutableBlobReference(loadBlob(blob, numa_node), eviction_policy_.get());
-  } while (false);
-  // To be safe, release the blob's shard after 'io_lock' destructs.
-  lock_manager_.release(blob);
-
-  return ret;
+  }
+  // No other thread loaded the blob before us.
+  return MutableBlobReference(loadBlob(blob, numa_node), eviction_policy_.get());
 }
 
 void StorageManager::makeRoomForBlockOrBlob(const size_t slots) {
-  block_id block_to_evict;
-  while (total_memory_usage_ + slots > max_memory_usage_) {
-    const EvictionPolicy::Status status = eviction_policy_->chooseBlockToEvict(&block_to_evict);
-    if (status != EvictionPolicy::Status::kOk) {
-      // If status was not ok, then we must not have been able to evict enough
-      // blocks; therefore, we return anyway, temporarily going over the memory
-      // limit.
-      break;
-    }
-
-    bool has_collision = false;
-    SpinSharedMutexExclusiveLock<false> eviction_lock(*lock_manager_.get(block_to_evict, &has_collision));
-    if (has_collision) {
-      // We have a collision in the shared lock manager, where some callers
-      // of this function (i.e., getBlockInternal or getBlobInternal) has
-      // acquired an exclusive lock, and we are trying to evict a block that
-      // hashes to the same location. This will cause a deadlock.
-
-      // For now simply treat this situation as the case where there is not
-      // enough memory and we temporarily go over the memory limit.
-      break;
-    }
-
-    {
-      SpinSharedMutexSharedLock<false> read_lock(blocks_shared_mutex_);
-      if (blocks_.find(block_to_evict) == blocks_.end()) {
-        // another thread must have jumped in and evicted it before us
-
-        // NOTE(zuyu): It is ok to release the shard for a block or blob,
-        // before 'eviction_lock' destructs, because we will never encounter a
-        // self-deadlock in a single thread, and in multiple-thread case some
-        // thread will block but not deadlock if there is a shard collision.
-        lock_manager_.release(block_to_evict);
-        continue;
-      }
-    }
-    if (eviction_policy_->getRefCount(block_to_evict) > 0) {
-      // Someone sneaked in and referenced the block before we could evict it.
-
-      // NOTE(zuyu): It is ok to release the shard for a block or blob, before
-      // before 'eviction_lock' destructs, because we will never encounter a
-      // self-deadlock in a single thread, and in multiple-thread case some
-      // thread will block but not deadlock if there is a shard collision.
-      lock_manager_.release(block_to_evict);
-      continue;
-    }
-    if (saveBlockOrBlob(block_to_evict)) {
-      evictBlockOrBlob(block_to_evict);
-    }  // else : Someone sneaked in and evicted the block before we could.
-
-    // NOTE(zuyu): It is ok to release the shard for a block or blob, before
-    // before 'eviction_lock' destructs, because we will never encounter a
-    // self-deadlock in a single thread, and in multiple-thread case some
-    // thread will block but not deadlock if there is a shard collision.
-    lock_manager_.release(block_to_evict);
-  }
+  if (total_memory_usage_.load(std::memory_order_relaxed) + slots > max_memory_usage_) {
+    LOG(FATAL) << "Buffer pool is full";
+  }
+//  block_id block_to_evict;
+//  while (total_memory_usage_ + slots > max_memory_usage_) {
+//    const EvictionPolicy::Status status = eviction_policy_->chooseBlockToEvict(&block_to_evict);
+//    if (status != EvictionPolicy::Status::kOk) {
+//      // If status was not ok, then we must not have been able to evict enough
+//      // blocks; therefore, we return anyway, temporarily going over the memory
+//      // limit.
+//      break;
+//    }
+//
+//    bool has_collision = false;
+//    SpinSharedMutexExclusiveLock<false> eviction_lock(*lock_manager_.get(block_to_evict, &has_collision));
+//    if (has_collision) {
+//      // We have a collision in the shared lock manager, where some callers
+//      // of this function (i.e., getBlockInternal or getBlobInternal) has
+//      // acquired an exclusive lock, and we are trying to evict a block that
+//      // hashes to the same location. This will cause a deadlock.
+//
+//      // For now simply treat this situation as the case where there is not
+//      // enough memory and we temporarily go over the memory limit.
+//      break;
+//    }
+//
+//    {
+//      SpinSharedMutexSharedLock<false> read_lock(blocks_shared_mutex_);
+//      if (blocks_.find(block_to_evict) == blocks_.end()) {
+//        // another thread must have jumped in and evicted it before us
+//
+//        // NOTE(zuyu): It is ok to release the shard for a block or blob,
+//        // before 'eviction_lock' destructs, because we will never encounter a
+//        // self-deadlock in a single thread, and in multiple-thread case some
+//        // thread will block but not deadlock if there is a shard collision.
+//        lock_manager_.release(block_to_evict);
+//        continue;
+//      }
+//    }
+//    if (eviction_policy_->getRefCount(block_to_evict) > 0) {
+//      // Someone sneaked in and referenced the block before we could evict it.
+//
+//      // NOTE(zuyu): It is ok to release the shard for a block or blob, before
+//      // before 'eviction_lock' destructs, because we will never encounter a
+//      // self-deadlock in a single thread, and in multiple-thread case some
+//      // thread will block but not deadlock if there is a shard collision.
+//      lock_manager_.release(block_to_evict);
+//      continue;
+//    }
+//    if (saveBlockOrBlob(block_to_evict)) {
+//      evictBlockOrBlob(block_to_evict);
+//    }  // else : Someone sneaked in and evicted the block before we could.
+//
+//    // NOTE(zuyu): It is ok to release the shard for a block or blob, before
+//    // before 'eviction_lock' destructs, because we will never encounter a
+//    // self-deadlock in a single thread, and in multiple-thread case some
+//    // thread will block but not deadlock if there is a shard collision.
+//    lock_manager_.release(block_to_evict);
+//  }
 }
 
 bool StorageManager::blockOrBlobIsLoadedAndDirty(const block_id block) {
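
With the lock_manager_.release() bookkeeping removed, getBlockInternal() and getBlobInternal() above reduce to a double-checked lookup: check the resident map under a shared lock, and on a miss take the exclusive I/O lock, re-check, then load. Here is a minimal sketch of that pattern using std::shared_mutex; Block and Manager are stand-in types, the real code uses Quickstep's spin locks and a per-shard I/O mutex, and eviction is out of scope here (matching the LOG(FATAL) stub above).

    #include <cstdint>
    #include <memory>
    #include <mutex>
    #include <shared_mutex>
    #include <unordered_map>

    using block_id = std::uint64_t;
    struct Block { block_id id; };

    class Manager {
     public:
      Block *getBlock(const block_id block) {
        {
          // Fast path: the block is already resident.
          std::shared_lock<std::shared_mutex> read_lock(blocks_mutex_);
          auto it = blocks_.find(block);
          if (it != blocks_.end()) return it->second.get();
        }
        // Slow path: serialize loads behind an exclusive I/O lock.
        std::unique_lock<std::shared_mutex> io_lock(io_mutex_);
        {
          // Check again: another thread may have loaded it while we waited.
          std::shared_lock<std::shared_mutex> read_lock(blocks_mutex_);
          auto it = blocks_.find(block);
          if (it != blocks_.end()) return it->second.get();
        }
        return loadBlock(block);
      }

     private:
      Block *loadBlock(const block_id block) {
        std::unique_ptr<Block> loaded(new Block{block});  // pretend disk I/O here
        Block *raw = loaded.get();
        std::unique_lock<std::shared_mutex> write_lock(blocks_mutex_);
        blocks_.emplace(block, std::move(loaded));
        return raw;
      }

      std::shared_mutex blocks_mutex_;
      std::shared_mutex io_mutex_;  // per-shard in the real ShardedLockManager
      std::unordered_map<block_id, std::unique_ptr<Block>> blocks_;
    };

    int main() {
      Manager manager;
      Block *b = manager.getBlock(42);
      return b == manager.getBlock(42) ? 0 : 1;  // second call hits the fast path
    }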

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/b19bb53e/utility/ShardedLockManager.hpp
----------------------------------------------------------------------
diff --git a/utility/ShardedLockManager.hpp b/utility/ShardedLockManager.hpp
index 520f879..e56a6e1 100644
--- a/utility/ShardedLockManager.hpp
+++ b/utility/ShardedLockManager.hpp
@@ -55,74 +55,15 @@ class ShardedLockManager {
    */
   ShardedLockManager() { }
 
-  /**
-   * @brief Get the SharedMutex corresponding to the provided key.
-   * @param key The key to map to a SharedMutex.
-   * @param has_collision Whether accessing the given key would result in a
-   *        hash collision. Used in StorageManager::makeRoomForBlock only.
-   * @return The corresponding SharedMutex if there is no collision; otherwise,
-   *         the collision SharedMutex.
-   */
   SharedMutexT* get(const T key, bool *has_collision = nullptr) {
     const std::size_t shard = hash_(key) % N;
-
-    if (has_collision != nullptr) {
-      // In StorageManager::makeRoomForBlock, check whether the evicting block
-      // or blob has a shard collision with existing referenced shards.
-      SpinSharedMutexSharedLock<false> read_lock(shard_count_mutex_);
-      if (shard_count_.find(shard) != shard_count_.end()) {
-        *has_collision = true;
-        return &collision_mutex_;
-      }
-    }
-
-    {
-      SpinSharedMutexExclusiveLock<false> write_lock(shard_count_mutex_);
-
-      // Check one more time for the evicting block or blob if there is a shard
-      // collision.
-      auto it = shard_count_.find(shard);
-      if (it != shard_count_.end()) {
-        if (has_collision != nullptr) {
-          *has_collision = true;
-          return &collision_mutex_;
-        }
-
-        ++it->second;
-      } else {
-        shard_count_.emplace(shard, 1);
-      }
-    }
     return &sharded_mutexes_[shard];
   }
 
-  /**
-   * @brief Release the shard corresponding to the provided key.
-   * @param key The key to compute the shard.
-   */
-  void release(const T key) {
-    SpinSharedMutexExclusiveLock<false> write_lock(shard_count_mutex_);
-    auto it = shard_count_.find(hash_(key) % N);
-    DCHECK(it != shard_count_.end());
-
-    if (--it->second == 0) {
-      shard_count_.erase(it);
-    }
-  }
-
  private:
   std::hash<T> hash_;
   std::array<SharedMutexT, N> sharded_mutexes_;
 
-  // The placeholder mutex used whenever there is a hash collision.
-  SharedMutexT collision_mutex_;
-
-  // Count all shards referenced by StorageManager in multiple threads.
-  // The key is the shard, while the value is the count. If the count equals to
-  // zero, we delete the shard entry.
-  std::unordered_map<std::size_t, std::size_t> shard_count_;
-  alignas(kCacheLineBytes) mutable SpinSharedMutex<false> shard_count_mutex_;
-
   DISALLOW_COPY_AND_ASSIGN(ShardedLockManager);
 };
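
After this change, ShardedLockManager::get() is just a hash onto one of N mutexes, with no shard reference counting and no collision_mutex_ fallback, so two keys whose hashes collide modulo N simply share a mutex. A self-contained sketch of that reduced behavior, with std::shared_mutex standing in for SharedMutexT:

    #include <array>
    #include <cstddef>
    #include <functional>
    #include <mutex>
    #include <shared_mutex>

    template <typename T, std::size_t N>
    class SimpleShardedLockManager {
     public:
      // Distinct keys may share a mutex when their hashes collide modulo N;
      // without the removed shard_count_ bookkeeping, callers must tolerate that.
      std::shared_mutex *get(const T key) {
        return &sharded_mutexes_[hash_(key) % N];
      }

     private:
      std::hash<T> hash_;
      std::array<std::shared_mutex, N> sharded_mutexes_;
    };

    int main() {
      SimpleShardedLockManager<int, 16> manager;
      {
        std::shared_lock<std::shared_mutex> read_lock(*manager.get(7));   // shared access
      }
      {
        std::unique_lock<std::shared_mutex> write_lock(*manager.get(7));  // exclusive access
      }
      return 0;
    }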