You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@quickstep.apache.org by hb...@apache.org on 2016/09/09 15:51:22 UTC

[08/19] incubator-quickstep git commit: Reinterpreted the payload byte pointer as a SpinMutex before locking. This removes the need for an additional function in the SpinMutex class that accepts a pointer.

Reinterpreted the payload byte pointer as a SpinMutex before locking. This removes the need for an additional function in the SpinMutex class that accepts a pointer.


Project: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/commit/e7e6bafc
Tree: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/tree/e7e6bafc
Diff: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/diff/e7e6bafc

Branch: refs/heads/quickstep-28-29
Commit: e7e6bafc5047edf50b99b8e8e91f7c54ba5417c9
Parents: a0a6861
Author: rathijit <ra...@node-2.aggregation.quickstep-pg0.wisc.cloudlab.us>
Authored: Sun Aug 14 19:54:50 2016 -0500
Committer: Harshad Deshmukh <hb...@apache.org>
Committed: Fri Sep 9 10:50:25 2016 -0500

----------------------------------------------------------------------
 storage/FastHashTable.hpp | 12 ++++++------
 threading/SpinMutex.hpp   |  2 --
 2 files changed, 6 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/e7e6bafc/storage/FastHashTable.hpp
----------------------------------------------------------------------
diff --git a/storage/FastHashTable.hpp b/storage/FastHashTable.hpp
index 8d8d82b..c659a20 100644
--- a/storage/FastHashTable.hpp
+++ b/storage/FastHashTable.hpp
@@ -1900,7 +1900,7 @@ bool FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys
         SpinSharedMutexSharedLock<true> resize_lock(resize_shared_mutex_);
         uint8_t *value = upsertCompositeKeyInternalFast(key, init_value_ptr, variable_size);
         if (value != nullptr) {
-            SpinMutex lock(value);
+            SpinMutexLock lock(*(reinterpret_cast<SpinMutex *>(value)));
             for (unsigned int k = 0; k < handles_.size(); ++k) {
                 handles_[k]->mergeStatesFast(source_state + payload_offsets_[k], value + payload_offsets_[k]);
             }
@@ -1914,7 +1914,7 @@ bool FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys
     if (value == nullptr) {
       return false;
     } else {
-      SpinMutex lock(value);
+      SpinMutexLock lock(*(reinterpret_cast<SpinMutex *>(value)));
       for (unsigned int k = 0; k < handles_.size(); ++k) {
           handles_[k]->mergeStatesFast(source_state + payload_offsets_[k], value + payload_offsets_[k]);
       }
@@ -2017,7 +2017,7 @@ bool FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys
               continuing = true;
               break;
             } else {
-              SpinMutex lock(value);
+              SpinMutexLock lock(*(reinterpret_cast<SpinMutex *>(value)));
               for (unsigned int k = 0; k < handles_.size(); ++k) {
                   local.clear();
                   if (argument_ids[k].size()) {
@@ -2044,7 +2044,7 @@ bool FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys
         if (value == nullptr) {
           return false;
         } else {
-          SpinMutex lock(value);
+          SpinMutexLock lock(*(reinterpret_cast<SpinMutex *>(value)));
           for (unsigned int k = 0; k < handles_.size(); ++k) {
               local.clear();
               if (argument_ids[k].size()) {
@@ -2170,7 +2170,7 @@ bool FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys
               continuing = true;
               break;
             } else {
-              SpinMutex lock(value);
+              SpinMutexLock lock(*(reinterpret_cast<SpinMutex *>(value)));
               for (unsigned int k = 0; k < handles_.size(); ++k) {
                   local.clear();
                   if (argument_ids[k].size()) {
@@ -2201,7 +2201,7 @@ bool FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys
         if (value == nullptr) {
           return false;
         } else {
-          SpinMutex lock(value);
+          SpinMutexLock lock(*(reinterpret_cast<SpinMutex *>(value)));
           for (unsigned int k = 0; k < handles_.size(); ++k) {
               local.clear();
               if (argument_ids[k].size()) {

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/e7e6bafc/threading/SpinMutex.hpp
----------------------------------------------------------------------
diff --git a/threading/SpinMutex.hpp b/threading/SpinMutex.hpp
index 106ef13..5ed1405 100644
--- a/threading/SpinMutex.hpp
+++ b/threading/SpinMutex.hpp
@@ -44,8 +44,6 @@ class SpinMutex {
   SpinMutex() : locked_(false) {
   }
 
-  explicit SpinMutex(uint8_t *ptr): locked_(*ptr) {}
-
   /**
    * @note This call does NOT yield when contended. SpinMutex is intended
    *       mainly for cases where locks are held briefly and it is better to