Posted to commits@quickstep.apache.org by hb...@apache.org on 2016/09/06 20:16:24 UTC

[47/73] [abbrv] incubator-quickstep git commit: Fixed signed-unsigned comparison failure. Minor code cleanup.

Fixed signed-unsigned comparison failure. Minor code cleanup.


Project: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/commit/e269e031
Tree: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/tree/e269e031
Diff: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/diff/e269e031

Branch: refs/heads/partitioned-aggregation
Commit: e269e031ba1cc325c588a240aed31c776ee2f22f
Parents: 2518a72
Author: rathijit <ra...@node-2.aggregation.quickstep-pg0.wisc.cloudlab.us>
Authored: Sun Aug 14 16:14:36 2016 -0500
Committer: Harshad Deshmukh <hb...@apache.org>
Committed: Tue Sep 6 10:39:59 2016 -0500

----------------------------------------------------------------------
 storage/AggregationOperationState.cpp | 36 ++----------------------------
 storage/FastHashTable.hpp             | 20 +++++------------
 2 files changed, 8 insertions(+), 48 deletions(-)
----------------------------------------------------------------------
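A note on the failure this commit fixes: comparing a signed loop index against std::vector::size() (which returns the unsigned std::size_t) triggers -Wsign-compare, and under -Werror that warning fails the build. A minimal sketch of the problem and the fix, outside the Quickstep sources:

    #include <cstdio>
    #include <vector>

    int main() {
      const std::vector<int> handles = {1, 2, 3};

      // Before: 'int k' compared with handles.size() (std::size_t) raises
      // -Wsign-compare, which becomes a hard error under -Werror.
      // for (int k = 0; k < handles.size(); ++k) { ... }

      // After: an unsigned index keeps the comparison unsigned-vs-unsigned.
      // std::size_t matches size() exactly; 'unsigned int', as used in the
      // diff below, also silences the warning on common platforms.
      for (unsigned int k = 0; k < handles.size(); ++k) {
        std::printf("%d\n", handles[k]);
      }
      return 0;
    }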


http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/e269e031/storage/AggregationOperationState.cpp
----------------------------------------------------------------------
diff --git a/storage/AggregationOperationState.cpp b/storage/AggregationOperationState.cpp
index 833b707..90b8fcc 100644
--- a/storage/AggregationOperationState.cpp
+++ b/storage/AggregationOperationState.cpp
@@ -94,13 +94,6 @@ AggregationOperationState::AggregationOperationState(
     handles_.emplace_back(new AggregationHandleDistinct());
     arguments_.push_back({});
     is_distinct_.emplace_back(false);
-
- /*   group_by_hashtable_pools_.emplace_back(std::unique_ptr<HashTablePool>(
-        new HashTablePool(estimated_num_entries,
-                          hash_table_impl_type,
-                          group_by_types,
-                          handles_.back().get(),
-                          storage_manager)));*/
     group_by_hashtable_pools_.emplace_back(std::unique_ptr<HashTablePool>(
         new HashTablePool(estimated_num_entries,
                           hash_table_impl_type,
@@ -136,19 +129,12 @@ AggregationOperationState::AggregationOperationState(
       handles_.emplace_back((*agg_func_it)->createHandle(argument_types));
 
       if (!group_by_list_.empty()) {
-        // Aggregation with GROUP BY: create a HashTable pool for per-group states.
- /*       group_by_hashtable_pools_.emplace_back(std::unique_ptr<HashTablePool>(
-            new HashTablePool(estimated_num_entries,
-                              hash_table_impl_type,
-                              group_by_types,
-                              handles_.back().get(),
-                              storage_manager)));*/
+        // Aggregation with GROUP BY: combined payload is partially updated in the presence of DISTINCT.
          if (*is_distinct_it) {
             handles_.back()->BlockUpdate();
          }
          group_by_handles.emplace_back(handles_.back());
          payload_sizes.emplace_back(group_by_handles.back()->getPayloadSize());
-
       } else {
         // Aggregation without GROUP BY: create a single global state.
         single_states_.emplace_back(handles_.back()->createInitialState());
@@ -183,23 +169,13 @@ AggregationOperationState::AggregationOperationState(
         // the number of entries in the distinctify hash table. We may estimate
         // for each distinct aggregation an estimated_num_distinct_keys value during
         // query optimization, if it worths.
- /*       distinctify_hashtables_.emplace_back(
-            handles_.back()->createDistinctifyHashTable(
-                *distinctify_hash_table_impl_types_it,
-                key_types,
-                estimated_num_entries,
-                storage_manager));*/
-
-        std::vector<AggregationHandle *> local;
-        // local.emplace_back(handles_.back());
-        local.clear();
         distinctify_hashtables_.emplace_back(
         AggregationStateFastHashTableFactory::CreateResizable(
                 *distinctify_hash_table_impl_types_it,
                 key_types,
                 estimated_num_entries,
                 {0},
-                local,
+                {},
                 storage_manager));
         ++distinctify_hash_table_impl_types_it;
       } else {
@@ -455,13 +431,6 @@ void AggregationOperationState::aggregateBlockHashTable(const block_id input_blo
   DCHECK(group_by_hashtable_pools_[0] != nullptr);
   AggregationStateHashTableBase *agg_hash_table = group_by_hashtable_pools_[0]->getHashTableFast();
   DCHECK(agg_hash_table != nullptr);
- /*     block->aggregateGroupBy(*handles_[agg_idx],
-                              arguments_[agg_idx],
-                              group_by_list_,
-                              predicate_.get(),
-                              agg_hash_table,
-                              &reuse_matches,
-                              &reuse_group_by_vectors);*/
   block->aggregateGroupByFast(arguments_,
                               group_by_list_,
                               predicate_.get(),
@@ -507,7 +476,6 @@ void AggregationOperationState::finalizeHashTable(InsertDestination *output_dest
 
   // TODO(harshad) - Find heuristics for faster merge, even in a single thread.
   // e.g. Keep merging entries from smaller hash tables to larger.
-//  auto *hash_tables = group_by_hashtable_pools_[0]->getAllHashTables();
 
   auto *hash_tables = group_by_hashtable_pools_[0]->getAllHashTables();
   for (std::size_t agg_idx = 0; agg_idx < handles_.size(); ++agg_idx) {
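
In the distinctify hash table hunk above, a throwaway 'local' vector (declared, never filled, cleared, then passed) is replaced by a brace-initialized empty argument. A small sketch of why '{}' is the equivalent, cleaner call; the factory signature here is an assumed stand-in, not the real CreateResizable declaration:

    #include <cassert>
    #include <vector>

    struct AggregationHandle;  // opaque stand-in for the real handle class

    // Hypothetical factory parameter mirroring the handle-list argument.
    void createTable(const std::vector<AggregationHandle *> &handles) {
      assert(handles.empty());  // both call styles below arrive here empty
    }

    int main() {
      // Before: a named scratch vector that never holds anything.
      std::vector<AggregationHandle *> local;
      local.clear();
      createTable(local);

      // After: '{}' materializes a temporary empty vector for the
      // const-reference parameter; same effect, no scratch variable.
      createTable({});
      return 0;
    }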

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/e269e031/storage/FastHashTable.hpp
----------------------------------------------------------------------
diff --git a/storage/FastHashTable.hpp b/storage/FastHashTable.hpp
index e7887ab..8d8d82b 100644
--- a/storage/FastHashTable.hpp
+++ b/storage/FastHashTable.hpp
@@ -1901,7 +1901,7 @@ bool FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys
         uint8_t *value = upsertCompositeKeyInternalFast(key, init_value_ptr, variable_size);
         if (value != nullptr) {
             SpinMutex lock(value);
-            for (int k = 0; k < handles_.size(); ++k) {
+            for (unsigned int k = 0; k < handles_.size(); ++k) {
                 handles_[k]->mergeStatesFast(source_state + payload_offsets_[k], value + payload_offsets_[k]);
             }
           return true;
@@ -1915,7 +1915,7 @@ bool FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys
       return false;
     } else {
       SpinMutex lock(value);
-      for (int k = 0; k < handles_.size(); ++k) {
+      for (unsigned int k = 0; k < handles_.size(); ++k) {
           handles_[k]->mergeStatesFast(source_state + payload_offsets_[k], value + payload_offsets_[k]);
       }
       return true;
@@ -2018,10 +2018,8 @@ bool FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys
               break;
             } else {
               SpinMutex lock(value);
-              for (int k = 0; k < handles_.size(); ++k) {
+              for (unsigned int k = 0; k < handles_.size(); ++k) {
                   local.clear();
-//                std::for_each(argument_ids[k].begin(),argument_ids[k].end(),[&](attribute_id id)
-//                {local.emplace_back(accessor->getTypedValue(id));});
                   if (argument_ids[k].size()) {
                     local.emplace_back(accessor->getTypedValue(argument_ids[k].front()));
                   }
@@ -2047,10 +2045,8 @@ bool FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys
           return false;
         } else {
           SpinMutex lock(value);
-          for (int k = 0; k < handles_.size(); ++k) {
+          for (unsigned int k = 0; k < handles_.size(); ++k) {
               local.clear();
-//            std::for_each(argument_ids[k].begin(),argument_ids[k].end(),[&](attribute_id id)
-//            {local.emplace_back(accessor->getTypedValue(id));});
               if (argument_ids[k].size()) {
                  local.emplace_back(accessor->getTypedValue(argument_ids[k].front()));
               }
@@ -2175,10 +2171,8 @@ bool FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys
               break;
             } else {
               SpinMutex lock(value);
-              for (int k = 0; k < handles_.size(); ++k) {
+              for (unsigned int k = 0; k < handles_.size(); ++k) {
                   local.clear();
-//                std::for_each(argument_ids[k].begin(),argument_ids[k].end(),[&](attribute_id id)
-//                {local.emplace_back(accessor->getTypedValue(id));});
                   if (argument_ids[k].size()) {
                       local.emplace_back(accessor->getTypedValue(argument_ids[k].front()));
                   }
@@ -2208,10 +2202,8 @@ bool FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys
           return false;
         } else {
           SpinMutex lock(value);
-          for (int k = 0; k < handles_.size(); ++k) {
+          for (unsigned int k = 0; k < handles_.size(); ++k) {
               local.clear();
-//            std::for_each(argument_ids[k].begin(),argument_ids[k].end(),[&](attribute_id id)
-//            {local.emplace_back(accessor->getTypedValue(id));});
               if (argument_ids[k].size()) {
                  local.emplace_back(accessor->getTypedValue(argument_ids[k].front()));
               }
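
All of the FastHashTable.hpp hunks touch the same per-handle loop: each aggregate's partial state occupies a fixed slice of one combined payload, and the loop walks the handles, updating or merging at that handle's offset. A self-contained sketch of that layout with an unsigned index; the types and the mergeStatesFast signature are simplified stand-ins, not the exact Quickstep declarations:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Simplified handle: knows the size of its own state and how to merge a
    // source slot into a destination slot.
    struct FakeHandle {
      std::size_t state_size;
      void mergeStatesFast(const std::uint8_t *src, std::uint8_t *dst) const {
        for (std::size_t i = 0; i < state_size; ++i) {
          dst[i] += src[i];  // toy "merge": byte-wise sum
        }
      }
    };

    // Merge every handle's slice of 'source' into 'dest' via per-handle offsets.
    void mergePayload(const std::vector<FakeHandle> &handles,
                      const std::vector<std::size_t> &payload_offsets,
                      const std::uint8_t *source,
                      std::uint8_t *dest) {
      // std::size_t (or unsigned int, as in this commit) keeps the comparison
      // with handles.size() unsigned-vs-unsigned, so -Wsign-compare stays quiet.
      for (std::size_t k = 0; k < handles.size(); ++k) {
        handles[k].mergeStatesFast(source + payload_offsets[k],
                                   dest + payload_offsets[k]);
      }
    }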