Posted to commits@quickstep.apache.org by ji...@apache.org on 2017/01/10 15:14:19 UTC

[2/2] incubator-quickstep git commit: Updates

Updates


Project: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/commit/3a74a9fa
Tree: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/tree/3a74a9fa
Diff: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/diff/3a74a9fa

Branch: refs/heads/output-attr-order
Commit: 3a74a9fa61d913af7ed95a9b53f4f9486d17be5b
Parents: f8089de
Author: Jianqiao Zhu <ji...@cs.wisc.edu>
Authored: Tue Jan 10 09:13:40 2017 -0600
Committer: Jianqiao Zhu <ji...@cs.wisc.edu>
Committed: Tue Jan 10 09:13:40 2017 -0600

----------------------------------------------------------------------
 query_optimizer/rules/ReorderColumns.cpp  |   2 +-
 relational_operators/HashJoinOperator.cpp | 443 +++++++++----------------
 storage/StorageBlock.cpp                  | 110 +++---
 3 files changed, 227 insertions(+), 328 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/3a74a9fa/query_optimizer/rules/ReorderColumns.cpp
----------------------------------------------------------------------
diff --git a/query_optimizer/rules/ReorderColumns.cpp b/query_optimizer/rules/ReorderColumns.cpp
index c4f2aa1..8f139eb 100644
--- a/query_optimizer/rules/ReorderColumns.cpp
+++ b/query_optimizer/rules/ReorderColumns.cpp
@@ -137,7 +137,7 @@ P::PhysicalPtr ReorderColumns::applyInternal(const P::PhysicalPtr &input) {
     return lhs_id < rhs_id;
   };
 
-  P::PhysicalPtr output = nodes.front()->children().front();
+  P::PhysicalPtr output = applyInternal(nodes.front()->children().front());
 
   for (const auto &node : nodes) {
     std::vector<E::NamedExpressionPtr> project_expressions;

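Note on the ReorderColumns hunk: the one-line fix above makes applyInternal() recurse into the plan subtree below the collected chain of nodes, rather than returning that subtree untouched, so column reordering is also applied to nested chains further down the plan. A minimal sketch of the recursive-rule pattern follows; Plan and PlanPtr are illustrative stand-ins for this sketch, not Quickstep's actual P::Physical classes.

#include <memory>
#include <vector>

// Illustrative stand-ins for the physical-plan types (assumptions for the
// sketch, not the real optimizer interface).
struct Plan;
using PlanPtr = std::shared_ptr<const Plan>;

struct Plan {
  std::vector<PlanPtr> children;
  bool reorderable = false;  // whether this node heads a reorderable chain
};

// Recursive rule application: when a reorderable node is found, the rule
// must also descend into the subtree *below* it before rebuilding;
// returning the child unmodified is exactly the bug the hunk above fixes.
PlanPtr applyInternal(const PlanPtr &input) {
  if (input->reorderable && !input->children.empty()) {
    // The fix: recurse below the chain instead of taking the child as-is.
    PlanPtr output = applyInternal(input->children.front());
    auto rebuilt = std::make_shared<Plan>(*input);
    rebuilt->children = {output};
    return rebuilt;
  }
  // Not a chain head: recurse into all children.
  auto copy = std::make_shared<Plan>(*input);
  for (PlanPtr &child : copy->children) {
    child = applyInternal(child);
  }
  return copy;
}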
http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/3a74a9fa/relational_operators/HashJoinOperator.cpp
----------------------------------------------------------------------
diff --git a/relational_operators/HashJoinOperator.cpp b/relational_operators/HashJoinOperator.cpp
index cd1dc46..8ebb057 100644
--- a/relational_operators/HashJoinOperator.cpp
+++ b/relational_operators/HashJoinOperator.cpp
@@ -231,8 +231,7 @@ bool HashJoinOperator::getAllNonOuterJoinWorkOrders(
                                      hash_table,
                                      output_destination,
                                      storage_manager,
-                                     CreateLIPFilterAdaptiveProberHelper(lip_deployment_index_, query_context),
-                                     op_index_),
+                                     CreateLIPFilterAdaptiveProberHelper(lip_deployment_index_, query_context)),
               op_index_);
         }
         started_ = true;
@@ -253,8 +252,7 @@ bool HashJoinOperator::getAllNonOuterJoinWorkOrders(
                 hash_table,
                 output_destination,
                 storage_manager,
-                CreateLIPFilterAdaptiveProberHelper(lip_deployment_index_, query_context),
-                op_index_),
+                CreateLIPFilterAdaptiveProberHelper(lip_deployment_index_, query_context)),
             op_index_);
         ++num_workorders_generated_;
       }  // end while
@@ -453,19 +451,11 @@ serialization::WorkOrder* HashJoinOperator::createOuterJoinWorkOrderProto(const
 
 
 void HashInnerJoinWorkOrder::execute() {
-  auto *container = simple_profiler.getContainer();
-  auto *all_line = container->getEventLine("all");
-  all_line->emplace_back();
-
-  auto *lip_line = container->getEventLine("lip_probe");
-  auto *hash_line = container->getEventLine("hash_probe");
-
   BlockReference probe_block(
       storage_manager_->getBlock(block_id_, probe_relation_));
   const TupleStorageSubBlock &probe_store = probe_block->getTupleStorageSubBlock();
   std::unique_ptr<ValueAccessor> probe_accessor(probe_store.createValueAccessor());
 
-  lip_line->emplace_back();
   // Probe the LIPFilters to generate an existence bitmap for probe_accessor, if enabled.
   std::unique_ptr<TupleIdSequence> existence_map;
   std::unique_ptr<ValueAccessor> base_accessor;
@@ -476,28 +466,7 @@ void HashInnerJoinWorkOrder::execute() {
     probe_accessor.reset(
         base_accessor->createSharedTupleIdSequenceAdapterVirtual(*existence_map));
   }
-  lip_line->back().endEvent();
-  lip_line->back().setPayload(op_index_ + 0);
-
-//  hash_line->emplace_back();
-//  PairsOfVectorsJoinedTuplesCollector collector;
-//  if (join_key_attributes_.size() == 1) {
-//    hash_table_.getAllFromValueAccessor(
-//        probe_accessor.get(),
-//        join_key_attributes_.front(),
-//        any_join_key_attributes_nullable_,
-//        &collector);
-//  } else {
-//    hash_table_.getAllFromValueAccessorCompositeKey(
-//        probe_accessor.get(),
-//        join_key_attributes_,
-//        any_join_key_attributes_nullable_,
-//        &collector);
-//  }
-//  hash_line->back().endEvent();
-//  hash_line->back().setPayload(op_index_ + 0);
-
-  hash_line->emplace_back();
+
   VectorsOfPairsJoinedTuplesCollector collector;
   if (join_key_attributes_.size() == 1) {
     hash_table_.getAllFromValueAccessor(
@@ -512,255 +481,163 @@ void HashInnerJoinWorkOrder::execute() {
         any_join_key_attributes_nullable_,
         &collector);
   }
-  hash_line->back().endEvent();
-  hash_line->back().setPayload(op_index_ + 0);
-
-  (void)selection_;
-  (void)build_relation_;
-  (void)residual_predicate_;
-  (void)output_destination_;
-
-//if (op_index_ == 128) {
-//
-//  const relation_id build_relation_id = build_relation_.getID();
-//  const relation_id probe_relation_id = probe_relation_.getID();
-//
-//  std::map<attribute_id, attribute_id> build_attribute_map;
-//  std::map<attribute_id, attribute_id> probe_attribute_map;
-//  std::map<attribute_id, attribute_id> non_trivial_attribute_map;
-//  std::vector<const Scalar *> non_trivial_expressions;
-//  for (std::size_t i = 0; i < selection_.size(); ++i) {
-//    const Scalar *scalar = selection_[i].get();
-//    if (scalar->getDataSource() == Scalar::ScalarDataSource::kAttribute) {
-//      const ScalarAttribute *scalar_attr =
-//          static_cast<const ScalarAttribute *>(scalar);
-//      const relation_id scalar_attr_relation_id =
-//          scalar_attr->getRelationIdForValueAccessor();
-//      const attribute_id scalar_attr_id =
-//          scalar_attr->getAttributeIdForValueAccessor();
-//
-//      if (scalar_attr_relation_id == build_relation_id) {
-//        build_attribute_map.emplace(scalar_attr_id, i);
-//      } else {
-//        DCHECK_EQ(probe_relation_id, scalar_attr->getRelationIdForValueAccessor());
-//        probe_attribute_map.emplace(scalar_attr_id, i);
-//      }
-//    } else {
-//      non_trivial_attribute_map.emplace(non_trivial_expressions.size(), i);
-//      non_trivial_expressions.emplace_back(scalar);
-//    }
-//  }
-//
-//  std::unique_ptr<InsertContext> insert_context(
-//      new InsertContext(output_destination_->getRelation()));
-//  insert_context->addSource(build_attribute_map);
-//  insert_context->addSource(probe_attribute_map);
-//  insert_context->addSource(non_trivial_attribute_map);
-//
-//  auto *overall_line = container->getEventLine("overall");
-//  auto *bulk_insert_line = container->getEventLine("bulk_insert");
-//  overall_line->emplace_back();
-////  MutableBlockReference output_block;
-//  for (std::pair<const block_id, PairOfVectors>
-//           &build_block_entry : *collector.getJoinedTuples()) {
-//    BlockReference build_block =
-//        storage_manager_->getBlock(build_block_entry.first, build_relation_);
-//    const TupleStorageSubBlock &build_store = build_block->getTupleStorageSubBlock();
-//    std::unique_ptr<ValueAccessor> build_accessor(build_store.createValueAccessor());
-//
-//    const std::vector<tuple_id> &build_tids = build_block_entry.second.first;
-//    const std::vector<tuple_id> &probe_tids = build_block_entry.second.second;
-//
-//    // Evaluate '*residual_predicate_', if any.
-//    //
-//    // TODO(chasseur): We might consider implementing true vectorized
-//    // evaluation for join predicates that are not equijoins (although in
-//    // general that would require evaluating and materializing some expressions
-//    // over the cross-product of all tuples in a pair of blocks in order to
-//    // evaluate the predicate). We could use a heuristic where we only do the
-//    // vectorized materialization and evaluation if the set of matches from the
-//    // hash join is below a reasonable threshold so that we don't blow up
-//    // temporary memory requirements to an unreasonable degree.
-//    if (residual_predicate_ != nullptr) {
-//      PairOfVectors filtered_matches;
-//
-//      for (std::size_t i = 0; i < build_tids.size(); ++i) {
-//        const tuple_id build_tid = build_tids[i];
-//        const tuple_id probe_tid = probe_tids[i];
-//        if (residual_predicate_->matchesForJoinedTuples(*build_accessor,
-//                                                        build_relation_id,
-//                                                        build_tid,
-//                                                        *probe_accessor,
-//                                                        probe_relation_id,
-//                                                        probe_tid)) {
-//          filtered_matches.first.emplace_back(build_tid);
-//          filtered_matches.second.emplace_back(probe_tid);
-//        }
-//      }
-//
-//      build_block_entry.second = std::move(filtered_matches);
-//    }
-//
-//    // TODO(chasseur): If all the output expressions are ScalarAttributes,
-//    // we could implement a similar fast-path to StorageBlock::selectSimple()
-//    // that avoids a copy.
-//    //
-//    // TODO(chasseur): See TODO in NestedLoopsJoinOperator.cpp about limiting
-//    // the size of materialized temporary results. In common usage, this
-//    // probably won't be an issue for hash-joins, but in the worst case a hash
-//    // join can still devolve into a cross-product.
-//    //
-//    // NOTE(chasseur): We could also create one big ColumnVectorsValueAccessor
-//    // and accumulate all the results across multiple block pairs into it
-//    // before inserting anything into output blocks, but this would require
-//    // some significant API extensions to the expressions system for a dubious
-//    // benefit (probably only a real performance win when there are very few
-//    // matching tuples in each individual inner block but very many inner
-//    // blocks with at least one match).
-//    ColumnVectorsValueAccessor temp_result;
-//    if (non_trivial_expressions.size() > 0) {
-//      VectorOfPairs zipped_joined_tuple_ids;
-//      zipped_joined_tuple_ids.reserve(build_tids.size());
-//      for (std::size_t i = 0; i < build_tids.size(); ++i) {
-//        zipped_joined_tuple_ids.emplace_back(build_tids[i], probe_tids[i]);
-//      }
-//
-//      for (auto selection_cit = non_trivial_expressions.begin();
-//           selection_cit != non_trivial_expressions.end();
-//           ++selection_cit) {
-//        temp_result.addColumn((*selection_cit)->getAllValuesForJoin(build_relation_id,
-//                                                                    build_accessor.get(),
-//                                                                    probe_relation_id,
-//                                                                    probe_accessor.get(),
-//                                                                    zipped_joined_tuple_ids));
-//      }
-//    }
-//
-//    std::unique_ptr<ValueAccessor> ordered_build_accessor(
-//        build_accessor->createSharedOrderedTupleIdSequenceAdapterVirtual(build_tids));
-//    std::unique_ptr<ValueAccessor> ordered_probe_accessor(
-//        probe_accessor->createSharedOrderedTupleIdSequenceAdapterVirtual(probe_tids));
-//
-//    bulk_insert_line->emplace_back();
-////    output_destination_->bulkInsertTuples(
-////        { ordered_build_accessor.get(), ordered_probe_accessor.get(), &temp_result },
-////        insert_context.get(),
-////        &output_block);
-//    output_destination_->bulkInsertTuples(
-//        { ordered_build_accessor.get(), ordered_probe_accessor.get(), &temp_result },
-//        insert_context.get());
-//    bulk_insert_line->back().endEvent();
-//    bulk_insert_line->back().setPayload(op_index_ + 0);
-//  }
-//
-////  output_destination_->returnBlock(&output_block);
-//  overall_line->back().endEvent();
-//  overall_line->back().setPayload(op_index_ + 0);
-//}
-
-//  hash_line->emplace_back();
-//  VectorsOfPairsJoinedTuplesCollector collector;
-//  if (join_key_attributes_.size() == 1) {
-//    hash_table_.getAllFromValueAccessor(
-//        probe_accessor.get(),
-//        join_key_attributes_.front(),
-//        any_join_key_attributes_nullable_,
-//        &collector);
-//  } else {
-//    hash_table_.getAllFromValueAccessorCompositeKey(
-//        probe_accessor.get(),
-//        join_key_attributes_,
-//        any_join_key_attributes_nullable_,
-//        &collector);
-//  }
-//  hash_line->back().endEvent();
-//  hash_line->back().setPayload(op_index_ + 0);
-//
-//if (op_index_ == 128) {
-//
-//  const relation_id build_relation_id = build_relation_.getID();
-//  const relation_id probe_relation_id = probe_relation_.getID();
-//
-//  auto *overall_line = container->getEventLine("overall");
-//  auto *bulk_insert_line = container->getEventLine("bulk_insert");
-//  overall_line->emplace_back();
-//  for (std::pair<const block_id, std::vector<std::pair<tuple_id, tuple_id>>>
-//           &build_block_entry : *collector.getJoinedTuples()) {
-//    BlockReference build_block =
-//        storage_manager_->getBlock(build_block_entry.first, build_relation_);
-//    const TupleStorageSubBlock &build_store = build_block->getTupleStorageSubBlock();
-//    std::unique_ptr<ValueAccessor> build_accessor(build_store.createValueAccessor());
-//
-//    // Evaluate '*residual_predicate_', if any.
-//    //
-//    // TODO(chasseur): We might consider implementing true vectorized
-//    // evaluation for join predicates that are not equijoins (although in
-//    // general that would require evaluating and materializing some expressions
-//    // over the cross-product of all tuples in a pair of blocks in order to
-//    // evaluate the predicate). We could use a heuristic where we only do the
-//    // vectorized materialization and evaluation if the set of matches from the
-//    // hash join is below a reasonable threshold so that we don't blow up
-//    // temporary memory requirements to an unreasonable degree.
-//    if (residual_predicate_ != nullptr) {
-//      std::vector<std::pair<tuple_id, tuple_id>> filtered_matches;
-//
-//      for (const std::pair<tuple_id, tuple_id> &hash_match
-//           : build_block_entry.second) {
-//        if (residual_predicate_->matchesForJoinedTuples(*build_accessor,
-//                                                        build_relation_id,
-//                                                        hash_match.first,
-//                                                        *probe_accessor,
-//                                                        probe_relation_id,
-//                                                        hash_match.second)) {
-//          filtered_matches.emplace_back(hash_match);
-//        }
-//      }
-//
-//      build_block_entry.second = std::move(filtered_matches);
-//    }
-//
-//    // TODO(chasseur): If all the output expressions are ScalarAttributes,
-//    // we could implement a similar fast-path to StorageBlock::selectSimple()
-//    // that avoids a copy.
-//    //
-//    // TODO(chasseur): See TODO in NestedLoopsJoinOperator.cpp about limiting
-//    // the size of materialized temporary results. In common usage, this
-//    // probably won't be an issue for hash-joins, but in the worst case a hash
-//    // join can still devolve into a cross-product.
-//    //
-//    // NOTE(chasseur): We could also create one big ColumnVectorsValueAccessor
-//    // and accumulate all the results across multiple block pairs into it
-//    // before inserting anything into output blocks, but this would require
-//    // some significant API extensions to the expressions system for a dubious
-//    // benefit (probably only a real performance win when there are very few
-//    // matching tuples in each individual inner block but very many inner
-//    // blocks with at least one match).
-//    ColumnVectorsValueAccessor temp_result;
-//    for (vector<unique_ptr<const Scalar>>::const_iterator selection_cit = selection_.begin();
-//         selection_cit != selection_.end();
-//         ++selection_cit) {
-//      temp_result.addColumn((*selection_cit)->getAllValuesForJoin(build_relation_id,
-//                                                                  build_accessor.get(),
-//                                                                  probe_relation_id,
-//                                                                  probe_accessor.get(),
-//                                                                  build_block_entry.second));
-//    }
-//
-//    // NOTE(chasseur): calling the bulk-insert method of InsertDestination once
-//    // for each pair of joined blocks incurs some extra overhead that could be
-//    // avoided by keeping checked-out MutableBlockReferences across iterations
-//    // of this loop, but that would get messy when combined with partitioning.
-//    bulk_insert_line->emplace_back();
-//    output_destination_->bulkInsertTuples(&temp_result);
-//    bulk_insert_line->back().endEvent();
-//    bulk_insert_line->back().setPayload(op_index_ + 0);
-//  }
-//  overall_line->back().endEvent();
-//  overall_line->back().setPayload(op_index_ + 0);
-//}
-
-  all_line->back().endEvent();
-  all_line->back().setPayload(op_index_ + 0);
+
+  const relation_id build_relation_id = build_relation_.getID();
+  const relation_id probe_relation_id = probe_relation_.getID();
+
+  if (probe_accessor->getImplementationType() == ValueAccessor::Implementation::kSplitRowStore ||
+      probe_accessor->getImplementationType() == ValueAccessor::Implementation::kPackedRowStore) {
+    std::map<attribute_id, attribute_id> build_attribute_map;
+    std::map<attribute_id, attribute_id> probe_attribute_map;
+    std::map<attribute_id, attribute_id> non_trivial_attribute_map;
+    std::vector<const Scalar *> non_trivial_expressions;
+    for (std::size_t i = 0; i < selection_.size(); ++i) {
+      const Scalar *scalar = selection_[i].get();
+      if (scalar->getDataSource() == Scalar::ScalarDataSource::kAttribute) {
+        const ScalarAttribute *scalar_attr =
+            static_cast<const ScalarAttribute *>(scalar);
+        const relation_id scalar_attr_relation_id =
+            scalar_attr->getRelationIdForValueAccessor();
+        const attribute_id scalar_attr_id =
+            scalar_attr->getAttributeIdForValueAccessor();
+
+        if (scalar_attr_relation_id == build_relation_id) {
+          build_attribute_map.emplace(scalar_attr_id, i);
+        } else {
+          DCHECK_EQ(probe_relation_id, scalar_attr->getRelationIdForValueAccessor());
+          probe_attribute_map.emplace(scalar_attr_id, i);
+        }
+      } else {
+        non_trivial_attribute_map.emplace(non_trivial_expressions.size(), i);
+        non_trivial_expressions.emplace_back(scalar);
+      }
+    }
+
+    std::unique_ptr<InsertContext> insert_context(
+        new InsertContext(output_destination_->getRelation()));
+    insert_context->addSource(build_attribute_map);
+    insert_context->addSource(probe_attribute_map);
+    insert_context->addSource(non_trivial_attribute_map);
+
+    std::vector<tuple_id> build_tids;
+    std::vector<tuple_id> probe_tids;
+
+    MutableBlockReference output_block;
+    for (std::pair<const block_id, VectorOfPairs>
+             &build_block_entry : *collector.getJoinedTuples()) {
+      BlockReference build_block =
+          storage_manager_->getBlock(build_block_entry.first, build_relation_);
+      const TupleStorageSubBlock &build_store = build_block->getTupleStorageSubBlock();
+      std::unique_ptr<ValueAccessor> build_accessor(build_store.createValueAccessor());
+
+      // Evaluate '*residual_predicate_', if any.
+      //
+      // TODO(chasseur): We might consider implementing true vectorized
+      // evaluation for join predicates that are not equijoins (although in
+      // general that would require evaluating and materializing some expressions
+      // over the cross-product of all tuples in a pair of blocks in order to
+      // evaluate the predicate). We could use a heuristic where we only do the
+      // vectorized materialization and evaluation if the set of matches from the
+      // hash join is below a reasonable threshold so that we don't blow up
+      // temporary memory requirements to an unreasonable degree.
+      if (residual_predicate_ != nullptr) {
+        VectorOfPairs filtered_matches;
+
+        for (const std::pair<tuple_id, tuple_id> &hash_match
+             : build_block_entry.second) {
+          if (residual_predicate_->matchesForJoinedTuples(*build_accessor,
+                                                          build_relation_id,
+                                                          hash_match.first,
+                                                          *probe_accessor,
+                                                          probe_relation_id,
+                                                          hash_match.second)) {
+            filtered_matches.emplace_back(hash_match);
+          }
+        }
+
+        build_block_entry.second = std::move(filtered_matches);
+      }
+
+      ColumnVectorsValueAccessor temp_result;
+      for (auto selection_cit = non_trivial_expressions.begin();
+           selection_cit != non_trivial_expressions.end();
+           ++selection_cit) {
+        temp_result.addColumn((*selection_cit)->getAllValuesForJoin(build_relation_id,
+                                                                    build_accessor.get(),
+                                                                    probe_relation_id,
+                                                                    probe_accessor.get(),
+                                                                    build_block_entry.second));
+      }
+
+      build_tids.clear();
+      probe_tids.clear();
+      for (const auto &hash_match : build_block_entry.second) {
+        build_tids.emplace_back(hash_match.first);
+        probe_tids.emplace_back(hash_match.second);
+      }
+
+      std::unique_ptr<ValueAccessor> ordered_build_accessor(
+          build_accessor->createSharedOrderedTupleIdSequenceAdapterVirtual(build_tids));
+      std::unique_ptr<ValueAccessor> ordered_probe_accessor(
+          probe_accessor->createSharedOrderedTupleIdSequenceAdapterVirtual(probe_tids));
+
+      output_destination_->bulkInsertTuples(
+          { ordered_build_accessor.get(), ordered_probe_accessor.get(), &temp_result },
+          insert_context.get(),
+          &output_block);
+    }
+
+    output_destination_->returnBlock(&output_block);
+  } else {
+    for (std::pair<const block_id, VectorOfPairs>
+             &build_block_entry : *collector.getJoinedTuples()) {
+      BlockReference build_block =
+          storage_manager_->getBlock(build_block_entry.first, build_relation_);
+      const TupleStorageSubBlock &build_store = build_block->getTupleStorageSubBlock();
+      std::unique_ptr<ValueAccessor> build_accessor(build_store.createValueAccessor());
+
+      // Evaluate '*residual_predicate_', if any.
+      //
+      // TODO(chasseur): We might consider implementing true vectorized
+      // evaluation for join predicates that are not equijoins (although in
+      // general that would require evaluating and materializing some expressions
+      // over the cross-product of all tuples in a pair of blocks in order to
+      // evaluate the predicate). We could use a heuristic where we only do the
+      // vectorized materialization and evaluation if the set of matches from the
+      // hash join is below a reasonable threshold so that we don't blow up
+      // temporary memory requirements to an unreasonable degree.
+      if (residual_predicate_ != nullptr) {
+        VectorOfPairs filtered_matches;
+
+        for (const std::pair<tuple_id, tuple_id> &hash_match
+             : build_block_entry.second) {
+          if (residual_predicate_->matchesForJoinedTuples(*build_accessor,
+                                                          build_relation_id,
+                                                          hash_match.first,
+                                                          *probe_accessor,
+                                                          probe_relation_id,
+                                                          hash_match.second)) {
+            filtered_matches.emplace_back(hash_match);
+          }
+        }
+
+        build_block_entry.second = std::move(filtered_matches);
+      }
+
+      ColumnVectorsValueAccessor temp_result;
+      for (auto selection_cit = selection_.begin();
+           selection_cit != selection_.end();
+           ++selection_cit) {
+        temp_result.addColumn((*selection_cit)->getAllValuesForJoin(build_relation_id,
+                                                                    build_accessor.get(),
+                                                                    probe_relation_id,
+                                                                    probe_accessor.get(),
+                                                                    build_block_entry.second));
+      }
+
+      output_destination_->bulkInsertTuples(&temp_result);
+    }
+  }
 }
 
 void HashSemiJoinWorkOrder::execute() {

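Note on the HashJoinOperator hunks: the first two hunks drop the extra op_index_ constructor argument (it was only consumed by the removed profiler payloads), the simple_profiler event lines and the large commented-out experiments are deleted, and HashInnerJoinWorkOrder::execute() now dispatches on the probe accessor's implementation type. Row-store accessors (kSplitRowStore, kPackedRowStore) take a fast path that classifies each output column and bulk-inserts straight from the build/probe accessors through an InsertContext, materializing only non-trivial expressions into a ColumnVectorsValueAccessor; every other accessor type falls back to materializing the whole selection first. Below is a minimal sketch of the classification step; Expr and splitSelection are hypothetical stand-ins, while the real code walks Scalar/ScalarAttribute objects.

#include <cstddef>
#include <map>
#include <vector>

using attribute_id = int;

// Stand-in for a selected output expression (an assumption for the sketch).
struct Expr {
  bool is_attribute;     // corresponds to Scalar::ScalarDataSource::kAttribute
  bool from_build_side;  // which input relation the attribute comes from
  attribute_id attr_id;  // source attribute id, if is_attribute
};

// Classify each output column: plain attributes become source-attribute ->
// output-position mappings (later handed to an InsertContext so the bulk
// insert reads them directly from the build or probe accessor), while
// everything else must be evaluated into a temporary column store first.
void splitSelection(const std::vector<Expr> &selection,
                    std::map<attribute_id, attribute_id> *build_map,
                    std::map<attribute_id, attribute_id> *probe_map,
                    std::vector<const Expr *> *non_trivial) {
  for (std::size_t i = 0; i < selection.size(); ++i) {
    const Expr &e = selection[i];
    if (e.is_attribute) {
      (e.from_build_side ? build_map : probe_map)
          ->emplace(e.attr_id, static_cast<attribute_id>(i));
    } else {
      non_trivial->push_back(&e);
    }
  }
}

int main() {
  std::map<attribute_id, attribute_id> build_map, probe_map;
  std::vector<const Expr *> non_trivial;
  const std::vector<Expr> selection = {
      {true, true, 3},     // build.attr3  -> output column 0
      {true, false, 1},    // probe.attr1  -> output column 1
      {false, false, -1},  // computed expression -> output column 2
  };
  splitSelection(selection, &build_map, &probe_map, &non_trivial);
}

The payoff on the fast path is that plain attribute columns are never copied into a temporary result: the InsertContext maps them straight from the source accessors into the output block, and only the non-trivial expressions pay for materialization.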
http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/3a74a9fa/storage/StorageBlock.cpp
----------------------------------------------------------------------
diff --git a/storage/StorageBlock.cpp b/storage/StorageBlock.cpp
index d7dffb8..29616a7 100644
--- a/storage/StorageBlock.cpp
+++ b/storage/StorageBlock.cpp
@@ -361,46 +361,66 @@ void StorageBlock::sample(const bool is_block_sample,
 void StorageBlock::select(const vector<unique_ptr<const Scalar>> &selection,
                           const TupleIdSequence *filter,
                           InsertDestinationInterface *destination) const {
-  std::map<attribute_id, attribute_id> base_attribute_map;
-  std::map<attribute_id, attribute_id> non_trivial_attribute_map;
-  std::vector<const Scalar *> non_trivial_expressions;
-  for (std::size_t i = 0; i < selection.size(); ++i) {
-    const Scalar *scalar = selection[i].get();
-    if (scalar->getDataSource() == Scalar::ScalarDataSource::kAttribute) {
-      const ScalarAttribute *scalar_attr =
-          static_cast<const ScalarAttribute *>(scalar);
-      base_attribute_map.emplace(
-          scalar_attr->getAttributeIdForValueAccessor(), i);
-    } else {
-      non_trivial_attribute_map.emplace(non_trivial_expressions.size(), i);
-      non_trivial_expressions.emplace_back(scalar);
-    }
-  }
-
-  std::unique_ptr<InsertContext> insert_context(
-      new InsertContext(destination->getRelation()));
-  insert_context->addSource(base_attribute_map);
-  insert_context->addSource(non_trivial_attribute_map);
+//  std::map<attribute_id, attribute_id> base_attribute_map;
+//  std::map<attribute_id, attribute_id> non_trivial_attribute_map;
+//  std::vector<const Scalar *> non_trivial_expressions;
+//  for (std::size_t i = 0; i < selection.size(); ++i) {
+//    const Scalar *scalar = selection[i].get();
+//    if (scalar->getDataSource() == Scalar::ScalarDataSource::kAttribute) {
+//      const ScalarAttribute *scalar_attr =
+//          static_cast<const ScalarAttribute *>(scalar);
+//      base_attribute_map.emplace(
+//          scalar_attr->getAttributeIdForValueAccessor(), i);
+//    } else {
+//      non_trivial_attribute_map.emplace(non_trivial_expressions.size(), i);
+//      non_trivial_expressions.emplace_back(scalar);
+//    }
+//  }
+//
+//  std::unique_ptr<InsertContext> insert_context(
+//      new InsertContext(destination->getRelation()));
+//  insert_context->addSource(base_attribute_map);
+//  insert_context->addSource(non_trivial_attribute_map);
+//
+//  ColumnVectorsValueAccessor temp_result;
+//  SubBlocksReference sub_blocks_ref(*tuple_store_,
+//                                    indices_,
+//                                    indices_consistent_);
+//
+//  std::unique_ptr<ValueAccessor> accessor(
+//      tuple_store_->createValueAccessor(filter));
+//
+//  for (auto expr_cit = non_trivial_expressions.begin();
+//       expr_cit != non_trivial_expressions.end();
+//       ++expr_cit) {
+//    temp_result.addColumn((*expr_cit)->getAllValues(accessor.get(), &sub_blocks_ref));
+//  }
+//
+//  // Rewind the base accessor.
+//  accessor->beginIterationVirtual();
+//
+//  destination->bulkInsertTuples({ accessor.get(), &temp_result },
+//                                insert_context.get());
 
   ColumnVectorsValueAccessor temp_result;
-  SubBlocksReference sub_blocks_ref(*tuple_store_,
-                                    indices_,
-                                    indices_consistent_);
+  {
+    SubBlocksReference sub_blocks_ref(*tuple_store_,
+                                      indices_,
+                                      indices_consistent_);
 
-  std::unique_ptr<ValueAccessor> accessor(
-      tuple_store_->createValueAccessor(filter));
+    std::unique_ptr<ValueAccessor> accessor(
+        tuple_store_->createValueAccessor(filter));
 
-  for (auto expr_cit = non_trivial_expressions.begin();
-       expr_cit != non_trivial_expressions.end();
-       ++expr_cit) {
-    temp_result.addColumn((*expr_cit)->getAllValues(accessor.get(), &sub_blocks_ref));
+    for (vector<unique_ptr<const Scalar>>::const_iterator selection_cit = selection.begin();
+         selection_cit != selection.end();
+         ++selection_cit) {
+      // TODO(chasseur): Can probably elide some copies for parts of the
+      // selection that are ScalarAttribute or ScalarLiteral.
+      temp_result.addColumn((*selection_cit)->getAllValues(accessor.get(), &sub_blocks_ref));
+    }
   }
 
-  // Rewind the base accessor.
-  accessor->beginIterationVirtual();
-
-  destination->bulkInsertTuples({ accessor.get(), &temp_result },
-                                insert_context.get());
+  destination->bulkInsertTuples(&temp_result);
 }
 
 void StorageBlock::selectSimple(const std::vector<attribute_id> &selection,
@@ -409,17 +429,19 @@ void StorageBlock::selectSimple(const std::vector<attribute_id> &selection,
   std::unique_ptr<ValueAccessor> accessor(
       tuple_store_->createValueAccessor(filter));
 
-  std::map<attribute_id, attribute_id> attribute_map;
-  for (std::size_t i = 0; i < selection.size(); ++i) {
-    attribute_map.emplace(selection[i], i);
-  }
-
-  std::unique_ptr<InsertContext> insert_context(
-      new InsertContext(destination->getRelation()));
-  insert_context->addSource(attribute_map);
+//  std::map<attribute_id, attribute_id> attribute_map;
+//  for (std::size_t i = 0; i < selection.size(); ++i) {
+//    attribute_map.emplace(selection[i], i);
+//  }
+//
+//  std::unique_ptr<InsertContext> insert_context(
+//      new InsertContext(destination->getRelation()));
+//  insert_context->addSource(attribute_map);
+//
+//  const std::vector<ValueAccessor *> accessors = { accessor.get() };
+//  destination->bulkInsertTuples(accessors, insert_context.get());
 
-  const std::vector<ValueAccessor *> accessors = { accessor.get() };
-  destination->bulkInsertTuples(accessors, insert_context.get());
+  destination->bulkInsertTuplesWithRemappedAttributes(selection, accessor.get());
 }
 
 AggregationState* StorageBlock::aggregate(
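Note on the StorageBlock hunks: select() and selectSimple() are reverted to their pre-InsertContext implementations (the InsertContext variants are kept commented out). select() again evaluates every selected expression into a ColumnVectorsValueAccessor and bulk-inserts the materialized result, and selectSimple() again hands the raw accessor plus the attribute list to bulkInsertTuplesWithRemappedAttributes(). A toy model of the remapped-attributes insert that selectSimple() goes back to is sketched below; all names here are illustrative, not the real InsertDestination API.

#include <cassert>
#include <cstddef>
#include <vector>

using attribute_id = int;
using Tuple = std::vector<int>;  // toy tuple: one int value per attribute

// Toy model of bulkInsertTuplesWithRemappedAttributes(): output column i is
// read from source attribute selection[i], with no intermediate
// materialization of the projected columns.
std::vector<Tuple> bulkInsertWithRemap(const std::vector<Tuple> &source,
                                       const std::vector<attribute_id> &selection) {
  std::vector<Tuple> out;
  out.reserve(source.size());
  for (const Tuple &t : source) {
    Tuple projected;
    projected.reserve(selection.size());
    for (const attribute_id src : selection) {
      projected.push_back(t[src]);
    }
    out.push_back(std::move(projected));
  }
  return out;
}

int main() {
  // Project attributes (2, 0) from a two-tuple relation.
  const std::vector<Tuple> rel = {{10, 11, 12}, {20, 21, 22}};
  const std::vector<Tuple> result = bulkInsertWithRemap(rel, {2, 0});
  assert(result[0] == (Tuple{12, 10}) && result[1] == (Tuple{22, 20}));
}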