Posted to commits@quickstep.apache.org by ra...@apache.org on 2016/08/21 10:34:36 UTC

[1/7] incubator-quickstep git commit: Modified Aggregation unit test. Ran clang-format.

Repository: incubator-quickstep
Updated Branches:
  refs/heads/quickstep-28-29 5a80e33ef -> dad7d6f3f


http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/dad7d6f3/storage/HashTableBase.hpp
----------------------------------------------------------------------
diff --git a/storage/HashTableBase.hpp b/storage/HashTableBase.hpp
index 50d9a20..63c7fb1 100644
--- a/storage/HashTableBase.hpp
+++ b/storage/HashTableBase.hpp
@@ -20,8 +20,8 @@
 #include <cstddef>
 #include <vector>
 
-#include "utility/Macros.hpp"
 #include "ValueAccessor.hpp"
+#include "utility/Macros.hpp"
 
 namespace quickstep {
 
@@ -54,11 +54,7 @@ struct HashTablePreallocationState {
  * @brief Codes which indicate the result of a call to put() or
  *        putCompositeKey().
  **/
-enum class HashTablePutResult {
-  kOK = 0,
-  kDuplicateKey,
-  kOutOfSpace
-};
+enum class HashTablePutResult { kOK = 0, kDuplicateKey, kOutOfSpace };
 
 /**
  * @brief An ultra-minimal base class that HashTables with different ValueT
@@ -73,17 +69,19 @@ template <bool resizable,
           bool allow_duplicate_keys>
 class HashTableBase {
  public:
-  virtual ~HashTableBase() {
-  }
+  virtual ~HashTableBase() {}
+
   virtual bool upsertValueAccessorCompositeKeyFast(
       const std::vector<std::vector<attribute_id>> &argument,
       ValueAccessor *accessor,
       const std::vector<attribute_id> &key_attr_ids,
-      const bool check_for_null_keys) {return false;}
- protected:
-  HashTableBase() {
+      const bool check_for_null_keys) {
+    return false;
   }
 
+ protected:
+  HashTableBase() {}
+
  private:
   DISALLOW_COPY_AND_ASSIGN(HashTableBase);
 };
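
The notable change in this hunk, beyond the clang-format reflow, is the new virtual upsertValueAccessorCompositeKeyFast() hook: the base class simply returns false ("fast upsert path not supported"), and concrete hash tables are expected to override it. A minimal standalone sketch of that default-false hook pattern follows, using simplified stand-in names (TableBase, FastTable, upsertFast) rather than the real Quickstep types:

#include <iostream>
#include <vector>

// Illustrative stand-ins only; these are not the real Quickstep classes.
// The point is the shape of the new hook: the base class returns false
// ("fast path not supported") and a concrete table overrides it.
class TableBase {
 public:
  virtual ~TableBase() {}

  virtual bool upsertFast(const std::vector<int> &keys) { return false; }
};

class FastTable : public TableBase {
 public:
  bool upsertFast(const std::vector<int> &keys) override {
    // A real override would aggregate into per-key payloads here.
    return !keys.empty();
  }
};

int main() {
  FastTable table;
  TableBase *base = &table;
  // Dispatches to FastTable::upsertFast() and prints "true".
  std::cout << std::boolalpha << base->upsertFast({1, 2, 3}) << std::endl;
  return 0;
}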


[5/7] incubator-quickstep git commit: Modified Aggregation unit test. Ran clang-format.

Posted by ra...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/dad7d6f3/expressions/aggregation/tests/AggregationHandleMax_unittest.cpp
----------------------------------------------------------------------
diff --git a/expressions/aggregation/tests/AggregationHandleMax_unittest.cpp b/expressions/aggregation/tests/AggregationHandleMax_unittest.cpp
index fc25e91..e70e784 100644
--- a/expressions/aggregation/tests/AggregationHandleMax_unittest.cpp
+++ b/expressions/aggregation/tests/AggregationHandleMax_unittest.cpp
@@ -29,6 +29,8 @@
 #include "expressions/aggregation/AggregationHandle.hpp"
 #include "expressions/aggregation/AggregationHandleMax.hpp"
 #include "expressions/aggregation/AggregationID.hpp"
+#include "storage/AggregationOperationState.hpp"
+#include "storage/FastHashTableFactory.hpp"
 #include "storage/HashTableBase.hpp"
 #include "storage/StorageManager.hpp"
 #include "types/CharType.hpp"
@@ -68,54 +70,59 @@ class AggregationHandleMaxTest : public ::testing::Test {
   // Helper method that calls AggregationHandleMax::iterateUnaryInl() to
   // aggregate 'value' into '*state'.
   void iterateHandle(AggregationState *state, const TypedValue &value) {
-    static_cast<const AggregationHandleMax&>(*aggregation_handle_max_).iterateUnaryInl(
-        static_cast<AggregationStateMax*>(state),
-        value);
+    static_cast<const AggregationHandleMax &>(*aggregation_handle_max_)
+        .iterateUnaryInl(static_cast<AggregationStateMax *>(state), value);
   }
 
   void initializeHandle(const Type &type) {
     aggregation_handle_max_.reset(
-        AggregateFunctionFactory::Get(AggregationID::kMax).createHandle(
-            std::vector<const Type*>(1, &type)));
+        AggregateFunctionFactory::Get(AggregationID::kMax)
+            .createHandle(std::vector<const Type *>(1, &type)));
     aggregation_handle_max_state_.reset(
         aggregation_handle_max_->createInitialState());
   }
 
   static bool ApplyToTypesTest(TypeID typeID) {
-    const Type &type = (typeID == kChar || typeID == kVarChar) ?
-        TypeFactory::GetType(typeID, static_cast<std::size_t>(10)) :
-        TypeFactory::GetType(typeID);
+    const Type &type =
+        (typeID == kChar || typeID == kVarChar)
+            ? TypeFactory::GetType(typeID, static_cast<std::size_t>(10))
+            : TypeFactory::GetType(typeID);
 
-    return AggregateFunctionFactory::Get(AggregationID::kMax).canApplyToTypes(
-        std::vector<const Type*>(1, &type));
+    return AggregateFunctionFactory::Get(AggregationID::kMax)
+        .canApplyToTypes(std::vector<const Type *>(1, &type));
   }
 
   static bool ResultTypeForArgumentTypeTest(TypeID input_type_id,
                                             TypeID output_type_id) {
-    const Type *result_type
-        = AggregateFunctionFactory::Get(AggregationID::kMax).resultTypeForArgumentTypes(
-            std::vector<const Type*>(1, &TypeFactory::GetType(input_type_id)));
+    const Type *result_type =
+        AggregateFunctionFactory::Get(AggregationID::kMax)
+            .resultTypeForArgumentTypes(std::vector<const Type *>(
+                1, &TypeFactory::GetType(input_type_id)));
     return (result_type->getTypeID() == output_type_id);
   }
 
   template <typename CppType>
-  static void CheckMaxValue(
-      CppType expected,
-      const AggregationHandle &handle,
-      const AggregationState &state) {
+  static void CheckMaxValue(CppType expected,
+                            const AggregationHandle &handle,
+                            const AggregationState &state) {
     EXPECT_EQ(expected, handle.finalize(state).getLiteral<CppType>());
   }
 
-  static void CheckMaxString(
-      const std::string &expected,
-      const AggregationHandle &handle,
-      const AggregationState &state) {
+  template <typename CppType>
+  static void CheckMaxValue(CppType expected, const TypedValue &value) {
+    EXPECT_EQ(expected, value.getLiteral<CppType>());
+  }
+
+  static void CheckMaxString(const std::string &expected,
+                             const AggregationHandle &handle,
+                             const AggregationState &state) {
     TypedValue value = handle.finalize(state);
 
     ASSERT_EQ(expected.length(), value.getAsciiStringLength());
-    EXPECT_EQ(0, std::strncmp(expected.c_str(),
-                              static_cast<const char*>(value.getDataPtr()),
-                              value.getAsciiStringLength()));
+    EXPECT_EQ(0,
+              std::strncmp(expected.c_str(),
+                           static_cast<const char *>(value.getDataPtr()),
+                           value.getAsciiStringLength()));
   }
 
   // Static templated method to initialize data types.
@@ -128,7 +135,9 @@ class AggregationHandleMaxTest : public ::testing::Test {
   void checkAggregationMaxGeneric() {
     const GenericType &type = GenericType::Instance(true);
     initializeHandle(type);
-    EXPECT_TRUE(aggregation_handle_max_->finalize(*aggregation_handle_max_state_).isNull());
+    EXPECT_TRUE(
+        aggregation_handle_max_->finalize(*aggregation_handle_max_state_)
+            .isNull());
 
     typename GenericType::cpptype val;
     typename GenericType::cpptype max;
@@ -140,16 +149,18 @@ class AggregationHandleMaxTest : public ::testing::Test {
         if (type.getTypeID() == kInt || type.getTypeID() == kLong) {
           SetDataType(i * kNumSamples + j - 10, &val);
         } else {
-          SetDataType(static_cast<float>(i * kNumSamples + j - 10)/10, &val);
+          SetDataType(static_cast<float>(i * kNumSamples + j - 10) / 10, &val);
         }
-        iterateHandle(aggregation_handle_max_state_.get(), type.makeValue(&val));
+        iterateHandle(aggregation_handle_max_state_.get(),
+                      type.makeValue(&val));
         if (max < val) {
           max = val;
         }
       }
     }
     iterateHandle(aggregation_handle_max_state_.get(), type.makeNullValue());
-    CheckMaxValue<typename GenericType::cpptype>(max, *aggregation_handle_max_, *aggregation_handle_max_state_);
+    CheckMaxValue<typename GenericType::cpptype>(
+        max, *aggregation_handle_max_, *aggregation_handle_max_state_);
 
     // Test mergeStates().
     std::unique_ptr<AggregationState> merge_state(
@@ -163,7 +174,7 @@ class AggregationHandleMaxTest : public ::testing::Test {
         if (type.getTypeID() == kInt || type.getTypeID() == kLong) {
           SetDataType(i * kNumSamples + j - 20, &val);
         } else {
-          SetDataType(static_cast<float>(i * kNumSamples + j - 20)/10, &val);
+          SetDataType(static_cast<float>(i * kNumSamples + j - 20) / 10, &val);
         }
         iterateHandle(merge_state.get(), type.makeValue(&val));
         if (max < val) {
@@ -174,14 +185,14 @@ class AggregationHandleMaxTest : public ::testing::Test {
     aggregation_handle_max_->mergeStates(*merge_state,
                                          aggregation_handle_max_state_.get());
     CheckMaxValue<typename GenericType::cpptype>(
-        max,
-        *aggregation_handle_max_,
-        *aggregation_handle_max_state_);
+        max, *aggregation_handle_max_, *aggregation_handle_max_state_);
   }
 
   template <typename GenericType>
-  ColumnVector *createColumnVectorGeneric(const Type &type, typename GenericType::cpptype *max) {
-    NativeColumnVector *column = new NativeColumnVector(type, kIterations * kNumSamples + 3);
+  ColumnVector* createColumnVectorGeneric(const Type &type,
+                                          typename GenericType::cpptype *max) {
+    NativeColumnVector *column =
+        new NativeColumnVector(type, kIterations * kNumSamples + 3);
 
     typename GenericType::cpptype val;
     SetDataType(0, max);
@@ -192,7 +203,7 @@ class AggregationHandleMaxTest : public ::testing::Test {
         if (type.getTypeID() == kInt || type.getTypeID() == kLong) {
           SetDataType(i * kNumSamples + j - 10, &val);
         } else {
-          SetDataType(static_cast<float>(i * kNumSamples + j - 10)/10, &val);
+          SetDataType(static_cast<float>(i * kNumSamples + j - 10) / 10, &val);
         }
         column->appendTypedValue(type.makeValue(&val));
         if (*max < val) {
@@ -200,7 +211,7 @@ class AggregationHandleMaxTest : public ::testing::Test {
         }
       }
       // One NULL in the middle.
-      if (i == kIterations/2) {
+      if (i == kIterations / 2) {
         column->appendTypedValue(type.makeNullValue());
       }
     }
@@ -213,11 +224,14 @@ class AggregationHandleMaxTest : public ::testing::Test {
   void checkAggregationMaxGenericColumnVector() {
     const GenericType &type = GenericType::Instance(true);
     initializeHandle(type);
-    EXPECT_TRUE(aggregation_handle_max_->finalize(*aggregation_handle_max_state_).isNull());
+    EXPECT_TRUE(
+        aggregation_handle_max_->finalize(*aggregation_handle_max_state_)
+            .isNull());
 
     typename GenericType::cpptype max;
     std::vector<std::unique_ptr<ColumnVector>> column_vectors;
-    column_vectors.emplace_back(createColumnVectorGeneric<GenericType>(type, &max));
+    column_vectors.emplace_back(
+        createColumnVectorGeneric<GenericType>(type, &max));
 
     std::unique_ptr<AggregationState> cv_state(
         aggregation_handle_max_->accumulateColumnVectors(column_vectors));
@@ -225,15 +239,12 @@ class AggregationHandleMaxTest : public ::testing::Test {
     // Test the state generated directly by accumulateColumnVectors(), and also
     // test after merging back.
     CheckMaxValue<typename GenericType::cpptype>(
-        max,
-        *aggregation_handle_max_,
-        *cv_state);
+        max, *aggregation_handle_max_, *cv_state);
 
-    aggregation_handle_max_->mergeStates(*cv_state, aggregation_handle_max_state_.get());
+    aggregation_handle_max_->mergeStates(*cv_state,
+                                         aggregation_handle_max_state_.get());
     CheckMaxValue<typename GenericType::cpptype>(
-        max,
-        *aggregation_handle_max_,
-        *aggregation_handle_max_state_);
+        max, *aggregation_handle_max_, *aggregation_handle_max_state_);
   }
 
 #ifdef QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
@@ -241,29 +252,29 @@ class AggregationHandleMaxTest : public ::testing::Test {
   void checkAggregationMaxGenericValueAccessor() {
     const GenericType &type = GenericType::Instance(true);
     initializeHandle(type);
-    EXPECT_TRUE(aggregation_handle_max_->finalize(*aggregation_handle_max_state_).isNull());
+    EXPECT_TRUE(
+        aggregation_handle_max_->finalize(*aggregation_handle_max_state_)
+            .isNull());
 
-    std::unique_ptr<ColumnVectorsValueAccessor> accessor(new ColumnVectorsValueAccessor());
+    std::unique_ptr<ColumnVectorsValueAccessor> accessor(
+        new ColumnVectorsValueAccessor());
 
     typename GenericType::cpptype max;
     accessor->addColumn(createColumnVectorGeneric<GenericType>(type, &max));
 
     std::unique_ptr<AggregationState> va_state(
-        aggregation_handle_max_->accumulateValueAccessor(accessor.get(),
-                                                         std::vector<attribute_id>(1, 0)));
+        aggregation_handle_max_->accumulateValueAccessor(
+            accessor.get(), std::vector<attribute_id>(1, 0)));
 
     // Test the state generated directly by accumulateValueAccessor(), and also
     // test after merging back.
     CheckMaxValue<typename GenericType::cpptype>(
-        max,
-        *aggregation_handle_max_,
-        *va_state);
+        max, *aggregation_handle_max_, *va_state);
 
-    aggregation_handle_max_->mergeStates(*va_state, aggregation_handle_max_state_.get());
+    aggregation_handle_max_->mergeStates(*va_state,
+                                         aggregation_handle_max_state_.get());
     CheckMaxValue<typename GenericType::cpptype>(
-        max,
-        *aggregation_handle_max_,
-        *aggregation_handle_max_state_);
+        max, *aggregation_handle_max_, *aggregation_handle_max_state_);
   }
 #endif  // QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
 
@@ -271,11 +282,14 @@ class AggregationHandleMaxTest : public ::testing::Test {
   void checkAggregationMaxString() {
     const StringType &type = StringType::Instance(10, true);
     initializeHandle(type);
-    EXPECT_TRUE(aggregation_handle_max_->finalize(*aggregation_handle_max_state_).isNull());
+    EXPECT_TRUE(
+        aggregation_handle_max_->finalize(*aggregation_handle_max_state_)
+            .isNull());
 
     std::unique_ptr<UncheckedComparator> fast_comparator_;
-    fast_comparator_.reset(ComparisonFactory::GetComparison(ComparisonID::kGreater)
-                           .makeUncheckedComparatorForTypes(type, type));
+    fast_comparator_.reset(
+        ComparisonFactory::GetComparison(ComparisonID::kGreater)
+            .makeUncheckedComparatorForTypes(type, type));
     std::string string_literal;
     std::string max = "";
     int val;
@@ -289,15 +303,17 @@ class AggregationHandleMaxTest : public ::testing::Test {
 
         iterateHandle(
             aggregation_handle_max_state_.get(),
-            type.makeValue(string_literal.c_str(),
-                           string_literal.length() + 1).ensureNotReference());
-        if (fast_comparator_->compareDataPtrs(string_literal.c_str(), max.c_str())) {
+            type.makeValue(string_literal.c_str(), string_literal.length() + 1)
+                .ensureNotReference());
+        if (fast_comparator_->compareDataPtrs(string_literal.c_str(),
+                                              max.c_str())) {
           max = string_literal;
         }
       }
     }
     iterateHandle(aggregation_handle_max_state_.get(), type.makeNullValue());
-    CheckMaxString(max, *aggregation_handle_max_, *aggregation_handle_max_state_);
+    CheckMaxString(
+        max, *aggregation_handle_max_, *aggregation_handle_max_state_);
 
     // Test mergeStates().
     std::unique_ptr<AggregationState> merge_state(
@@ -315,24 +331,28 @@ class AggregationHandleMaxTest : public ::testing::Test {
 
         iterateHandle(
             merge_state.get(),
-            type.makeValue(string_literal.c_str(),
-                           string_literal.length() + 1).ensureNotReference());
-        if (fast_comparator_->compareDataPtrs(string_literal.c_str(), max.c_str())) {
+            type.makeValue(string_literal.c_str(), string_literal.length() + 1)
+                .ensureNotReference());
+        if (fast_comparator_->compareDataPtrs(string_literal.c_str(),
+                                              max.c_str())) {
           max = string_literal;
         }
       }
     }
     aggregation_handle_max_->mergeStates(*merge_state,
                                          aggregation_handle_max_state_.get());
-    CheckMaxString(max, *aggregation_handle_max_, *aggregation_handle_max_state_);
+    CheckMaxString(
+        max, *aggregation_handle_max_, *aggregation_handle_max_state_);
   }
 
   template <typename ColumnVectorType>
-  ColumnVector *createColumnVectorString(const Type &type, std::string *max) {
-    ColumnVectorType *column = new ColumnVectorType(type, kIterations * kNumSamples + 3);
+  ColumnVector* createColumnVectorString(const Type &type, std::string *max) {
+    ColumnVectorType *column =
+        new ColumnVectorType(type, kIterations * kNumSamples + 3);
     std::unique_ptr<UncheckedComparator> fast_comparator_;
-    fast_comparator_.reset(ComparisonFactory::GetComparison(ComparisonID::kGreater)
-                           .makeUncheckedComparatorForTypes(type, type));
+    fast_comparator_.reset(
+        ComparisonFactory::GetComparison(ComparisonID::kGreater)
+            .makeUncheckedComparatorForTypes(type, type));
     std::string string_literal;
     *max = "";
     int val;
@@ -344,14 +364,16 @@ class AggregationHandleMaxTest : public ::testing::Test {
         oss << "max" << val;
         string_literal = oss.str();
 
-        column->appendTypedValue(type.makeValue(string_literal.c_str(), string_literal.length() + 1)
-            .ensureNotReference());
-        if (fast_comparator_->compareDataPtrs(string_literal.c_str(), max->c_str())) {
+        column->appendTypedValue(
+            type.makeValue(string_literal.c_str(), string_literal.length() + 1)
+                .ensureNotReference());
+        if (fast_comparator_->compareDataPtrs(string_literal.c_str(),
+                                              max->c_str())) {
           *max = string_literal;
         }
       }
       // One NULL in the middle.
-      if (i == kIterations/2) {
+      if (i == kIterations / 2) {
         column->appendTypedValue(type.makeNullValue());
       }
     }
@@ -364,25 +386,26 @@ class AggregationHandleMaxTest : public ::testing::Test {
   void checkAggregationMaxStringColumnVector() {
     const StringType &type = StringType::Instance(10, true);
     initializeHandle(type);
-    EXPECT_TRUE(aggregation_handle_max_->finalize(*aggregation_handle_max_state_).isNull());
+    EXPECT_TRUE(
+        aggregation_handle_max_->finalize(*aggregation_handle_max_state_)
+            .isNull());
 
     std::string max;
     std::vector<std::unique_ptr<ColumnVector>> column_vectors;
-    column_vectors.emplace_back(createColumnVectorString<ColumnVectorType>(type, &max));
+    column_vectors.emplace_back(
+        createColumnVectorString<ColumnVectorType>(type, &max));
 
     std::unique_ptr<AggregationState> cv_state(
         aggregation_handle_max_->accumulateColumnVectors(column_vectors));
 
     // Test the state generated directly by accumulateColumnVectors(), and also
     // test after merging back.
-    CheckMaxString(max,
-                   *aggregation_handle_max_,
-                   *cv_state);
-
-    aggregation_handle_max_->mergeStates(*cv_state, aggregation_handle_max_state_.get());
-    CheckMaxString(max,
-                   *aggregation_handle_max_,
-                   *aggregation_handle_max_state_);
+    CheckMaxString(max, *aggregation_handle_max_, *cv_state);
+
+    aggregation_handle_max_->mergeStates(*cv_state,
+                                         aggregation_handle_max_state_.get());
+    CheckMaxString(
+        max, *aggregation_handle_max_, *aggregation_handle_max_state_);
   }
 
 #ifdef QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
@@ -390,26 +413,27 @@ class AggregationHandleMaxTest : public ::testing::Test {
   void checkAggregationMaxStringValueAccessor() {
     const StringType &type = StringType::Instance(10, true);
     initializeHandle(type);
-    EXPECT_TRUE(aggregation_handle_max_->finalize(*aggregation_handle_max_state_).isNull());
+    EXPECT_TRUE(
+        aggregation_handle_max_->finalize(*aggregation_handle_max_state_)
+            .isNull());
 
     std::string max;
-    std::unique_ptr<ColumnVectorsValueAccessor> accessor(new ColumnVectorsValueAccessor());
+    std::unique_ptr<ColumnVectorsValueAccessor> accessor(
+        new ColumnVectorsValueAccessor());
     accessor->addColumn(createColumnVectorString<ColumnVectorType>(type, &max));
 
     std::unique_ptr<AggregationState> va_state(
-        aggregation_handle_max_->accumulateValueAccessor(accessor.get(),
-                                                         std::vector<attribute_id>(1, 0)));
+        aggregation_handle_max_->accumulateValueAccessor(
+            accessor.get(), std::vector<attribute_id>(1, 0)));
 
     // Test the state generated directly by accumulateValueAccessor(), and also
     // test after merging back.
-    CheckMaxString(max,
-                   *aggregation_handle_max_,
-                   *va_state);
-
-    aggregation_handle_max_->mergeStates(*va_state, aggregation_handle_max_state_.get());
-    CheckMaxString(max,
-                   *aggregation_handle_max_,
-                   *aggregation_handle_max_state_);
+    CheckMaxString(max, *aggregation_handle_max_, *va_state);
+
+    aggregation_handle_max_->mergeStates(*va_state,
+                                         aggregation_handle_max_state_.get());
+    CheckMaxString(
+        max, *aggregation_handle_max_, *aggregation_handle_max_state_);
   }
 #endif  // QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
 
@@ -420,9 +444,7 @@ class AggregationHandleMaxTest : public ::testing::Test {
 
 template <>
 void AggregationHandleMaxTest::CheckMaxValue<float>(
-    float val,
-    const AggregationHandle &handle,
-    const AggregationState &state) {
+    float val, const AggregationHandle &handle, const AggregationState &state) {
   EXPECT_FLOAT_EQ(val, handle.finalize(state).getLiteral<float>());
 }
 
@@ -435,17 +457,20 @@ void AggregationHandleMaxTest::CheckMaxValue<double>(
 }
 
 template <>
-void AggregationHandleMaxTest::SetDataType<DatetimeLit>(int value, DatetimeLit *data) {
+void AggregationHandleMaxTest::SetDataType<DatetimeLit>(int value,
+                                                        DatetimeLit *data) {
   data->ticks = value;
 }
 
 template <>
-void AggregationHandleMaxTest::SetDataType<DatetimeIntervalLit>(int value, DatetimeIntervalLit *data) {
+void AggregationHandleMaxTest::SetDataType<DatetimeIntervalLit>(
+    int value, DatetimeIntervalLit *data) {
   data->interval_ticks = value;
 }
 
 template <>
-void AggregationHandleMaxTest::SetDataType<YearMonthIntervalLit>(int value, YearMonthIntervalLit *data) {
+void AggregationHandleMaxTest::SetDataType<YearMonthIntervalLit>(
+    int value, YearMonthIntervalLit *data) {
   data->months = value;
 }
 
@@ -577,50 +602,67 @@ TEST_F(AggregationHandleMaxDeathTest, WrongTypeTest) {
   float float_val = 0;
 
   // Passes.
-  iterateHandle(aggregation_handle_max_state_.get(), int_non_null_type.makeValue(&int_val));
+  iterateHandle(aggregation_handle_max_state_.get(),
+                int_non_null_type.makeValue(&int_val));
 
-  EXPECT_DEATH(iterateHandle(aggregation_handle_max_state_.get(), long_type.makeValue(&long_val)), "");
-  EXPECT_DEATH(iterateHandle(aggregation_handle_max_state_.get(), double_type.makeValue(&double_val)), "");
-  EXPECT_DEATH(iterateHandle(aggregation_handle_max_state_.get(), float_type.makeValue(&float_val)), "");
-  EXPECT_DEATH(iterateHandle(aggregation_handle_max_state_.get(), char_type.makeValue("asdf", 5)), "");
-  EXPECT_DEATH(iterateHandle(aggregation_handle_max_state_.get(), varchar_type.makeValue("asdf", 5)), "");
+  EXPECT_DEATH(iterateHandle(aggregation_handle_max_state_.get(),
+                             long_type.makeValue(&long_val)),
+               "");
+  EXPECT_DEATH(iterateHandle(aggregation_handle_max_state_.get(),
+                             double_type.makeValue(&double_val)),
+               "");
+  EXPECT_DEATH(iterateHandle(aggregation_handle_max_state_.get(),
+                             float_type.makeValue(&float_val)),
+               "");
+  EXPECT_DEATH(iterateHandle(aggregation_handle_max_state_.get(),
+                             char_type.makeValue("asdf", 5)),
+               "");
+  EXPECT_DEATH(iterateHandle(aggregation_handle_max_state_.get(),
+                             varchar_type.makeValue("asdf", 5)),
+               "");
 
   // Test mergeStates() with incorrectly typed handles.
   std::unique_ptr<AggregationHandle> aggregation_handle_max_long(
-      AggregateFunctionFactory::Get(AggregationID::kMax).createHandle(
-          std::vector<const Type*>(1, &long_type)));
+      AggregateFunctionFactory::Get(AggregationID::kMax)
+          .createHandle(std::vector<const Type *>(1, &long_type)));
   std::unique_ptr<AggregationState> aggregation_state_max_merge_long(
       aggregation_handle_max_long->createInitialState());
-  static_cast<const AggregationHandleMax&>(*aggregation_handle_max_long).iterateUnaryInl(
-      static_cast<AggregationStateMax*>(aggregation_state_max_merge_long.get()),
-      long_type.makeValue(&long_val));
-  EXPECT_DEATH(aggregation_handle_max_->mergeStates(*aggregation_state_max_merge_long,
-                                                    aggregation_handle_max_state_.get()),
-               "");
+  static_cast<const AggregationHandleMax &>(*aggregation_handle_max_long)
+      .iterateUnaryInl(static_cast<AggregationStateMax *>(
+                           aggregation_state_max_merge_long.get()),
+                       long_type.makeValue(&long_val));
+  EXPECT_DEATH(
+      aggregation_handle_max_->mergeStates(*aggregation_state_max_merge_long,
+                                           aggregation_handle_max_state_.get()),
+      "");
 
   std::unique_ptr<AggregationHandle> aggregation_handle_max_double(
-      AggregateFunctionFactory::Get(AggregationID::kMax).createHandle(
-          std::vector<const Type*>(1, &double_type)));
+      AggregateFunctionFactory::Get(AggregationID::kMax)
+          .createHandle(std::vector<const Type *>(1, &double_type)));
   std::unique_ptr<AggregationState> aggregation_state_max_merge_double(
       aggregation_handle_max_double->createInitialState());
-  static_cast<const AggregationHandleMax&>(*aggregation_handle_max_double).iterateUnaryInl(
-      static_cast<AggregationStateMax*>(aggregation_state_max_merge_double.get()),
-      double_type.makeValue(&double_val));
-  EXPECT_DEATH(aggregation_handle_max_->mergeStates(*aggregation_state_max_merge_double,
-                                                    aggregation_handle_max_state_.get()),
-               "");
+  static_cast<const AggregationHandleMax &>(*aggregation_handle_max_double)
+      .iterateUnaryInl(static_cast<AggregationStateMax *>(
+                           aggregation_state_max_merge_double.get()),
+                       double_type.makeValue(&double_val));
+  EXPECT_DEATH(
+      aggregation_handle_max_->mergeStates(*aggregation_state_max_merge_double,
+                                           aggregation_handle_max_state_.get()),
+      "");
 
   std::unique_ptr<AggregationHandle> aggregation_handle_max_float(
-      AggregateFunctionFactory::Get(AggregationID::kMax).createHandle(
-          std::vector<const Type*>(1, &float_type)));
+      AggregateFunctionFactory::Get(AggregationID::kMax)
+          .createHandle(std::vector<const Type *>(1, &float_type)));
   std::unique_ptr<AggregationState> aggregation_state_max_merge_float(
       aggregation_handle_max_float->createInitialState());
-  static_cast<const AggregationHandleMax&>(*aggregation_handle_max_float).iterateUnaryInl(
-      static_cast<AggregationStateMax*>(aggregation_state_max_merge_float.get()),
-      float_type.makeValue(&float_val));
-  EXPECT_DEATH(aggregation_handle_max_->mergeStates(*aggregation_state_max_merge_float,
-                                                    aggregation_handle_max_state_.get()),
-               "");
+  static_cast<const AggregationHandleMax &>(*aggregation_handle_max_float)
+      .iterateUnaryInl(static_cast<AggregationStateMax *>(
+                           aggregation_state_max_merge_float.get()),
+                       float_type.makeValue(&float_val));
+  EXPECT_DEATH(
+      aggregation_handle_max_->mergeStates(*aggregation_state_max_merge_float,
+                                           aggregation_handle_max_state_.get()),
+      "");
 }
 #endif
 
@@ -645,25 +687,28 @@ TEST_F(AggregationHandleMaxTest, GroupByTableMergeTest) {
   initializeHandle(int_non_null_type);
   storage_manager_.reset(new StorageManager("./test_max_data"));
   std::unique_ptr<AggregationStateHashTableBase> source_hash_table(
-      aggregation_handle_max_->createGroupByHashTable(
-          HashTableImplType::kSimpleScalarSeparateChaining,
+      AggregationStateFastHashTableFactory::CreateResizable(
+          HashTableImplType::kSeparateChaining,
           std::vector<const Type *>(1, &int_non_null_type),
           10,
+          {aggregation_handle_max_.get()->getPayloadSize()},
+          {aggregation_handle_max_.get()},
           storage_manager_.get()));
   std::unique_ptr<AggregationStateHashTableBase> destination_hash_table(
-      aggregation_handle_max_->createGroupByHashTable(
-          HashTableImplType::kSimpleScalarSeparateChaining,
+      AggregationStateFastHashTableFactory::CreateResizable(
+          HashTableImplType::kSeparateChaining,
           std::vector<const Type *>(1, &int_non_null_type),
           10,
+          {aggregation_handle_max_.get()->getPayloadSize()},
+          {aggregation_handle_max_.get()},
           storage_manager_.get()));
 
-  AggregationStateHashTable<AggregationStateMax> *destination_hash_table_derived =
-      static_cast<AggregationStateHashTable<AggregationStateMax> *>(
+  AggregationStateFastHashTable *destination_hash_table_derived =
+      static_cast<AggregationStateFastHashTable *>(
           destination_hash_table.get());
 
-  AggregationStateHashTable<AggregationStateMax> *source_hash_table_derived =
-      static_cast<AggregationStateHashTable<AggregationStateMax> *>(
-          source_hash_table.get());
+  AggregationStateFastHashTable *source_hash_table_derived =
+      static_cast<AggregationStateFastHashTable *>(source_hash_table.get());
 
   AggregationHandleMax *aggregation_handle_max_derived =
       static_cast<AggregationHandleMax *>(aggregation_handle_max_.get());
@@ -728,35 +773,52 @@ TEST_F(AggregationHandleMaxTest, GroupByTableMergeTest) {
   EXPECT_EQ(exclusive_key_source_max_val.getLiteral<int>(), actual_val);
 
   // Add the key-state pairs to the hash tables.
-  source_hash_table_derived->putCompositeKey(common_key,
-                                             *common_key_source_state);
-  destination_hash_table_derived->putCompositeKey(
-      common_key, *common_key_destination_state);
-  source_hash_table_derived->putCompositeKey(exclusive_source_key,
-                                             *exclusive_key_source_state);
-  destination_hash_table_derived->putCompositeKey(
-      exclusive_destination_key, *exclusive_key_destination_state);
+  unsigned char buffer[100];
+  buffer[0] = '\0';
+  memcpy(buffer + 1,
+         common_key_source_state.get()->getPayloadAddress(),
+         aggregation_handle_max_.get()->getPayloadSize());
+  source_hash_table_derived->putCompositeKeyFast(common_key, buffer);
+
+  memcpy(buffer + 1,
+         common_key_destination_state.get()->getPayloadAddress(),
+         aggregation_handle_max_.get()->getPayloadSize());
+  destination_hash_table_derived->putCompositeKeyFast(common_key, buffer);
+
+  memcpy(buffer + 1,
+         exclusive_key_source_state.get()->getPayloadAddress(),
+         aggregation_handle_max_.get()->getPayloadSize());
+  source_hash_table_derived->putCompositeKeyFast(exclusive_source_key, buffer);
+
+  memcpy(buffer + 1,
+         exclusive_key_destination_state.get()->getPayloadAddress(),
+         aggregation_handle_max_.get()->getPayloadSize());
+  destination_hash_table_derived->putCompositeKeyFast(exclusive_destination_key,
+                                                      buffer);
 
   EXPECT_EQ(2u, destination_hash_table_derived->numEntries());
   EXPECT_EQ(2u, source_hash_table_derived->numEntries());
 
-  aggregation_handle_max_->mergeGroupByHashTables(*source_hash_table,
-                                                  destination_hash_table.get());
+  AggregationOperationState::mergeGroupByHashTables(
+      source_hash_table.get(), destination_hash_table.get());
 
   EXPECT_EQ(3u, destination_hash_table_derived->numEntries());
 
   CheckMaxValue<int>(
       common_key_destination_max_val.getLiteral<int>(),
-      *aggregation_handle_max_derived,
-      *(destination_hash_table_derived->getSingleCompositeKey(common_key)));
+      aggregation_handle_max_derived->finalizeHashTableEntryFast(
+          destination_hash_table_derived->getSingleCompositeKey(common_key) +
+          1));
   CheckMaxValue<int>(exclusive_key_destination_max_val.getLiteral<int>(),
-                     *aggregation_handle_max_derived,
-                     *(destination_hash_table_derived->getSingleCompositeKey(
-                         exclusive_destination_key)));
+                     aggregation_handle_max_derived->finalizeHashTableEntryFast(
+                         destination_hash_table_derived->getSingleCompositeKey(
+                             exclusive_destination_key) +
+                         1));
   CheckMaxValue<int>(exclusive_key_source_max_val.getLiteral<int>(),
-                     *aggregation_handle_max_derived,
-                     *(source_hash_table_derived->getSingleCompositeKey(
-                         exclusive_source_key)));
+                     aggregation_handle_max_derived->finalizeHashTableEntryFast(
+                         source_hash_table_derived->getSingleCompositeKey(
+                             exclusive_source_key) +
+                         1));
 }
 
 }  // namespace quickstep
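
The substantive change in GroupByTableMergeTest above is that entries are no longer typed AggregationStateMax objects: each fast hash table entry is a raw byte payload, where the test writes a leading '\0' byte and then memcpy's the bytes from getPayloadAddress() in at offset 1, and the checks read the entry back via finalizeHashTableEntryFast(entry + 1). A minimal standalone sketch of that payload layout, using plain standard C++ and an illustrative stand-in struct instead of the real aggregation state:

#include <cstring>
#include <iostream>

// Illustrative stand-in for an aggregation state payload (e.g., a running max);
// the real test copies the bytes returned by getPayloadAddress() instead.
struct MaxPayload {
  int max_value;
};

int main() {
  MaxPayload state{42};

  // Pack the entry the way the test now does: one leading byte (the test
  // writes '\0' there), then the raw payload bytes copied in at offset 1.
  unsigned char buffer[1 + sizeof(MaxPayload)];
  buffer[0] = '\0';
  std::memcpy(buffer + 1, &state, sizeof(MaxPayload));

  // Reading mirrors finalizeHashTableEntryFast(entry + 1): skip the leading
  // byte and interpret the remaining bytes as the payload.
  MaxPayload readback;
  std::memcpy(&readback, buffer + 1, sizeof(MaxPayload));
  std::cout << "max = " << readback.max_value << std::endl;  // prints "max = 42"
  return 0;
}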

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/dad7d6f3/expressions/aggregation/tests/AggregationHandleMin_unittest.cpp
----------------------------------------------------------------------
diff --git a/expressions/aggregation/tests/AggregationHandleMin_unittest.cpp b/expressions/aggregation/tests/AggregationHandleMin_unittest.cpp
index a87ace9..3c743f7 100644
--- a/expressions/aggregation/tests/AggregationHandleMin_unittest.cpp
+++ b/expressions/aggregation/tests/AggregationHandleMin_unittest.cpp
@@ -29,6 +29,8 @@
 #include "expressions/aggregation/AggregationHandle.hpp"
 #include "expressions/aggregation/AggregationHandleMin.hpp"
 #include "expressions/aggregation/AggregationID.hpp"
+#include "storage/AggregationOperationState.hpp"
+#include "storage/FastHashTableFactory.hpp"
 #include "storage/StorageManager.hpp"
 #include "types/CharType.hpp"
 #include "types/DatetimeIntervalType.hpp"
@@ -67,54 +69,59 @@ class AggregationHandleMinTest : public ::testing::Test {
   // Helper method that calls AggregationHandleMin::iterateUnaryInl() to
   // aggregate 'value' into '*state'.
   void iterateHandle(AggregationState *state, const TypedValue &value) {
-    static_cast<const AggregationHandleMin&>(*aggregation_handle_min_).iterateUnaryInl(
-        static_cast<AggregationStateMin*>(state),
-        value);
+    static_cast<const AggregationHandleMin &>(*aggregation_handle_min_)
+        .iterateUnaryInl(static_cast<AggregationStateMin *>(state), value);
   }
 
   void initializeHandle(const Type &type) {
     aggregation_handle_min_.reset(
-        AggregateFunctionFactory::Get(AggregationID::kMin).createHandle(
-            std::vector<const Type*>(1, &type)));
+        AggregateFunctionFactory::Get(AggregationID::kMin)
+            .createHandle(std::vector<const Type *>(1, &type)));
     aggregation_handle_min_state_.reset(
         aggregation_handle_min_->createInitialState());
   }
 
   static bool ApplyToTypesTest(TypeID typeID) {
-    const Type &type = (typeID == kChar || typeID == kVarChar) ?
-        TypeFactory::GetType(typeID, static_cast<std::size_t>(10)) :
-        TypeFactory::GetType(typeID);
+    const Type &type =
+        (typeID == kChar || typeID == kVarChar)
+            ? TypeFactory::GetType(typeID, static_cast<std::size_t>(10))
+            : TypeFactory::GetType(typeID);
 
-    return AggregateFunctionFactory::Get(AggregationID::kMin).canApplyToTypes(
-        std::vector<const Type*>(1, &type));
+    return AggregateFunctionFactory::Get(AggregationID::kMin)
+        .canApplyToTypes(std::vector<const Type *>(1, &type));
   }
 
   static bool ResultTypeForArgumentTypeTest(TypeID input_type_id,
                                             TypeID output_type_id) {
-    const Type *result_type
-        = AggregateFunctionFactory::Get(AggregationID::kMin).resultTypeForArgumentTypes(
-            std::vector<const Type*>(1, &TypeFactory::GetType(input_type_id)));
+    const Type *result_type =
+        AggregateFunctionFactory::Get(AggregationID::kMin)
+            .resultTypeForArgumentTypes(std::vector<const Type *>(
+                1, &TypeFactory::GetType(input_type_id)));
     return (result_type->getTypeID() == output_type_id);
   }
 
   template <typename CppType>
-  static void CheckMinValue(
-      CppType expected,
-      const AggregationHandle &handle,
-      const AggregationState &state) {
+  static void CheckMinValue(CppType expected,
+                            const AggregationHandle &handle,
+                            const AggregationState &state) {
     EXPECT_EQ(expected, handle.finalize(state).getLiteral<CppType>());
   }
 
-  static void CheckMinString(
-      const std::string &expected,
-      const AggregationHandle &handle,
-      const AggregationState &state) {
+  template <typename CppType>
+  static void CheckMinValue(CppType expected, const TypedValue &value) {
+    EXPECT_EQ(expected, value.getLiteral<CppType>());
+  }
+
+  static void CheckMinString(const std::string &expected,
+                             const AggregationHandle &handle,
+                             const AggregationState &state) {
     TypedValue value = handle.finalize(state);
 
     ASSERT_EQ(expected.length(), value.getAsciiStringLength());
-    EXPECT_EQ(0, std::strncmp(expected.c_str(),
-                              static_cast <const char *>(value.getDataPtr()),
-                              value.getAsciiStringLength()));
+    EXPECT_EQ(0,
+              std::strncmp(expected.c_str(),
+                           static_cast<const char *>(value.getDataPtr()),
+                           value.getAsciiStringLength()));
   }
 
   // Static templated method to initialize data types.
@@ -127,7 +134,9 @@ class AggregationHandleMinTest : public ::testing::Test {
   void checkAggregationMinGeneric() {
     const GenericType &type = GenericType::Instance(true);
     initializeHandle(type);
-    EXPECT_TRUE(aggregation_handle_min_->finalize(*aggregation_handle_min_state_).isNull());
+    EXPECT_TRUE(
+        aggregation_handle_min_->finalize(*aggregation_handle_min_state_)
+            .isNull());
 
     typename GenericType::cpptype val;
     typename GenericType::cpptype min;
@@ -139,16 +148,18 @@ class AggregationHandleMinTest : public ::testing::Test {
         if (type.getTypeID() == kInt || type.getTypeID() == kLong) {
           SetDataType(i * kNumSamples + j - 10, &val);
         } else {
-          SetDataType(static_cast<float>(i * kNumSamples + j - 10)/10, &val);
+          SetDataType(static_cast<float>(i * kNumSamples + j - 10) / 10, &val);
         }
-        iterateHandle(aggregation_handle_min_state_.get(), type.makeValue(&val));
+        iterateHandle(aggregation_handle_min_state_.get(),
+                      type.makeValue(&val));
         if (min > val) {
           min = val;
         }
       }
     }
     iterateHandle(aggregation_handle_min_state_.get(), type.makeNullValue());
-    CheckMinValue<typename GenericType::cpptype>(min, *aggregation_handle_min_, *aggregation_handle_min_state_);
+    CheckMinValue<typename GenericType::cpptype>(
+        min, *aggregation_handle_min_, *aggregation_handle_min_state_);
 
     // Test mergeStates().
     std::unique_ptr<AggregationState> merge_state(
@@ -162,7 +173,7 @@ class AggregationHandleMinTest : public ::testing::Test {
         if (type.getTypeID() == kInt || type.getTypeID() == kLong) {
           SetDataType(i * kNumSamples + j - 20, &val);
         } else {
-          SetDataType(static_cast<float>(i * kNumSamples + j - 20)/10, &val);
+          SetDataType(static_cast<float>(i * kNumSamples + j - 20) / 10, &val);
         }
         iterateHandle(merge_state.get(), type.makeValue(&val));
         if (min > val) {
@@ -173,14 +184,14 @@ class AggregationHandleMinTest : public ::testing::Test {
     aggregation_handle_min_->mergeStates(*merge_state,
                                          aggregation_handle_min_state_.get());
     CheckMinValue<typename GenericType::cpptype>(
-        min,
-        *aggregation_handle_min_,
-        *aggregation_handle_min_state_);
+        min, *aggregation_handle_min_, *aggregation_handle_min_state_);
   }
 
   template <typename GenericType>
-  ColumnVector *createColumnVectorGeneric(const Type &type, typename GenericType::cpptype *min) {
-    NativeColumnVector *column = new NativeColumnVector(type, kIterations * kNumSamples + 3);
+  ColumnVector* createColumnVectorGeneric(const Type &type,
+                                          typename GenericType::cpptype *min) {
+    NativeColumnVector *column =
+        new NativeColumnVector(type, kIterations * kNumSamples + 3);
 
     typename GenericType::cpptype val;
     SetDataType(1000, min);
@@ -191,7 +202,7 @@ class AggregationHandleMinTest : public ::testing::Test {
         if (type.getTypeID() == kInt || type.getTypeID() == kLong) {
           SetDataType(i * kNumSamples + j - 10, &val);
         } else {
-          SetDataType(static_cast<float>(i * kNumSamples + j - 10)/10, &val);
+          SetDataType(static_cast<float>(i * kNumSamples + j - 10) / 10, &val);
         }
         column->appendTypedValue(type.makeValue(&val));
         if (*min > val) {
@@ -199,7 +210,7 @@ class AggregationHandleMinTest : public ::testing::Test {
         }
       }
       // One NULL in the middle.
-      if (i == kIterations/2) {
+      if (i == kIterations / 2) {
         column->appendTypedValue(type.makeNullValue());
       }
     }
@@ -212,11 +223,14 @@ class AggregationHandleMinTest : public ::testing::Test {
   void checkAggregationMinGenericColumnVector() {
     const GenericType &type = GenericType::Instance(true);
     initializeHandle(type);
-    EXPECT_TRUE(aggregation_handle_min_->finalize(*aggregation_handle_min_state_).isNull());
+    EXPECT_TRUE(
+        aggregation_handle_min_->finalize(*aggregation_handle_min_state_)
+            .isNull());
 
     typename GenericType::cpptype min;
     std::vector<std::unique_ptr<ColumnVector>> column_vectors;
-    column_vectors.emplace_back(createColumnVectorGeneric<GenericType>(type, &min));
+    column_vectors.emplace_back(
+        createColumnVectorGeneric<GenericType>(type, &min));
 
     std::unique_ptr<AggregationState> cv_state(
         aggregation_handle_min_->accumulateColumnVectors(column_vectors));
@@ -224,15 +238,12 @@ class AggregationHandleMinTest : public ::testing::Test {
     // Test the state generated directly by accumulateColumnVectors(), and also
     // test after merging back.
     CheckMinValue<typename GenericType::cpptype>(
-        min,
-        *aggregation_handle_min_,
-        *cv_state);
+        min, *aggregation_handle_min_, *cv_state);
 
-    aggregation_handle_min_->mergeStates(*cv_state, aggregation_handle_min_state_.get());
+    aggregation_handle_min_->mergeStates(*cv_state,
+                                         aggregation_handle_min_state_.get());
     CheckMinValue<typename GenericType::cpptype>(
-        min,
-        *aggregation_handle_min_,
-        *aggregation_handle_min_state_);
+        min, *aggregation_handle_min_, *aggregation_handle_min_state_);
   }
 
 #ifdef QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
@@ -240,29 +251,29 @@ class AggregationHandleMinTest : public ::testing::Test {
   void checkAggregationMinGenericValueAccessor() {
     const GenericType &type = GenericType::Instance(true);
     initializeHandle(type);
-    EXPECT_TRUE(aggregation_handle_min_->finalize(*aggregation_handle_min_state_).isNull());
+    EXPECT_TRUE(
+        aggregation_handle_min_->finalize(*aggregation_handle_min_state_)
+            .isNull());
 
-    std::unique_ptr<ColumnVectorsValueAccessor> accessor(new ColumnVectorsValueAccessor());
+    std::unique_ptr<ColumnVectorsValueAccessor> accessor(
+        new ColumnVectorsValueAccessor());
 
     typename GenericType::cpptype min;
     accessor->addColumn(createColumnVectorGeneric<GenericType>(type, &min));
 
     std::unique_ptr<AggregationState> va_state(
-        aggregation_handle_min_->accumulateValueAccessor(accessor.get(),
-                                                         std::vector<attribute_id>(1, 0)));
+        aggregation_handle_min_->accumulateValueAccessor(
+            accessor.get(), std::vector<attribute_id>(1, 0)));
 
     // Test the state generated directly by accumulateValueAccessor(), and also
     // test after merging back.
     CheckMinValue<typename GenericType::cpptype>(
-        min,
-        *aggregation_handle_min_,
-        *va_state);
+        min, *aggregation_handle_min_, *va_state);
 
-    aggregation_handle_min_->mergeStates(*va_state, aggregation_handle_min_state_.get());
+    aggregation_handle_min_->mergeStates(*va_state,
+                                         aggregation_handle_min_state_.get());
     CheckMinValue<typename GenericType::cpptype>(
-        min,
-        *aggregation_handle_min_,
-        *aggregation_handle_min_state_);
+        min, *aggregation_handle_min_, *aggregation_handle_min_state_);
   }
 #endif  // QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
 
@@ -270,11 +281,13 @@ class AggregationHandleMinTest : public ::testing::Test {
   void checkAggregationMinString() {
     const StringType &type = StringType::Instance(10, true);
     initializeHandle(type);
-    EXPECT_TRUE(aggregation_handle_min_->finalize(*aggregation_handle_min_state_).isNull());
+    EXPECT_TRUE(
+        aggregation_handle_min_->finalize(*aggregation_handle_min_state_)
+            .isNull());
 
     std::unique_ptr<UncheckedComparator> fast_comparator_;
     fast_comparator_.reset(ComparisonFactory::GetComparison(ComparisonID::kLess)
-                           .makeUncheckedComparatorForTypes(type, type));
+                               .makeUncheckedComparatorForTypes(type, type));
     std::string string_literal;
     std::string min = "z";
     int val;
@@ -286,15 +299,19 @@ class AggregationHandleMinTest : public ::testing::Test {
         oss << "test" << val;
         string_literal = oss.str();
 
-        iterateHandle(aggregation_handle_min_state_.get(), type.makeValue(string_literal.c_str(),
-                                                        string_literal.length() + 1).ensureNotReference());
-        if (fast_comparator_->compareDataPtrs(string_literal.c_str(), min.c_str())) {
+        iterateHandle(
+            aggregation_handle_min_state_.get(),
+            type.makeValue(string_literal.c_str(), string_literal.length() + 1)
+                .ensureNotReference());
+        if (fast_comparator_->compareDataPtrs(string_literal.c_str(),
+                                              min.c_str())) {
           min = string_literal;
         }
       }
     }
     iterateHandle(aggregation_handle_min_state_.get(), type.makeNullValue());
-    CheckMinString(min, *aggregation_handle_min_, *aggregation_handle_min_state_);
+    CheckMinString(
+        min, *aggregation_handle_min_, *aggregation_handle_min_state_);
 
     // Test mergeStates().
     std::unique_ptr<AggregationState> merge_state(
@@ -312,24 +329,27 @@ class AggregationHandleMinTest : public ::testing::Test {
 
         iterateHandle(
             merge_state.get(),
-            type.makeValue(string_literal.c_str(),
-                           string_literal.length() + 1).ensureNotReference());
-        if (fast_comparator_->compareDataPtrs(string_literal.c_str(), min.c_str())) {
+            type.makeValue(string_literal.c_str(), string_literal.length() + 1)
+                .ensureNotReference());
+        if (fast_comparator_->compareDataPtrs(string_literal.c_str(),
+                                              min.c_str())) {
           min = string_literal;
         }
       }
     }
     aggregation_handle_min_->mergeStates(*merge_state,
                                          aggregation_handle_min_state_.get());
-    CheckMinString(min, *aggregation_handle_min_, *aggregation_handle_min_state_);
+    CheckMinString(
+        min, *aggregation_handle_min_, *aggregation_handle_min_state_);
   }
 
   template <typename ColumnVectorType>
-  ColumnVector *createColumnVectorString(const Type &type, std::string *min) {
-    ColumnVectorType *column = new ColumnVectorType(type, kIterations * kNumSamples + 3);
+  ColumnVector* createColumnVectorString(const Type &type, std::string *min) {
+    ColumnVectorType *column =
+        new ColumnVectorType(type, kIterations * kNumSamples + 3);
     std::unique_ptr<UncheckedComparator> fast_comparator_;
     fast_comparator_.reset(ComparisonFactory::GetComparison(ComparisonID::kLess)
-                           .makeUncheckedComparatorForTypes(type, type));
+                               .makeUncheckedComparatorForTypes(type, type));
     std::string string_literal;
     *min = "z";
     int val;
@@ -341,14 +361,16 @@ class AggregationHandleMinTest : public ::testing::Test {
         oss << "test" << val;
         string_literal = oss.str();
 
-        column->appendTypedValue(type.makeValue(string_literal.c_str(), string_literal.length() + 1)
-            .ensureNotReference());
-        if (fast_comparator_->compareDataPtrs(string_literal.c_str(), min->c_str())) {
+        column->appendTypedValue(
+            type.makeValue(string_literal.c_str(), string_literal.length() + 1)
+                .ensureNotReference());
+        if (fast_comparator_->compareDataPtrs(string_literal.c_str(),
+                                              min->c_str())) {
           *min = string_literal;
         }
       }
       // One NULL in the middle.
-      if (i == kIterations/2) {
+      if (i == kIterations / 2) {
         column->appendTypedValue(type.makeNullValue());
       }
     }
@@ -361,25 +383,26 @@ class AggregationHandleMinTest : public ::testing::Test {
   void checkAggregationMinStringColumnVector() {
     const StringType &type = StringType::Instance(10, true);
     initializeHandle(type);
-    EXPECT_TRUE(aggregation_handle_min_->finalize(*aggregation_handle_min_state_).isNull());
+    EXPECT_TRUE(
+        aggregation_handle_min_->finalize(*aggregation_handle_min_state_)
+            .isNull());
 
     std::string min;
     std::vector<std::unique_ptr<ColumnVector>> column_vectors;
-    column_vectors.emplace_back(createColumnVectorString<ColumnVectorType>(type, &min));
+    column_vectors.emplace_back(
+        createColumnVectorString<ColumnVectorType>(type, &min));
 
     std::unique_ptr<AggregationState> cv_state(
         aggregation_handle_min_->accumulateColumnVectors(column_vectors));
 
     // Test the state generated directly by accumulateColumnVectors(), and also
     // test after merging back.
-    CheckMinString(min,
-                   *aggregation_handle_min_,
-                   *cv_state);
-
-    aggregation_handle_min_->mergeStates(*cv_state, aggregation_handle_min_state_.get());
-    CheckMinString(min,
-                   *aggregation_handle_min_,
-                   *aggregation_handle_min_state_);
+    CheckMinString(min, *aggregation_handle_min_, *cv_state);
+
+    aggregation_handle_min_->mergeStates(*cv_state,
+                                         aggregation_handle_min_state_.get());
+    CheckMinString(
+        min, *aggregation_handle_min_, *aggregation_handle_min_state_);
   }
 
 #ifdef QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
@@ -387,26 +410,27 @@ class AggregationHandleMinTest : public ::testing::Test {
   void checkAggregationMinStringValueAccessor() {
     const StringType &type = StringType::Instance(10, true);
     initializeHandle(type);
-    EXPECT_TRUE(aggregation_handle_min_->finalize(*aggregation_handle_min_state_).isNull());
+    EXPECT_TRUE(
+        aggregation_handle_min_->finalize(*aggregation_handle_min_state_)
+            .isNull());
 
     std::string min;
-    std::unique_ptr<ColumnVectorsValueAccessor> accessor(new ColumnVectorsValueAccessor());
+    std::unique_ptr<ColumnVectorsValueAccessor> accessor(
+        new ColumnVectorsValueAccessor());
     accessor->addColumn(createColumnVectorString<ColumnVectorType>(type, &min));
 
     std::unique_ptr<AggregationState> va_state(
-        aggregation_handle_min_->accumulateValueAccessor(accessor.get(),
-                                                         std::vector<attribute_id>(1, 0)));
+        aggregation_handle_min_->accumulateValueAccessor(
+            accessor.get(), std::vector<attribute_id>(1, 0)));
 
     // Test the state generated directly by accumulateValueAccessor(), and also
     // test after merging back.
-    CheckMinString(min,
-                   *aggregation_handle_min_,
-                   *va_state);
-
-    aggregation_handle_min_->mergeStates(*va_state, aggregation_handle_min_state_.get());
-    CheckMinString(min,
-                   *aggregation_handle_min_,
-                   *aggregation_handle_min_state_);
+    CheckMinString(min, *aggregation_handle_min_, *va_state);
+
+    aggregation_handle_min_->mergeStates(*va_state,
+                                         aggregation_handle_min_state_.get());
+    CheckMinString(
+        min, *aggregation_handle_min_, *aggregation_handle_min_state_);
   }
 #endif  // QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
 
@@ -417,9 +441,7 @@ class AggregationHandleMinTest : public ::testing::Test {
 
 template <>
 void AggregationHandleMinTest::CheckMinValue<float>(
-    float val,
-    const AggregationHandle &handle,
-    const AggregationState &state) {
+    float val, const AggregationHandle &handle, const AggregationState &state) {
   EXPECT_FLOAT_EQ(val, handle.finalize(state).getLiteral<float>());
 }
 
@@ -432,17 +454,20 @@ void AggregationHandleMinTest::CheckMinValue<double>(
 }
 
 template <>
-void AggregationHandleMinTest::SetDataType<DatetimeLit>(int value, DatetimeLit *data) {
+void AggregationHandleMinTest::SetDataType<DatetimeLit>(int value,
+                                                        DatetimeLit *data) {
   data->ticks = value;
 }
 
 template <>
-void AggregationHandleMinTest::SetDataType<DatetimeIntervalLit>(int value, DatetimeIntervalLit *data) {
+void AggregationHandleMinTest::SetDataType<DatetimeIntervalLit>(
+    int value, DatetimeIntervalLit *data) {
   data->interval_ticks = value;
 }
 
 template <>
-void AggregationHandleMinTest::SetDataType<YearMonthIntervalLit>(int value, YearMonthIntervalLit *data) {
+void AggregationHandleMinTest::SetDataType<YearMonthIntervalLit>(
+    int value, YearMonthIntervalLit *data) {
   data->months = value;
 }
 
@@ -573,50 +598,67 @@ TEST_F(AggregationHandleMinDeathTest, WrongTypeTest) {
   double double_val = 0;
   float float_val = 0;
 
-  iterateHandle(aggregation_handle_min_state_.get(), int_non_null_type.makeValue(&int_val));
+  iterateHandle(aggregation_handle_min_state_.get(),
+                int_non_null_type.makeValue(&int_val));
 
-  EXPECT_DEATH(iterateHandle(aggregation_handle_min_state_.get(), long_type.makeValue(&long_val)), "");
-  EXPECT_DEATH(iterateHandle(aggregation_handle_min_state_.get(), double_type.makeValue(&double_val)), "");
-  EXPECT_DEATH(iterateHandle(aggregation_handle_min_state_.get(), float_type.makeValue(&float_val)), "");
-  EXPECT_DEATH(iterateHandle(aggregation_handle_min_state_.get(), char_type.makeValue("asdf", 5)), "");
-  EXPECT_DEATH(iterateHandle(aggregation_handle_min_state_.get(), varchar_type.makeValue("asdf", 5)), "");
+  EXPECT_DEATH(iterateHandle(aggregation_handle_min_state_.get(),
+                             long_type.makeValue(&long_val)),
+               "");
+  EXPECT_DEATH(iterateHandle(aggregation_handle_min_state_.get(),
+                             double_type.makeValue(&double_val)),
+               "");
+  EXPECT_DEATH(iterateHandle(aggregation_handle_min_state_.get(),
+                             float_type.makeValue(&float_val)),
+               "");
+  EXPECT_DEATH(iterateHandle(aggregation_handle_min_state_.get(),
+                             char_type.makeValue("asdf", 5)),
+               "");
+  EXPECT_DEATH(iterateHandle(aggregation_handle_min_state_.get(),
+                             varchar_type.makeValue("asdf", 5)),
+               "");
 
   // Test mergeStates() with incorrectly typed handles.
   std::unique_ptr<AggregationHandle> aggregation_handle_min_long(
-      AggregateFunctionFactory::Get(AggregationID::kMin).createHandle(
-          std::vector<const Type*>(1, &long_type)));
+      AggregateFunctionFactory::Get(AggregationID::kMin)
+          .createHandle(std::vector<const Type *>(1, &long_type)));
   std::unique_ptr<AggregationState> aggregation_state_min_merge_long(
       aggregation_handle_min_long->createInitialState());
-  static_cast<const AggregationHandleMin&>(*aggregation_handle_min_long).iterateUnaryInl(
-      static_cast<AggregationStateMin*>(aggregation_state_min_merge_long.get()),
-      long_type.makeValue(&long_val));
-  EXPECT_DEATH(aggregation_handle_min_->mergeStates(*aggregation_state_min_merge_long,
-                                                    aggregation_handle_min_state_.get()),
-               "");
+  static_cast<const AggregationHandleMin &>(*aggregation_handle_min_long)
+      .iterateUnaryInl(static_cast<AggregationStateMin *>(
+                           aggregation_state_min_merge_long.get()),
+                       long_type.makeValue(&long_val));
+  EXPECT_DEATH(
+      aggregation_handle_min_->mergeStates(*aggregation_state_min_merge_long,
+                                           aggregation_handle_min_state_.get()),
+      "");
 
   std::unique_ptr<AggregationHandle> aggregation_handle_min_double(
-      AggregateFunctionFactory::Get(AggregationID::kMin).createHandle(
-          std::vector<const Type*>(1, &double_type)));
+      AggregateFunctionFactory::Get(AggregationID::kMin)
+          .createHandle(std::vector<const Type *>(1, &double_type)));
   std::unique_ptr<AggregationState> aggregation_state_min_merge_double(
       aggregation_handle_min_double->createInitialState());
-  static_cast<const AggregationHandleMin&>(*aggregation_handle_min_double).iterateUnaryInl(
-      static_cast<AggregationStateMin*>(aggregation_state_min_merge_double.get()),
-      double_type.makeValue(&double_val));
-  EXPECT_DEATH(aggregation_handle_min_->mergeStates(*aggregation_state_min_merge_double,
-                                                    aggregation_handle_min_state_.get()),
-               "");
+  static_cast<const AggregationHandleMin &>(*aggregation_handle_min_double)
+      .iterateUnaryInl(static_cast<AggregationStateMin *>(
+                           aggregation_state_min_merge_double.get()),
+                       double_type.makeValue(&double_val));
+  EXPECT_DEATH(
+      aggregation_handle_min_->mergeStates(*aggregation_state_min_merge_double,
+                                           aggregation_handle_min_state_.get()),
+      "");
 
   std::unique_ptr<AggregationHandle> aggregation_handle_min_float(
-      AggregateFunctionFactory::Get(AggregationID::kMin).createHandle(
-          std::vector<const Type*>(1, &float_type)));
+      AggregateFunctionFactory::Get(AggregationID::kMin)
+          .createHandle(std::vector<const Type *>(1, &float_type)));
   std::unique_ptr<AggregationState> aggregation_state_min_merge_float(
       aggregation_handle_min_float->createInitialState());
-  static_cast<const AggregationHandleMin&>(*aggregation_handle_min_float).iterateUnaryInl(
-      static_cast<AggregationStateMin*>(aggregation_state_min_merge_float.get()),
-      float_type.makeValue(&float_val));
-  EXPECT_DEATH(aggregation_handle_min_->mergeStates(*aggregation_state_min_merge_float,
-                                                    aggregation_handle_min_state_.get()),
-               "");
+  static_cast<const AggregationHandleMin &>(*aggregation_handle_min_float)
+      .iterateUnaryInl(static_cast<AggregationStateMin *>(
+                           aggregation_state_min_merge_float.get()),
+                       float_type.makeValue(&float_val));
+  EXPECT_DEATH(
+      aggregation_handle_min_->mergeStates(*aggregation_state_min_merge_float,
+                                           aggregation_handle_min_state_.get()),
+      "");
 }
 #endif
 
@@ -641,25 +683,28 @@ TEST_F(AggregationHandleMinTest, GroupByTableMergeTest) {
   initializeHandle(int_non_null_type);
   storage_manager_.reset(new StorageManager("./test_min_data"));
   std::unique_ptr<AggregationStateHashTableBase> source_hash_table(
-      aggregation_handle_min_->createGroupByHashTable(
-          HashTableImplType::kSimpleScalarSeparateChaining,
+      AggregationStateFastHashTableFactory::CreateResizable(
+          HashTableImplType::kSeparateChaining,
           std::vector<const Type *>(1, &int_non_null_type),
           10,
+          {aggregation_handle_min_.get()->getPayloadSize()},
+          {aggregation_handle_min_.get()},
           storage_manager_.get()));
   std::unique_ptr<AggregationStateHashTableBase> destination_hash_table(
-      aggregation_handle_min_->createGroupByHashTable(
-          HashTableImplType::kSimpleScalarSeparateChaining,
+      AggregationStateFastHashTableFactory::CreateResizable(
+          HashTableImplType::kSeparateChaining,
           std::vector<const Type *>(1, &int_non_null_type),
           10,
+          {aggregation_handle_min_.get()->getPayloadSize()},
+          {aggregation_handle_min_.get()},
           storage_manager_.get()));
 
-  AggregationStateHashTable<AggregationStateMin> *destination_hash_table_derived =
-      static_cast<AggregationStateHashTable<AggregationStateMin> *>(
+  AggregationStateFastHashTable *destination_hash_table_derived =
+      static_cast<AggregationStateFastHashTable *>(
           destination_hash_table.get());
 
-  AggregationStateHashTable<AggregationStateMin> *source_hash_table_derived =
-      static_cast<AggregationStateHashTable<AggregationStateMin> *>(
-          source_hash_table.get());
+  AggregationStateFastHashTable *source_hash_table_derived =
+      static_cast<AggregationStateFastHashTable *>(source_hash_table.get());
 
   AggregationHandleMin *aggregation_handle_min_derived =
       static_cast<AggregationHandleMin *>(aggregation_handle_min_.get());
@@ -724,35 +769,52 @@ TEST_F(AggregationHandleMinTest, GroupByTableMergeTest) {
   EXPECT_EQ(exclusive_key_source_min_val.getLiteral<int>(), actual_val);
 
   // Add the key-state pairs to the hash tables.
-  source_hash_table_derived->putCompositeKey(common_key,
-                                             *common_key_source_state);
-  destination_hash_table_derived->putCompositeKey(
-      common_key, *common_key_destination_state);
-  source_hash_table_derived->putCompositeKey(exclusive_source_key,
-                                             *exclusive_key_source_state);
-  destination_hash_table_derived->putCompositeKey(
-      exclusive_destination_key, *exclusive_key_destination_state);
+  unsigned char buffer[100];
+  buffer[0] = '\0';
+  memcpy(buffer + 1,
+         common_key_source_state.get()->getPayloadAddress(),
+         aggregation_handle_min_.get()->getPayloadSize());
+  source_hash_table_derived->putCompositeKeyFast(common_key, buffer);
+
+  memcpy(buffer + 1,
+         common_key_destination_state.get()->getPayloadAddress(),
+         aggregation_handle_min_.get()->getPayloadSize());
+  destination_hash_table_derived->putCompositeKeyFast(common_key, buffer);
+
+  memcpy(buffer + 1,
+         exclusive_key_source_state.get()->getPayloadAddress(),
+         aggregation_handle_min_.get()->getPayloadSize());
+  source_hash_table_derived->putCompositeKeyFast(exclusive_source_key, buffer);
+
+  memcpy(buffer + 1,
+         exclusive_key_destination_state.get()->getPayloadAddress(),
+         aggregation_handle_min_.get()->getPayloadSize());
+  destination_hash_table_derived->putCompositeKeyFast(exclusive_destination_key,
+                                                      buffer);
 
   EXPECT_EQ(2u, destination_hash_table_derived->numEntries());
   EXPECT_EQ(2u, source_hash_table_derived->numEntries());
 
-  aggregation_handle_min_->mergeGroupByHashTables(*source_hash_table,
-                                                  destination_hash_table.get());
+  AggregationOperationState::mergeGroupByHashTables(
+      source_hash_table.get(), destination_hash_table.get());
 
   EXPECT_EQ(3u, destination_hash_table_derived->numEntries());
 
   CheckMinValue<int>(
       common_key_source_min_val.getLiteral<int>(),
-      *aggregation_handle_min_derived,
-      *(destination_hash_table_derived->getSingleCompositeKey(common_key)));
+      aggregation_handle_min_derived->finalizeHashTableEntryFast(
+          destination_hash_table_derived->getSingleCompositeKey(common_key) +
+          1));
   CheckMinValue<int>(exclusive_key_destination_min_val.getLiteral<int>(),
-                     *aggregation_handle_min_derived,
-                     *(destination_hash_table_derived->getSingleCompositeKey(
-                         exclusive_destination_key)));
+                     aggregation_handle_min_derived->finalizeHashTableEntryFast(
+                         destination_hash_table_derived->getSingleCompositeKey(
+                             exclusive_destination_key) +
+                         1));
   CheckMinValue<int>(exclusive_key_source_min_val.getLiteral<int>(),
-                     *aggregation_handle_min_derived,
-                     *(source_hash_table_derived->getSingleCompositeKey(
-                         exclusive_source_key)));
+                     aggregation_handle_min_derived->finalizeHashTableEntryFast(
+                         source_hash_table_derived->getSingleCompositeKey(
+                             exclusive_source_key) +
+                         1));
 }
 
 }  // namespace quickstep
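
A minimal sketch of the byte-payload pattern the rewritten GroupByTableMergeTest above relies on, assuming the same project headers as the unit test, that putCompositeKeyFast(), getSingleCompositeKey(), getPayloadSize() and finalizeHashTableEntryFast() behave exactly as they are used in the diff, and that 'payload_src' stands for the pointer the test obtains from a state's getPayloadAddress():

#include <cstdint>
#include <cstring>
#include <vector>

#include "expressions/aggregation/AggregationHandleMin.hpp"
#include "storage/FastHashTable.hpp"
#include "types/TypedValue.hpp"

// Sketch only: each fast hash-table entry stores a one-byte prefix (the test
// writes '\0') followed by the handle's raw payload bytes.
quickstep::TypedValue PutAndFinalizeSketch(
    quickstep::AggregationStateFastHashTable *table,
    const quickstep::AggregationHandleMin &handle,
    const std::vector<quickstep::TypedValue> &key,
    const std::uint8_t *payload_src) {
  unsigned char buffer[100];
  buffer[0] = '\0';
  std::memcpy(buffer + 1, payload_src, handle.getPayloadSize());
  table->putCompositeKeyFast(key, buffer);

  // Reading back: skip the prefix byte before handing the payload to the
  // handle, just as the assertions in the test do.
  return handle.finalizeHashTableEntryFast(
      table->getSingleCompositeKey(key) + 1);
}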


[7/7] incubator-quickstep git commit: Modified Aggregation unit test. Ran clang-format.

Posted by ra...@apache.org.
Modified Aggregation unit test. Ran clang-format.


Project: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/commit/dad7d6f3
Tree: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/tree/dad7d6f3
Diff: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/diff/dad7d6f3

Branch: refs/heads/quickstep-28-29
Commit: dad7d6f3fc57b7ff020a18b8a80727b4b94d70e8
Parents: 5a80e33
Author: rathijit <ra...@node-2.aggregation-pr.quickstep-pg0.wisc.cloudlab.us>
Authored: Sun Aug 21 05:33:40 2016 -0500
Committer: rathijit <ra...@node-2.aggregation-pr.quickstep-pg0.wisc.cloudlab.us>
Committed: Sun Aug 21 05:33:40 2016 -0500

----------------------------------------------------------------------
 .../aggregation/AggregationConcreteHandle.hpp   |  153 +-
 expressions/aggregation/AggregationHandle.hpp   |   48 +-
 .../aggregation/AggregationHandleAvg.cpp        |   96 +-
 .../aggregation/AggregationHandleAvg.hpp        |  130 +-
 .../aggregation/AggregationHandleCount.cpp      |  150 +-
 .../aggregation/AggregationHandleCount.hpp      |  118 +-
 .../aggregation/AggregationHandleDistinct.hpp   |   28 +-
 .../aggregation/AggregationHandleMax.cpp        |   71 +-
 .../aggregation/AggregationHandleMax.hpp        |   98 +-
 .../aggregation/AggregationHandleMin.cpp        |   73 +-
 .../aggregation/AggregationHandleMin.hpp        |  101 +-
 .../aggregation/AggregationHandleSum.cpp        |   87 +-
 .../aggregation/AggregationHandleSum.hpp        |  113 +-
 expressions/aggregation/CMakeLists.txt          |   85 +-
 .../tests/AggregationHandleAvg_unittest.cpp     |  255 ++--
 .../tests/AggregationHandleCount_unittest.cpp   |  311 ++--
 .../tests/AggregationHandleMax_unittest.cpp     |  382 +++--
 .../tests/AggregationHandleMin_unittest.cpp     |  378 +++--
 .../tests/AggregationHandleSum_unittest.cpp     |  291 ++--
 storage/AggregationOperationState.cpp           |  263 ++--
 storage/AggregationOperationState.hpp           |   42 +-
 storage/FastHashTable.hpp                       | 1419 ++++++++++--------
 storage/FastSeparateChainingHashTable.hpp       | 1171 +++++++++------
 storage/HashTableBase.hpp                       |   20 +-
 24 files changed, 3268 insertions(+), 2615 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/dad7d6f3/expressions/aggregation/AggregationConcreteHandle.hpp
----------------------------------------------------------------------
diff --git a/expressions/aggregation/AggregationConcreteHandle.hpp b/expressions/aggregation/AggregationConcreteHandle.hpp
index a5b77b5..e24f64b 100644
--- a/expressions/aggregation/AggregationConcreteHandle.hpp
+++ b/expressions/aggregation/AggregationConcreteHandle.hpp
@@ -21,18 +21,18 @@
 #define QUICKSTEP_EXPRESSIONS_AGGREGATION_AGGREGATION_CONCRETE_HANDLE_HPP_
 
 #include <cstddef>
-#include <vector>
 #include <utility>
+#include <vector>
 
 #include "catalog/CatalogTypedefs.hpp"
 #include "expressions/aggregation/AggregationHandle.hpp"
-#include "storage/HashTable.hpp"
 #include "storage/FastHashTable.hpp"
+#include "storage/HashTable.hpp"
 #include "storage/HashTableBase.hpp"
+#include "threading/SpinMutex.hpp"
 #include "types/TypedValue.hpp"
 #include "types/containers/ColumnVector.hpp"
 #include "utility/Macros.hpp"
-#include "threading/SpinMutex.hpp"
 
 #include "glog/logging.h"
 
@@ -61,7 +61,8 @@ class HashTableStateUpserterFast {
    *        table. The corresponding state (for the same key) in the destination
    *        hash table will be upserted.
    **/
-  HashTableStateUpserterFast(const HandleT &handle, const uint8_t *source_state)
+  HashTableStateUpserterFast(const HandleT &handle,
+                             const std::uint8_t *source_state)
       : handle_(handle), source_state_(source_state) {}
 
   /**
@@ -70,13 +71,13 @@ class HashTableStateUpserterFast {
    * @param destination_state The aggregation state in the aggregation hash
    *        table that is being upserted.
    **/
-  void operator()(uint8_t *destination_state) {
+  void operator()(std::uint8_t *destination_state) {
     handle_.mergeStatesFast(source_state_, destination_state);
   }
 
  private:
   const HandleT &handle_;
-  const uint8_t *source_state_;
+  const std::uint8_t *source_state_;
 
   DISALLOW_COPY_AND_ASSIGN(HashTableStateUpserterFast);
 };
@@ -108,13 +109,15 @@ class AggregationConcreteHandle : public AggregationHandle {
    */
   AggregationStateHashTableBase* createDistinctifyHashTable(
       const HashTableImplType hash_table_impl,
-      const std::vector<const Type*> &key_types,
+      const std::vector<const Type *> &key_types,
       const std::size_t estimated_num_distinct_keys,
       StorageManager *storage_manager) const override;
 
   /**
-   * @brief Implementaion for AggregationHandle::insertValueAccessorIntoDistinctifyHashTable()
-   *        that inserts the GROUP BY expressions and aggregation arguments together
+   * @brief Implementation for
+   * AggregationHandle::insertValueAccessorIntoDistinctifyHashTable()
+   *        that inserts the GROUP BY expressions and aggregation arguments
+   * together
    *        as keys into the distinctify hash table.
    */
   void insertValueAccessorIntoDistinctifyHashTable(
@@ -123,23 +126,19 @@ class AggregationConcreteHandle : public AggregationHandle {
       AggregationStateHashTableBase *distinctify_hash_table) const override;
 
  protected:
-  AggregationConcreteHandle() {
-  }
+  AggregationConcreteHandle() {}
 
-  template <typename HandleT,
-            typename StateT>
+  template <typename HandleT, typename StateT>
   StateT* aggregateOnDistinctifyHashTableForSingleUnaryHelperFast(
       const AggregationStateHashTableBase &distinctify_hash_table) const;
 
-  template <typename HandleT,
-            typename HashTableT>
+  template <typename HandleT, typename HashTableT>
   void aggregateOnDistinctifyHashTableForGroupByUnaryHelperFast(
       const AggregationStateHashTableBase &distinctify_hash_table,
       AggregationStateHashTableBase *hash_table,
-      int index) const;
+      std::size_t index) const;
 
-  template <typename HandleT,
-            typename HashTableT>
+  template <typename HandleT, typename HashTableT>
   ColumnVector* finalizeHashTableHelperFast(
       const Type &result_type,
       const AggregationStateHashTableBase &hash_table,
@@ -151,11 +150,13 @@ class AggregationConcreteHandle : public AggregationHandle {
       const AggregationStateHashTableBase &hash_table,
       const std::vector<TypedValue> &group_key,
       int index) const {
-    const std::uint8_t *group_state
-        = static_cast<const HashTableT&>(hash_table).getSingleCompositeKey(group_key, index);
+    const std::uint8_t *group_state =
+        static_cast<const HashTableT &>(hash_table)
+            .getSingleCompositeKey(group_key, index);
     DCHECK(group_state != nullptr)
         << "Could not find entry for specified group_key in HashTable";
-    return static_cast<const HandleT*>(this)->finalizeHashTableEntryFast(group_state);
+    return static_cast<const HandleT *>(this)->finalizeHashTableEntryFast(
+        group_state);
   }
 
   template <typename HandleT, typename HashTableT>
@@ -177,24 +178,26 @@ class AggregationConcreteHandle : public AggregationHandle {
 template <typename HandleT, typename ColumnVectorT>
 class HashTableAggregateFinalizer {
  public:
-  HashTableAggregateFinalizer(const HandleT &handle,
-                              std::vector<std::vector<TypedValue>> *group_by_keys,
-                              ColumnVectorT *output_column_vector)
+  HashTableAggregateFinalizer(
+      const HandleT &handle,
+      std::vector<std::vector<TypedValue>> *group_by_keys,
+      ColumnVectorT *output_column_vector)
       : handle_(handle),
         group_by_keys_(group_by_keys),
-        output_column_vector_(output_column_vector) {
-  }
+        output_column_vector_(output_column_vector) {}
 
   inline void operator()(const std::vector<TypedValue> &group_by_key,
                          const AggregationState &group_state) {
     group_by_keys_->emplace_back(group_by_key);
-    output_column_vector_->appendTypedValue(handle_.finalizeHashTableEntry(group_state));
+    output_column_vector_->appendTypedValue(
+        handle_.finalizeHashTableEntry(group_state));
   }
 
   inline void operator()(const std::vector<TypedValue> &group_by_key,
                          const unsigned char *byte_ptr) {
     group_by_keys_->emplace_back(group_by_key);
-    output_column_vector_->appendTypedValue(handle_.finalizeHashTableEntryFast(byte_ptr));
+    output_column_vector_->appendTypedValue(
+        handle_.finalizeHashTableEntryFast(byte_ptr));
   }
 
  private:
@@ -208,47 +211,51 @@ class HashTableAggregateFinalizer {
 // ----------------------------------------------------------------------------
 // Implementations of templated methods follow:
 
-template <typename HandleT,
-          typename StateT>
-StateT* AggregationConcreteHandle::aggregateOnDistinctifyHashTableForSingleUnaryHelperFast(
-    const AggregationStateHashTableBase &distinctify_hash_table) const {
-  const HandleT& handle = static_cast<const HandleT&>(*this);
-  StateT *state = static_cast<StateT*>(createInitialState());
+template <typename HandleT, typename StateT>
+StateT* AggregationConcreteHandle::
+    aggregateOnDistinctifyHashTableForSingleUnaryHelperFast(
+        const AggregationStateHashTableBase &distinctify_hash_table) const {
+  const HandleT &handle = static_cast<const HandleT &>(*this);
+  StateT *state = static_cast<StateT *>(createInitialState());
 
   // A lambda function which will be called on each key from the distinctify
   // hash table.
-  const auto aggregate_functor = [&handle, &state](const TypedValue &key,
-                                                   const std::uint8_t &dumb_placeholder) {
+  const auto aggregate_functor = [&handle, &state](
+      const TypedValue &key, const std::uint8_t &dumb_placeholder) {
     // For each (unary) key in the distinctify hash table, aggregate the key
     // into "state".
     handle.iterateUnaryInl(state, key);
   };
 
   const AggregationStateFastHashTable &hash_table =
-      static_cast<const AggregationStateFastHashTable &>(distinctify_hash_table);
-  // Invoke the lambda function "aggregate_functor" on each key from the distinctify
+      static_cast<const AggregationStateFastHashTable &>(
+          distinctify_hash_table);
+  // Invoke the lambda function "aggregate_functor" on each key from the
+  // distinctify
   // hash table.
   hash_table.forEach(&aggregate_functor);
 
   return state;
 }
 
-template <typename HandleT,
-          typename HashTableT>
-void AggregationConcreteHandle::aggregateOnDistinctifyHashTableForGroupByUnaryHelperFast(
-    const AggregationStateHashTableBase &distinctify_hash_table,
-    AggregationStateHashTableBase *aggregation_hash_table,
-    int index) const {
-  const HandleT& handle = static_cast<const HandleT&>(*this);
-  HashTableT *target_hash_table = static_cast<HashTableT*>(aggregation_hash_table);
+template <typename HandleT, typename HashTableT>
+void AggregationConcreteHandle::
+    aggregateOnDistinctifyHashTableForGroupByUnaryHelperFast(
+        const AggregationStateHashTableBase &distinctify_hash_table,
+        AggregationStateHashTableBase *aggregation_hash_table,
+        std::size_t index) const {
+  const HandleT &handle = static_cast<const HandleT &>(*this);
+  HashTableT *target_hash_table =
+      static_cast<HashTableT *>(aggregation_hash_table);
 
   // A lambda function which will be called on each key-value pair from the
   // distinctify hash table.
   const auto aggregate_functor = [&handle, &target_hash_table, &index](
-      std::vector<TypedValue> &key,
-      const bool &dumb_placeholder) {
-    // For each (composite) key vector in the distinctify hash table with size N.
-    // The first N-1 entries are GROUP BY columns and the last entry is the argument
+      std::vector<TypedValue> &key, const bool &dumb_placeholder) {
+    // For each (composite) key vector in the distinctify hash table with size
+    // N.
+    // The first N-1 entries are GROUP BY columns and the last entry is the
+    // argument
     // to be aggregated on.
     const TypedValue argument(std::move(key.back()));
     key.pop_back();
@@ -263,59 +270,55 @@ void AggregationConcreteHandle::aggregateOnDistinctifyHashTableForGroupByUnaryHe
   };
 
   const HashTableT &source_hash_table =
-      static_cast<const HashTableT&>(distinctify_hash_table);
+      static_cast<const HashTableT &>(distinctify_hash_table);
   // Invoke the lambda function "aggregate_functor" on each composite key vector
   // from the distinctify hash table.
   source_hash_table.forEachCompositeKeyFast(&aggregate_functor);
 }
 
-template <typename HandleT,
-          typename HashTableT>
+template <typename HandleT, typename HashTableT>
 ColumnVector* AggregationConcreteHandle::finalizeHashTableHelperFast(
     const Type &result_type,
     const AggregationStateHashTableBase &hash_table,
     std::vector<std::vector<TypedValue>> *group_by_keys,
     int index) const {
-  const HandleT &handle = static_cast<const HandleT&>(*this);
-  const HashTableT &hash_table_concrete = static_cast<const HashTableT&>(hash_table);
+  const HandleT &handle = static_cast<const HandleT &>(*this);
+  const HashTableT &hash_table_concrete =
+      static_cast<const HashTableT &>(hash_table);
 
   if (group_by_keys->empty()) {
     if (NativeColumnVector::UsableForType(result_type)) {
-      NativeColumnVector *result = new NativeColumnVector(result_type,
-                                                          hash_table_concrete.numEntries());
+      NativeColumnVector *result =
+          new NativeColumnVector(result_type, hash_table_concrete.numEntries());
       HashTableAggregateFinalizer<HandleT, NativeColumnVector> finalizer(
-          handle,
-          group_by_keys,
-          result);
+          handle, group_by_keys, result);
       hash_table_concrete.forEachCompositeKeyFast(&finalizer, index);
       return result;
     } else {
-      IndirectColumnVector *result = new IndirectColumnVector(result_type,
-                                                              hash_table_concrete.numEntries());
+      IndirectColumnVector *result = new IndirectColumnVector(
+          result_type, hash_table_concrete.numEntries());
       HashTableAggregateFinalizer<HandleT, IndirectColumnVector> finalizer(
-          handle,
-          group_by_keys,
-          result);
+          handle, group_by_keys, result);
       hash_table_concrete.forEachCompositeKeyFast(&finalizer, index);
       return result;
     }
   } else {
     if (NativeColumnVector::UsableForType(result_type)) {
-      NativeColumnVector *result = new NativeColumnVector(result_type,
-                                                          group_by_keys->size());
+      NativeColumnVector *result =
+          new NativeColumnVector(result_type, group_by_keys->size());
       for (const std::vector<TypedValue> &group_by_key : *group_by_keys) {
-        result->appendTypedValue(finalizeGroupInHashTableFast<HandleT, HashTableT>(hash_table,
-                                                                                   group_by_key,
-                                                                                   index));
+        result->appendTypedValue(
+            finalizeGroupInHashTableFast<HandleT, HashTableT>(
+                hash_table, group_by_key, index));
       }
       return result;
     } else {
-      IndirectColumnVector *result = new IndirectColumnVector(result_type,
-                                                              hash_table_concrete.numEntries());
+      IndirectColumnVector *result = new IndirectColumnVector(
+          result_type, hash_table_concrete.numEntries());
       for (const std::vector<TypedValue> &group_by_key : *group_by_keys) {
-        result->appendTypedValue(finalizeGroupInHashTableFast<HandleT, HashTableT>(hash_table,
-                                                                                   group_by_key,
-                                                                                   index));
+        result->appendTypedValue(
+            finalizeGroupInHashTableFast<HandleT, HashTableT>(
+                hash_table, group_by_key, index));
       }
       return result;
     }
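
The HashTableStateUpserterFast functor above is small enough that its intended use can be shown directly. A minimal sketch, assuming the functor lives in namespace quickstep as in this header, using AggregationHandleSum purely as an example HandleT, and taking 'source' and 'destination' to be payload pointers for the same key obtained from two hash tables:

#include <cstdint>

#include "expressions/aggregation/AggregationConcreteHandle.hpp"
#include "expressions/aggregation/AggregationHandleSum.hpp"

// Merging one entry of a source table into a destination table boils down to
// constructing the functor over the source payload and invoking it on the
// destination payload; operator() forwards to mergeStatesFast().
void MergeOneEntrySketch(const quickstep::AggregationHandleSum &handle,
                         const std::uint8_t *source,
                         std::uint8_t *destination) {
  quickstep::HashTableStateUpserterFast<quickstep::AggregationHandleSum>
      upserter(handle, source);
  upserter(destination);  // Same effect as handle.mergeStatesFast(source, destination).
}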

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/dad7d6f3/expressions/aggregation/AggregationHandle.hpp
----------------------------------------------------------------------
diff --git a/expressions/aggregation/AggregationHandle.hpp b/expressions/aggregation/AggregationHandle.hpp
index 29b0355..89bfc36 100644
--- a/expressions/aggregation/AggregationHandle.hpp
+++ b/expressions/aggregation/AggregationHandle.hpp
@@ -40,7 +40,6 @@ class ValueAccessor;
  *  @{
  */
 
-
 /**
  * @brief Abstract base class for aggregation state.
  **/
@@ -107,8 +106,7 @@ class AggregationHandle {
    * @brief Virtual destructor.
    *
    **/
-  virtual ~AggregationHandle() {
-  }
+  virtual ~AggregationHandle() {}
 
   /**
    * @brief Create an initial "blank" state for this aggregation.
@@ -136,7 +134,7 @@ class AggregationHandle {
    **/
   virtual AggregationStateHashTableBase* createGroupByHashTable(
       const HashTableImplType hash_table_impl,
-      const std::vector<const Type*> &group_by_types,
+      const std::vector<const Type *> &group_by_types,
       const std::size_t estimated_num_groups,
       StorageManager *storage_manager) const = 0;
 
@@ -167,7 +165,8 @@ class AggregationHandle {
    *         for deleting the returned AggregationState.
    **/
   virtual AggregationState* accumulateColumnVectors(
-      const std::vector<std::unique_ptr<ColumnVector>> &column_vectors) const = 0;
+      const std::vector<std::unique_ptr<ColumnVector>> &column_vectors)
+      const = 0;
 
 #ifdef QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
   /**
@@ -269,7 +268,8 @@ class AggregationHandle {
       int index) const = 0;
 
   /**
-   * @brief Create a new HashTable for the distinctify step for DISTINCT aggregation.
+   * @brief Create a new HashTable for the distinctify step for DISTINCT
+   * aggregation.
    *
    * Distinctify is the first step for DISTINCT aggregation. This step inserts
    * the GROUP BY expression values and aggregation arguments together as keys
@@ -282,7 +282,8 @@ class AggregationHandle {
    * we simply treat it as a special GROUP BY case that the GROUP BY expression
    * vector is empty.
    *
-   * @param hash_table_impl The choice of which concrete HashTable implementation
+   * @param hash_table_impl The choice of which concrete HashTable
+   * implementation
    *        to use.
    * @param key_types The types of the GROUP BY expressions together with the
    *        types of the aggregation arguments.
@@ -292,14 +293,15 @@ class AggregationHandle {
    *        This is an estimate only, and the HashTable will be resized if it
    *        becomes over-full.
    * @param storage_manager The StorageManager to use to create the HashTable.
-   *        A StorageBlob will be allocated to serve as the HashTable's in-memory
+   *        A StorageBlob will be allocated to serve as the HashTable's
+   * in-memory
    *        storage.
    * @return A new HashTable instance with the appropriate state type for this
    *         aggregate as the ValueT.
    */
   virtual AggregationStateHashTableBase* createDistinctifyHashTable(
       const HashTableImplType hash_table_impl,
-      const std::vector<const Type*> &key_types,
+      const std::vector<const Type *> &key_types,
       const std::size_t estimated_num_distinct_keys,
       StorageManager *storage_manager) const = 0;
 
@@ -307,11 +309,13 @@ class AggregationHandle {
    * @brief Inserts the GROUP BY expressions and aggregation arguments together
    * as keys into the distinctify hash table.
    *
-   * @param accessor The ValueAccessor that will be iterated over to read tuples.
+   * @param accessor The ValueAccessor that will be iterated over to read
+   * tuples.
    * @param key_ids The attribute_ids of the GROUP BY expressions in accessor
    *        together with the attribute_ids of the arguments to this aggregate
    *        in accessor, in order.
-   * @param distinctify_hash_table The HashTable to store the GROUP BY expressions
+   * @param distinctify_hash_table The HashTable to store the GROUP BY
+   * expressions
    *        and the aggregation arguments together as hash table keys and a bool
    *        constant \c true as hash table value (So the hash table actually
    *        serves as a hash set). This should have been created by calling
@@ -340,7 +344,8 @@ class AggregationHandle {
    * @brief Perform GROUP BY aggregation on the keys from the distinctify hash
    * table and upserts states into the aggregation hash table.
    *
-   * @param distinctify_hash_table Hash table which stores the GROUP BY expression
+   * @param distinctify_hash_table Hash table which stores the GROUP BY
+   * expression
    *        values and aggregation arguments together as hash table keys.
    * @param aggregation_hash_table The HashTable to upsert AggregationStates in.
    *        This should have been created by calling createGroupByHashTable() on
@@ -349,18 +354,19 @@ class AggregationHandle {
   virtual void aggregateOnDistinctifyHashTableForGroupBy(
       const AggregationStateHashTableBase &distinctify_hash_table,
       AggregationStateHashTableBase *aggregation_hash_table,
-      int index) const = 0;
+      std::size_t index) const = 0;
 
-  virtual size_t getPayloadSize() const {return 1;}
-  virtual void iterateInlFast(const std::vector<TypedValue> &arguments, uint8_t *byte_ptr) const {}
-  virtual void mergeStatesFast(const uint8_t *src, uint8_t *dst) const {}
-  virtual void initPayload(uint8_t *byte_ptr) const {}
-  virtual void BlockUpdate() {}
-  virtual void AllowUpdate() {}
+  virtual std::size_t getPayloadSize() const { return 1; }
+  virtual void updateState(const std::vector<TypedValue> &arguments,
+                           std::uint8_t *byte_ptr) const {}
+  virtual void mergeStatesFast(const std::uint8_t *src,
+                               std::uint8_t *dst) const {}
+  virtual void initPayload(std::uint8_t *byte_ptr) const {}
+  virtual void blockUpdate() {}
+  virtual void allowUpdate() {}
 
  protected:
-  AggregationHandle() {
-  }
+  AggregationHandle() {}
 
  private:
   DISALLOW_COPY_AND_ASSIGN(AggregationHandle);
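
The new byte-oriented virtuals above (getPayloadSize, initPayload, updateState, mergeStatesFast, blockUpdate, allowUpdate) replace per-state allocation with in-place updates on hash-table payload bytes. A minimal sketch of the lifecycle a caller drives, assuming 'payload' and 'other_payload' each point at getPayloadSize() writable bytes reserved for this handle:

#include <cstdint>
#include <vector>

#include "expressions/aggregation/AggregationHandle.hpp"
#include "types/TypedValue.hpp"

void PayloadLifecycleSketch(quickstep::AggregationHandle *handle,
                            const std::vector<quickstep::TypedValue> &arguments,
                            std::uint8_t *payload,
                            const std::uint8_t *other_payload) {
  handle->initPayload(payload);             // Write the blank state in place.
  handle->updateState(arguments, payload);  // Fold one tuple's arguments in.
  handle->mergeStatesFast(other_payload, payload);  // Merge a partial result.

  // blockUpdate()/allowUpdate() toggle whether updateState() has any effect;
  // see AggregationHandleAvg, whose updateState() is a no-op while
  // block_update_ is set.
  handle->blockUpdate();
  handle->allowUpdate();
}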

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/dad7d6f3/expressions/aggregation/AggregationHandleAvg.cpp
----------------------------------------------------------------------
diff --git a/expressions/aggregation/AggregationHandleAvg.cpp b/expressions/aggregation/AggregationHandleAvg.cpp
index 45202e3..10523a7 100644
--- a/expressions/aggregation/AggregationHandleAvg.cpp
+++ b/expressions/aggregation/AggregationHandleAvg.cpp
@@ -42,7 +42,7 @@ namespace quickstep {
 class StorageManager;
 
 AggregationHandleAvg::AggregationHandleAvg(const Type &type)
-    : argument_type_(type), block_update(false) {
+    : argument_type_(type), block_update_(false) {
   // We sum Int as Long and Float as Double so that we have more headroom when
   // adding many values.
   TypeID type_precision_id;
@@ -76,26 +76,24 @@ AggregationHandleAvg::AggregationHandleAvg(const Type &type)
   // Divide operator for dividing sum by count to get final average.
   divide_operator_.reset(
       BinaryOperationFactory::GetBinaryOperation(BinaryOperationID::kDivide)
-          .makeUncheckedBinaryOperatorForTypes(sum_type, TypeFactory::GetType(kDouble)));
+          .makeUncheckedBinaryOperatorForTypes(sum_type,
+                                               TypeFactory::GetType(kDouble)));
 
   // Result is nullable, because AVG() over 0 values (or all NULL values) is
   // NULL.
-  result_type_
-      = &(BinaryOperationFactory::GetBinaryOperation(BinaryOperationID::kDivide)
-              .resultTypeForArgumentTypes(sum_type, TypeFactory::GetType(kDouble))
-                  ->getNullableVersion());
+  result_type_ =
+      &(BinaryOperationFactory::GetBinaryOperation(BinaryOperationID::kDivide)
+            .resultTypeForArgumentTypes(sum_type, TypeFactory::GetType(kDouble))
+            ->getNullableVersion());
 }
 
 AggregationStateHashTableBase* AggregationHandleAvg::createGroupByHashTable(
     const HashTableImplType hash_table_impl,
-    const std::vector<const Type*> &group_by_types,
+    const std::vector<const Type *> &group_by_types,
     const std::size_t estimated_num_groups,
     StorageManager *storage_manager) const {
   return AggregationStateHashTableFactory<AggregationStateAvg>::CreateResizable(
-      hash_table_impl,
-      group_by_types,
-      estimated_num_groups,
-      storage_manager);
+      hash_table_impl, group_by_types, estimated_num_groups, storage_manager);
 }
 
 AggregationState* AggregationHandleAvg::accumulateColumnVectors(
@@ -105,9 +103,8 @@ AggregationState* AggregationHandleAvg::accumulateColumnVectors(
 
   AggregationStateAvg *state = new AggregationStateAvg(blank_state_);
   std::size_t count = 0;
-  state->sum_ = fast_add_operator_->accumulateColumnVector(state->sum_,
-                                                           *column_vectors.front(),
-                                                           &count);
+  state->sum_ = fast_add_operator_->accumulateColumnVector(
+      state->sum_, *column_vectors.front(), &count);
   state->count_ = count;
   return state;
 }
@@ -121,10 +118,8 @@ AggregationState* AggregationHandleAvg::accumulateValueAccessor(
 
   AggregationStateAvg *state = new AggregationStateAvg(blank_state_);
   std::size_t count = 0;
-  state->sum_ = fast_add_operator_->accumulateValueAccessor(state->sum_,
-                                                            accessor,
-                                                            accessor_ids.front(),
-                                                            &count);
+  state->sum_ = fast_add_operator_->accumulateValueAccessor(
+      state->sum_, accessor, accessor_ids.front(), &count);
   state->count_ = count;
   return state;
 }
@@ -139,40 +134,44 @@ void AggregationHandleAvg::aggregateValueAccessorIntoHashTable(
       << "Got wrong number of arguments for AVG: " << argument_ids.size();
 }
 
-void AggregationHandleAvg::mergeStates(
-    const AggregationState &source,
-    AggregationState *destination) const {
-  const AggregationStateAvg &avg_source = static_cast<const AggregationStateAvg&>(source);
-  AggregationStateAvg *avg_destination = static_cast<AggregationStateAvg*>(destination);
+void AggregationHandleAvg::mergeStates(const AggregationState &source,
+                                       AggregationState *destination) const {
+  const AggregationStateAvg &avg_source =
+      static_cast<const AggregationStateAvg &>(source);
+  AggregationStateAvg *avg_destination =
+      static_cast<AggregationStateAvg *>(destination);
 
   SpinMutexLock lock(avg_destination->mutex_);
   avg_destination->count_ += avg_source.count_;
-  avg_destination->sum_ = merge_add_operator_->applyToTypedValues(avg_destination->sum_,
-                                                                  avg_source.sum_);
+  avg_destination->sum_ = merge_add_operator_->applyToTypedValues(
+      avg_destination->sum_, avg_source.sum_);
 }
 
-void AggregationHandleAvg::mergeStatesFast(
-    const uint8_t *source,
-    uint8_t *destination) const {
-    const TypedValue *src_sum_ptr = reinterpret_cast<const TypedValue *>(source + blank_state_.sum_offset);
-    const std::int64_t *src_count_ptr = reinterpret_cast<const std::int64_t *>(source + blank_state_.count_offset);
-    TypedValue *dst_sum_ptr = reinterpret_cast<TypedValue *>(destination+blank_state_.sum_offset);
-    std::int64_t *dst_count_ptr = reinterpret_cast<std::int64_t *>(destination + blank_state_.count_offset);
-    (*dst_count_ptr) += (*src_count_ptr);
-    *dst_sum_ptr = merge_add_operator_->applyToTypedValues(*dst_sum_ptr, *src_sum_ptr);
+void AggregationHandleAvg::mergeStatesFast(const std::uint8_t *source,
+                                           std::uint8_t *destination) const {
+  const TypedValue *src_sum_ptr =
+      reinterpret_cast<const TypedValue *>(source + blank_state_.sum_offset_);
+  const std::int64_t *src_count_ptr = reinterpret_cast<const std::int64_t *>(
+      source + blank_state_.count_offset_);
+  TypedValue *dst_sum_ptr =
+      reinterpret_cast<TypedValue *>(destination + blank_state_.sum_offset_);
+  std::int64_t *dst_count_ptr = reinterpret_cast<std::int64_t *>(
+      destination + blank_state_.count_offset_);
+  (*dst_count_ptr) += (*src_count_ptr);
+  *dst_sum_ptr =
+      merge_add_operator_->applyToTypedValues(*dst_sum_ptr, *src_sum_ptr);
 }
 
-
-
 TypedValue AggregationHandleAvg::finalize(const AggregationState &state) const {
-  const AggregationStateAvg &agg_state = static_cast<const AggregationStateAvg&>(state);
+  const AggregationStateAvg &agg_state =
+      static_cast<const AggregationStateAvg &>(state);
   if (agg_state.count_ == 0) {
     // AVG() over no values is NULL.
     return result_type_->makeNullValue();
   } else {
     // Divide sum by count to get final average.
-    return divide_operator_->applyToTypedValues(agg_state.sum_,
-                                                TypedValue(static_cast<double>(agg_state.count_)));
+    return divide_operator_->applyToTypedValues(
+        agg_state.sum_, TypedValue(static_cast<double>(agg_state.count_)));
   }
 }
 
@@ -181,31 +180,26 @@ ColumnVector* AggregationHandleAvg::finalizeHashTable(
     std::vector<std::vector<TypedValue>> *group_by_keys,
     int index) const {
   return finalizeHashTableHelperFast<AggregationHandleAvg,
-                                 AggregationStateFastHashTable>(
-      *result_type_,
-      hash_table,
-      group_by_keys,
-      index);
+                                     AggregationStateFastHashTable>(
+      *result_type_, hash_table, group_by_keys, index);
 }
 
-AggregationState* AggregationHandleAvg::aggregateOnDistinctifyHashTableForSingle(
+AggregationState*
+AggregationHandleAvg::aggregateOnDistinctifyHashTableForSingle(
     const AggregationStateHashTableBase &distinctify_hash_table) const {
   return aggregateOnDistinctifyHashTableForSingleUnaryHelperFast<
       AggregationHandleAvg,
-      AggregationStateAvg>(
-          distinctify_hash_table);
+      AggregationStateAvg>(distinctify_hash_table);
 }
 
 void AggregationHandleAvg::aggregateOnDistinctifyHashTableForGroupBy(
     const AggregationStateHashTableBase &distinctify_hash_table,
     AggregationStateHashTableBase *aggregation_hash_table,
-    int index) const {
+    std::size_t index) const {
   aggregateOnDistinctifyHashTableForGroupByUnaryHelperFast<
       AggregationHandleAvg,
       AggregationStateFastHashTable>(
-          distinctify_hash_table,
-          aggregation_hash_table,
-          index);
+      distinctify_hash_table, aggregation_hash_table, index);
 }
 
 }  // namespace quickstep
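
The offsets used by mergeStatesFast() above (sum_offset_, count_offset_) are computed once in the blank state by pointer subtraction and then applied to raw payload bytes. A standalone sketch of that idea in plain C++, with a double standing in for the TypedValue sum_:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>

struct AvgPayloadLayout {
  double sum;          // Stands in for the TypedValue sum_.
  std::int64_t count;  // Matches count_.
};

int main() {
  AvgPayloadLayout blank{0.0, 0};
  const std::ptrdiff_t sum_offset = 0;
  const std::ptrdiff_t count_offset =
      reinterpret_cast<std::uint8_t *>(&blank.count) -
      reinterpret_cast<std::uint8_t *>(&blank.sum);

  // Two payloads living as raw bytes, e.g. inside hash-table entries.
  alignas(AvgPayloadLayout) std::uint8_t src[sizeof(AvgPayloadLayout)];
  alignas(AvgPayloadLayout) std::uint8_t dst[sizeof(AvgPayloadLayout)];
  const AvgPayloadLayout a{10.0, 2};
  const AvgPayloadLayout b{5.0, 1};
  std::memcpy(src, &a, sizeof a);
  std::memcpy(dst, &b, sizeof b);

  // Merge src into dst the way mergeStatesFast() does: reinterpret the bytes
  // at the precomputed offsets and combine in place.
  *reinterpret_cast<double *>(dst + sum_offset) +=
      *reinterpret_cast<const double *>(src + sum_offset);
  *reinterpret_cast<std::int64_t *>(dst + count_offset) +=
      *reinterpret_cast<const std::int64_t *>(src + count_offset);

  // Finalize: divide sum by count, as finalizeHashTableEntryFast() does.
  const double sum = *reinterpret_cast<const double *>(dst + sum_offset);
  const std::int64_t count =
      *reinterpret_cast<const std::int64_t *>(dst + count_offset);
  std::cout << "avg = " << sum / static_cast<double>(count) << "\n";  // 5
  return 0;
}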

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/dad7d6f3/expressions/aggregation/AggregationHandleAvg.hpp
----------------------------------------------------------------------
diff --git a/expressions/aggregation/AggregationHandleAvg.hpp b/expressions/aggregation/AggregationHandleAvg.hpp
index ecb0c6c..3bbcfb1 100644
--- a/expressions/aggregation/AggregationHandleAvg.hpp
+++ b/expressions/aggregation/AggregationHandleAvg.hpp
@@ -28,8 +28,8 @@
 #include "catalog/CatalogTypedefs.hpp"
 #include "expressions/aggregation/AggregationConcreteHandle.hpp"
 #include "expressions/aggregation/AggregationHandle.hpp"
-#include "storage/HashTableBase.hpp"
 #include "storage/FastHashTable.hpp"
+#include "storage/HashTableBase.hpp"
 #include "threading/SpinMutex.hpp"
 #include "types/Type.hpp"
 #include "types/TypedValue.hpp"
@@ -59,30 +59,36 @@ class AggregationStateAvg : public AggregationState {
   AggregationStateAvg(const AggregationStateAvg &orig)
       : sum_(orig.sum_),
         count_(orig.count_),
-        sum_offset(orig.sum_offset),
-        count_offset(orig.count_offset),
-        mutex_offset(orig.mutex_offset) {
-  }
+        sum_offset_(orig.sum_offset_),
+        count_offset_(orig.count_offset_),
+        mutex_offset_(orig.mutex_offset_) {}
 
   /**
    * @brief Destructor.
    */
   ~AggregationStateAvg() override {}
 
-  size_t getPayloadSize() const {
-     size_t p1 = reinterpret_cast<size_t>(&sum_);
-     size_t p2 = reinterpret_cast<size_t>(&mutex_);
-     return (p2-p1);
+  std::size_t getPayloadSize() const {
+    std::size_t p1 = reinterpret_cast<std::size_t>(&sum_);
+    std::size_t p2 = reinterpret_cast<std::size_t>(&mutex_);
+    return (p2 - p1);
+  }
+
+  const std::uint8_t *getPayloadAddress() const {
+    return reinterpret_cast<const uint8_t *>(&sum_);
   }
 
  private:
   friend class AggregationHandleAvg;
 
   AggregationStateAvg()
-      : sum_(0), count_(0), sum_offset(0),
-        count_offset(reinterpret_cast<uint8_t *>(&count_)-reinterpret_cast<uint8_t *>(&sum_)),
-        mutex_offset(reinterpret_cast<uint8_t *>(&mutex_)-reinterpret_cast<uint8_t *>(&sum_)) {
-  }
+      : sum_(0),
+        count_(0),
+        sum_offset_(0),
+        count_offset_(reinterpret_cast<std::uint8_t *>(&count_) -
+                      reinterpret_cast<std::uint8_t *>(&sum_)),
+        mutex_offset_(reinterpret_cast<std::uint8_t *>(&mutex_) -
+                      reinterpret_cast<std::uint8_t *>(&sum_)) {}
 
   // TODO(shoban): We might want to specialize sum_ and count_ to use atomics
   // for int types similar to in AggregationStateCount.
@@ -90,7 +96,7 @@ class AggregationStateAvg : public AggregationState {
   std::int64_t count_;
   SpinMutex mutex_;
 
-  int sum_offset, count_offset, mutex_offset;
+  int sum_offset_, count_offset_, mutex_offset_;
 };
 
 /**
@@ -98,8 +104,7 @@ class AggregationStateAvg : public AggregationState {
  **/
 class AggregationHandleAvg : public AggregationConcreteHandle {
  public:
-  ~AggregationHandleAvg() override {
-  }
+  ~AggregationHandleAvg() override {}
 
   AggregationState* createInitialState() const override {
     return new AggregationStateAvg(blank_state_);
@@ -107,14 +112,15 @@ class AggregationHandleAvg : public AggregationConcreteHandle {
 
   AggregationStateHashTableBase* createGroupByHashTable(
       const HashTableImplType hash_table_impl,
-      const std::vector<const Type*> &group_by_types,
+      const std::vector<const Type *> &group_by_types,
       const std::size_t estimated_num_groups,
       StorageManager *storage_manager) const override;
 
   /**
    * @brief Iterate method with average aggregation state.
    **/
-  inline void iterateUnaryInl(AggregationStateAvg *state, const TypedValue &value) const {
+  inline void iterateUnaryInl(AggregationStateAvg *state,
+                              const TypedValue &value) const {
     DCHECK(value.isPlausibleInstanceOf(argument_type_.getSignature()));
     if (value.isNull()) return;
 
@@ -123,37 +129,41 @@ class AggregationHandleAvg : public AggregationConcreteHandle {
     ++state->count_;
   }
 
-  inline void iterateUnaryInlFast(const TypedValue &value, uint8_t *byte_ptr) const {
+  inline void iterateUnaryInlFast(const TypedValue &value,
+                                  std::uint8_t *byte_ptr) const {
     DCHECK(value.isPlausibleInstanceOf(argument_type_.getSignature()));
     if (value.isNull()) return;
-    TypedValue *sum_ptr = reinterpret_cast<TypedValue *>(byte_ptr + blank_state_.sum_offset);
-    std::int64_t *count_ptr = reinterpret_cast<std::int64_t *>(byte_ptr + blank_state_.count_offset);
+    TypedValue *sum_ptr =
+        reinterpret_cast<TypedValue *>(byte_ptr + blank_state_.sum_offset_);
+    std::int64_t *count_ptr =
+        reinterpret_cast<std::int64_t *>(byte_ptr + blank_state_.count_offset_);
     *sum_ptr = fast_add_operator_->applyToTypedValues(*sum_ptr, value);
     ++(*count_ptr);
   }
 
-  inline void iterateInlFast(const std::vector<TypedValue> &arguments, uint8_t *byte_ptr) const override {
-     if (block_update) return;
-     iterateUnaryInlFast(arguments.front(), byte_ptr);
+  inline void updateState(const std::vector<TypedValue> &arguments,
+                          std::uint8_t *byte_ptr) const override {
+    if (!block_update_) {
+      iterateUnaryInlFast(arguments.front(), byte_ptr);
+    }
   }
 
-  void BlockUpdate() override {
-      block_update = true;
-  }
+  void blockUpdate() override { block_update_ = true; }
 
-  void AllowUpdate() override {
-      block_update = false;
-  }
+  void allowUpdate() override { block_update_ = false; }
 
-  void initPayload(uint8_t *byte_ptr) const override {
-    TypedValue *sum_ptr = reinterpret_cast<TypedValue *>(byte_ptr + blank_state_.sum_offset);
-    std::int64_t *count_ptr = reinterpret_cast<std::int64_t *>(byte_ptr + blank_state_.count_offset);
+  void initPayload(std::uint8_t *byte_ptr) const override {
+    TypedValue *sum_ptr =
+        reinterpret_cast<TypedValue *>(byte_ptr + blank_state_.sum_offset_);
+    std::int64_t *count_ptr =
+        reinterpret_cast<std::int64_t *>(byte_ptr + blank_state_.count_offset_);
     *sum_ptr = blank_state_.sum_;
     *count_ptr = blank_state_.count_;
   }
 
   AggregationState* accumulateColumnVectors(
-      const std::vector<std::unique_ptr<ColumnVector>> &column_vectors) const override;
+      const std::vector<std::unique_ptr<ColumnVector>> &column_vectors)
+      const override;
 
 #ifdef QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
   AggregationState* accumulateValueAccessor(
@@ -170,33 +180,32 @@ class AggregationHandleAvg : public AggregationConcreteHandle {
   void mergeStates(const AggregationState &source,
                    AggregationState *destination) const override;
 
-  void mergeStatesFast(const uint8_t *source,
-                   uint8_t *destination) const override;
+  void mergeStatesFast(const std::uint8_t *source,
+                       std::uint8_t *destination) const override;
 
   TypedValue finalize(const AggregationState &state) const override;
 
-  inline TypedValue finalizeHashTableEntry(const AggregationState &state) const {
-    const AggregationStateAvg &agg_state = static_cast<const AggregationStateAvg&>(state);
+  inline TypedValue finalizeHashTableEntry(
+      const AggregationState &state) const {
+    const AggregationStateAvg &agg_state =
+        static_cast<const AggregationStateAvg &>(state);
     // TODO(chasseur): Could improve performance further if we made a special
     // version of finalizeHashTable() that collects all the sums into one
     // ColumnVector and all the counts into another and then applies
     // '*divide_operator_' to them in bulk.
-    return divide_operator_->applyToTypedValues(agg_state.sum_,
-                                                TypedValue(static_cast<double>(agg_state.count_)));
+    return divide_operator_->applyToTypedValues(
+        agg_state.sum_, TypedValue(static_cast<double>(agg_state.count_)));
   }
 
-  inline TypedValue finalizeHashTableEntryFast(const uint8_t *byte_ptr) const {
-//    const AggregationStateAvg &agg_state = static_cast<const AggregationStateAvg&>(state);
-    // TODO(chasseur): Could improve performance further if we made a special
-    // version of finalizeHashTable() that collects all the sums into one
-    // ColumnVector and all the counts into another and then applies
-    // '*divide_operator_' to them in bulk.
-
-    uint8_t *value_ptr = const_cast<uint8_t*>(byte_ptr);
-    TypedValue *sum_ptr = reinterpret_cast<TypedValue *>(value_ptr + blank_state_.sum_offset);
-    std::int64_t *count_ptr = reinterpret_cast<std::int64_t *>(value_ptr + blank_state_.count_offset);
-    return divide_operator_->applyToTypedValues(*sum_ptr,
-                                                TypedValue(static_cast<double>(*count_ptr)));
+  inline TypedValue finalizeHashTableEntryFast(
+      const std::uint8_t *byte_ptr) const {
+    std::uint8_t *value_ptr = const_cast<std::uint8_t *>(byte_ptr);
+    TypedValue *sum_ptr =
+        reinterpret_cast<TypedValue *>(value_ptr + blank_state_.sum_offset_);
+    std::int64_t *count_ptr = reinterpret_cast<std::int64_t *>(
+        value_ptr + blank_state_.count_offset_);
+    return divide_operator_->applyToTypedValues(
+        *sum_ptr, TypedValue(static_cast<double>(*count_ptr)));
   }
 
   ColumnVector* finalizeHashTable(
@@ -205,23 +214,26 @@ class AggregationHandleAvg : public AggregationConcreteHandle {
       int index) const override;
 
   /**
-   * @brief Implementation of AggregationHandle::aggregateOnDistinctifyHashTableForSingle()
+   * @brief Implementation of
+   * AggregationHandle::aggregateOnDistinctifyHashTableForSingle()
    *        for AVG aggregation.
    */
   AggregationState* aggregateOnDistinctifyHashTableForSingle(
-      const AggregationStateHashTableBase &distinctify_hash_table) const override;
+      const AggregationStateHashTableBase &distinctify_hash_table)
+      const override;
 
   /**
-   * @brief Implementation of AggregationHandle::aggregateOnDistinctifyHashTableForGroupBy()
+   * @brief Implementation of
+   * AggregationHandle::aggregateOnDistinctifyHashTableForGroupBy()
    *        for AVG aggregation.
    */
   void aggregateOnDistinctifyHashTableForGroupBy(
       const AggregationStateHashTableBase &distinctify_hash_table,
       AggregationStateHashTableBase *aggregation_hash_table,
-      int index) const override;
+      std::size_t index) const override;
 
-  size_t getPayloadSize() const override {
-      return blank_state_.getPayloadSize();
+  std::size_t getPayloadSize() const override {
+    return blank_state_.getPayloadSize();
   }
 
  private:
@@ -241,7 +253,7 @@ class AggregationHandleAvg : public AggregationConcreteHandle {
   std::unique_ptr<UncheckedBinaryOperator> merge_add_operator_;
   std::unique_ptr<UncheckedBinaryOperator> divide_operator_;
 
-  bool block_update;
+  bool block_update_;
 
   DISALLOW_COPY_AND_ASSIGN(AggregationHandleAvg);
 };
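
A minimal sketch of how the AVG fast path above is driven end to end, assuming 'payload' points at getPayloadSize() bytes reserved for this handle inside a hash-table entry and each element of 'tuples' holds the argument vector for one input tuple:

#include <cstdint>
#include <vector>

#include "expressions/aggregation/AggregationHandleAvg.hpp"
#include "types/TypedValue.hpp"

quickstep::TypedValue AvgFastPathSketch(
    const quickstep::AggregationHandleAvg &handle,
    const std::vector<std::vector<quickstep::TypedValue>> &tuples,
    std::uint8_t *payload) {
  handle.initPayload(payload);  // sum_ := blank sum, count_ := 0.
  for (const std::vector<quickstep::TypedValue> &args : tuples) {
    // No-op while block_update_ is set via blockUpdate(); otherwise folds
    // args.front() into the sum and count stored at the payload offsets.
    handle.updateState(args, payload);
  }
  // Reads sum and count back from the payload and divides.
  return handle.finalizeHashTableEntryFast(payload);
}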

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/dad7d6f3/expressions/aggregation/AggregationHandleCount.cpp
----------------------------------------------------------------------
diff --git a/expressions/aggregation/AggregationHandleCount.cpp b/expressions/aggregation/AggregationHandleCount.cpp
index 872d770..5ced71c 100644
--- a/expressions/aggregation/AggregationHandleCount.cpp
+++ b/expressions/aggregation/AggregationHandleCount.cpp
@@ -49,48 +49,50 @@ class ValueAccessor;
 
 template <bool count_star, bool nullable_type>
 AggregationStateHashTableBase*
-    AggregationHandleCount<count_star, nullable_type>::createGroupByHashTable(
-        const HashTableImplType hash_table_impl,
-        const std::vector<const Type*> &group_by_types,
-        const std::size_t estimated_num_groups,
-        StorageManager *storage_manager) const {
-  return AggregationStateHashTableFactory<AggregationStateCount>::CreateResizable(
-      hash_table_impl,
-      group_by_types,
-      estimated_num_groups,
-      storage_manager);
+AggregationHandleCount<count_star, nullable_type>::createGroupByHashTable(
+    const HashTableImplType hash_table_impl,
+    const std::vector<const Type *> &group_by_types,
+    const std::size_t estimated_num_groups,
+    StorageManager *storage_manager) const {
+  return AggregationStateHashTableFactory<
+      AggregationStateCount>::CreateResizable(hash_table_impl,
+                                              group_by_types,
+                                              estimated_num_groups,
+                                              storage_manager);
 }
 
 template <bool count_star, bool nullable_type>
 AggregationState*
-    AggregationHandleCount<count_star, nullable_type>::accumulateColumnVectors(
-        const std::vector<std::unique_ptr<ColumnVector>> &column_vectors) const {
+AggregationHandleCount<count_star, nullable_type>::accumulateColumnVectors(
+    const std::vector<std::unique_ptr<ColumnVector>> &column_vectors) const {
   DCHECK(!count_star)
       << "Called non-nullary accumulation method on an AggregationHandleCount "
       << "set up for nullary COUNT(*)";
 
   DCHECK_EQ(1u, column_vectors.size())
-      << "Got wrong number of ColumnVectors for COUNT: " << column_vectors.size();
+      << "Got wrong number of ColumnVectors for COUNT: "
+      << column_vectors.size();
 
   std::size_t count = 0;
   InvokeOnColumnVector(
       *column_vectors.front(),
       [&](const auto &column_vector) -> void {  // NOLINT(build/c++11)
-    if (nullable_type) {
-      // TODO(shoban): Iterating over the ColumnVector is a rather slow way to
-      // do this. We should look at extending the ColumnVector interface to do
-      // a quick count of the non-null values (i.e. the length minus the
-      // population count of the null bitmap). We should do something similar
-      // for ValueAccessor too.
-      for (std::size_t pos = 0;
-           pos < column_vector.size();
-           ++pos) {
-        count += !column_vector.getTypedValue(pos).isNull();
-      }
-    } else {
-      count = column_vector.size();
-    }
-  });
+        if (nullable_type) {
+          // TODO(shoban): Iterating over the ColumnVector is a rather slow way
+          // to
+          // do this. We should look at extending the ColumnVector interface to
+          // do
+          // a quick count of the non-null values (i.e. the length minus the
+          // population count of the null bitmap). We should do something
+          // similar
+          // for ValueAccessor too.
+          for (std::size_t pos = 0; pos < column_vector.size(); ++pos) {
+            count += !column_vector.getTypedValue(pos).isNull();
+          }
+        } else {
+          count = column_vector.size();
+        }
+      });
 
   return new AggregationStateCount(count);
 }
@@ -98,9 +100,9 @@ AggregationState*
 #ifdef QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
 template <bool count_star, bool nullable_type>
 AggregationState*
-    AggregationHandleCount<count_star, nullable_type>::accumulateValueAccessor(
-        ValueAccessor *accessor,
-        const std::vector<attribute_id> &accessor_ids) const {
+AggregationHandleCount<count_star, nullable_type>::accumulateValueAccessor(
+    ValueAccessor *accessor,
+    const std::vector<attribute_id> &accessor_ids) const {
   DCHECK(!count_star)
       << "Called non-nullary accumulation method on an AggregationHandleCount "
       << "set up for nullary COUNT(*)";
@@ -113,28 +115,30 @@ AggregationState*
   InvokeOnValueAccessorMaybeTupleIdSequenceAdapter(
       accessor,
       [&accessor_id, &count](auto *accessor) -> void {  // NOLINT(build/c++11)
-    if (nullable_type) {
-      while (accessor->next()) {
-        count += !accessor->getTypedValue(accessor_id).isNull();
-      }
-    } else {
-      count = accessor->getNumTuples();
-    }
-  });
+        if (nullable_type) {
+          while (accessor->next()) {
+            count += !accessor->getTypedValue(accessor_id).isNull();
+          }
+        } else {
+          count = accessor->getNumTuples();
+        }
+      });
 
   return new AggregationStateCount(count);
 }
 #endif
 
 template <bool count_star, bool nullable_type>
-    void AggregationHandleCount<count_star, nullable_type>::aggregateValueAccessorIntoHashTable(
+void AggregationHandleCount<count_star, nullable_type>::
+    aggregateValueAccessorIntoHashTable(
         ValueAccessor *accessor,
         const std::vector<attribute_id> &argument_ids,
         const std::vector<attribute_id> &group_by_key_ids,
         AggregationStateHashTableBase *hash_table) const {
   if (count_star) {
     DCHECK_EQ(0u, argument_ids.size())
-        << "Got wrong number of arguments for COUNT(*): " << argument_ids.size();
+        << "Got wrong number of arguments for COUNT(*): "
+        << argument_ids.size();
   } else {
     DCHECK_EQ(1u, argument_ids.size())
         << "Got wrong number of arguments for COUNT: " << argument_ids.size();
@@ -142,62 +146,60 @@ template <bool count_star, bool nullable_type>
 }
 
 template <bool count_star, bool nullable_type>
-    void AggregationHandleCount<count_star, nullable_type>::mergeStates(
-        const AggregationState &source,
-        AggregationState *destination) const {
-  const AggregationStateCount &count_source = static_cast<const AggregationStateCount&>(source);
-  AggregationStateCount *count_destination = static_cast<AggregationStateCount*>(destination);
-
-  count_destination->count_.fetch_add(count_source.count_.load(std::memory_order_relaxed),
-                                      std::memory_order_relaxed);
+void AggregationHandleCount<count_star, nullable_type>::mergeStates(
+    const AggregationState &source, AggregationState *destination) const {
+  const AggregationStateCount &count_source =
+      static_cast<const AggregationStateCount &>(source);
+  AggregationStateCount *count_destination =
+      static_cast<AggregationStateCount *>(destination);
+
+  count_destination->count_.fetch_add(
+      count_source.count_.load(std::memory_order_relaxed),
+      std::memory_order_relaxed);
 }
 
 template <bool count_star, bool nullable_type>
 void AggregationHandleCount<count_star, nullable_type>::mergeStatesFast(
-    const uint8_t *source,
-    uint8_t *destination) const {
-    const std::int64_t *src_count_ptr = reinterpret_cast<const std::int64_t *>(source);
-    std::int64_t *dst_count_ptr = reinterpret_cast<std::int64_t *>(destination);
-    (*dst_count_ptr) += (*src_count_ptr);
+    const std::uint8_t *source, std::uint8_t *destination) const {
+  const std::int64_t *src_count_ptr =
+      reinterpret_cast<const std::int64_t *>(source);
+  std::int64_t *dst_count_ptr = reinterpret_cast<std::int64_t *>(destination);
+  (*dst_count_ptr) += (*src_count_ptr);
 }
 
 template <bool count_star, bool nullable_type>
-    ColumnVector* AggregationHandleCount<count_star, nullable_type>::finalizeHashTable(
-        const AggregationStateHashTableBase &hash_table,
-        std::vector<std::vector<TypedValue>> *group_by_keys,
-        int index) const {
-  return finalizeHashTableHelperFast<AggregationHandleCount<count_star, nullable_type>,
-                                 AggregationStateFastHashTable>(
-      TypeFactory::GetType(kLong),
-      hash_table,
-      group_by_keys,
-      index);
+ColumnVector*
+AggregationHandleCount<count_star, nullable_type>::finalizeHashTable(
+    const AggregationStateHashTableBase &hash_table,
+    std::vector<std::vector<TypedValue>> *group_by_keys,
+    int index) const {
+  return finalizeHashTableHelperFast<
+      AggregationHandleCount<count_star, nullable_type>,
+      AggregationStateFastHashTable>(
+      TypeFactory::GetType(kLong), hash_table, group_by_keys, index);
 }
 
 template <bool count_star, bool nullable_type>
-AggregationState* AggregationHandleCount<count_star, nullable_type>
-    ::aggregateOnDistinctifyHashTableForSingle(
+AggregationState* AggregationHandleCount<count_star, nullable_type>::
+    aggregateOnDistinctifyHashTableForSingle(
         const AggregationStateHashTableBase &distinctify_hash_table) const {
   DCHECK_EQ(count_star, false);
   return aggregateOnDistinctifyHashTableForSingleUnaryHelperFast<
       AggregationHandleCount<count_star, nullable_type>,
-      AggregationStateCount>(
-          distinctify_hash_table);
+      AggregationStateCount>(distinctify_hash_table);
 }
 
 template <bool count_star, bool nullable_type>
-void AggregationHandleCount<count_star, nullable_type>
-    ::aggregateOnDistinctifyHashTableForGroupBy(
+void AggregationHandleCount<count_star, nullable_type>::
+    aggregateOnDistinctifyHashTableForGroupBy(
         const AggregationStateHashTableBase &distinctify_hash_table,
         AggregationStateHashTableBase *aggregation_hash_table,
-        int index) const {
+        std::size_t index) const {
   DCHECK_EQ(count_star, false);
   aggregateOnDistinctifyHashTableForGroupByUnaryHelperFast<
       AggregationHandleCount<count_star, nullable_type>,
       AggregationStateFastHashTable>(
-          distinctify_hash_table,
-          aggregation_hash_table,
-          index);
+      distinctify_hash_table, aggregation_hash_table, index);
 }
 
 // Explicitly instantiate and compile in the different versions of
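
The TODO in accumulateColumnVectors() above hints at a cheaper way to count non-null values than calling getTypedValue(pos).isNull() once per row: subtract the population count of the null bitmap from the column length. A minimal sketch of that idea, assuming a hypothetical bitmap view with packed 64-bit words (these names are illustrative only and are not part of the ColumnVector interface):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Hypothetical view of a column's null bitmap: one bit per position,
    // a set bit means the value at that position is NULL. Bits past
    // 'length' are assumed to be zero.
    struct NullBitmapView {
      std::vector<std::uint64_t> words;  // packed null bitmap
      std::size_t length;                // number of values in the column
    };

    // Non-null count = length - popcount(null bitmap), computed one word
    // at a time instead of one TypedValue at a time.
    inline std::size_t CountNonNull(const NullBitmapView &bitmap) {
      std::size_t null_count = 0;
      for (const std::uint64_t word : bitmap.words) {
        null_count += __builtin_popcountll(word);  // GCC/Clang builtin
      }
      return bitmap.length - null_count;
    }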

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/dad7d6f3/expressions/aggregation/AggregationHandleCount.hpp
----------------------------------------------------------------------
diff --git a/expressions/aggregation/AggregationHandleCount.hpp b/expressions/aggregation/AggregationHandleCount.hpp
index 6594ed2..22f33ec 100644
--- a/expressions/aggregation/AggregationHandleCount.hpp
+++ b/expressions/aggregation/AggregationHandleCount.hpp
@@ -29,8 +29,8 @@
 #include "catalog/CatalogTypedefs.hpp"
 #include "expressions/aggregation/AggregationConcreteHandle.hpp"
 #include "expressions/aggregation/AggregationHandle.hpp"
-#include "storage/HashTableBase.hpp"
 #include "storage/FastHashTable.hpp"
+#include "storage/HashTableBase.hpp"
 #include "types/TypedValue.hpp"
 #include "utility/Macros.hpp"
 
@@ -41,7 +41,8 @@ class StorageManager;
 class Type;
 class ValueAccessor;
 
-template <bool, bool> class AggregationHandleCount;
+template <bool, bool>
+class AggregationHandleCount;
 
 /** \addtogroup Expressions
  *  @{
@@ -63,8 +64,10 @@ class AggregationStateCount : public AggregationState {
    */
   ~AggregationStateCount() override {}
 
-  size_t getPayloadSize() const {
-     return sizeof(count_);
+  std::size_t getPayloadSize() const { return sizeof(count_); }
+
+  const std::uint8_t* getPayloadAddress() const {
+    return reinterpret_cast<const std::uint8_t *>(&count_);
   }
 
  private:
@@ -73,13 +76,10 @@ class AggregationStateCount : public AggregationState {
   friend class AggregationHandleCount<true, false>;
   friend class AggregationHandleCount<true, true>;
 
-  AggregationStateCount()
-      : count_(0) {
-  }
+  AggregationStateCount() : count_(0) {}
 
   explicit AggregationStateCount(const std::int64_t initial_count)
-      : count_(initial_count) {
-  }
+      : count_(initial_count) {}
 
   std::atomic<std::int64_t> count_;
 };
@@ -96,16 +96,15 @@ class AggregationStateCount : public AggregationState {
 template <bool count_star, bool nullable_type>
 class AggregationHandleCount : public AggregationConcreteHandle {
  public:
-  ~AggregationHandleCount() override {
-  }
+  ~AggregationHandleCount() override {}
 
   AggregationState* createInitialState() const override {
     return new AggregationStateCount();
   }
 
-  AggregationStateHashTableBase* createGroupByHashTable(
+  AggregationStateHashTableBase *createGroupByHashTable(
       const HashTableImplType hash_table_impl,
-      const std::vector<const Type*> &group_by_types,
+      const std::vector<const Type *> &group_by_types,
       const std::size_t estimated_num_groups,
       StorageManager *storage_manager) const override;
 
@@ -113,54 +112,56 @@ class AggregationHandleCount : public AggregationConcreteHandle {
     state->count_.fetch_add(1, std::memory_order_relaxed);
   }
 
-  inline void iterateNullaryInlFast(uint8_t *byte_ptr) const {
-      std::int64_t *count_ptr = reinterpret_cast<std::int64_t *>(byte_ptr);
-      (*count_ptr)++;
+  inline void iterateNullaryInlFast(std::uint8_t *byte_ptr) const {
+    std::int64_t *count_ptr = reinterpret_cast<std::int64_t *>(byte_ptr);
+    (*count_ptr)++;
   }
 
   /**
    * @brief Iterate with count aggregation state.
    */
-  inline void iterateUnaryInl(AggregationStateCount *state, const TypedValue &value) const {
+  inline void iterateUnaryInl(AggregationStateCount *state,
+                              const TypedValue &value) const {
     if ((!nullable_type) || (!value.isNull())) {
       state->count_.fetch_add(1, std::memory_order_relaxed);
     }
   }
 
-  inline void iterateUnaryInlFast(const TypedValue &value, uint8_t *byte_ptr) const {
+  inline void iterateUnaryInlFast(const TypedValue &value,
+                                  std::uint8_t *byte_ptr) const {
     if ((!nullable_type) || (!value.isNull())) {
       std::int64_t *count_ptr = reinterpret_cast<std::int64_t *>(byte_ptr);
       (*count_ptr)++;
     }
   }
 
-  inline void iterateInlFast(const std::vector<TypedValue> &arguments, uint8_t *byte_ptr) const override {
-     if (block_update) return;
-     if (arguments.size())
-         iterateUnaryInlFast(arguments.front(), byte_ptr);
-     else
-         iterateNullaryInlFast(byte_ptr);
+  inline void updateState(const std::vector<TypedValue> &arguments,
+                          std::uint8_t *byte_ptr) const override {
+    if (!block_update_) {
+      if (arguments.size())
+        iterateUnaryInlFast(arguments.front(), byte_ptr);
+      else
+        iterateNullaryInlFast(byte_ptr);
+    }
   }
 
-  void BlockUpdate() override {
-     block_update = true;
-  }
+  void blockUpdate() override { block_update_ = true; }
 
-  void AllowUpdate() override {
-     block_update = false;
-  }
+  void allowUpdate() override { block_update_ = false; }
 
-  void initPayload(uint8_t *byte_ptr) const override {
-     std::int64_t *count_ptr = reinterpret_cast<std::int64_t *>(byte_ptr);
-     *count_ptr = 0;
+  void initPayload(std::uint8_t *byte_ptr) const override {
+    std::int64_t *count_ptr = reinterpret_cast<std::int64_t *>(byte_ptr);
+    *count_ptr = 0;
   }
 
-  AggregationState* accumulateNullary(const std::size_t num_tuples) const override {
+  AggregationState* accumulateNullary(
+      const std::size_t num_tuples) const override {
     return new AggregationStateCount(num_tuples);
   }
 
   AggregationState* accumulateColumnVectors(
-      const std::vector<std::unique_ptr<ColumnVector>> &column_vectors) const override;
+      const std::vector<std::unique_ptr<ColumnVector>> &column_vectors)
+      const override;
 
 #ifdef QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
   AggregationState* accumulateValueAccessor(
@@ -177,25 +178,26 @@ class AggregationHandleCount : public AggregationConcreteHandle {
   void mergeStates(const AggregationState &source,
                    AggregationState *destination) const override;
 
-  void mergeStatesFast(const uint8_t *source,
-                   uint8_t *destination) const override;
+  void mergeStatesFast(const std::uint8_t *source,
+                       std::uint8_t *destination) const override;
 
   TypedValue finalize(const AggregationState &state) const override {
-    return TypedValue(static_cast<const AggregationStateCount&>(state).count_.load(std::memory_order_relaxed));
+    return TypedValue(
+        static_cast<const AggregationStateCount &>(state).count_.load(
+            std::memory_order_relaxed));
   }
 
-  inline TypedValue finalizeHashTableEntry(const AggregationState &state) const {
-    return TypedValue(static_cast<const AggregationStateCount&>(state).count_.load(std::memory_order_relaxed));
+  inline TypedValue finalizeHashTableEntry(
+      const AggregationState &state) const {
+    return TypedValue(
+        static_cast<const AggregationStateCount &>(state).count_.load(
+            std::memory_order_relaxed));
   }
 
-  inline TypedValue finalizeHashTableEntryFast(const uint8_t *byte_ptr) const {
-//    const AggregationStateAvg &agg_state = static_cast<const AggregationStateAvg&>(state);
-    // TODO(chasseur): Could improve performance further if we made a special
-    // version of finalizeHashTable() that collects all the sums into one
-    // ColumnVector and all the counts into another and then applies
-    // '*divide_operator_' to them in bulk.
-
-    const std::int64_t *count_ptr = reinterpret_cast<const std::int64_t *>(byte_ptr);
+  inline TypedValue finalizeHashTableEntryFast(
+      const std::uint8_t *byte_ptr) const {
+    const std::int64_t *count_ptr =
+        reinterpret_cast<const std::int64_t *>(byte_ptr);
     return TypedValue(*count_ptr);
   }
 
@@ -205,24 +207,25 @@ class AggregationHandleCount : public AggregationConcreteHandle {
       int index) const override;
 
   /**
-   * @brief Implementation of AggregationHandle::aggregateOnDistinctifyHashTableForSingle()
+   * @brief Implementation of
+   * AggregationHandle::aggregateOnDistinctifyHashTableForSingle()
    *        for COUNT aggregation.
    */
   AggregationState* aggregateOnDistinctifyHashTableForSingle(
-      const AggregationStateHashTableBase &distinctify_hash_table) const override;
+      const AggregationStateHashTableBase &distinctify_hash_table)
+      const override;
 
   /**
-   * @brief Implementation of AggregationHandle::aggregateOnDistinctifyHashTableForGroupBy()
+   * @brief Implementation of
+   * AggregationHandle::aggregateOnDistinctifyHashTableForGroupBy()
    *        for COUNT aggregation.
    */
   void aggregateOnDistinctifyHashTableForGroupBy(
       const AggregationStateHashTableBase &distinctify_hash_table,
       AggregationStateHashTableBase *aggregation_hash_table,
-      int index) const override;
+      std::size_t index) const override;
 
-  size_t getPayloadSize() const override {
-      return sizeof(std::int64_t);
-  }
+  std::size_t getPayloadSize() const override { return sizeof(std::int64_t); }
 
  private:
   friend class AggregateFunctionCount;
@@ -230,10 +233,9 @@ class AggregationHandleCount : public AggregationConcreteHandle {
   /**
    * @brief Constructor.
    **/
-  AggregationHandleCount() : block_update(false) {
-  }
+  AggregationHandleCount() : block_update_(false) {}
 
-  bool block_update;
+  bool block_update_;
 
   DISALLOW_COPY_AND_ASSIGN(AggregationHandleCount);
 };
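
With this change the handle no longer builds a hash table of AggregationStateCount objects for group-by; it operates on a raw byte payload handed to it by the fast hash table: initPayload() writes an int64 zero, updateState() bumps it per matching row, mergeStatesFast() folds a partial count in, and finalizeHashTableEntryFast() reads the result out. A stripped-down, self-contained sketch of that payload protocol (plain C++, not the actual FastHashTable driver):

    #include <cstdint>
    #include <cstring>

    int main() {
      // One COUNT payload slot, treated as a raw std::int64_t.
      std::uint8_t slot[sizeof(std::int64_t)];

      // initPayload(): start the count at zero.
      const std::int64_t zero = 0;
      std::memcpy(slot, &zero, sizeof(zero));

      // updateState(): one increment per matching input row.
      for (int row = 0; row < 5; ++row) {
        std::int64_t count;
        std::memcpy(&count, slot, sizeof(count));
        ++count;
        std::memcpy(slot, &count, sizeof(count));
      }

      // mergeStatesFast(): add a partial count from another worker.
      const std::int64_t other_partial = 7;
      std::int64_t count;
      std::memcpy(&count, slot, sizeof(count));
      count += other_partial;
      std::memcpy(slot, &count, sizeof(count));  // slot now holds 12

      return 0;
    }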

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/dad7d6f3/expressions/aggregation/AggregationHandleDistinct.hpp
----------------------------------------------------------------------
diff --git a/expressions/aggregation/AggregationHandleDistinct.hpp b/expressions/aggregation/AggregationHandleDistinct.hpp
index 7bdb508..9f351d4 100644
--- a/expressions/aggregation/AggregationHandleDistinct.hpp
+++ b/expressions/aggregation/AggregationHandleDistinct.hpp
@@ -47,27 +47,32 @@ class AggregationHandleDistinct : public AggregationConcreteHandle {
   /**
    * @brief Constructor.
    **/
-  AggregationHandleDistinct() {
-  }
+  AggregationHandleDistinct() {}
 
   AggregationState* createInitialState() const override {
-    LOG(FATAL) << "AggregationHandleDistinct does not support createInitialState().";
+    LOG(FATAL)
+        << "AggregationHandleDistinct does not support createInitialState().";
   }
 
-  AggregationState* accumulateNullary(const std::size_t num_tuples) const override {
-    LOG(FATAL) << "AggregationHandleDistinct does not support accumulateNullary().";
+  AggregationState* accumulateNullary(
+      const std::size_t num_tuples) const override {
+    LOG(FATAL)
+        << "AggregationHandleDistinct does not support accumulateNullary().";
   }
 
   AggregationState* accumulateColumnVectors(
-      const std::vector<std::unique_ptr<ColumnVector>> &column_vectors) const override {
-    LOG(FATAL) << "AggregationHandleDistinct does not support accumulateColumnVectors().";
+      const std::vector<std::unique_ptr<ColumnVector>> &column_vectors)
+      const override {
+    LOG(FATAL) << "AggregationHandleDistinct does not support "
+                  "accumulateColumnVectors().";
   }
 
 #ifdef QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
   AggregationState* accumulateValueAccessor(
       ValueAccessor *accessor,
       const std::vector<attribute_id> &accessor_ids) const override {
-    LOG(FATAL) << "AggregationHandleDistinct does not support accumulateValueAccessor().";
+    LOG(FATAL) << "AggregationHandleDistinct does not support "
+                  "accumulateValueAccessor().";
   }
 #endif
 
@@ -81,7 +86,8 @@ class AggregationHandleDistinct : public AggregationConcreteHandle {
   }
 
   AggregationState* aggregateOnDistinctifyHashTableForSingle(
-      const AggregationStateHashTableBase &distinctify_hash_table) const override {
+      const AggregationStateHashTableBase &distinctify_hash_table)
+      const override {
     LOG(FATAL) << "AggregationHandleDistinct does not support "
                << "aggregateOnDistinctifyHashTableForSingle().";
   }
@@ -89,14 +95,14 @@ class AggregationHandleDistinct : public AggregationConcreteHandle {
   void aggregateOnDistinctifyHashTableForGroupBy(
       const AggregationStateHashTableBase &distinctify_hash_table,
       AggregationStateHashTableBase *groupby_hash_table,
-      int index) const override {
+      std::size_t index) const override {
     LOG(FATAL) << "AggregationHandleDistinct does not support "
                << "aggregateOnDistinctifyHashTableForGroupBy().";
   }
 
   AggregationStateHashTableBase* createGroupByHashTable(
       const HashTableImplType hash_table_impl,
-      const std::vector<const Type*> &group_by_types,
+      const std::vector<const Type *> &group_by_types,
       const std::size_t estimated_num_groups,
       StorageManager *storage_manager) const override;
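
AggregationHandleDistinct deliberately LOG(FATAL)s out of the direct accumulation paths: DISTINCT is evaluated in two phases, first deduplicating the argument values in a distinctify hash table, then letting the underlying handle (COUNT, SUM, ...) aggregate over the surviving keys via the aggregateOnDistinctifyHashTable*() entry points. A rough sketch of that two-phase shape, with std::unordered_set standing in for the distinctify hash table (an illustration only, not Quickstep's implementation):

    #include <cstddef>
    #include <unordered_set>
    #include <vector>

    std::size_t CountDistinct(const std::vector<int> &values) {
      // Phase 1: deduplicate the argument values.
      std::unordered_set<int> distinctify(values.begin(), values.end());

      // Phase 2: run the underlying aggregate (here COUNT) over the
      // distinct values only.
      std::size_t count = 0;
      for (const int v : distinctify) {
        (void)v;  // COUNT only needs the number of distinct keys.
        ++count;
      }
      return count;
    }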
 

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/dad7d6f3/expressions/aggregation/AggregationHandleMax.cpp
----------------------------------------------------------------------
diff --git a/expressions/aggregation/AggregationHandleMax.cpp b/expressions/aggregation/AggregationHandleMax.cpp
index d63564a..765a57a 100644
--- a/expressions/aggregation/AggregationHandleMax.cpp
+++ b/expressions/aggregation/AggregationHandleMax.cpp
@@ -37,22 +37,19 @@ namespace quickstep {
 class StorageManager;
 
 AggregationHandleMax::AggregationHandleMax(const Type &type)
-    : type_(type), block_update(false) {
-  fast_comparator_.reset(ComparisonFactory::GetComparison(ComparisonID::kGreater)
-                         .makeUncheckedComparatorForTypes(type,
-                                                          type.getNonNullableVersion()));
+    : type_(type), block_update_(false) {
+  fast_comparator_.reset(
+      ComparisonFactory::GetComparison(ComparisonID::kGreater)
+          .makeUncheckedComparatorForTypes(type, type.getNonNullableVersion()));
 }
 
 AggregationStateHashTableBase* AggregationHandleMax::createGroupByHashTable(
     const HashTableImplType hash_table_impl,
-    const std::vector<const Type*> &group_by_types,
+    const std::vector<const Type *> &group_by_types,
     const std::size_t estimated_num_groups,
     StorageManager *storage_manager) const {
   return AggregationStateHashTableFactory<AggregationStateMax>::CreateResizable(
-      hash_table_impl,
-      group_by_types,
-      estimated_num_groups,
-      storage_manager);
+      hash_table_impl, group_by_types, estimated_num_groups, storage_manager);
 }
 
 AggregationState* AggregationHandleMax::accumulateColumnVectors(
@@ -60,9 +57,8 @@ AggregationState* AggregationHandleMax::accumulateColumnVectors(
   DCHECK_EQ(1u, column_vectors.size())
       << "Got wrong number of ColumnVectors for MAX: " << column_vectors.size();
 
-  return new AggregationStateMax(
-      fast_comparator_->accumulateColumnVector(type_.getNullableVersion().makeNullValue(),
-                                               *column_vectors.front()));
+  return new AggregationStateMax(fast_comparator_->accumulateColumnVector(
+      type_.getNullableVersion().makeNullValue(), *column_vectors.front()));
 }
 
 #ifdef QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
@@ -72,10 +68,10 @@ AggregationState* AggregationHandleMax::accumulateValueAccessor(
   DCHECK_EQ(1u, accessor_ids.size())
       << "Got wrong number of attributes for MAX: " << accessor_ids.size();
 
-  return new AggregationStateMax(
-      fast_comparator_->accumulateValueAccessor(type_.getNullableVersion().makeNullValue(),
-                                                accessor,
-                                                accessor_ids.front()));
+  return new AggregationStateMax(fast_comparator_->accumulateValueAccessor(
+      type_.getNullableVersion().makeNullValue(),
+      accessor,
+      accessor_ids.front()));
 }
 #endif  // QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
 
@@ -88,24 +84,24 @@ void AggregationHandleMax::aggregateValueAccessorIntoHashTable(
       << "Got wrong number of arguments for MAX: " << argument_ids.size();
 }
 
-void AggregationHandleMax::mergeStates(
-    const AggregationState &source,
-    AggregationState *destination) const {
-  const AggregationStateMax &max_source = static_cast<const AggregationStateMax&>(source);
-  AggregationStateMax *max_destination = static_cast<AggregationStateMax*>(destination);
+void AggregationHandleMax::mergeStates(const AggregationState &source,
+                                       AggregationState *destination) const {
+  const AggregationStateMax &max_source =
+      static_cast<const AggregationStateMax &>(source);
+  AggregationStateMax *max_destination =
+      static_cast<AggregationStateMax *>(destination);
 
   if (!max_source.max_.isNull()) {
     compareAndUpdate(max_destination, max_source.max_);
   }
 }
 
-void AggregationHandleMax::mergeStatesFast(
-    const std::uint8_t *source,
-    std::uint8_t *destination) const {
-    const TypedValue *src_max_ptr = reinterpret_cast<const TypedValue *>(source);
-    TypedValue *dst_max_ptr = reinterpret_cast<TypedValue *>(destination);
-    if (!(src_max_ptr->isNull())) {
-      compareAndUpdateFast(dst_max_ptr, *src_max_ptr);
+void AggregationHandleMax::mergeStatesFast(const std::uint8_t *source,
+                                           std::uint8_t *destination) const {
+  const TypedValue *src_max_ptr = reinterpret_cast<const TypedValue *>(source);
+  TypedValue *dst_max_ptr = reinterpret_cast<TypedValue *>(destination);
+  if (!(src_max_ptr->isNull())) {
+    compareAndUpdateFast(dst_max_ptr, *src_max_ptr);
   }
 }
 
@@ -114,31 +110,26 @@ ColumnVector* AggregationHandleMax::finalizeHashTable(
     std::vector<std::vector<TypedValue>> *group_by_keys,
     int index) const {
   return finalizeHashTableHelperFast<AggregationHandleMax,
-                                 AggregationStateFastHashTable>(
-      type_.getNullableVersion(),
-      hash_table,
-      group_by_keys,
-      index);
+                                     AggregationStateFastHashTable>(
+      type_.getNullableVersion(), hash_table, group_by_keys, index);
 }
 
-AggregationState* AggregationHandleMax::aggregateOnDistinctifyHashTableForSingle(
+AggregationState*
+AggregationHandleMax::aggregateOnDistinctifyHashTableForSingle(
     const AggregationStateHashTableBase &distinctify_hash_table) const {
   return aggregateOnDistinctifyHashTableForSingleUnaryHelperFast<
       AggregationHandleMax,
-      AggregationStateMax>(
-          distinctify_hash_table);
+      AggregationStateMax>(distinctify_hash_table);
 }
 
 void AggregationHandleMax::aggregateOnDistinctifyHashTableForGroupBy(
     const AggregationStateHashTableBase &distinctify_hash_table,
     AggregationStateHashTableBase *aggregation_hash_table,
-    int index) const {
+    std::size_t index) const {
   aggregateOnDistinctifyHashTableForGroupByUnaryHelperFast<
       AggregationHandleMax,
       AggregationStateFastHashTable>(
-          distinctify_hash_table,
-          aggregation_hash_table,
-          index);
+      distinctify_hash_table, aggregation_hash_table, index);
 }
 
 }  // namespace quickstep
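
The MAX merge logic above boils down to one rule: a NULL source never changes the destination, a NULL destination is always replaced, and otherwise the kGreater comparator decides. A small self-contained sketch of that rule, using std::optional in place of a nullable TypedValue (illustration only, not Quickstep's TypedValue API):

    #include <functional>
    #include <optional>

    // Merge a partial MAX into a running MAX, ignoring NULL (empty) sources.
    template <typename T, typename Greater = std::greater<T>>
    void MergeMax(const std::optional<T> &source,
                  std::optional<T> *destination,
                  Greater greater = Greater()) {
      if (!source.has_value()) {
        return;  // NULL source: nothing to merge.
      }
      if (!destination->has_value() || greater(*source, **destination)) {
        *destination = *source;  // NULL destination, or a larger value.
      }
    }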

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/dad7d6f3/expressions/aggregation/AggregationHandleMax.hpp
----------------------------------------------------------------------
diff --git a/expressions/aggregation/AggregationHandleMax.hpp b/expressions/aggregation/AggregationHandleMax.hpp
index 16c7458..8be6826 100644
--- a/expressions/aggregation/AggregationHandleMax.hpp
+++ b/expressions/aggregation/AggregationHandleMax.hpp
@@ -28,8 +28,8 @@
 #include "catalog/CatalogTypedefs.hpp"
 #include "expressions/aggregation/AggregationConcreteHandle.hpp"
 #include "expressions/aggregation/AggregationHandle.hpp"
-#include "storage/HashTableBase.hpp"
 #include "storage/FastHashTable.hpp"
+#include "storage/HashTableBase.hpp"
 #include "threading/SpinMutex.hpp"
 #include "types/Type.hpp"
 #include "types/TypedValue.hpp"
@@ -56,25 +56,24 @@ class AggregationStateMax : public AggregationState {
   /**
    * @brief Copy constructor (ignores mutex).
    */
-  AggregationStateMax(const AggregationStateMax &orig)
-      : max_(orig.max_) {
-  }
+  AggregationStateMax(const AggregationStateMax &orig) : max_(orig.max_) {}
 
   /**
    * @brief Destructor.
    */
-  ~AggregationStateMax() override {};
+  ~AggregationStateMax() override {}
+
+  const std::uint8_t* getPayloadAddress() const {
+    return reinterpret_cast<const std::uint8_t *>(&max_);
+  }
 
  private:
   friend class AggregationHandleMax;
 
   explicit AggregationStateMax(const Type &type)
-      : max_(type.getNullableVersion().makeNullValue()) {
-  }
+      : max_(type.getNullableVersion().makeNullValue()) {}
 
-  explicit AggregationStateMax(TypedValue &&value)
-      : max_(std::move(value)) {
-  }
+  explicit AggregationStateMax(TypedValue &&value) : max_(std::move(value)) {}
 
   TypedValue max_;
   SpinMutex mutex_;
@@ -85,8 +84,7 @@ class AggregationStateMax : public AggregationState {
  **/
 class AggregationHandleMax : public AggregationConcreteHandle {
  public:
-  ~AggregationHandleMax() override {
-  }
+  ~AggregationHandleMax() override {}
 
   AggregationState* createInitialState() const override {
     return new AggregationStateMax(type_);
@@ -94,45 +92,46 @@ class AggregationHandleMax : public AggregationConcreteHandle {
 
   AggregationStateHashTableBase* createGroupByHashTable(
       const HashTableImplType hash_table_impl,
-      const std::vector<const Type*> &group_by_types,
+      const std::vector<const Type *> &group_by_types,
       const std::size_t estimated_num_groups,
       StorageManager *storage_manager) const override;
 
   /**
    * @brief Iterate with max aggregation state.
    */
-  inline void iterateUnaryInl(AggregationStateMax *state, const TypedValue &value) const {
+  inline void iterateUnaryInl(AggregationStateMax *state,
+                              const TypedValue &value) const {
     DCHECK(value.isPlausibleInstanceOf(type_.getSignature()));
-    compareAndUpdate(static_cast<AggregationStateMax*>(state), value);
+    compareAndUpdate(static_cast<AggregationStateMax *>(state), value);
   }
 
-  inline void iterateUnaryInlFast(const TypedValue &value, std::uint8_t *byte_ptr) const {
+  inline void iterateUnaryInlFast(const TypedValue &value,
+                                  std::uint8_t *byte_ptr) const {
     DCHECK(value.isPlausibleInstanceOf(type_.getSignature()));
     TypedValue *max_ptr = reinterpret_cast<TypedValue *>(byte_ptr);
     compareAndUpdateFast(max_ptr, value);
   }
 
-  inline void iterateInlFast(const std::vector<TypedValue> &arguments, uint8_t *byte_ptr) const override {
-    if (block_update) return;
-    iterateUnaryInlFast(arguments.front(), byte_ptr);
+  inline void updateState(const std::vector<TypedValue> &arguments,
+                          std::uint8_t *byte_ptr) const override {
+    if (!block_update_) {
+      iterateUnaryInlFast(arguments.front(), byte_ptr);
+    }
   }
 
-  void BlockUpdate() override {
-      block_update = true;
-  }
+  void blockUpdate() override { block_update_ = true; }
 
-  void AllowUpdate() override {
-      block_update = false;
-  }
+  void allowUpdate() override { block_update_ = false; }
 
-  void initPayload(uint8_t *byte_ptr) const override {
+  void initPayload(std::uint8_t *byte_ptr) const override {
     TypedValue *max_ptr = reinterpret_cast<TypedValue *>(byte_ptr);
     TypedValue t1 = (type_.getNullableVersion().makeNullValue());
     *max_ptr = t1;
   }
 
   AggregationState* accumulateColumnVectors(
-      const std::vector<std::unique_ptr<ColumnVector>> &column_vectors) const override;
+      const std::vector<std::unique_ptr<ColumnVector>> &column_vectors)
+      const override;
 
 #ifdef QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
   AggregationState* accumulateValueAccessor(
@@ -150,17 +149,19 @@ class AggregationHandleMax : public AggregationConcreteHandle {
                    AggregationState *destination) const override;
 
   void mergeStatesFast(const std::uint8_t *source,
-                   std::uint8_t *destination) const override;
+                       std::uint8_t *destination) const override;
 
   TypedValue finalize(const AggregationState &state) const override {
-    return TypedValue(static_cast<const AggregationStateMax&>(state).max_);
+    return TypedValue(static_cast<const AggregationStateMax &>(state).max_);
   }
 
-  inline TypedValue finalizeHashTableEntry(const AggregationState &state) const {
-    return TypedValue(static_cast<const AggregationStateMax&>(state).max_);
+  inline TypedValue finalizeHashTableEntry(
+      const AggregationState &state) const {
+    return TypedValue(static_cast<const AggregationStateMax &>(state).max_);
   }
 
-  inline TypedValue finalizeHashTableEntryFast(const std::uint8_t *byte_ptr) const {
+  inline TypedValue finalizeHashTableEntryFast(
+      const std::uint8_t *byte_ptr) const {
     const TypedValue *max_ptr = reinterpret_cast<const TypedValue *>(byte_ptr);
     return TypedValue(*max_ptr);
   }
@@ -171,25 +172,25 @@ class AggregationHandleMax : public AggregationConcreteHandle {
       int index) const override;
 
   /**
-   * @brief Implementation of AggregationHandle::aggregateOnDistinctifyHashTableForSingle()
+   * @brief Implementation of
+   * AggregationHandle::aggregateOnDistinctifyHashTableForSingle()
    *        for MAX aggregation.
    */
   AggregationState* aggregateOnDistinctifyHashTableForSingle(
-      const AggregationStateHashTableBase &distinctify_hash_table) const override;
-
+      const AggregationStateHashTableBase &distinctify_hash_table)
+      const override;
 
   /**
-   * @brief Implementation of AggregationHandle::aggregateOnDistinctifyHashTableForGroupBy()
+   * @brief Implementation of
+   * AggregationHandle::aggregateOnDistinctifyHashTableForGroupBy()
    *        for MAX aggregation.
    */
   void aggregateOnDistinctifyHashTableForGroupBy(
       const AggregationStateHashTableBase &distinctify_hash_table,
       AggregationStateHashTableBase *aggregation_hash_table,
-      int index) const override;
+      std::size_t index) const override;
 
-  size_t getPayloadSize() const override {
-      return sizeof(TypedValue);
-  }
+  std::size_t getPayloadSize() const override { return sizeof(TypedValue); }
 
  private:
   friend class AggregateFunctionMax;
@@ -202,24 +203,29 @@ class AggregationHandleMax : public AggregationConcreteHandle {
   explicit AggregationHandleMax(const Type &type);
 
   /**
-   * @brief compare the value with max_ and update it if the value is larger than
+   * @brief compare the value with max_ and update it if the value is larger
+   *        than
    *        current maximum. NULLs are ignored.
    *
    * @param value A TypedValue to compare
    **/
-  inline void compareAndUpdate(AggregationStateMax *state, const TypedValue &value) const {
+  inline void compareAndUpdate(AggregationStateMax *state,
+                               const TypedValue &value) const {
     // TODO(chasseur): Avoid null-checks when aggregating a non-nullable Type.
     if (value.isNull()) return;
 
     SpinMutexLock lock(state->mutex_);
-    if (state->max_.isNull() || fast_comparator_->compareTypedValues(value, state->max_)) {
+    if (state->max_.isNull() ||
+        fast_comparator_->compareTypedValues(value, state->max_)) {
       state->max_ = value;
     }
   }
 
-  inline void compareAndUpdateFast(TypedValue *max_ptr, const TypedValue &value) const {
+  inline void compareAndUpdateFast(TypedValue *max_ptr,
+                                   const TypedValue &value) const {
     if (value.isNull()) return;
-    if (max_ptr->isNull() || fast_comparator_->compareTypedValues(value, *max_ptr)) {
+    if (max_ptr->isNull() ||
+        fast_comparator_->compareTypedValues(value, *max_ptr)) {
       *max_ptr = value;
     }
   }
@@ -227,7 +233,7 @@ class AggregationHandleMax : public AggregationConcreteHandle {
   const Type &type_;
   std::unique_ptr<UncheckedComparator> fast_comparator_;
 
-  bool block_update;
+  bool block_update_;
 
   DISALLOW_COPY_AND_ASSIGN(AggregationHandleMax);
 };
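
Both COUNT and MAX now expose blockUpdate()/allowUpdate(), and updateState() silently becomes a no-op while updates are blocked (the old iterateInlFast() returned early instead). A stripped-down sketch of that gate, independent of any Quickstep class:

    #include <cstdint>

    // Minimal update gate in the style of block_update_ above.
    class GatedCounter {
     public:
      void blockUpdate() { block_update_ = true; }
      void allowUpdate() { block_update_ = false; }

      // Counterpart of updateState(): ignored while updates are blocked.
      void updateState(std::int64_t delta) {
        if (!block_update_) {
          count_ += delta;
        }
      }

      std::int64_t count() const { return count_; }

     private:
      bool block_update_ = false;
      std::int64_t count_ = 0;
    };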

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/dad7d6f3/expressions/aggregation/AggregationHandleMin.cpp
----------------------------------------------------------------------
diff --git a/expressions/aggregation/AggregationHandleMin.cpp b/expressions/aggregation/AggregationHandleMin.cpp
index 3582adc..c1655a6 100644
--- a/expressions/aggregation/AggregationHandleMin.cpp
+++ b/expressions/aggregation/AggregationHandleMin.cpp
@@ -39,22 +39,19 @@ namespace quickstep {
 class StorageManager;
 
 AggregationHandleMin::AggregationHandleMin(const Type &type)
-    : type_(type), block_update(false) {
-  fast_comparator_.reset(ComparisonFactory::GetComparison(ComparisonID::kLess)
-                         .makeUncheckedComparatorForTypes(type,
-                                                          type.getNonNullableVersion()));
+    : type_(type), block_update_(false) {
+  fast_comparator_.reset(
+      ComparisonFactory::GetComparison(ComparisonID::kLess)
+          .makeUncheckedComparatorForTypes(type, type.getNonNullableVersion()));
 }
 
 AggregationStateHashTableBase* AggregationHandleMin::createGroupByHashTable(
     const HashTableImplType hash_table_impl,
-    const std::vector<const Type*> &group_by_types,
+    const std::vector<const Type *> &group_by_types,
     const std::size_t estimated_num_groups,
     StorageManager *storage_manager) const {
   return AggregationStateHashTableFactory<AggregationStateMin>::CreateResizable(
-      hash_table_impl,
-      group_by_types,
-      estimated_num_groups,
-      storage_manager);
+      hash_table_impl, group_by_types, estimated_num_groups, storage_manager);
 }
 
 AggregationState* AggregationHandleMin::accumulateColumnVectors(
@@ -62,9 +59,8 @@ AggregationState* AggregationHandleMin::accumulateColumnVectors(
   DCHECK_EQ(1u, column_vectors.size())
       << "Got wrong number of ColumnVectors for MIN: " << column_vectors.size();
 
-  return new AggregationStateMin(
-      fast_comparator_->accumulateColumnVector(type_.getNullableVersion().makeNullValue(),
-                                               *column_vectors.front()));
+  return new AggregationStateMin(fast_comparator_->accumulateColumnVector(
+      type_.getNullableVersion().makeNullValue(), *column_vectors.front()));
 }
 
 #ifdef QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
@@ -74,10 +70,10 @@ AggregationState* AggregationHandleMin::accumulateValueAccessor(
   DCHECK_EQ(1u, accessor_ids.size())
       << "Got wrong number of attributes for MIN: " << accessor_ids.size();
 
-  return new AggregationStateMin(
-      fast_comparator_->accumulateValueAccessor(type_.getNullableVersion().makeNullValue(),
-                                                accessor,
-                                                accessor_ids.front()));
+  return new AggregationStateMin(fast_comparator_->accumulateValueAccessor(
+      type_.getNullableVersion().makeNullValue(),
+      accessor,
+      accessor_ids.front()));
 }
 #endif  // QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
 
@@ -90,26 +86,26 @@ void AggregationHandleMin::aggregateValueAccessorIntoHashTable(
       << "Got wrong number of arguments for MIN: " << argument_ids.size();
 }
 
-void AggregationHandleMin::mergeStates(
-    const AggregationState &source,
-    AggregationState *destination) const {
-  const AggregationStateMin &min_source = static_cast<const AggregationStateMin&>(source);
-  AggregationStateMin *min_destination = static_cast<AggregationStateMin*>(destination);
+void AggregationHandleMin::mergeStates(const AggregationState &source,
+                                       AggregationState *destination) const {
+  const AggregationStateMin &min_source =
+      static_cast<const AggregationStateMin &>(source);
+  AggregationStateMin *min_destination =
+      static_cast<AggregationStateMin *>(destination);
 
   if (!min_source.min_.isNull()) {
     compareAndUpdate(min_destination, min_source.min_);
   }
 }
 
-void AggregationHandleMin::mergeStatesFast(
-    const std::uint8_t *source,
-    std::uint8_t *destination) const {
-    const TypedValue *src_min_ptr = reinterpret_cast<const TypedValue *>(source);
-    TypedValue *dst_min_ptr = reinterpret_cast<TypedValue *>(destination);
+void AggregationHandleMin::mergeStatesFast(const std::uint8_t *source,
+                                           std::uint8_t *destination) const {
+  const TypedValue *src_min_ptr = reinterpret_cast<const TypedValue *>(source);
+  TypedValue *dst_min_ptr = reinterpret_cast<TypedValue *>(destination);
 
-    if (!(src_min_ptr->isNull())) {
-      compareAndUpdateFast(dst_min_ptr, *src_min_ptr);
-    }
+  if (!(src_min_ptr->isNull())) {
+    compareAndUpdateFast(dst_min_ptr, *src_min_ptr);
+  }
 }
 
 ColumnVector* AggregationHandleMin::finalizeHashTable(
@@ -117,31 +113,26 @@ ColumnVector* AggregationHandleMin::finalizeHashTable(
     std::vector<std::vector<TypedValue>> *group_by_keys,
     int index) const {
   return finalizeHashTableHelperFast<AggregationHandleMin,
-                                 AggregationStateFastHashTable>(
-      type_.getNonNullableVersion(),
-      hash_table,
-      group_by_keys,
-      index);
+                                     AggregationStateFastHashTable>(
+      type_.getNonNullableVersion(), hash_table, group_by_keys, index);
 }
 
-AggregationState* AggregationHandleMin::aggregateOnDistinctifyHashTableForSingle(
+AggregationState*
+AggregationHandleMin::aggregateOnDistinctifyHashTableForSingle(
     const AggregationStateHashTableBase &distinctify_hash_table) const {
   return aggregateOnDistinctifyHashTableForSingleUnaryHelperFast<
       AggregationHandleMin,
-      AggregationStateMin>(
-          distinctify_hash_table);
+      AggregationStateMin>(distinctify_hash_table);
 }
 
 void AggregationHandleMin::aggregateOnDistinctifyHashTableForGroupBy(
     const AggregationStateHashTableBase &distinctify_hash_table,
     AggregationStateHashTableBase *aggregation_hash_table,
-    int index) const {
+    std::size_t index) const {
   aggregateOnDistinctifyHashTableForGroupByUnaryHelperFast<
       AggregationHandleMin,
       AggregationStateFastHashTable>(
-          distinctify_hash_table,
-          aggregation_hash_table,
-          index);
+      distinctify_hash_table, aggregation_hash_table, index);
 }
 
 }  // namespace quickstep


[4/7] incubator-quickstep git commit: Modified Aggregation unit test. Ran clang-format.

Posted by ra...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/dad7d6f3/expressions/aggregation/tests/AggregationHandleSum_unittest.cpp
----------------------------------------------------------------------
diff --git a/expressions/aggregation/tests/AggregationHandleSum_unittest.cpp b/expressions/aggregation/tests/AggregationHandleSum_unittest.cpp
index abf8a89..5dc4fb8 100644
--- a/expressions/aggregation/tests/AggregationHandleSum_unittest.cpp
+++ b/expressions/aggregation/tests/AggregationHandleSum_unittest.cpp
@@ -26,6 +26,8 @@
 #include "expressions/aggregation/AggregationHandle.hpp"
 #include "expressions/aggregation/AggregationHandleSum.hpp"
 #include "expressions/aggregation/AggregationID.hpp"
+#include "storage/AggregationOperationState.hpp"
+#include "storage/FastHashTableFactory.hpp"
 #include "storage/StorageManager.hpp"
 #include "types/CharType.hpp"
 #include "types/DatetimeIntervalType.hpp"
@@ -50,51 +52,56 @@
 
 namespace quickstep {
 
-class AggregationHandleSumTest : public::testing::Test {
+class AggregationHandleSumTest : public ::testing::Test {
  protected:
   static const int kNumSamples = 1000;
 
   // Helper method that calls AggregationHandleSum::iterateUnaryInl() to
   // aggregate 'value' into '*state'.
   void iterateHandle(AggregationState *state, const TypedValue &value) {
-    static_cast<const AggregationHandleSum&>(*aggregation_handle_sum_).iterateUnaryInl(
-        static_cast<AggregationStateSum*>(state),
-        value);
+    static_cast<const AggregationHandleSum &>(*aggregation_handle_sum_)
+        .iterateUnaryInl(static_cast<AggregationStateSum *>(state), value);
   }
 
   void initializeHandle(const Type &type) {
     aggregation_handle_sum_.reset(
-        AggregateFunctionFactory::Get(AggregationID::kSum).createHandle(
-            std::vector<const Type*>(1, &type)));
+        AggregateFunctionFactory::Get(AggregationID::kSum)
+            .createHandle(std::vector<const Type *>(1, &type)));
     aggregation_handle_sum_state_.reset(
         aggregation_handle_sum_->createInitialState());
   }
 
   static bool ApplyToTypesTest(TypeID typeID) {
-    const Type &type = (typeID == kChar || typeID == kVarChar) ?
-        TypeFactory::GetType(typeID, static_cast<std::size_t>(10)) :
-        TypeFactory::GetType(typeID);
+    const Type &type =
+        (typeID == kChar || typeID == kVarChar)
+            ? TypeFactory::GetType(typeID, static_cast<std::size_t>(10))
+            : TypeFactory::GetType(typeID);
 
-    return AggregateFunctionFactory::Get(AggregationID::kSum).canApplyToTypes(
-        std::vector<const Type*>(1, &type));
+    return AggregateFunctionFactory::Get(AggregationID::kSum)
+        .canApplyToTypes(std::vector<const Type *>(1, &type));
   }
 
   static bool ResultTypeForArgumentTypeTest(TypeID input_type_id,
                                             TypeID output_type_id) {
-    const Type *result_type
-        = AggregateFunctionFactory::Get(AggregationID::kSum).resultTypeForArgumentTypes(
-            std::vector<const Type*>(1, &TypeFactory::GetType(input_type_id)));
+    const Type *result_type =
+        AggregateFunctionFactory::Get(AggregationID::kSum)
+            .resultTypeForArgumentTypes(std::vector<const Type *>(
+                1, &TypeFactory::GetType(input_type_id)));
     return (result_type->getTypeID() == output_type_id);
   }
 
   template <typename CppType>
-  static void CheckSumValue(
-      CppType expected,
-      const AggregationHandle &target,
-      const AggregationState &state) {
+  static void CheckSumValue(CppType expected,
+                            const AggregationHandle &target,
+                            const AggregationState &state) {
     EXPECT_EQ(expected, target.finalize(state).getLiteral<CppType>());
   }
 
+  template <typename CppType>
+  static void CheckSumValue(CppType expected, const TypedValue &value) {
+    EXPECT_EQ(expected, value.getLiteral<CppType>());
+  }
+
   // Static templated method to set a meaningful value for each data type.
   template <typename CppType>
   static void SetDataType(int value, CppType *data) {
@@ -106,7 +113,9 @@ class AggregationHandleSumTest : public::testing::Test {
     const GenericType &type = GenericType::Instance(true);
 
     initializeHandle(type);
-    EXPECT_TRUE(aggregation_handle_sum_->finalize(*aggregation_handle_sum_state_).isNull());
+    EXPECT_TRUE(
+        aggregation_handle_sum_->finalize(*aggregation_handle_sum_state_)
+            .isNull());
 
     typename GenericType::cpptype val;
     typename PrecisionType::cpptype sum;
@@ -117,13 +126,14 @@ class AggregationHandleSumTest : public::testing::Test {
       if (type.getTypeID() == kInt || type.getTypeID() == kLong) {
         SetDataType(i - 10, &val);
       } else {
-        SetDataType(static_cast<float>(i - 10)/10, &val);
+        SetDataType(static_cast<float>(i - 10) / 10, &val);
       }
       iterateHandle(aggregation_handle_sum_state_.get(), type.makeValue(&val));
       sum += val;
     }
     iterateHandle(aggregation_handle_sum_state_.get(), type.makeNullValue());
-    CheckSumValue<typename PrecisionType::cpptype>(sum, *aggregation_handle_sum_, *aggregation_handle_sum_state_);
+    CheckSumValue<typename PrecisionType::cpptype>(
+        sum, *aggregation_handle_sum_, *aggregation_handle_sum_state_);
 
     // Test mergeStates().
     std::unique_ptr<AggregationState> merge_state(
@@ -136,7 +146,7 @@ class AggregationHandleSumTest : public::testing::Test {
       if (type.getTypeID() == kInt || type.getTypeID() == kLong) {
         SetDataType(i - 10, &val);
       } else {
-        SetDataType(static_cast<float>(i - 10)/10, &val);
+        SetDataType(static_cast<float>(i - 10) / 10, &val);
       }
       iterateHandle(merge_state.get(), type.makeValue(&val));
       sum += val;
@@ -144,13 +154,11 @@ class AggregationHandleSumTest : public::testing::Test {
     aggregation_handle_sum_->mergeStates(*merge_state,
                                          aggregation_handle_sum_state_.get());
     CheckSumValue<typename PrecisionType::cpptype>(
-        sum,
-        *aggregation_handle_sum_,
-        *aggregation_handle_sum_state_);
+        sum, *aggregation_handle_sum_, *aggregation_handle_sum_state_);
   }
 
   template <typename GenericType, typename Output>
-  ColumnVector *createColumnVectorGeneric(const Type &type, Output *sum) {
+  ColumnVector* createColumnVectorGeneric(const Type &type, Output *sum) {
     NativeColumnVector *column = new NativeColumnVector(type, kNumSamples + 3);
 
     typename GenericType::cpptype val;
@@ -161,12 +169,12 @@ class AggregationHandleSumTest : public::testing::Test {
       if (type.getTypeID() == kInt || type.getTypeID() == kLong) {
         SetDataType(i - 10, &val);
       } else {
-        SetDataType(static_cast<float>(i - 10)/10, &val);
+        SetDataType(static_cast<float>(i - 10) / 10, &val);
       }
       column->appendTypedValue(type.makeValue(&val));
       *sum += val;
       // One NULL in the middle.
-      if (i == kNumSamples/2) {
+      if (i == kNumSamples / 2) {
         column->appendTypedValue(type.makeNullValue());
       }
     }
@@ -180,12 +188,15 @@ class AggregationHandleSumTest : public::testing::Test {
     const GenericType &type = GenericType::Instance(true);
 
     initializeHandle(type);
-    EXPECT_TRUE(aggregation_handle_sum_->finalize(*aggregation_handle_sum_state_).isNull());
+    EXPECT_TRUE(
+        aggregation_handle_sum_->finalize(*aggregation_handle_sum_state_)
+            .isNull());
 
     typename PrecisionType::cpptype sum;
     std::vector<std::unique_ptr<ColumnVector>> column_vectors;
     column_vectors.emplace_back(
-        createColumnVectorGeneric<GenericType, typename PrecisionType::cpptype>(type, &sum));
+        createColumnVectorGeneric<GenericType, typename PrecisionType::cpptype>(
+            type, &sum));
 
     std::unique_ptr<AggregationState> cv_state(
         aggregation_handle_sum_->accumulateColumnVectors(column_vectors));
@@ -193,15 +204,12 @@ class AggregationHandleSumTest : public::testing::Test {
     // Test the state generated directly by accumulateColumnVectors(), and also
     // test after merging back.
     CheckSumValue<typename PrecisionType::cpptype>(
-        sum,
-        *aggregation_handle_sum_,
-        *cv_state);
+        sum, *aggregation_handle_sum_, *cv_state);
 
-    aggregation_handle_sum_->mergeStates(*cv_state, aggregation_handle_sum_state_.get());
+    aggregation_handle_sum_->mergeStates(*cv_state,
+                                         aggregation_handle_sum_state_.get());
     CheckSumValue<typename PrecisionType::cpptype>(
-        sum,
-        *aggregation_handle_sum_,
-        *aggregation_handle_sum_state_);
+        sum, *aggregation_handle_sum_, *aggregation_handle_sum_state_);
   }
 
 #ifdef QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
@@ -210,29 +218,30 @@ class AggregationHandleSumTest : public::testing::Test {
     const GenericType &type = GenericType::Instance(true);
 
     initializeHandle(type);
-    EXPECT_TRUE(aggregation_handle_sum_->finalize(*aggregation_handle_sum_state_).isNull());
+    EXPECT_TRUE(
+        aggregation_handle_sum_->finalize(*aggregation_handle_sum_state_)
+            .isNull());
 
     typename PrecisionType::cpptype sum;
-    std::unique_ptr<ColumnVectorsValueAccessor> accessor(new ColumnVectorsValueAccessor());
+    std::unique_ptr<ColumnVectorsValueAccessor> accessor(
+        new ColumnVectorsValueAccessor());
     accessor->addColumn(
-        createColumnVectorGeneric<GenericType, typename PrecisionType::cpptype>(type, &sum));
+        createColumnVectorGeneric<GenericType, typename PrecisionType::cpptype>(
+            type, &sum));
 
     std::unique_ptr<AggregationState> va_state(
-        aggregation_handle_sum_->accumulateValueAccessor(accessor.get(),
-                                                         std::vector<attribute_id>(1, 0)));
+        aggregation_handle_sum_->accumulateValueAccessor(
+            accessor.get(), std::vector<attribute_id>(1, 0)));
 
     // Test the state generated directly by accumulateValueAccessor(), and also
     // test after merging back.
     CheckSumValue<typename PrecisionType::cpptype>(
-        sum,
-        *aggregation_handle_sum_,
-        *va_state);
+        sum, *aggregation_handle_sum_, *va_state);
 
-    aggregation_handle_sum_->mergeStates(*va_state, aggregation_handle_sum_state_.get());
+    aggregation_handle_sum_->mergeStates(*va_state,
+                                         aggregation_handle_sum_state_.get());
     CheckSumValue<typename PrecisionType::cpptype>(
-        sum,
-        *aggregation_handle_sum_,
-        *aggregation_handle_sum_state_);
+        sum, *aggregation_handle_sum_, *aggregation_handle_sum_state_);
   }
 #endif  // QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
 
@@ -245,9 +254,7 @@ const int AggregationHandleSumTest::kNumSamples;
 
 template <>
 void AggregationHandleSumTest::CheckSumValue<float>(
-    float val,
-    const AggregationHandle &handle,
-    const AggregationState &state) {
+    float val, const AggregationHandle &handle, const AggregationState &state) {
   EXPECT_FLOAT_EQ(val, handle.finalize(state).getLiteral<float>());
 }
 
@@ -260,12 +267,14 @@ void AggregationHandleSumTest::CheckSumValue<double>(
 }
 
 template <>
-void AggregationHandleSumTest::SetDataType<DatetimeIntervalLit>(int value, DatetimeIntervalLit *data) {
+void AggregationHandleSumTest::SetDataType<DatetimeIntervalLit>(
+    int value, DatetimeIntervalLit *data) {
   data->interval_ticks = value;
 }
 
 template <>
-void AggregationHandleSumTest::SetDataType<YearMonthIntervalLit>(int value, YearMonthIntervalLit *data) {
+void AggregationHandleSumTest::SetDataType<YearMonthIntervalLit>(
+    int value, YearMonthIntervalLit *data) {
   data->months = value;
 }
 
@@ -312,11 +321,13 @@ TEST_F(AggregationHandleSumTest, DoubleTypeColumnVectorTest) {
 }
 
 TEST_F(AggregationHandleSumTest, DatetimeIntervalTypeColumnVectorTest) {
-  checkAggregationSumGenericColumnVector<DatetimeIntervalType, DatetimeIntervalType>();
+  checkAggregationSumGenericColumnVector<DatetimeIntervalType,
+                                         DatetimeIntervalType>();
 }
 
 TEST_F(AggregationHandleSumTest, YearMonthIntervalTypeColumnVectorTest) {
-  checkAggregationSumGenericColumnVector<YearMonthIntervalType, YearMonthIntervalType>();
+  checkAggregationSumGenericColumnVector<YearMonthIntervalType,
+                                         YearMonthIntervalType>();
 }
 
 #ifdef QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
@@ -337,11 +348,13 @@ TEST_F(AggregationHandleSumTest, DoubleTypeValueAccessorTest) {
 }
 
 TEST_F(AggregationHandleSumTest, DatetimeIntervalTypeValueAccessorTest) {
-  checkAggregationSumGenericValueAccessor<DatetimeIntervalType, DatetimeIntervalType>();
+  checkAggregationSumGenericValueAccessor<DatetimeIntervalType,
+                                          DatetimeIntervalType>();
 }
 
 TEST_F(AggregationHandleSumTest, YearMonthIntervalTypeValueAccessorTest) {
-  checkAggregationSumGenericValueAccessor<YearMonthIntervalType, YearMonthIntervalType>();
+  checkAggregationSumGenericValueAccessor<YearMonthIntervalType,
+                                          YearMonthIntervalType>();
 }
 #endif  // QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
 
@@ -371,38 +384,53 @@ TEST_F(AggregationHandleSumDeathTest, WrongTypeTest) {
   float float_val = 0;
 
   // Passes.
-  iterateHandle(aggregation_handle_sum_state_.get(), int_non_null_type.makeValue(&int_val));
+  iterateHandle(aggregation_handle_sum_state_.get(),
+                int_non_null_type.makeValue(&int_val));
 
-  EXPECT_DEATH(iterateHandle(aggregation_handle_sum_state_.get(), long_type.makeValue(&long_val)), "");
-  EXPECT_DEATH(iterateHandle(aggregation_handle_sum_state_.get(), double_type.makeValue(&double_val)), "");
-  EXPECT_DEATH(iterateHandle(aggregation_handle_sum_state_.get(), float_type.makeValue(&float_val)), "");
-  EXPECT_DEATH(iterateHandle(aggregation_handle_sum_state_.get(), char_type.makeValue("asdf", 5)), "");
-  EXPECT_DEATH(iterateHandle(aggregation_handle_sum_state_.get(), varchar_type.makeValue("asdf", 5)), "");
+  EXPECT_DEATH(iterateHandle(aggregation_handle_sum_state_.get(),
+                             long_type.makeValue(&long_val)),
+               "");
+  EXPECT_DEATH(iterateHandle(aggregation_handle_sum_state_.get(),
+                             double_type.makeValue(&double_val)),
+               "");
+  EXPECT_DEATH(iterateHandle(aggregation_handle_sum_state_.get(),
+                             float_type.makeValue(&float_val)),
+               "");
+  EXPECT_DEATH(iterateHandle(aggregation_handle_sum_state_.get(),
+                             char_type.makeValue("asdf", 5)),
+               "");
+  EXPECT_DEATH(iterateHandle(aggregation_handle_sum_state_.get(),
+                             varchar_type.makeValue("asdf", 5)),
+               "");
 
   // Test mergeStates() with incorrectly typed handles.
   std::unique_ptr<AggregationHandle> aggregation_handle_sum_double(
-      AggregateFunctionFactory::Get(AggregationID::kSum).createHandle(
-          std::vector<const Type*>(1, &double_type)));
+      AggregateFunctionFactory::Get(AggregationID::kSum)
+          .createHandle(std::vector<const Type *>(1, &double_type)));
   std::unique_ptr<AggregationState> aggregation_state_sum_merge_double(
       aggregation_handle_sum_double->createInitialState());
-  static_cast<const AggregationHandleSum&>(*aggregation_handle_sum_double).iterateUnaryInl(
-      static_cast<AggregationStateSum*>(aggregation_state_sum_merge_double.get()),
-      double_type.makeValue(&double_val));
-  EXPECT_DEATH(aggregation_handle_sum_->mergeStates(*aggregation_state_sum_merge_double,
-                                                    aggregation_handle_sum_state_.get()),
-               "");
+  static_cast<const AggregationHandleSum &>(*aggregation_handle_sum_double)
+      .iterateUnaryInl(static_cast<AggregationStateSum *>(
+                           aggregation_state_sum_merge_double.get()),
+                       double_type.makeValue(&double_val));
+  EXPECT_DEATH(
+      aggregation_handle_sum_->mergeStates(*aggregation_state_sum_merge_double,
+                                           aggregation_handle_sum_state_.get()),
+      "");
 
   std::unique_ptr<AggregationHandle> aggregation_handle_sum_float(
-      AggregateFunctionFactory::Get(AggregationID::kSum).createHandle(
-          std::vector<const Type*>(1, &float_type)));
+      AggregateFunctionFactory::Get(AggregationID::kSum)
+          .createHandle(std::vector<const Type *>(1, &float_type)));
   std::unique_ptr<AggregationState> aggregation_state_sum_merge_float(
       aggregation_handle_sum_float->createInitialState());
-  static_cast<const AggregationHandleSum&>(*aggregation_handle_sum_float).iterateUnaryInl(
-      static_cast<AggregationStateSum*>(aggregation_state_sum_merge_float.get()),
-      float_type.makeValue(&float_val));
-  EXPECT_DEATH(aggregation_handle_sum_->mergeStates(*aggregation_state_sum_merge_float,
-                                                    aggregation_handle_sum_state_.get()),
-               "");
+  static_cast<const AggregationHandleSum &>(*aggregation_handle_sum_float)
+      .iterateUnaryInl(static_cast<AggregationStateSum *>(
+                           aggregation_state_sum_merge_float.get()),
+                       float_type.makeValue(&float_val));
+  EXPECT_DEATH(
+      aggregation_handle_sum_->mergeStates(*aggregation_state_sum_merge_float,
+                                           aggregation_handle_sum_state_.get()),
+      "");
 }
 #endif
 
@@ -423,8 +451,10 @@ TEST_F(AggregationHandleSumTest, ResultTypeForArgumentTypeTest) {
   EXPECT_TRUE(ResultTypeForArgumentTypeTest(kLong, kLong));
   EXPECT_TRUE(ResultTypeForArgumentTypeTest(kFloat, kDouble));
   EXPECT_TRUE(ResultTypeForArgumentTypeTest(kDouble, kDouble));
-  EXPECT_TRUE(ResultTypeForArgumentTypeTest(kDatetimeInterval, kDatetimeInterval));
-  EXPECT_TRUE(ResultTypeForArgumentTypeTest(kYearMonthInterval, kYearMonthInterval));
+  EXPECT_TRUE(
+      ResultTypeForArgumentTypeTest(kDatetimeInterval, kDatetimeInterval));
+  EXPECT_TRUE(
+      ResultTypeForArgumentTypeTest(kYearMonthInterval, kYearMonthInterval));
 }
 
 TEST_F(AggregationHandleSumTest, GroupByTableMergeTest) {
@@ -432,25 +462,28 @@ TEST_F(AggregationHandleSumTest, GroupByTableMergeTest) {
   initializeHandle(long_non_null_type);
   storage_manager_.reset(new StorageManager("./test_sum_data"));
   std::unique_ptr<AggregationStateHashTableBase> source_hash_table(
-      aggregation_handle_sum_->createGroupByHashTable(
-          HashTableImplType::kSimpleScalarSeparateChaining,
+      AggregationStateFastHashTableFactory::CreateResizable(
+          HashTableImplType::kSeparateChaining,
           std::vector<const Type *>(1, &long_non_null_type),
           10,
+          {aggregation_handle_sum_.get()->getPayloadSize()},
+          {aggregation_handle_sum_.get()},
           storage_manager_.get()));
   std::unique_ptr<AggregationStateHashTableBase> destination_hash_table(
-      aggregation_handle_sum_->createGroupByHashTable(
-          HashTableImplType::kSimpleScalarSeparateChaining,
+      AggregationStateFastHashTableFactory::CreateResizable(
+          HashTableImplType::kSeparateChaining,
           std::vector<const Type *>(1, &long_non_null_type),
           10,
+          {aggregation_handle_sum_.get()->getPayloadSize()},
+          {aggregation_handle_sum_.get()},
           storage_manager_.get()));
 
-  AggregationStateHashTable<AggregationStateSum> *destination_hash_table_derived =
-      static_cast<AggregationStateHashTable<AggregationStateSum> *>(
+  AggregationStateFastHashTable *destination_hash_table_derived =
+      static_cast<AggregationStateFastHashTable *>(
           destination_hash_table.get());
 
-  AggregationStateHashTable<AggregationStateSum> *source_hash_table_derived =
-      static_cast<AggregationStateHashTable<AggregationStateSum> *>(
-          source_hash_table.get());
+  AggregationStateFastHashTable *source_hash_table_derived =
+      static_cast<AggregationStateFastHashTable *>(source_hash_table.get());
 
   AggregationHandleSum *aggregation_handle_sum_derived =
       static_cast<AggregationHandleSum *>(aggregation_handle_sum_.get());
@@ -469,7 +502,8 @@ TEST_F(AggregationHandleSumTest, GroupByTableMergeTest) {
   const std::int64_t common_key_destination_sum = 4000;
   TypedValue common_key_destination_sum_val(common_key_destination_sum);
 
-  const std::int64_t merged_common_key = common_key_source_sum + common_key_destination_sum;
+  const std::int64_t merged_common_key =
+      common_key_source_sum + common_key_destination_sum;
   TypedValue common_key_merged_val(merged_common_key);
 
   const std::int64_t exclusive_key_source_sum = 100;
@@ -494,59 +528,82 @@ TEST_F(AggregationHandleSumTest, GroupByTableMergeTest) {
   // Create sum value states for keys.
   aggregation_handle_sum_derived->iterateUnaryInl(common_key_source_state.get(),
                                                   common_key_source_sum_val);
-  std::int64_t actual_val = aggregation_handle_sum_->finalize(*common_key_source_state)
-                       .getLiteral<std::int64_t>();
+  std::int64_t actual_val =
+      aggregation_handle_sum_->finalize(*common_key_source_state)
+          .getLiteral<std::int64_t>();
   EXPECT_EQ(common_key_source_sum_val.getLiteral<std::int64_t>(), actual_val);
 
   aggregation_handle_sum_derived->iterateUnaryInl(
       common_key_destination_state.get(), common_key_destination_sum_val);
   actual_val = aggregation_handle_sum_->finalize(*common_key_destination_state)
                    .getLiteral<std::int64_t>();
-  EXPECT_EQ(common_key_destination_sum_val.getLiteral<std::int64_t>(), actual_val);
+  EXPECT_EQ(common_key_destination_sum_val.getLiteral<std::int64_t>(),
+            actual_val);
 
   aggregation_handle_sum_derived->iterateUnaryInl(
       exclusive_key_destination_state.get(), exclusive_key_destination_sum_val);
   actual_val =
       aggregation_handle_sum_->finalize(*exclusive_key_destination_state)
           .getLiteral<std::int64_t>();
-  EXPECT_EQ(exclusive_key_destination_sum_val.getLiteral<std::int64_t>(), actual_val);
+  EXPECT_EQ(exclusive_key_destination_sum_val.getLiteral<std::int64_t>(),
+            actual_val);
 
   aggregation_handle_sum_derived->iterateUnaryInl(
       exclusive_key_source_state.get(), exclusive_key_source_sum_val);
   actual_val = aggregation_handle_sum_->finalize(*exclusive_key_source_state)
                    .getLiteral<std::int64_t>();
-  EXPECT_EQ(exclusive_key_source_sum_val.getLiteral<std::int64_t>(), actual_val);
+  EXPECT_EQ(exclusive_key_source_sum_val.getLiteral<std::int64_t>(),
+            actual_val);
 
   // Add the key-state pairs to the hash tables.
-  source_hash_table_derived->putCompositeKey(common_key,
-                                             *common_key_source_state);
-  destination_hash_table_derived->putCompositeKey(
-      common_key, *common_key_destination_state);
-  source_hash_table_derived->putCompositeKey(exclusive_source_key,
-                                             *exclusive_key_source_state);
-  destination_hash_table_derived->putCompositeKey(
-      exclusive_destination_key, *exclusive_key_destination_state);
+  unsigned char buffer[100];
+  buffer[0] = '\0';
+  memcpy(buffer + 1,
+         common_key_source_state.get()->getPayloadAddress(),
+         aggregation_handle_sum_.get()->getPayloadSize());
+  source_hash_table_derived->putCompositeKeyFast(common_key, buffer);
+
+  memcpy(buffer + 1,
+         common_key_destination_state.get()->getPayloadAddress(),
+         aggregation_handle_sum_.get()->getPayloadSize());
+  destination_hash_table_derived->putCompositeKeyFast(common_key, buffer);
+
+  memcpy(buffer + 1,
+         exclusive_key_source_state.get()->getPayloadAddress(),
+         aggregation_handle_sum_.get()->getPayloadSize());
+  source_hash_table_derived->putCompositeKeyFast(exclusive_source_key, buffer);
+
+  memcpy(buffer + 1,
+         exclusive_key_destination_state.get()->getPayloadAddress(),
+         aggregation_handle_sum_.get()->getPayloadSize());
+  destination_hash_table_derived->putCompositeKeyFast(exclusive_destination_key,
+                                                      buffer);
 
   EXPECT_EQ(2u, destination_hash_table_derived->numEntries());
   EXPECT_EQ(2u, source_hash_table_derived->numEntries());
 
-  aggregation_handle_sum_->mergeGroupByHashTables(*source_hash_table,
-                                                  destination_hash_table.get());
+  AggregationOperationState::mergeGroupByHashTables(
+      source_hash_table.get(), destination_hash_table.get());
 
   EXPECT_EQ(3u, destination_hash_table_derived->numEntries());
 
   CheckSumValue<std::int64_t>(
       common_key_merged_val.getLiteral<std::int64_t>(),
-      *aggregation_handle_sum_derived,
-      *(destination_hash_table_derived->getSingleCompositeKey(common_key)));
-  CheckSumValue<std::int64_t>(exclusive_key_destination_sum_val.getLiteral<std::int64_t>(),
-                     *aggregation_handle_sum_derived,
-                     *(destination_hash_table_derived->getSingleCompositeKey(
-                         exclusive_destination_key)));
-  CheckSumValue<std::int64_t>(exclusive_key_source_sum_val.getLiteral<std::int64_t>(),
-                     *aggregation_handle_sum_derived,
-                     *(source_hash_table_derived->getSingleCompositeKey(
-                         exclusive_source_key)));
+      aggregation_handle_sum_derived->finalizeHashTableEntryFast(
+          destination_hash_table_derived->getSingleCompositeKey(common_key) +
+          1));
+  CheckSumValue<std::int64_t>(
+      exclusive_key_destination_sum_val.getLiteral<std::int64_t>(),
+      aggregation_handle_sum_derived->finalizeHashTableEntryFast(
+          destination_hash_table_derived->getSingleCompositeKey(
+              exclusive_destination_key) +
+          1));
+  CheckSumValue<std::int64_t>(
+      exclusive_key_source_sum_val.getLiteral<std::int64_t>(),
+      aggregation_handle_sum_derived->finalizeHashTableEntryFast(
+          source_hash_table_derived->getSingleCompositeKey(
+              exclusive_source_key) +
+          1));
 }
 
 }  // namespace quickstep
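
For readers following the GroupByTableMergeTest changes above: aggregation states are no longer inserted into a templated AggregationStateHashTable as typed objects. Instead the state's raw payload bytes are copied behind a one-byte prefix (the test writes '\0' there, apparently as a per-entry existence/initialization flag) and the whole buffer is handed to putCompositeKeyFast(). The sketch below shows that idiom in isolation, using only calls that appear in this diff; the include list mirrors the unit test's, and the LongType::InstanceNonNullable() accessor and the exact meaning of the prefix byte are assumptions, not something this commit documents.

// Minimal sketch (not part of the commit) of the fast-hash-table insertion
// idiom used in the test above. Assumes a Quickstep build tree.
#include <cstdint>
#include <memory>
#include <vector>

#include "expressions/aggregation/AggregateFunction.hpp"
#include "expressions/aggregation/AggregateFunctionFactory.hpp"
#include "expressions/aggregation/AggregationHandle.hpp"
#include "expressions/aggregation/AggregationID.hpp"
#include "storage/FastHashTable.hpp"        // assumed to declare AggregationStateFastHashTable
#include "storage/FastHashTableFactory.hpp"
#include "storage/HashTableBase.hpp"
#include "storage/StorageManager.hpp"
#include "types/LongType.hpp"
#include "types/TypedValue.hpp"

namespace quickstep {

void InsertInitialSumStateSketch() {
  // Assumed accessor for a non-nullable LONG type.
  const Type &long_type = LongType::InstanceNonNullable();

  std::unique_ptr<AggregationHandle> handle(
      AggregateFunctionFactory::Get(AggregationID::kSum)
          .createHandle(std::vector<const Type *>(1, &long_type)));

  StorageManager storage_manager("./sketch_data");
  std::unique_ptr<AggregationStateHashTableBase> table(
      AggregationStateFastHashTableFactory::CreateResizable(
          HashTableImplType::kSeparateChaining,
          std::vector<const Type *>(1, &long_type),
          10,                          // estimated number of entries
          {handle->getPayloadSize()},  // one payload slot per handle
          {handle.get()},
          &storage_manager));
  AggregationStateFastHashTable *fast_table =
      static_cast<AggregationStateFastHashTable *>(table.get());

  // One prefix byte (written as '\0', as in the test) followed by the
  // handle's payload, initialized in place via initPayload().
  std::vector<std::uint8_t> buffer(1 + handle->getPayloadSize());
  buffer[0] = '\0';
  handle->initPayload(buffer.data() + 1);

  std::vector<TypedValue> key(1, TypedValue(static_cast<std::int64_t>(42)));
  fast_table->putCompositeKeyFast(key, buffer.data());
}

}  // namespace quickstep

Reading an entry back mirrors the test: getSingleCompositeKey(key) returns a pointer into the bucket, and finalizeHashTableEntryFast() is applied to that pointer plus one to skip the prefix byte.
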

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/dad7d6f3/storage/AggregationOperationState.cpp
----------------------------------------------------------------------
diff --git a/storage/AggregationOperationState.cpp b/storage/AggregationOperationState.cpp
index 8019473..e637105 100644
--- a/storage/AggregationOperationState.cpp
+++ b/storage/AggregationOperationState.cpp
@@ -59,7 +59,7 @@ namespace quickstep {
 
 AggregationOperationState::AggregationOperationState(
     const CatalogRelationSchema &input_relation,
-    const std::vector<const AggregateFunction*> &aggregate_functions,
+    const std::vector<const AggregateFunction *> &aggregate_functions,
     std::vector<std::vector<std::unique_ptr<const Scalar>>> &&arguments,
     std::vector<bool> &&is_distinct,
     std::vector<std::unique_ptr<const Scalar>> &&group_by,
@@ -78,7 +78,7 @@ AggregationOperationState::AggregationOperationState(
   DCHECK(aggregate_functions.size() == arguments_.size());
 
   // Get the types of GROUP BY expressions for creating HashTables below.
-  std::vector<const Type*> group_by_types;
+  std::vector<const Type *> group_by_types;
   for (const std::unique_ptr<const Scalar> &group_by_element : group_by_list_) {
     group_by_types.emplace_back(&group_by_element->getType());
   }
@@ -94,27 +94,29 @@ AggregationOperationState::AggregationOperationState(
     handles_.emplace_back(new AggregationHandleDistinct());
     arguments_.push_back({});
     is_distinct_.emplace_back(false);
-    group_by_hashtable_pools_.emplace_back(std::unique_ptr<HashTablePool>(
-        new HashTablePool(estimated_num_entries,
-                          hash_table_impl_type,
-                          group_by_types,
-                          {1},
-                          handles_,
-                          storage_manager)));
+    group_by_hashtable_pools_.emplace_back(
+        std::unique_ptr<HashTablePool>(new HashTablePool(estimated_num_entries,
+                                                         hash_table_impl_type,
+                                                         group_by_types,
+                                                         {1},
+                                                         handles_,
+                                                         storage_manager)));
   } else {
     // Set up each individual aggregate in this operation.
-    std::vector<const AggregateFunction*>::const_iterator agg_func_it
-        = aggregate_functions.begin();
-    std::vector<std::vector<std::unique_ptr<const Scalar>>>::const_iterator args_it
-        = arguments_.begin();
+    std::vector<const AggregateFunction *>::const_iterator agg_func_it =
+        aggregate_functions.begin();
+    std::vector<std::vector<std::unique_ptr<const Scalar>>>::const_iterator
+        args_it = arguments_.begin();
     std::vector<bool>::const_iterator is_distinct_it = is_distinct_.begin();
-    std::vector<HashTableImplType>::const_iterator distinctify_hash_table_impl_types_it
-        = distinctify_hash_table_impl_types.begin();
+    std::vector<HashTableImplType>::const_iterator
+        distinctify_hash_table_impl_types_it =
+            distinctify_hash_table_impl_types.begin();
     std::vector<std::size_t> payload_sizes;
-    for (; agg_func_it != aggregate_functions.end(); ++agg_func_it, ++args_it, ++is_distinct_it) {
+    for (; agg_func_it != aggregate_functions.end();
+         ++agg_func_it, ++args_it, ++is_distinct_it) {
       // Get the Types of this aggregate's arguments so that we can create an
       // AggregationHandle.
-      std::vector<const Type*> argument_types;
+      std::vector<const Type *> argument_types;
       for (const std::unique_ptr<const Scalar> &argument : *args_it) {
         argument_types.emplace_back(&argument->getType());
       }
@@ -129,12 +131,13 @@ AggregationOperationState::AggregationOperationState(
       handles_.emplace_back((*agg_func_it)->createHandle(argument_types));
 
       if (!group_by_list_.empty()) {
-        // Aggregation with GROUP BY: combined payload is partially updated in the presence of DISTINCT.
-         if (*is_distinct_it) {
-            handles_.back()->BlockUpdate();
-         }
-         group_by_handles.emplace_back(handles_.back());
-         payload_sizes.emplace_back(group_by_handles.back()->getPayloadSize());
+        // Aggregation with GROUP BY: combined payload is partially updated in
+        // the presence of DISTINCT.
+        if (*is_distinct_it) {
+          handles_.back()->blockUpdate();
+        }
+        group_by_handles.emplace_back(handles_.back());
+        payload_sizes.emplace_back(group_by_handles.back()->getPayloadSize());
       } else {
         // Aggregation without GROUP BY: create a single global state.
         single_states_.emplace_back(handles_.back()->createInitialState());
@@ -146,31 +149,38 @@ AggregationOperationState::AggregationOperationState(
         std::vector<attribute_id> local_arguments_as_attributes;
         local_arguments_as_attributes.reserve(args_it->size());
         for (const std::unique_ptr<const Scalar> &argument : *args_it) {
-          const attribute_id argument_id = argument->getAttributeIdForValueAccessor();
+          const attribute_id argument_id =
+              argument->getAttributeIdForValueAccessor();
           if (argument_id == -1) {
             local_arguments_as_attributes.clear();
             break;
           } else {
-            DCHECK_EQ(input_relation_.getID(), argument->getRelationIdForValueAccessor());
+            DCHECK_EQ(input_relation_.getID(),
+                      argument->getRelationIdForValueAccessor());
             local_arguments_as_attributes.push_back(argument_id);
           }
         }
 
-        arguments_as_attributes_.emplace_back(std::move(local_arguments_as_attributes));
+        arguments_as_attributes_.emplace_back(
+            std::move(local_arguments_as_attributes));
 #endif
       }
 
-      // Initialize the corresponding distinctify hash table if this is a DISTINCT
+      // Initialize the corresponding distinctify hash table if this is a
+      // DISTINCT
       // aggregation.
       if (*is_distinct_it) {
-        std::vector<const Type*> key_types(group_by_types);
-        key_types.insert(key_types.end(), argument_types.begin(), argument_types.end());
-        // TODO(jianqiao): estimated_num_entries is quite inaccurate for estimating
+        std::vector<const Type *> key_types(group_by_types);
+        key_types.insert(
+            key_types.end(), argument_types.begin(), argument_types.end());
+        // TODO(jianqiao): estimated_num_entries is quite inaccurate for
+        // estimating
         // the number of entries in the distinctify hash table. We may estimate
-        // for each distinct aggregation an estimated_num_distinct_keys value during
+        // for each distinct aggregation an estimated_num_distinct_keys value
+        // during
         // query optimization, if it is worthwhile.
         distinctify_hashtables_.emplace_back(
-        AggregationStateFastHashTableFactory::CreateResizable(
+            AggregationStateFastHashTableFactory::CreateResizable(
                 *distinctify_hash_table_impl_types_it,
                 key_types,
                 estimated_num_entries,
@@ -184,16 +194,17 @@ AggregationOperationState::AggregationOperationState(
     }
 
     if (!group_by_handles.empty()) {
-      // Aggregation with GROUP BY: create a HashTable pool for per-group states.
+      // Aggregation with GROUP BY: create a HashTable pool for per-group
+      // states.
       group_by_hashtable_pools_.emplace_back(std::unique_ptr<HashTablePool>(
-            new HashTablePool(estimated_num_entries,
-                              hash_table_impl_type,
-                              group_by_types,
-                              payload_sizes,
-                              group_by_handles,
-                              storage_manager)));
-      }
+          new HashTablePool(estimated_num_entries,
+                            hash_table_impl_type,
+                            group_by_types,
+                            payload_sizes,
+                            group_by_handles,
+                            storage_manager)));
     }
+  }
 }
 
 AggregationOperationState* AggregationOperationState::ReconstructFromProto(
@@ -203,7 +214,7 @@ AggregationOperationState* AggregationOperationState::ReconstructFromProto(
   DCHECK(ProtoIsValid(proto, database));
 
   // Rebuild constructor arguments from their representation in 'proto'.
-  std::vector<const AggregateFunction*> aggregate_functions;
+  std::vector<const AggregateFunction *> aggregate_functions;
   std::vector<std::vector<std::unique_ptr<const Scalar>>> arguments;
   std::vector<bool> is_distinct;
   std::vector<HashTableImplType> distinctify_hash_table_impl_types;
@@ -216,62 +227,63 @@ AggregationOperationState* AggregationOperationState::ReconstructFromProto(
 
     arguments.emplace_back();
     arguments.back().reserve(agg_proto.argument_size());
-    for (int argument_idx = 0; argument_idx < agg_proto.argument_size(); ++argument_idx) {
+    for (int argument_idx = 0; argument_idx < agg_proto.argument_size();
+         ++argument_idx) {
       arguments.back().emplace_back(ScalarFactory::ReconstructFromProto(
-          agg_proto.argument(argument_idx),
-          database));
+          agg_proto.argument(argument_idx), database));
     }
 
     is_distinct.emplace_back(agg_proto.is_distinct());
 
     if (agg_proto.is_distinct()) {
       distinctify_hash_table_impl_types.emplace_back(
-          HashTableImplTypeFromProto(
-              proto.distinctify_hash_table_impl_types(distinctify_hash_table_impl_type_index)));
+          HashTableImplTypeFromProto(proto.distinctify_hash_table_impl_types(
+              distinctify_hash_table_impl_type_index)));
       ++distinctify_hash_table_impl_type_index;
     }
   }
 
   std::vector<std::unique_ptr<const Scalar>> group_by_expressions;
-  for (int group_by_idx = 0;
-       group_by_idx < proto.group_by_expressions_size();
+  for (int group_by_idx = 0; group_by_idx < proto.group_by_expressions_size();
        ++group_by_idx) {
     group_by_expressions.emplace_back(ScalarFactory::ReconstructFromProto(
-        proto.group_by_expressions(group_by_idx),
-        database));
+        proto.group_by_expressions(group_by_idx), database));
   }
 
   unique_ptr<Predicate> predicate;
   if (proto.has_predicate()) {
     predicate.reset(
-        PredicateFactory::ReconstructFromProto(proto.predicate(),
-                                               database));
+        PredicateFactory::ReconstructFromProto(proto.predicate(), database));
   }
 
-  return new AggregationOperationState(database.getRelationSchemaById(proto.relation_id()),
-                                       aggregate_functions,
-                                       std::move(arguments),
-                                       std::move(is_distinct),
-                                       std::move(group_by_expressions),
-                                       predicate.release(),
-                                       proto.estimated_num_entries(),
-                                       HashTableImplTypeFromProto(proto.hash_table_impl_type()),
-                                       distinctify_hash_table_impl_types,
-                                       storage_manager);
+  return new AggregationOperationState(
+      database.getRelationSchemaById(proto.relation_id()),
+      aggregate_functions,
+      std::move(arguments),
+      std::move(is_distinct),
+      std::move(group_by_expressions),
+      predicate.release(),
+      proto.estimated_num_entries(),
+      HashTableImplTypeFromProto(proto.hash_table_impl_type()),
+      distinctify_hash_table_impl_types,
+      storage_manager);
 }
 
-bool AggregationOperationState::ProtoIsValid(const serialization::AggregationOperationState &proto,
-                                             const CatalogDatabaseLite &database) {
+bool AggregationOperationState::ProtoIsValid(
+    const serialization::AggregationOperationState &proto,
+    const CatalogDatabaseLite &database) {
   if (!proto.IsInitialized() ||
       !database.hasRelationWithId(proto.relation_id()) ||
       (proto.aggregates_size() < 0)) {
     return false;
   }
 
-  std::size_t num_distinctify_hash_tables = proto.distinctify_hash_table_impl_types_size();
+  std::size_t num_distinctify_hash_tables =
+      proto.distinctify_hash_table_impl_types_size();
   std::size_t distinctify_hash_table_impl_type_index = 0;
   for (int i = 0; i < proto.aggregates_size(); ++i) {
-    if (!AggregateFunctionFactory::ProtoIsValid(proto.aggregates(i).function())) {
+    if (!AggregateFunctionFactory::ProtoIsValid(
+            proto.aggregates(i).function())) {
       return false;
     }
 
@@ -282,16 +294,18 @@ bool AggregationOperationState::ProtoIsValid(const serialization::AggregationOpe
     for (int argument_idx = 0;
          argument_idx < proto.aggregates(i).argument_size();
          ++argument_idx) {
-      if (!ScalarFactory::ProtoIsValid(proto.aggregates(i).argument(argument_idx),
-                                       database)) {
+      if (!ScalarFactory::ProtoIsValid(
+              proto.aggregates(i).argument(argument_idx), database)) {
         return false;
       }
     }
 
     if (proto.aggregates(i).is_distinct()) {
-      if (distinctify_hash_table_impl_type_index >= num_distinctify_hash_tables ||
+      if (distinctify_hash_table_impl_type_index >=
+              num_distinctify_hash_tables ||
           !serialization::HashTableImplType_IsValid(
-              proto.distinctify_hash_table_impl_types(distinctify_hash_table_impl_type_index))) {
+              proto.distinctify_hash_table_impl_types(
+                  distinctify_hash_table_impl_type_index))) {
         return false;
       }
     }
@@ -304,8 +318,9 @@ bool AggregationOperationState::ProtoIsValid(const serialization::AggregationOpe
   }
 
   if (proto.group_by_expressions_size() > 0) {
-    if (!proto.has_hash_table_impl_type()
-        || !serialization::HashTableImplType_IsValid(proto.hash_table_impl_type())) {
+    if (!proto.has_hash_table_impl_type() ||
+        !serialization::HashTableImplType_IsValid(
+            proto.hash_table_impl_type())) {
       return false;
     }
   }
@@ -327,7 +342,8 @@ void AggregationOperationState::aggregateBlock(const block_id input_block) {
   }
 }
 
-void AggregationOperationState::finalizeAggregate(InsertDestination *output_destination) {
+void AggregationOperationState::finalizeAggregate(
+    InsertDestination *output_destination) {
   if (group_by_list_.empty()) {
     finalizeSingleState(output_destination);
   } else {
@@ -346,19 +362,19 @@ void AggregationOperationState::mergeSingleState(
   }
 }
 
-void AggregationOperationState::aggregateBlockSingleState(const block_id input_block) {
+void AggregationOperationState::aggregateBlockSingleState(
+    const block_id input_block) {
   // Aggregate per-block state for each aggregate.
   std::vector<std::unique_ptr<AggregationState>> local_state;
 
-  BlockReference block(storage_manager_->getBlock(input_block, input_relation_));
+  BlockReference block(
+      storage_manager_->getBlock(input_block, input_relation_));
 
   // If there is a filter predicate, 'reuse_matches' holds the set of matching
   // tuples so that it can be reused across multiple aggregates (i.e. we only
   // pay the cost of evaluating the predicate once).
   std::unique_ptr<TupleIdSequence> reuse_matches;
-  for (std::size_t agg_idx = 0;
-       agg_idx < handles_.size();
-       ++agg_idx) {
+  for (std::size_t agg_idx = 0; agg_idx < handles_.size(); ++agg_idx) {
     const std::vector<attribute_id> *local_arguments_as_attributes = nullptr;
 #ifdef QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
     // If all arguments are attributes of the input relation, elide a copy.
@@ -381,12 +397,11 @@ void AggregationOperationState::aggregateBlockSingleState(const block_id input_b
       local_state.emplace_back(nullptr);
     } else {
       // Call StorageBlock::aggregate() to actually do the aggregation.
-      local_state.emplace_back(
-          block->aggregate(*handles_[agg_idx],
-                           arguments_[agg_idx],
-                           local_arguments_as_attributes,
-                           predicate_.get(),
-                           &reuse_matches));
+      local_state.emplace_back(block->aggregate(*handles_[agg_idx],
+                                                arguments_[agg_idx],
+                                                local_arguments_as_attributes,
+                                                predicate_.get(),
+                                                &reuse_matches));
     }
   }
 
@@ -394,8 +409,10 @@ void AggregationOperationState::aggregateBlockSingleState(const block_id input_b
   mergeSingleState(local_state);
 }
 
-void AggregationOperationState::aggregateBlockHashTable(const block_id input_block) {
-  BlockReference block(storage_manager_->getBlock(input_block, input_relation_));
+void AggregationOperationState::aggregateBlockHashTable(
+    const block_id input_block) {
+  BlockReference block(
+      storage_manager_->getBlock(input_block, input_relation_));
 
   // If there is a filter predicate, 'reuse_matches' holds the set of matching
   // tuples so that it can be reused across multiple aggregates (i.e. we only
@@ -407,11 +424,10 @@ void AggregationOperationState::aggregateBlockHashTable(const block_id input_blo
   // GROUP BY expressions once).
   std::vector<std::unique_ptr<ColumnVector>> reuse_group_by_vectors;
 
-  for (std::size_t agg_idx = 0;
-       agg_idx < handles_.size();
-       ++agg_idx) {
+  for (std::size_t agg_idx = 0; agg_idx < handles_.size(); ++agg_idx) {
     if (is_distinct_[agg_idx]) {
-      // Call StorageBlock::aggregateDistinct() to insert the GROUP BY expression
+      // Call StorageBlock::aggregateDistinct() to insert the GROUP BY
+      // expression
       // values and the aggregation arguments together as keys directly into the
       // (threadsafe) shared global distinctify HashTable for this aggregate.
       block->aggregateDistinct(*handles_[agg_idx],
@@ -429,7 +445,8 @@ void AggregationOperationState::aggregateBlockHashTable(const block_id input_blo
   // directly into the (threadsafe) shared global HashTable for this
   // aggregate.
   DCHECK(group_by_hashtable_pools_[0] != nullptr);
-  AggregationStateHashTableBase *agg_hash_table = group_by_hashtable_pools_[0]->getHashTableFast();
+  AggregationStateHashTableBase *agg_hash_table =
+      group_by_hashtable_pools_[0]->getHashTableFast();
   DCHECK(agg_hash_table != nullptr);
   block->aggregateGroupByFast(arguments_,
                               group_by_list_,
@@ -440,32 +457,35 @@ void AggregationOperationState::aggregateBlockHashTable(const block_id input_blo
   group_by_hashtable_pools_[0]->returnHashTable(agg_hash_table);
 }
 
-void AggregationOperationState::finalizeSingleState(InsertDestination *output_destination) {
+void AggregationOperationState::finalizeSingleState(
+    InsertDestination *output_destination) {
   // Simply build up a Tuple from the finalized values for each aggregate and
   // insert it in '*output_destination'.
   std::vector<TypedValue> attribute_values;
 
-  for (std::size_t agg_idx = 0;
-       agg_idx < handles_.size();
-       ++agg_idx) {
+  for (std::size_t agg_idx = 0; agg_idx < handles_.size(); ++agg_idx) {
     if (is_distinct_[agg_idx]) {
       single_states_[agg_idx].reset(
-          handles_[agg_idx]->aggregateOnDistinctifyHashTableForSingle(*distinctify_hashtables_[agg_idx]));
+          handles_[agg_idx]->aggregateOnDistinctifyHashTableForSingle(
+              *distinctify_hashtables_[agg_idx]));
     }
 
-    attribute_values.emplace_back(handles_[agg_idx]->finalize(*single_states_[agg_idx]));
+    attribute_values.emplace_back(
+        handles_[agg_idx]->finalize(*single_states_[agg_idx]));
   }
 
   output_destination->insertTuple(Tuple(std::move(attribute_values)));
 }
 
-void AggregationOperationState::mergeGroupByHashTables(AggregationStateHashTableBase *src,
-                                                       AggregationStateHashTableBase *dst) {
-    HashTableMergerFast merger(dst);
-    (static_cast<FastHashTable<true, false, true, false> *>(src))->forEachCompositeKeyFast(&merger);
+void AggregationOperationState::mergeGroupByHashTables(
+    AggregationStateHashTableBase *src, AggregationStateHashTableBase *dst) {
+  HashTableMergerFast merger(dst);
+  (static_cast<FastHashTable<true, false, true, false> *>(src))
+      ->forEachCompositeKeyFast(&merger);
 }
 
-void AggregationOperationState::finalizeHashTable(InsertDestination *output_destination) {
+void AggregationOperationState::finalizeHashTable(
+    InsertDestination *output_destination) {
   // Each element of 'group_by_keys' is a vector of values for a particular
   // group (which is also the prefix of the finalized Tuple for that group).
   std::vector<std::vector<TypedValue>> group_by_keys;
@@ -483,17 +503,14 @@ void AggregationOperationState::finalizeHashTable(InsertDestination *output_dest
          hash_table_index < static_cast<int>(hash_tables->size() - 1);
          ++hash_table_index) {
       // Merge each hash table to the last hash table.
-      mergeGroupByHashTables(
-          (*hash_tables)[hash_table_index].get(),
-          hash_tables->back().get());
+      mergeGroupByHashTables((*hash_tables)[hash_table_index].get(),
+                             hash_tables->back().get());
     }
   }
 
   // Collect per-aggregate finalized values.
   std::vector<std::unique_ptr<ColumnVector>> final_values;
-  for (std::size_t agg_idx = 0;
-       agg_idx < handles_.size();
-       ++agg_idx) {
+  for (std::size_t agg_idx = 0; agg_idx < handles_.size(); ++agg_idx) {
     if (is_distinct_[agg_idx]) {
       DCHECK(group_by_hashtable_pools_[0] != nullptr);
       auto *hash_tables = group_by_hashtable_pools_[0]->getAllHashTables();
@@ -502,18 +519,17 @@ void AggregationOperationState::finalizeHashTable(InsertDestination *output_dest
         // We may have a case where hash_tables is empty, e.g. no input blocks.
         // However for aggregateOnDistinctifyHashTableForGroupBy to work
         // correctly, we should create an empty group by hash table.
-        AggregationStateHashTableBase *new_hash_table = group_by_hashtable_pools_[0]->getHashTableFast();
+        AggregationStateHashTableBase *new_hash_table =
+            group_by_hashtable_pools_[0]->getHashTableFast();
         group_by_hashtable_pools_[0]->returnHashTable(new_hash_table);
         hash_tables = group_by_hashtable_pools_[0]->getAllHashTables();
       }
       DCHECK(hash_tables->back() != nullptr);
       AggregationStateHashTableBase *agg_hash_table = hash_tables->back().get();
       DCHECK(agg_hash_table != nullptr);
-      handles_[agg_idx]->AllowUpdate();
+      handles_[agg_idx]->allowUpdate();
       handles_[agg_idx]->aggregateOnDistinctifyHashTableForGroupBy(
-          *distinctify_hashtables_[agg_idx],
-          agg_hash_table,
-          agg_idx);
+          *distinctify_hashtables_[agg_idx], agg_hash_table, agg_idx);
     }
 
     auto *hash_tables = group_by_hashtable_pools_[0]->getAllHashTables();
@@ -522,16 +538,15 @@ void AggregationOperationState::finalizeHashTable(InsertDestination *output_dest
       // We may have a case where hash_tables is empty, e.g. no input blocks.
       // However for aggregateOnDistinctifyHashTableForGroupBy to work
       // correctly, we should create an empty group by hash table.
-      AggregationStateHashTableBase *new_hash_table = group_by_hashtable_pools_[0]->getHashTable();
+      AggregationStateHashTableBase *new_hash_table =
+          group_by_hashtable_pools_[0]->getHashTable();
       group_by_hashtable_pools_[0]->returnHashTable(new_hash_table);
       hash_tables = group_by_hashtable_pools_[0]->getAllHashTables();
     }
     AggregationStateHashTableBase *agg_hash_table = hash_tables->back().get();
     DCHECK(agg_hash_table != nullptr);
-    ColumnVector* agg_result_col =
-        handles_[agg_idx]->finalizeHashTable(*agg_hash_table,
-                                             &group_by_keys,
-                                              agg_idx);
+    ColumnVector *agg_result_col = handles_[agg_idx]->finalizeHashTable(
+        *agg_hash_table, &group_by_keys, agg_idx);
     if (agg_result_col != nullptr) {
       final_values.emplace_back(agg_result_col);
     }
@@ -549,16 +564,20 @@ void AggregationOperationState::finalizeHashTable(InsertDestination *output_dest
   for (const std::unique_ptr<const Scalar> &group_by_element : group_by_list_) {
     const Type &group_by_type = group_by_element->getType();
     if (NativeColumnVector::UsableForType(group_by_type)) {
-      NativeColumnVector *element_cv = new NativeColumnVector(group_by_type, group_by_keys.size());
+      NativeColumnVector *element_cv =
+          new NativeColumnVector(group_by_type, group_by_keys.size());
       group_by_cvs.emplace_back(element_cv);
       for (std::vector<TypedValue> &group_key : group_by_keys) {
-        element_cv->appendTypedValue(std::move(group_key[group_by_element_idx]));
+        element_cv->appendTypedValue(
+            std::move(group_key[group_by_element_idx]));
       }
     } else {
-      IndirectColumnVector *element_cv = new IndirectColumnVector(group_by_type, group_by_keys.size());
+      IndirectColumnVector *element_cv =
+          new IndirectColumnVector(group_by_type, group_by_keys.size());
       group_by_cvs.emplace_back(element_cv);
       for (std::vector<TypedValue> &group_key : group_by_keys) {
-        element_cv->appendTypedValue(std::move(group_key[group_by_element_idx]));
+        element_cv->appendTypedValue(
+            std::move(group_key[group_by_element_idx]));
       }
     }
     ++group_by_element_idx;
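
Because mergeGroupByHashTables() is now a public static helper (see the header change below), the per-thread tables handed out by a HashTablePool can be collapsed from outside this class exactly as finalizeHashTable() does above. A minimal sketch, assuming "storage/HashTablePool.hpp" is the right include for HashTablePool and taking the pool pointer as given:

// Sketch only: folds every table in a pool into the pool's last table,
// mirroring the merge loop in finalizeHashTable().
#include <cstddef>

#include "storage/AggregationOperationState.hpp"
#include "storage/HashTablePool.hpp"

namespace quickstep {

void MergePoolTablesSketch(HashTablePool *pool) {
  auto *hash_tables = pool->getAllHashTables();
  if (hash_tables->size() > 1) {
    for (std::size_t i = 0; i < hash_tables->size() - 1; ++i) {
      // Each earlier table is merged into the last one, which then holds
      // the combined per-group states.
      AggregationOperationState::mergeGroupByHashTables(
          (*hash_tables)[i].get(), hash_tables->back().get());
    }
  }
}

}  // namespace quickstep
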

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/dad7d6f3/storage/AggregationOperationState.hpp
----------------------------------------------------------------------
diff --git a/storage/AggregationOperationState.hpp b/storage/AggregationOperationState.hpp
index 8934cda..ba54098 100644
--- a/storage/AggregationOperationState.hpp
+++ b/storage/AggregationOperationState.hpp
@@ -102,16 +102,17 @@ class AggregationOperationState {
    *        tables. Single aggregation state (when GROUP BY list is not
    *        specified) is not allocated using memory from storage manager.
    */
-  AggregationOperationState(const CatalogRelationSchema &input_relation,
-                            const std::vector<const AggregateFunction*> &aggregate_functions,
-                            std::vector<std::vector<std::unique_ptr<const Scalar>>> &&arguments,
-                            std::vector<bool> &&is_distinct,
-                            std::vector<std::unique_ptr<const Scalar>> &&group_by,
-                            const Predicate *predicate,
-                            const std::size_t estimated_num_entries,
-                            const HashTableImplType hash_table_impl_type,
-                            const std::vector<HashTableImplType> &distinctify_hash_table_impl_types,
-                            StorageManager *storage_manager);
+  AggregationOperationState(
+      const CatalogRelationSchema &input_relation,
+      const std::vector<const AggregateFunction *> &aggregate_functions,
+      std::vector<std::vector<std::unique_ptr<const Scalar>>> &&arguments,
+      std::vector<bool> &&is_distinct,
+      std::vector<std::unique_ptr<const Scalar>> &&group_by,
+      const Predicate *predicate,
+      const std::size_t estimated_num_entries,
+      const HashTableImplType hash_table_impl_type,
+      const std::vector<HashTableImplType> &distinctify_hash_table_impl_types,
+      StorageManager *storage_manager);
 
   ~AggregationOperationState() {}
 
@@ -143,8 +144,9 @@ class AggregationOperationState {
    *        in.
    * @return Whether proto is fully-formed and valid.
    **/
-  static bool ProtoIsValid(const serialization::AggregationOperationState &proto,
-                           const CatalogDatabaseLite &database);
+  static bool ProtoIsValid(
+      const serialization::AggregationOperationState &proto,
+      const CatalogDatabaseLite &database);
 
   /**
    * @brief Compute aggregates on the tuples of the given storage block,
@@ -165,12 +167,16 @@ class AggregationOperationState {
    **/
   void finalizeAggregate(InsertDestination *output_destination);
 
+  static void mergeGroupByHashTables(AggregationStateHashTableBase *src,
+                                     AggregationStateHashTableBase *dst);
+
   int dflag;
 
  private:
   // Merge locally (per storage block) aggregated states with global aggregation
   // states.
-  void mergeSingleState(const std::vector<std::unique_ptr<AggregationState>> &local_state);
+  void mergeSingleState(
+      const std::vector<std::unique_ptr<AggregationState>> &local_state);
 
   // Aggregate on input block.
   void aggregateBlockSingleState(const block_id input_block);
@@ -187,7 +193,7 @@ class AggregationOperationState {
 
   // Each individual aggregate in this operation has an AggregationHandle and
   // some number of Scalar arguments.
-//  std::vector<std::unique_ptr<AggregationHandle>> handles_;
+  //  std::vector<std::unique_ptr<AggregationHandle>> handles_;
   std::vector<AggregationHandle *> handles_;
   std::vector<std::vector<std::unique_ptr<const Scalar>>> arguments_;
 
@@ -196,7 +202,8 @@ class AggregationOperationState {
   std::vector<bool> is_distinct_;
 
   // Hash table for obtaining distinct (i.e. unique) arguments.
-  std::vector<std::unique_ptr<AggregationStateHashTableBase>> distinctify_hashtables_;
+  std::vector<std::unique_ptr<AggregationStateHashTableBase>>
+      distinctify_hashtables_;
 
 #ifdef QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
   // If all an aggregate's argument expressions are simply attributes in
@@ -211,15 +218,14 @@ class AggregationOperationState {
   //
   // TODO(shoban): We should ideally store the aggregation state together in one
   // hash table to prevent multiple lookups.
-  std::vector<std::unique_ptr<AggregationStateHashTableBase>> group_by_hashtables_;
+  std::vector<std::unique_ptr<AggregationStateHashTableBase>>
+      group_by_hashtables_;
 
   // A vector of group by hash table pools, one for each group by clause.
   std::vector<std::unique_ptr<HashTablePool>> group_by_hashtable_pools_;
 
   StorageManager *storage_manager_;
 
-  void mergeGroupByHashTables(AggregationStateHashTableBase *src, AggregationStateHashTableBase *dst);
-
   DISALLOW_COPY_AND_ASSIGN(AggregationOperationState);
 };
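
The renamed hooks above (blockUpdate()/allowUpdate(), together with the now-public static mergeGroupByHashTables()) imply the DISTINCT control flow used by AggregationOperationState: a DISTINCT aggregate's handle is blocked while distinct keys are being collected, then re-enabled just before the final pass over the distinctify table. A rough sketch of that sequence follows; the parameter types are inferred from the .cpp changes above rather than stated by this commit.

// Sketch of the DISTINCT sequencing implied by blockUpdate()/allowUpdate().
// Include paths follow the diff; HashTableBase.hpp is assumed to declare
// AggregationStateHashTableBase.
#include <cstddef>

#include "expressions/aggregation/AggregationHandle.hpp"
#include "storage/HashTableBase.hpp"

namespace quickstep {

void DistinctAggregationSketch(AggregationHandle *handle,
                               AggregationStateHashTableBase &distinctify_table,
                               AggregationStateHashTableBase *group_by_table,
                               const std::size_t agg_idx) {
  // While input blocks stream in, the combined group-by payload for this
  // aggregate must not be touched.
  handle->blockUpdate();

  // ... per-block work inserts (group-by key, argument) pairs into
  // 'distinctify_table' elsewhere ...

  // Re-enable updates, then aggregate the de-duplicated keys into the
  // shared group-by hash table, as finalizeHashTable() does.
  handle->allowUpdate();
  handle->aggregateOnDistinctifyHashTableForGroupBy(
      distinctify_table, group_by_table, agg_idx);
}

}  // namespace quickstep
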
 


[2/7] incubator-quickstep git commit: Modified Aggregation unit test. Ran clang-format.

Posted by ra...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/dad7d6f3/storage/FastSeparateChainingHashTable.hpp
----------------------------------------------------------------------
diff --git a/storage/FastSeparateChainingHashTable.hpp b/storage/FastSeparateChainingHashTable.hpp
index 0670993..886a8ca 100644
--- a/storage/FastSeparateChainingHashTable.hpp
+++ b/storage/FastSeparateChainingHashTable.hpp
@@ -27,8 +27,8 @@
 #include <utility>
 #include <vector>
 
-#include "storage/HashTable.hpp"
 #include "storage/FastHashTable.hpp"
+#include "storage/HashTable.hpp"
 #include "storage/HashTableBase.hpp"
 #include "storage/HashTableKeyManager.hpp"
 #include "storage/StorageBlob.hpp"
@@ -55,43 +55,42 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-class FastSeparateChainingHashTable : public FastHashTable<resizable,
-                                                   serializable,
-                                                   force_key_copy,
-                                                   allow_duplicate_keys> {
+class FastSeparateChainingHashTable
+    : public FastHashTable<resizable,
+                           serializable,
+                           force_key_copy,
+                           allow_duplicate_keys> {
  public:
-  FastSeparateChainingHashTable(const std::vector<const Type*> &key_types,
-                            const std::size_t num_entries,
-                            const std::vector<std::size_t> &payload_sizes,
-                            const std::vector<AggregationHandle *> &handles,
-                            StorageManager *storage_manager);
-
-  FastSeparateChainingHashTable(const std::vector<const Type*> &key_types,
-                            void *hash_table_memory,
-                            const std::size_t hash_table_memory_size,
-                            const bool new_hash_table,
-                            const bool hash_table_memory_zeroed);
+  FastSeparateChainingHashTable(const std::vector<const Type *> &key_types,
+                                const std::size_t num_entries,
+                                const std::vector<std::size_t> &payload_sizes,
+                                const std::vector<AggregationHandle *> &handles,
+                                StorageManager *storage_manager);
+
+  FastSeparateChainingHashTable(const std::vector<const Type *> &key_types,
+                                void *hash_table_memory,
+                                const std::size_t hash_table_memory_size,
+                                const bool new_hash_table,
+                                const bool hash_table_memory_zeroed);
 
   // Delegating constructors for single scalar keys.
   FastSeparateChainingHashTable(const Type &key_type,
-                            const std::size_t num_entries,
-                            StorageManager *storage_manager)
-      : FastSeparateChainingHashTable(std::vector<const Type*>(1, &key_type),
-                                  num_entries,
-                                  storage_manager) {
-  }
+                                const std::size_t num_entries,
+                                StorageManager *storage_manager)
+      : FastSeparateChainingHashTable(std::vector<const Type *>(1, &key_type),
+                                      num_entries,
+                                      storage_manager) {}
 
   FastSeparateChainingHashTable(const Type &key_type,
-                            void *hash_table_memory,
-                            const std::size_t hash_table_memory_size,
-                            const bool new_hash_table,
-                            const bool hash_table_memory_zeroed)
-      : FastSeparateChainingHashTable(std::vector<const Type*>(1, &key_type),
-                                  hash_table_memory,
-                                  hash_table_memory_size,
-                                  new_hash_table,
-                                  hash_table_memory_zeroed) {
-  }
+                                void *hash_table_memory,
+                                const std::size_t hash_table_memory_size,
+                                const bool new_hash_table,
+                                const bool hash_table_memory_zeroed)
+      : FastSeparateChainingHashTable(std::vector<const Type *>(1, &key_type),
+                                      hash_table_memory,
+                                      hash_table_memory_size,
+                                      new_hash_table,
+                                      hash_table_memory_zeroed) {}
 
   ~FastSeparateChainingHashTable() override {
     DestroyValues(buckets_,
@@ -106,48 +105,54 @@ class FastSeparateChainingHashTable : public FastHashTable<resizable,
     return header_->buckets_allocated.load(std::memory_order_relaxed);
   }
 
-  const uint8_t* getSingle(const TypedValue &key) const override;
-  const uint8_t* getSingleCompositeKey(const std::vector<TypedValue> &key) const override;
-  const uint8_t* getSingleCompositeKey(const std::vector<TypedValue> &key, int index) const override;
+  const std::uint8_t* getSingle(const TypedValue &key) const override;
+  const std::uint8_t* getSingleCompositeKey(
+      const std::vector<TypedValue> &key) const override;
+  const std::uint8_t* getSingleCompositeKey(const std::vector<TypedValue> &key,
+                                            int index) const override;
 
   void getAll(const TypedValue &key,
-              std::vector<const uint8_t*> *values) const override;
-  void getAllCompositeKey(const std::vector<TypedValue> &key,
-                          std::vector<const uint8_t*> *values) const override;
+              std::vector<const std::uint8_t *> *values) const override;
+  void getAllCompositeKey(
+      const std::vector<TypedValue> &key,
+      std::vector<const std::uint8_t *> *values) const override;
 
  protected:
-  HashTablePutResult putInternal(const TypedValue &key,
-                                 const std::size_t variable_key_size,
-                                 const uint8_t &value,
-                                 HashTablePreallocationState *prealloc_state) override;
-
-  HashTablePutResult putCompositeKeyInternalFast(const std::vector<TypedValue> &key,
-                                             const std::size_t variable_key_size,
-                                             const std::uint8_t *init_value_ptr,
-                                             HashTablePreallocationState *prealloc_state) override;
-
-  uint8_t* upsertInternalFast(const TypedValue &key,
-                         const std::size_t variable_key_size,
-                         const std::uint8_t *init_value_ptr) override;
-
-  uint8_t* upsertCompositeKeyInternalFast(const std::vector<TypedValue> &key,
-                                     const std::uint8_t *init_value_ptr,
-                                     const std::size_t variable_key_size) override;
+  HashTablePutResult putInternal(
+      const TypedValue &key,
+      const std::size_t variable_key_size,
+      const std::uint8_t &value,
+      HashTablePreallocationState *prealloc_state) override;
+
+  HashTablePutResult putCompositeKeyInternalFast(
+      const std::vector<TypedValue> &key,
+      const std::size_t variable_key_size,
+      const std::uint8_t *init_value_ptr,
+      HashTablePreallocationState *prealloc_state) override;
+
+  std::uint8_t* upsertInternalFast(const TypedValue &key,
+                                   const std::size_t variable_key_size,
+                                   const std::uint8_t *init_value_ptr) override;
+
+  std::uint8_t* upsertCompositeKeyInternalFast(
+      const std::vector<TypedValue> &key,
+      const std::uint8_t *init_value_ptr,
+      const std::size_t variable_key_size) override;
 
   bool getNextEntry(TypedValue *key,
-                    const uint8_t **value,
+                    const std::uint8_t **value,
                     std::size_t *entry_num) const override;
   bool getNextEntryCompositeKey(std::vector<TypedValue> *key,
-                                const uint8_t **value,
+                                const std::uint8_t **value,
                                 std::size_t *entry_num) const override;
 
   bool getNextEntryForKey(const TypedValue &key,
                           const std::size_t hash_code,
-                          const uint8_t **value,
+                          const std::uint8_t **value,
                           std::size_t *entry_num) const override;
   bool getNextEntryForCompositeKey(const std::vector<TypedValue> &key,
                                    const std::size_t hash_code,
-                                   const uint8_t **value,
+                                   const std::uint8_t **value,
                                    std::size_t *entry_num) const override;
 
   bool hasKey(const TypedValue &key) const override;
@@ -157,15 +162,16 @@ class FastSeparateChainingHashTable : public FastHashTable<resizable,
               const std::size_t extra_variable_storage,
               const std::size_t retry_num = 0) override;
 
-  bool preallocateForBulkInsert(const std::size_t total_entries,
-                                const std::size_t total_variable_key_size,
-                                HashTablePreallocationState *prealloc_state) override;
+  bool preallocateForBulkInsert(
+      const std::size_t total_entries,
+      const std::size_t total_variable_key_size,
+      HashTablePreallocationState *prealloc_state) override;
+
  private:
   struct Header {
     std::size_t num_slots;
     std::size_t num_buckets;
-    alignas(kCacheLineBytes)
-        std::atomic<std::size_t> buckets_allocated;
+    alignas(kCacheLineBytes) std::atomic<std::size_t> buckets_allocated;
     alignas(kCacheLineBytes)
         std::atomic<std::size_t> variable_length_bytes_allocated;
   };
@@ -179,16 +185,18 @@ class FastSeparateChainingHashTable : public FastHashTable<resizable,
 
   // Round bucket size up to a multiple of kBucketAlignment.
   constexpr std::size_t ComputeBucketSize(const std::size_t fixed_key_size) {
-    return (((kValueOffset + this->total_payload_size_ + fixed_key_size - 1) / kBucketAlignment) + 1)
-           * kBucketAlignment;
+    return (((kValueOffset + this->total_payload_size_ + fixed_key_size - 1) /
+             kBucketAlignment) +
+            1) *
+           kBucketAlignment;
   }
   // If ValueT is not trivially destructible, invoke its destructor for all
   // values held in the specified buckets (including those in "empty" buckets
   // that were default constructed). If ValueT is trivially destructible, this
   // is a no-op.
   void DestroyValues(void *buckets,
-                            const std::size_t num_buckets,
-                            const std::size_t bucket_size);
+                     const std::size_t num_buckets,
+                     const std::size_t bucket_size);
 
   // Attempt to find an empty bucket to insert 'hash_code' into, starting after
   // '*bucket' in the chain (or, if '*bucket' is NULL, starting from the slot
@@ -201,30 +209,33 @@ class FastSeparateChainingHashTable : public FastHashTable<resizable,
   // attempt to allocate storage for a variable-length key BEFORE allocating a
   // bucket, so that no bucket number below 'header_->num_buckets' is ever
   // deallocated after being allocated.
-  inline bool locateBucketForInsertion(const std::size_t hash_code,
-                                       const std::size_t variable_key_allocation_required,
-                                       void **bucket,
-                                       std::atomic<std::size_t> **pending_chain_ptr,
-                                       std::size_t *pending_chain_ptr_finish_value,
-                                       HashTablePreallocationState *prealloc_state);
+  inline bool locateBucketForInsertion(
+      const std::size_t hash_code,
+      const std::size_t variable_key_allocation_required,
+      void **bucket,
+      std::atomic<std::size_t> **pending_chain_ptr,
+      std::size_t *pending_chain_ptr_finish_value,
+      HashTablePreallocationState *prealloc_state);
 
   // Write a scalar 'key' and its 'hash_code' into the '*bucket', which was
   // found by locateBucketForInsertion(). Assumes that storage for a
   // variable-length key copy (if any) was already allocated by a successful
   // call to allocateVariableLengthKeyStorage().
-  inline void writeScalarKeyToBucket(const TypedValue &key,
-                                     const std::size_t hash_code,
-                                     void *bucket,
-                                     HashTablePreallocationState *prealloc_state);
+  inline void writeScalarKeyToBucket(
+      const TypedValue &key,
+      const std::size_t hash_code,
+      void *bucket,
+      HashTablePreallocationState *prealloc_state);
 
   // Write a composite 'key' and its 'hash_code' into the '*bucket', which was
   // found by locateBucketForInsertion(). Assumes that storage for
   // variable-length key copies (if any) was already allocated by a successful
   // call to allocateVariableLengthKeyStorage().
-  inline void writeCompositeKeyToBucket(const std::vector<TypedValue> &key,
-                                        const std::size_t hash_code,
-                                        void *bucket,
-                                        HashTablePreallocationState *prealloc_state);
+  inline void writeCompositeKeyToBucket(
+      const std::vector<TypedValue> &key,
+      const std::size_t hash_code,
+      void *bucket,
+      HashTablePreallocationState *prealloc_state);
 
   // Determine whether it is actually necessary to resize this hash table.
   // Checks that there is at least one unallocated bucket, and that there is
@@ -275,30 +286,37 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::FastSeparateChainingHashTable(const std::vector<const Type*> &key_types,
-                                const std::size_t num_entries,
-                                const std::vector<std::size_t> &payload_sizes,
-                                const std::vector<AggregationHandle *> &handles,
-                                StorageManager *storage_manager)
-        : FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>(
-              key_types,
-              num_entries,
-              handles,
-              payload_sizes,
-              storage_manager,
-              false,
-              false,
-              true),
-          kBucketAlignment(alignof(std::atomic<std::size_t>)),
-          kValueOffset(sizeof(std::atomic<std::size_t>) + sizeof(std::size_t)),
-          key_manager_(this->key_types_, kValueOffset + this->total_payload_size_),
-          bucket_size_(ComputeBucketSize(key_manager_.getFixedKeySize())) {
-  init_payload_ = static_cast<std::uint8_t *>(calloc(this->total_payload_size_, 1));
+FastSeparateChainingHashTable<resizable,
+                              serializable,
+                              force_key_copy,
+                              allow_duplicate_keys>::
+    FastSeparateChainingHashTable(
+        const std::vector<const Type *> &key_types,
+        const std::size_t num_entries,
+        const std::vector<std::size_t> &payload_sizes,
+        const std::vector<AggregationHandle *> &handles,
+        StorageManager *storage_manager)
+    : FastHashTable<resizable,
+                    serializable,
+                    force_key_copy,
+                    allow_duplicate_keys>(key_types,
+                                          num_entries,
+                                          handles,
+                                          payload_sizes,
+                                          storage_manager,
+                                          false,
+                                          false,
+                                          true),
+      kBucketAlignment(alignof(std::atomic<std::size_t>)),
+      kValueOffset(sizeof(std::atomic<std::size_t>) + sizeof(std::size_t)),
+      key_manager_(this->key_types_, kValueOffset + this->total_payload_size_),
+      bucket_size_(ComputeBucketSize(key_manager_.getFixedKeySize())) {
+  init_payload_ =
+      static_cast<std::uint8_t *>(calloc(this->total_payload_size_, 1));
   int k = 0;
   for (auto handle : handles) {
-      handle->initPayload(init_payload_+this->payload_offsets_[k]);
-      k++;
+    handle->initPayload(init_payload_ + this->payload_offsets_[k]);
+    k++;
   }
   // Bucket size always rounds up to the alignment requirement of the atomic
   // size_t "next" pointer at the front or a ValueT, whichever is larger.
@@ -308,19 +326,23 @@ FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_dup
   this->setKeyInline(key_manager_.getKeyInline());
 
   // Pick out a prime number of slots and calculate storage requirements.
-  std::size_t num_slots_tmp = get_next_prime_number(num_entries * kHashTableLoadFactor);
-  std::size_t required_memory = sizeof(Header)
-                                + num_slots_tmp * sizeof(std::atomic<std::size_t>)
-                                + (num_slots_tmp / kHashTableLoadFactor)
-                                    * (bucket_size_ + key_manager_.getEstimatedVariableKeySize());
-  std::size_t num_storage_slots = this->storage_manager_->SlotsNeededForBytes(required_memory);
+  std::size_t num_slots_tmp =
+      get_next_prime_number(num_entries * kHashTableLoadFactor);
+  std::size_t required_memory =
+      sizeof(Header) + num_slots_tmp * sizeof(std::atomic<std::size_t>) +
+      (num_slots_tmp / kHashTableLoadFactor) *
+          (bucket_size_ + key_manager_.getEstimatedVariableKeySize());
+  std::size_t num_storage_slots =
+      this->storage_manager_->SlotsNeededForBytes(required_memory);
   if (num_storage_slots == 0) {
-    FATAL_ERROR("Storage requirement for SeparateChainingHashTable "
-                "exceeds maximum allocation size.");
+    FATAL_ERROR(
+        "Storage requirement for SeparateChainingHashTable "
+        "exceeds maximum allocation size.");
   }
 
   // Get a StorageBlob to hold the hash table.
-  const block_id blob_id = this->storage_manager_->createBlob(num_storage_slots);
+  const block_id blob_id =
+      this->storage_manager_->createBlob(num_storage_slots);
   this->blob_ = this->storage_manager_->getBlobMutable(blob_id);
 
   void *aligned_memory_start = this->blob_->getMemoryMutable();
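
The sizing arithmetic in the hunk above goes from a requested entry count to a byte budget: pick roughly load_factor slots per expected entry (snapped to a prime), then charge each bucket for its share of the slot array, its fixed-size bucket, and an estimated slice of variable-length key storage. A rough, self-contained restatement under assumed parameter names, with the next-prime adjustment elided:

    #include <atomic>
    #include <cstddef>

    // Rough restatement of the estimate above; 'load_factor' plays the role of
    // kHashTableLoadFactor, and the slot count would normally be bumped to the
    // next prime before use.
    std::size_t EstimateRequiredBytes(const std::size_t num_entries,
                                      const std::size_t header_bytes,
                                      const std::size_t bucket_size,
                                      const std::size_t est_variable_key_bytes,
                                      const std::size_t load_factor) {
      const std::size_t num_slots = num_entries * load_factor;
      const std::size_t num_buckets = num_slots / load_factor;
      return header_bytes +
             num_slots * sizeof(std::atomic<std::size_t>) +         // slot array
             num_buckets * (bucket_size + est_variable_key_bytes);  // buckets
    }

The blob request is then whatever number of storage slots covers that byte count.
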
@@ -328,14 +350,14 @@ FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_dup
   if (align(alignof(Header),
             sizeof(Header),
             aligned_memory_start,
-            available_memory)
-          == nullptr) {
+            available_memory) == nullptr) {
     // With current values from StorageConstants.hpp, this should be
     // impossible. A blob is at least 1 MB, while a Header has alignment
     // requirement of just kCacheLineBytes (64 bytes).
-    FATAL_ERROR("StorageBlob used to hold resizable "
-                "SeparateChainingHashTable is too small to meet alignment "
-                "requirements of SeparateChainingHashTable::Header.");
+    FATAL_ERROR(
+        "StorageBlob used to hold resizable "
+        "SeparateChainingHashTable is too small to meet alignment "
+        "requirements of SeparateChainingHashTable::Header.");
   } else if (aligned_memory_start != this->blob_->getMemoryMutable()) {
     // This should also be impossible, since the StorageManager allocates slots
     // aligned to kCacheLineBytes.
@@ -346,8 +368,9 @@ FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_dup
   }
 
   // Locate the header.
-  header_ = static_cast<Header*>(aligned_memory_start);
-  aligned_memory_start = static_cast<char*>(aligned_memory_start) + sizeof(Header);
+  header_ = static_cast<Header *>(aligned_memory_start);
+  aligned_memory_start =
+      static_cast<char *>(aligned_memory_start) + sizeof(Header);
   available_memory -= sizeof(Header);
 
   // Recompute the number of slots & buckets using the actual available memory.
@@ -355,19 +378,20 @@ FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_dup
   // the storage blob's size. It's also possible (though very unlikely) that we
   // will wind up with fewer buckets than we initially wanted because of screwy
   // alignment requirements for ValueT.
-  std::size_t num_buckets_tmp
-      = available_memory / (kHashTableLoadFactor * sizeof(std::atomic<std::size_t>)
-                            + bucket_size_
-                            + key_manager_.getEstimatedVariableKeySize());
-  num_slots_tmp = get_previous_prime_number(num_buckets_tmp * kHashTableLoadFactor);
+  std::size_t num_buckets_tmp =
+      available_memory /
+      (kHashTableLoadFactor * sizeof(std::atomic<std::size_t>) + bucket_size_ +
+       key_manager_.getEstimatedVariableKeySize());
+  num_slots_tmp =
+      get_previous_prime_number(num_buckets_tmp * kHashTableLoadFactor);
   num_buckets_tmp = num_slots_tmp / kHashTableLoadFactor;
   DEBUG_ASSERT(num_slots_tmp > 0);
   DEBUG_ASSERT(num_buckets_tmp > 0);
 
   // Locate the slot array.
-  slots_ = static_cast<std::atomic<std::size_t>*>(aligned_memory_start);
-  aligned_memory_start = static_cast<char*>(aligned_memory_start)
-                         + sizeof(std::atomic<std::size_t>) * num_slots_tmp;
+  slots_ = static_cast<std::atomic<std::size_t> *>(aligned_memory_start);
+  aligned_memory_start = static_cast<char *>(aligned_memory_start) +
+                         sizeof(std::atomic<std::size_t>) * num_slots_tmp;
   available_memory -= sizeof(std::atomic<std::size_t>) * num_slots_tmp;
 
   // Locate the buckets.
@@ -375,17 +399,16 @@ FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_dup
   // Extra-paranoid: If ValueT has an alignment requirement greater than that
   // of std::atomic<std::size_t>, we may need to adjust the start of the bucket
   // array.
-  if (align(kBucketAlignment,
-            bucket_size_,
-            buckets_,
-            available_memory)
-          == nullptr) {
-    FATAL_ERROR("StorageBlob used to hold resizable "
-                "SeparateChainingHashTable is too small to meet "
-                "alignment requirements of buckets.");
+  if (align(kBucketAlignment, bucket_size_, buckets_, available_memory) ==
+      nullptr) {
+    FATAL_ERROR(
+        "StorageBlob used to hold resizable "
+        "SeparateChainingHashTable is too small to meet "
+        "alignment requirements of buckets.");
   } else if (buckets_ != aligned_memory_start) {
-    DEV_WARNING("Bucket array start position adjusted to meet alignment "
-                "requirement for SeparateChainingHashTable's value type.");
+    DEV_WARNING(
+        "Bucket array start position adjusted to meet alignment "
+        "requirement for SeparateChainingHashTable's value type.");
     if (num_buckets_tmp * bucket_size_ > available_memory) {
       --num_buckets_tmp;
     }
@@ -401,7 +424,7 @@ FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_dup
   // Locate variable-length key storage region, and give it all the remaining
   // bytes in the blob.
   key_manager_.setVariableLengthStorageInfo(
-      static_cast<char*>(buckets_) + header_->num_buckets * bucket_size_,
+      static_cast<char *>(buckets_) + header_->num_buckets * bucket_size_,
       available_memory,
       &(header_->variable_length_bytes_allocated));
 }
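
Taken together, this constructor lays the blob out as four back-to-back regions: Header, slot array, bucket array, and a variable-length key pool that receives all remaining bytes. A compact sketch of that carving, using a stand-in header struct rather than the real class member:

    #include <atomic>
    #include <cstddef>
    #include <memory>

    // Stand-in header; the real one also holds atomic allocation counters.
    struct SketchHeader {
      std::size_t num_slots;
      std::size_t num_buckets;
    };

    // Carve one contiguous region into header | slots | buckets | var-length keys.
    void CarveRegions(void *base, std::size_t bytes,
                      const std::size_t num_slots,
                      const std::size_t num_buckets,
                      const std::size_t bucket_size) {
      void *cursor = base;
      std::align(alignof(SketchHeader), sizeof(SketchHeader), cursor, bytes);
      auto *header = static_cast<SketchHeader *>(cursor);
      cursor = static_cast<char *>(cursor) + sizeof(SketchHeader);
      bytes -= sizeof(SketchHeader);

      auto *slots = static_cast<std::atomic<std::size_t> *>(cursor);
      cursor = static_cast<char *>(cursor) +
               num_slots * sizeof(std::atomic<std::size_t>);
      bytes -= num_slots * sizeof(std::atomic<std::size_t>);

      void *buckets = cursor;
      char *variable_key_pool =
          static_cast<char *>(buckets) + num_buckets * bucket_size;

      (void)header; (void)slots; (void)variable_key_pool;  // illustration only
    }
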
@@ -410,36 +433,43 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::FastSeparateChainingHashTable(const std::vector<const Type*> &key_types,
-                                void *hash_table_memory,
-                                const std::size_t hash_table_memory_size,
-                                const bool new_hash_table,
-                                const bool hash_table_memory_zeroed)
-        : FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>(
-              key_types,
-              hash_table_memory,
-              hash_table_memory_size,
-              new_hash_table,
-              hash_table_memory_zeroed,
-              false,
-              false,
-              true),
-          kBucketAlignment(alignof(std::atomic<std::size_t>) < alignof(uint8_t) ? alignof(uint8_t)
-                                                  : alignof(std::atomic<std::size_t>)),
-          kValueOffset(sizeof(std::atomic<std::size_t>) + sizeof(std::size_t)),
-          key_manager_(this->key_types_, kValueOffset + sizeof(uint8_t)),
-          bucket_size_(ComputeBucketSize(key_manager_.getFixedKeySize())) {
+FastSeparateChainingHashTable<resizable,
+                              serializable,
+                              force_key_copy,
+                              allow_duplicate_keys>::
+    FastSeparateChainingHashTable(const std::vector<const Type *> &key_types,
+                                  void *hash_table_memory,
+                                  const std::size_t hash_table_memory_size,
+                                  const bool new_hash_table,
+                                  const bool hash_table_memory_zeroed)
+    : FastHashTable<resizable,
+                    serializable,
+                    force_key_copy,
+                    allow_duplicate_keys>(key_types,
+                                          hash_table_memory,
+                                          hash_table_memory_size,
+                                          new_hash_table,
+                                          hash_table_memory_zeroed,
+                                          false,
+                                          false,
+                                          true),
+      kBucketAlignment(alignof(std::atomic<std::size_t>) < alignof(std::uint8_t)
+                           ? alignof(std::uint8_t)
+                           : alignof(std::atomic<std::size_t>)),
+      kValueOffset(sizeof(std::atomic<std::size_t>) + sizeof(std::size_t)),
+      key_manager_(this->key_types_, kValueOffset + sizeof(std::uint8_t)),
+      bucket_size_(ComputeBucketSize(key_manager_.getFixedKeySize())) {
   // Bucket size always rounds up to the alignment requirement of the atomic
   // size_t "next" pointer at the front or a ValueT, whichever is larger.
   //
   // Make sure that the larger of the two alignment requirements also satisfies
   // the smaller.
-  static_assert(alignof(std::atomic<std::size_t>) < alignof(uint8_t)
-                    ? alignof(uint8_t) % alignof(std::atomic<std::size_t>) == 0
-                    : alignof(std::atomic<std::size_t>) % alignof(uint8_t) == 0,
-                "Alignment requirement of std::atomic<std::size_t> does not "
-                "evenly divide with alignment requirement of ValueT.");
+  static_assert(
+      alignof(std::atomic<std::size_t>) < alignof(std::uint8_t)
+          ? alignof(std::uint8_t) % alignof(std::atomic<std::size_t>) == 0
+          : alignof(std::atomic<std::size_t>) % alignof(std::uint8_t) == 0,
+      "Alignment requirement of std::atomic<std::size_t> does not "
+      "evenly divide with alignment requirement of ValueT.");
 
   // Give base HashTable information about what key components are stored
   // inline from 'key_manager_'.
@@ -460,12 +490,13 @@ FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_dup
   if (align(alignof(Header),
             sizeof(Header),
             aligned_memory_start,
-            available_memory)
-          == nullptr) {
+            available_memory) == nullptr) {
     FATAL_ERROR("Attempted to create a non-resizable "
                 << "SeparateChainingHashTable with "
-                << available_memory << " bytes of memory at "
-                << aligned_memory_start << " which either can not fit a "
+                << available_memory
+                << " bytes of memory at "
+                << aligned_memory_start
+                << " which either can not fit a "
                 << "SeparateChainingHashTable::Header or meet its alignement "
                 << "requirement.");
   } else if (aligned_memory_start != this->hash_table_memory_) {
@@ -477,32 +508,36 @@ FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_dup
                 << "SeparateChainingHashTable::Header.");
   }
 
-  header_ = static_cast<Header*>(aligned_memory_start);
-  aligned_memory_start = static_cast<char*>(aligned_memory_start) + sizeof(Header);
+  header_ = static_cast<Header *>(aligned_memory_start);
+  aligned_memory_start =
+      static_cast<char *>(aligned_memory_start) + sizeof(Header);
   available_memory -= sizeof(Header);
 
   if (new_hash_table) {
-    std::size_t estimated_bucket_capacity
-        = available_memory / (kHashTableLoadFactor * sizeof(std::atomic<std::size_t>)
-                              + bucket_size_
-                              + key_manager_.getEstimatedVariableKeySize());
-    std::size_t num_slots = get_previous_prime_number(estimated_bucket_capacity * kHashTableLoadFactor);
+    std::size_t estimated_bucket_capacity =
+        available_memory /
+        (kHashTableLoadFactor * sizeof(std::atomic<std::size_t>) +
+         bucket_size_ + key_manager_.getEstimatedVariableKeySize());
+    std::size_t num_slots = get_previous_prime_number(
+        estimated_bucket_capacity * kHashTableLoadFactor);
 
     // Fill in the header.
     header_->num_slots = num_slots;
     header_->num_buckets = num_slots / kHashTableLoadFactor;
     header_->buckets_allocated.store(0, std::memory_order_relaxed);
-    header_->variable_length_bytes_allocated.store(0, std::memory_order_relaxed);
+    header_->variable_length_bytes_allocated.store(0,
+                                                   std::memory_order_relaxed);
   }
 
   // Locate the slot array.
-  slots_ = static_cast<std::atomic<std::size_t>*>(aligned_memory_start);
-  aligned_memory_start = static_cast<char*>(aligned_memory_start)
-                         + sizeof(std::atomic<std::size_t>) * header_->num_slots;
+  slots_ = static_cast<std::atomic<std::size_t> *>(aligned_memory_start);
+  aligned_memory_start = static_cast<char *>(aligned_memory_start) +
+                         sizeof(std::atomic<std::size_t>) * header_->num_slots;
   available_memory -= sizeof(std::atomic<std::size_t>) * header_->num_slots;
 
   if (new_hash_table && !hash_table_memory_zeroed) {
-    std::memset(slots_, 0x0, sizeof(std::atomic<std::size_t>) * header_->num_slots);
+    std::memset(
+        slots_, 0x0, sizeof(std::atomic<std::size_t>) * header_->num_slots);
   }
 
   // Locate the buckets.
@@ -510,20 +545,20 @@ FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_dup
   // Extra-paranoid: sizeof(Header) should almost certainly be a multiple of
   // kBucketAlignment, unless ValueT has some members with seriously big
   // (> kCacheLineBytes) alignment requirements specified using alignas().
-  if (align(kBucketAlignment,
-            bucket_size_,
-            buckets_,
-            available_memory)
-          == nullptr) {
+  if (align(kBucketAlignment, bucket_size_, buckets_, available_memory) ==
+      nullptr) {
     FATAL_ERROR("Attempted to create a non-resizable "
                 << "SeparateChainingHashTable with "
-                << this->hash_table_memory_size_ << " bytes of memory at "
-                << this->hash_table_memory_ << ", which can hold an aligned "
+                << this->hash_table_memory_size_
+                << " bytes of memory at "
+                << this->hash_table_memory_
+                << ", which can hold an aligned "
                 << "SeparateChainingHashTable::Header but does not have "
                 << "enough remaining space for even a single hash bucket.");
   } else if (buckets_ != aligned_memory_start) {
-    DEV_WARNING("Bucket array start position adjusted to meet alignment "
-                "requirement for SeparateChainingHashTable's value type.");
+    DEV_WARNING(
+        "Bucket array start position adjusted to meet alignment "
+        "requirement for SeparateChainingHashTable's value type.");
     if (header_->num_buckets * bucket_size_ > available_memory) {
       DEBUG_ASSERT(new_hash_table);
       --(header_->num_buckets);
@@ -538,7 +573,7 @@ FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_dup
 
   // Locate variable-length key storage region.
   key_manager_.setVariableLengthStorageInfo(
-      static_cast<char*>(buckets_) + header_->num_buckets * bucket_size_,
+      static_cast<char *>(buckets_) + header_->num_buckets * bucket_size_,
       available_memory,
       &(header_->variable_length_bytes_allocated));
 }
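
When the table is handed a fixed memory region, as in this constructor, capacity is derived in the opposite direction from the resizable case: divide the bytes left after the header by the per-entry cost, then snap the slot count to a prime no larger than load_factor times the bucket count. A rough sketch of that estimate, with assumed names and the previous-prime step elided:

    #include <atomic>
    #include <cstddef>

    // Estimate how many buckets fit in 'available_bytes'. Each bucket also
    // costs 'load_factor' slot entries plus an estimated chunk of
    // variable-length key storage.
    std::size_t EstimateBucketCapacity(const std::size_t available_bytes,
                                       const std::size_t bucket_size,
                                       const std::size_t est_variable_key_bytes,
                                       const std::size_t load_factor) {
      const std::size_t per_bucket_cost =
          load_factor * sizeof(std::atomic<std::size_t>) +  // share of slot array
          bucket_size + est_variable_key_bytes;
      const std::size_t raw_buckets = available_bytes / per_bucket_cost;
      const std::size_t num_slots = raw_buckets * load_factor;  // then prev. prime
      return num_slots / load_factor;
    }
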
@@ -547,16 +582,18 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-void FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::clear() {
-  const std::size_t used_buckets = header_->buckets_allocated.load(std::memory_order_relaxed);
+void FastSeparateChainingHashTable<resizable,
+                                   serializable,
+                                   force_key_copy,
+                                   allow_duplicate_keys>::clear() {
+  const std::size_t used_buckets =
+      header_->buckets_allocated.load(std::memory_order_relaxed);
   // Destroy existing values, if necessary.
-  DestroyValues(buckets_,
-                used_buckets,
-                bucket_size_);
+  DestroyValues(buckets_, used_buckets, bucket_size_);
 
   // Zero-out slot array.
-  std::memset(slots_, 0x0, sizeof(std::atomic<std::size_t>) * header_->num_slots);
+  std::memset(
+      slots_, 0x0, sizeof(std::atomic<std::size_t>) * header_->num_slots);
 
   // Zero-out used buckets.
   std::memset(buckets_, 0x0, used_buckets * bucket_size_);
@@ -570,24 +607,33 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-const uint8_t* FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::getSingle(const TypedValue &key) const {
+const std::uint8_t* FastSeparateChainingHashTable<
+    resizable,
+    serializable,
+    force_key_copy,
+    allow_duplicate_keys>::getSingle(const TypedValue &key) const {
   DEBUG_ASSERT(!allow_duplicate_keys);
   DEBUG_ASSERT(this->key_types_.size() == 1);
-  DEBUG_ASSERT(key.isPlausibleInstanceOf(this->key_types_.front()->getSignature()));
+  DEBUG_ASSERT(
+      key.isPlausibleInstanceOf(this->key_types_.front()->getSignature()));
 
   const std::size_t hash_code = key.getHash();
-  std::size_t bucket_ref = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
+  std::size_t bucket_ref =
+      slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
   while (bucket_ref != 0) {
     DEBUG_ASSERT(bucket_ref != std::numeric_limits<std::size_t>::max());
-    const char *bucket = static_cast<const char*>(buckets_) + (bucket_ref - 1) * bucket_size_;
-    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t*>(
+    const char *bucket =
+        static_cast<const char *>(buckets_) + (bucket_ref - 1) * bucket_size_;
+    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t *>(
         bucket + sizeof(std::atomic<std::size_t>));
-    if ((bucket_hash == hash_code) && key_manager_.scalarKeyCollisionCheck(key, bucket)) {
+    if ((bucket_hash == hash_code) &&
+        key_manager_.scalarKeyCollisionCheck(key, bucket)) {
       // Match located.
-      return reinterpret_cast<const uint8_t*>(bucket + kValueOffset);
+      return reinterpret_cast<const std::uint8_t *>(bucket + kValueOffset);
     }
-    bucket_ref = reinterpret_cast<const std::atomic<std::size_t>*>(bucket)->load(std::memory_order_relaxed);
+    bucket_ref =
+        reinterpret_cast<const std::atomic<std::size_t> *>(bucket)->load(
+            std::memory_order_relaxed);
   }
 
   // Reached the end of the chain and didn't find a match.
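
getSingle() above is a textbook separate-chaining probe: hash the key, read the head slot, then walk the 1-based chain of bucket references until it reaches 0. A stripped-down sketch of the same loop with illustrative names (not the real members):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    // Simplified chain walk; 'slot_refs' holds 1-based bucket indices, 0 = empty.
    const std::uint8_t *FindPayload(const std::size_t hash_code,
                                    const std::atomic<std::size_t> *slot_refs,
                                    const std::size_t num_slots,
                                    const char *buckets,
                                    const std::size_t bucket_size,
                                    const std::size_t value_offset,
                                    bool (*keys_match)(const char *bucket)) {
      std::size_t ref =
          slot_refs[hash_code % num_slots].load(std::memory_order_relaxed);
      while (ref != 0) {
        const char *bucket = buckets + (ref - 1) * bucket_size;
        const std::size_t stored_hash = *reinterpret_cast<const std::size_t *>(
            bucket + sizeof(std::atomic<std::size_t>));
        if (stored_hash == hash_code && keys_match(bucket)) {
          return reinterpret_cast<const std::uint8_t *>(bucket + value_offset);
        }
        // The first word of every bucket is the atomic "next" reference.
        ref = reinterpret_cast<const std::atomic<std::size_t> *>(bucket)->load(
            std::memory_order_relaxed);
      }
      return nullptr;  // end of chain, no match
    }

The same traversal underlies getSingleCompositeKey(), getAll(), hasKey(), and the getNextEntryFor*() variants below; only the key comparison differs.
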
@@ -598,23 +644,31 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-const uint8_t* FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::getSingleCompositeKey(const std::vector<TypedValue> &key) const {
+const std::uint8_t* FastSeparateChainingHashTable<resizable,
+                                                  serializable,
+                                                  force_key_copy,
+                                                  allow_duplicate_keys>::
+    getSingleCompositeKey(const std::vector<TypedValue> &key) const {
   DEBUG_ASSERT(!allow_duplicate_keys);
   DEBUG_ASSERT(this->key_types_.size() == key.size());
 
   const std::size_t hash_code = this->hashCompositeKey(key);
-  std::size_t bucket_ref = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
+  std::size_t bucket_ref =
+      slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
   while (bucket_ref != 0) {
     DEBUG_ASSERT(bucket_ref != std::numeric_limits<std::size_t>::max());
-    const char *bucket = static_cast<const char*>(buckets_) + (bucket_ref - 1) * bucket_size_;
-    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t*>(
+    const char *bucket =
+        static_cast<const char *>(buckets_) + (bucket_ref - 1) * bucket_size_;
+    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t *>(
         bucket + sizeof(std::atomic<std::size_t>));
-    if ((bucket_hash == hash_code) && key_manager_.compositeKeyCollisionCheck(key, bucket)) {
+    if ((bucket_hash == hash_code) &&
+        key_manager_.compositeKeyCollisionCheck(key, bucket)) {
       // Match located.
-      return reinterpret_cast<const uint8_t*>(bucket + kValueOffset);
+      return reinterpret_cast<const std::uint8_t *>(bucket + kValueOffset);
     }
-    bucket_ref = reinterpret_cast<const std::atomic<std::size_t>*>(bucket)->load(std::memory_order_relaxed);
+    bucket_ref =
+        reinterpret_cast<const std::atomic<std::size_t> *>(bucket)->load(
+            std::memory_order_relaxed);
   }
 
   // Reached the end of the chain and didn't find a match.
@@ -625,23 +679,32 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-const uint8_t* FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::getSingleCompositeKey(const std::vector<TypedValue> &key, int index) const {
+const std::uint8_t* FastSeparateChainingHashTable<resizable,
+                                                  serializable,
+                                                  force_key_copy,
+                                                  allow_duplicate_keys>::
+    getSingleCompositeKey(const std::vector<TypedValue> &key, int index) const {
   DEBUG_ASSERT(!allow_duplicate_keys);
   DEBUG_ASSERT(this->key_types_.size() == key.size());
 
   const std::size_t hash_code = this->hashCompositeKey(key);
-  std::size_t bucket_ref = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
+  std::size_t bucket_ref =
+      slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
   while (bucket_ref != 0) {
     DEBUG_ASSERT(bucket_ref != std::numeric_limits<std::size_t>::max());
-    const char *bucket = static_cast<const char*>(buckets_) + (bucket_ref - 1) * bucket_size_;
-    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t*>(
+    const char *bucket =
+        static_cast<const char *>(buckets_) + (bucket_ref - 1) * bucket_size_;
+    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t *>(
         bucket + sizeof(std::atomic<std::size_t>));
-    if ((bucket_hash == hash_code) && key_manager_.compositeKeyCollisionCheck(key, bucket)) {
+    if ((bucket_hash == hash_code) &&
+        key_manager_.compositeKeyCollisionCheck(key, bucket)) {
       // Match located.
-      return reinterpret_cast<const uint8_t*>(bucket + kValueOffset)+this->payload_offsets_[index];
+      return reinterpret_cast<const std::uint8_t *>(bucket + kValueOffset) +
+             this->payload_offsets_[index];
     }
-    bucket_ref = reinterpret_cast<const std::atomic<std::size_t>*>(bucket)->load(std::memory_order_relaxed);
+    bucket_ref =
+        reinterpret_cast<const std::atomic<std::size_t> *>(bucket)->load(
+            std::memory_order_relaxed);
   }
 
   // Reached the end of the chain and didn't find a match.
@@ -652,26 +715,38 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-void FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::getAll(const TypedValue &key, std::vector<const uint8_t*> *values) const {
+void FastSeparateChainingHashTable<
+    resizable,
+    serializable,
+    force_key_copy,
+    allow_duplicate_keys>::getAll(const TypedValue &key,
+                                  std::vector<const std::uint8_t *> *values)
+    const {
   DEBUG_ASSERT(this->key_types_.size() == 1);
-  DEBUG_ASSERT(key.isPlausibleInstanceOf(this->key_types_.front()->getSignature()));
+  DEBUG_ASSERT(
+      key.isPlausibleInstanceOf(this->key_types_.front()->getSignature()));
 
   const std::size_t hash_code = key.getHash();
-  std::size_t bucket_ref = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
+  std::size_t bucket_ref =
+      slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
   while (bucket_ref != 0) {
     DEBUG_ASSERT(bucket_ref != std::numeric_limits<std::size_t>::max());
-    const char *bucket = static_cast<const char*>(buckets_) + (bucket_ref - 1) * bucket_size_;
-    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t*>(
+    const char *bucket =
+        static_cast<const char *>(buckets_) + (bucket_ref - 1) * bucket_size_;
+    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t *>(
         bucket + sizeof(std::atomic<std::size_t>));
-    if ((bucket_hash == hash_code) && key_manager_.scalarKeyCollisionCheck(key, bucket)) {
+    if ((bucket_hash == hash_code) &&
+        key_manager_.scalarKeyCollisionCheck(key, bucket)) {
       // Match located.
-      values->push_back(reinterpret_cast<const uint8_t*>(bucket + kValueOffset));
+      values->push_back(
+          reinterpret_cast<const std::uint8_t *>(bucket + kValueOffset));
       if (!allow_duplicate_keys) {
         return;
       }
     }
-    bucket_ref = reinterpret_cast<const std::atomic<std::size_t>*>(bucket)->load(std::memory_order_relaxed);
+    bucket_ref =
+        reinterpret_cast<const std::atomic<std::size_t> *>(bucket)->load(
+            std::memory_order_relaxed);
   }
 }
 
@@ -679,25 +754,35 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-void FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::getAllCompositeKey(const std::vector<TypedValue> &key, std::vector<const uint8_t*> *values) const {
+void FastSeparateChainingHashTable<resizable,
+                                   serializable,
+                                   force_key_copy,
+                                   allow_duplicate_keys>::
+    getAllCompositeKey(const std::vector<TypedValue> &key,
+                       std::vector<const std::uint8_t *> *values) const {
   DEBUG_ASSERT(this->key_types_.size() == key.size());
 
   const std::size_t hash_code = this->hashCompositeKey(key);
-  std::size_t bucket_ref = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
+  std::size_t bucket_ref =
+      slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
   while (bucket_ref != 0) {
     DEBUG_ASSERT(bucket_ref != std::numeric_limits<std::size_t>::max());
-    const char *bucket = static_cast<const char*>(buckets_) + (bucket_ref - 1) * bucket_size_;
-    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t*>(
+    const char *bucket =
+        static_cast<const char *>(buckets_) + (bucket_ref - 1) * bucket_size_;
+    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t *>(
         bucket + sizeof(std::atomic<std::size_t>));
-    if ((bucket_hash == hash_code) && key_manager_.compositeKeyCollisionCheck(key, bucket)) {
+    if ((bucket_hash == hash_code) &&
+        key_manager_.compositeKeyCollisionCheck(key, bucket)) {
       // Match located.
-      values->push_back(reinterpret_cast<const uint8_t*>(bucket + kValueOffset));
+      values->push_back(
+          reinterpret_cast<const std::uint8_t *>(bucket + kValueOffset));
       if (!allow_duplicate_keys) {
         return;
       }
     }
-    bucket_ref = reinterpret_cast<const std::atomic<std::size_t>*>(bucket)->load(std::memory_order_relaxed);
+    bucket_ref =
+        reinterpret_cast<const std::atomic<std::size_t> *>(bucket)->load(
+            std::memory_order_relaxed);
   }
 }
 
@@ -705,18 +790,22 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-HashTablePutResult
-    FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-        ::putInternal(const TypedValue &key,
-                      const std::size_t variable_key_size,
-                      const uint8_t &value,
-                      HashTablePreallocationState *prealloc_state) {
+HashTablePutResult FastSeparateChainingHashTable<resizable,
+                                                 serializable,
+                                                 force_key_copy,
+                                                 allow_duplicate_keys>::
+    putInternal(const TypedValue &key,
+                const std::size_t variable_key_size,
+                const std::uint8_t &value,
+                HashTablePreallocationState *prealloc_state) {
   DEBUG_ASSERT(this->key_types_.size() == 1);
-  DEBUG_ASSERT(key.isPlausibleInstanceOf(this->key_types_.front()->getSignature()));
+  DEBUG_ASSERT(
+      key.isPlausibleInstanceOf(this->key_types_.front()->getSignature()));
 
   if (prealloc_state == nullptr) {
     // Early check for a free bucket.
-    if (header_->buckets_allocated.load(std::memory_order_relaxed) >= header_->num_buckets) {
+    if (header_->buckets_allocated.load(std::memory_order_relaxed) >=
+        header_->num_buckets) {
       return HashTablePutResult::kOutOfSpace;
     }
 
@@ -763,10 +852,11 @@ HashTablePutResult
   writeScalarKeyToBucket(key, hash_code, bucket, prealloc_state);
 
   // Store the value by using placement new with ValueT's copy constructor.
-  new(static_cast<char*>(bucket) + kValueOffset) uint8_t(value);
+  new (static_cast<char *>(bucket) + kValueOffset) std::uint8_t(value);
 
   // Update the previous chain pointer to point to the new bucket.
-  pending_chain_ptr->store(pending_chain_ptr_finish_value, std::memory_order_release);
+  pending_chain_ptr->store(pending_chain_ptr_finish_value,
+                           std::memory_order_release);
 
   // We're all done.
   return HashTablePutResult::kOK;
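
The tail of putInternal() above shows the publication step used throughout this file: the bucket's key, hash, and value are written first, and only then is the predecessor's chain pointer stored with release semantics, so a reader that observes the new chain value also sees a fully written bucket. A minimal, self-contained sketch of that step (toy types, not Quickstep code; it does not cover the earlier bucket reservation done by locateBucketForInsertion()):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    struct ToyBucket {
      std::atomic<std::size_t> next{0};  // 1-based index of next bucket, 0 = none
      std::size_t hash{0};
      std::uint8_t payload[8]{};
    };

    // Fill the bucket, then publish it by writing the chain pointer last.
    // Requires payload_bytes <= sizeof(bucket->payload).
    void Publish(std::atomic<std::size_t> *pending_chain_ptr,
                 const std::size_t finish_value,
                 ToyBucket *bucket, const std::size_t hash,
                 const std::uint8_t *init, const std::size_t payload_bytes) {
      bucket->hash = hash;
      std::memcpy(bucket->payload, init, payload_bytes);  // contents first...
      pending_chain_ptr->store(finish_value,              // ...then make visible
                               std::memory_order_release);
    }
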
@@ -776,17 +866,20 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-HashTablePutResult
-    FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-        ::putCompositeKeyInternalFast(const std::vector<TypedValue> &key,
-                                  const std::size_t variable_key_size,
-                                  const uint8_t *init_value_ptr,
-                                  HashTablePreallocationState *prealloc_state) {
+HashTablePutResult FastSeparateChainingHashTable<resizable,
+                                                 serializable,
+                                                 force_key_copy,
+                                                 allow_duplicate_keys>::
+    putCompositeKeyInternalFast(const std::vector<TypedValue> &key,
+                                const std::size_t variable_key_size,
+                                const std::uint8_t *init_value_ptr,
+                                HashTablePreallocationState *prealloc_state) {
   DEBUG_ASSERT(this->key_types_.size() == key.size());
 
   if (prealloc_state == nullptr) {
     // Early check for a free bucket.
-    if (header_->buckets_allocated.load(std::memory_order_relaxed) >= header_->num_buckets) {
+    if (header_->buckets_allocated.load(std::memory_order_relaxed) >=
+        header_->num_buckets) {
       return HashTablePutResult::kOutOfSpace;
     }
 
@@ -832,12 +925,11 @@ HashTablePutResult
   // Write the key and hash.
   writeCompositeKeyToBucket(key, hash_code, bucket, prealloc_state);
 
-  // Store the value by using placement new with ValueT's copy constructor.
-//  new(static_cast<char*>(bucket) + kValueOffset) uint8_t(value);
-    uint8_t *value = static_cast<uint8_t*>(bucket) + kValueOffset;
-        memcpy(value, init_value_ptr, this->total_payload_size_);
+  std::uint8_t *value = static_cast<std::uint8_t *>(bucket) + kValueOffset;
+  memcpy(value, init_value_ptr, this->total_payload_size_);
   // Update the previous chain pointer to point to the new bucket.
-  pending_chain_ptr->store(pending_chain_ptr_finish_value, std::memory_order_release);
+  pending_chain_ptr->store(pending_chain_ptr_finish_value,
+                           std::memory_order_release);
 
   // We're all done.
   return HashTablePutResult::kOK;
@@ -847,13 +939,17 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-uint8_t* FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::upsertInternalFast(const TypedValue &key,
-                     const std::size_t variable_key_size,
-                     const std::uint8_t *init_value_ptr) {
+std::uint8_t* FastSeparateChainingHashTable<resizable,
+                                            serializable,
+                                            force_key_copy,
+                                            allow_duplicate_keys>::
+    upsertInternalFast(const TypedValue &key,
+                       const std::size_t variable_key_size,
+                       const std::uint8_t *init_value_ptr) {
   DEBUG_ASSERT(!allow_duplicate_keys);
   DEBUG_ASSERT(this->key_types_.size() == 1);
-  DEBUG_ASSERT(key.isPlausibleInstanceOf(this->key_types_.front()->getSignature()));
+  DEBUG_ASSERT(
+      key.isPlausibleInstanceOf(this->key_types_.front()->getSignature()));
 
   if (variable_key_size > 0) {
     // Don't allocate yet, since the key may already be present. However, we
@@ -861,9 +957,11 @@ uint8_t* FastSeparateChainingHashTable<resizable, serializable, force_key_copy,
     // space is big enough to hold the key (at least one must be true: either
     // the key is already present and allocated, or we need to be able to
     // allocate enough space for it).
-    std::size_t allocated_bytes = header_->variable_length_bytes_allocated.load(std::memory_order_relaxed);
-    if ((allocated_bytes < variable_key_size)
-        && (allocated_bytes + variable_key_size > key_manager_.getVariableLengthKeyStorageSize())) {
+    std::size_t allocated_bytes = header_->variable_length_bytes_allocated.load(
+        std::memory_order_relaxed);
+    if ((allocated_bytes < variable_key_size) &&
+        (allocated_bytes + variable_key_size >
+         key_manager_.getVariableLengthKeyStorageSize())) {
       return nullptr;
     }
   }
@@ -886,7 +984,8 @@ uint8_t* FastSeparateChainingHashTable<resizable, serializable, force_key_copy,
       return nullptr;
     } else if (key_manager_.scalarKeyCollisionCheck(key, bucket)) {
       // Found an already-existing entry for this key.
-      return reinterpret_cast<uint8_t*>(static_cast<char*>(bucket) + kValueOffset);
+      return reinterpret_cast<std::uint8_t *>(static_cast<char *>(bucket) +
+                                              kValueOffset);
     }
   }
 
@@ -895,16 +994,15 @@ uint8_t* FastSeparateChainingHashTable<resizable, serializable, force_key_copy,
   writeScalarKeyToBucket(key, hash_code, bucket, nullptr);
 
   // Copy the supplied initial value (or the default init_payload_) into place.
-//  uint8_t *value = new(static_cast<char*>(bucket) + kValueOffset) uint8_t(initial_value);
-
-    uint8_t *value = static_cast<unsigned char*>(bucket) + kValueOffset;
-    if (init_value_ptr == nullptr)
-        memcpy(value, init_payload_, this->total_payload_size_);
-    else
-        memcpy(value, init_value_ptr, this->total_payload_size_);
+  std::uint8_t *value = static_cast<unsigned char *>(bucket) + kValueOffset;
+  if (init_value_ptr == nullptr)
+    memcpy(value, init_payload_, this->total_payload_size_);
+  else
+    memcpy(value, init_value_ptr, this->total_payload_size_);
 
   // Update the previous chain pointer to point to the new bucket.
-  pending_chain_ptr->store(pending_chain_ptr_finish_value, std::memory_order_release);
+  pending_chain_ptr->store(pending_chain_ptr_finish_value,
+                           std::memory_order_release);
 
   // Return the value.
   return value;
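
upsertInternalFast() above is a find-or-create: if the chain already holds the key, the existing payload pointer is returned; otherwise a fresh bucket is initialized from either the caller-supplied initial value or the table's default init_payload_. The shape of that path, with the hash-table plumbing abstracted behind hypothetical callables (not Quickstep APIs):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <functional>

    // 'find_existing' stands in for the chain walk, 'allocate_bucket' for
    // bucket allocation; both are assumptions for illustration only.
    std::uint8_t *UpsertSketch(
        const std::function<std::uint8_t *()> &find_existing,
        const std::function<std::uint8_t *()> &allocate_bucket,
        const std::uint8_t *init_value_ptr,
        const std::uint8_t *default_payload,
        const std::size_t payload_bytes) {
      if (std::uint8_t *existing = find_existing()) {
        return existing;  // key already present: return its payload slot
      }
      std::uint8_t *value = allocate_bucket();  // nullptr when out of space
      if (value == nullptr) {
        return nullptr;
      }
      std::memcpy(value,
                  init_value_ptr != nullptr ? init_value_ptr : default_payload,
                  payload_bytes);
      return value;
    }
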
@@ -914,10 +1012,13 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-uint8_t* FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::upsertCompositeKeyInternalFast(const std::vector<TypedValue> &key,
-                                 const std::uint8_t *init_value_ptr,
-                                 const std::size_t variable_key_size) {
+std::uint8_t* FastSeparateChainingHashTable<resizable,
+                                            serializable,
+                                            force_key_copy,
+                                            allow_duplicate_keys>::
+    upsertCompositeKeyInternalFast(const std::vector<TypedValue> &key,
+                                   const std::uint8_t *init_value_ptr,
+                                   const std::size_t variable_key_size) {
   DEBUG_ASSERT(!allow_duplicate_keys);
   DEBUG_ASSERT(this->key_types_.size() == key.size());
 
@@ -927,9 +1028,11 @@ uint8_t* FastSeparateChainingHashTable<resizable, serializable, force_key_copy,
     // space is big enough to hold the key (at least one must be true: either
     // the key is already present and allocated, or we need to be able to
     // allocate enough space for it).
-    std::size_t allocated_bytes = header_->variable_length_bytes_allocated.load(std::memory_order_relaxed);
-    if ((allocated_bytes < variable_key_size)
-        && (allocated_bytes + variable_key_size > key_manager_.getVariableLengthKeyStorageSize())) {
+    std::size_t allocated_bytes = header_->variable_length_bytes_allocated.load(
+        std::memory_order_relaxed);
+    if ((allocated_bytes < variable_key_size) &&
+        (allocated_bytes + variable_key_size >
+         key_manager_.getVariableLengthKeyStorageSize())) {
       return nullptr;
     }
   }
@@ -952,7 +1055,8 @@ uint8_t* FastSeparateChainingHashTable<resizable, serializable, force_key_copy,
       return nullptr;
     } else if (key_manager_.compositeKeyCollisionCheck(key, bucket)) {
       // Found an already-existing entry for this key.
-      return reinterpret_cast<uint8_t*>(static_cast<char*>(bucket) + kValueOffset);
+      return reinterpret_cast<std::uint8_t *>(static_cast<char *>(bucket) +
+                                              kValueOffset);
     }
   }
 
@@ -960,17 +1064,16 @@ uint8_t* FastSeparateChainingHashTable<resizable, serializable, force_key_copy,
   // Write the key and hash.
   writeCompositeKeyToBucket(key, hash_code, bucket, nullptr);
 
-//  uint8_t *value;
-//  value = static_cast<unsigned char*>(bucket) + kValueOffset;
-    uint8_t *value = static_cast<unsigned char*>(bucket) + kValueOffset;
-    if (init_value_ptr == nullptr) {
-        memcpy(value, init_payload_, this->total_payload_size_);
-    } else {
-        memcpy(value, init_value_ptr, this->total_payload_size_);
-    }
+  std::uint8_t *value = static_cast<unsigned char *>(bucket) + kValueOffset;
+  if (init_value_ptr == nullptr) {
+    memcpy(value, init_payload_, this->total_payload_size_);
+  } else {
+    memcpy(value, init_value_ptr, this->total_payload_size_);
+  }
 
   // Update the previous chain pointer to point to the new bucket.
-  pending_chain_ptr->store(pending_chain_ptr_finish_value, std::memory_order_release);
+  pending_chain_ptr->store(pending_chain_ptr_finish_value,
+                           std::memory_order_release);
 
   // Return the value.
   return value;
@@ -980,13 +1083,19 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-bool FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::getNextEntry(TypedValue *key, const uint8_t **value, std::size_t *entry_num) const {
+bool FastSeparateChainingHashTable<
+    resizable,
+    serializable,
+    force_key_copy,
+    allow_duplicate_keys>::getNextEntry(TypedValue *key,
+                                        const std::uint8_t **value,
+                                        std::size_t *entry_num) const {
   DEBUG_ASSERT(this->key_types_.size() == 1);
   if (*entry_num < header_->buckets_allocated.load(std::memory_order_relaxed)) {
-    const char *bucket = static_cast<const char*>(buckets_) + (*entry_num) * bucket_size_;
+    const char *bucket =
+        static_cast<const char *>(buckets_) + (*entry_num) * bucket_size_;
     *key = key_manager_.getKeyComponentTyped(bucket, 0);
-    *value = reinterpret_cast<const uint8_t*>(bucket + kValueOffset);
+    *value = reinterpret_cast<const std::uint8_t *>(bucket + kValueOffset);
     ++(*entry_num);
     return true;
   } else {
@@ -998,18 +1107,22 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-bool FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::getNextEntryCompositeKey(std::vector<TypedValue> *key,
-                               const uint8_t **value,
-                               std::size_t *entry_num) const {
+bool FastSeparateChainingHashTable<resizable,
+                                   serializable,
+                                   force_key_copy,
+                                   allow_duplicate_keys>::
+    getNextEntryCompositeKey(std::vector<TypedValue> *key,
+                             const std::uint8_t **value,
+                             std::size_t *entry_num) const {
   if (*entry_num < header_->buckets_allocated.load(std::memory_order_relaxed)) {
-    const char *bucket = static_cast<const char*>(buckets_) + (*entry_num) * bucket_size_;
-    for (std::vector<const Type*>::size_type key_idx = 0;
+    const char *bucket =
+        static_cast<const char *>(buckets_) + (*entry_num) * bucket_size_;
+    for (std::vector<const Type *>::size_type key_idx = 0;
          key_idx < this->key_types_.size();
          ++key_idx) {
       key->emplace_back(key_manager_.getKeyComponentTyped(bucket, key_idx));
     }
-    *value = reinterpret_cast<const uint8_t*>(bucket + kValueOffset);
+    *value = reinterpret_cast<const std::uint8_t *>(bucket + kValueOffset);
     ++(*entry_num);
     return true;
   } else {
@@ -1021,29 +1134,38 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-bool FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::getNextEntryForKey(const TypedValue &key,
-                         const std::size_t hash_code,
-                         const uint8_t **value,
-                         std::size_t *entry_num) const {
+bool FastSeparateChainingHashTable<
+    resizable,
+    serializable,
+    force_key_copy,
+    allow_duplicate_keys>::getNextEntryForKey(const TypedValue &key,
+                                              const std::size_t hash_code,
+                                              const std::uint8_t **value,
+                                              std::size_t *entry_num) const {
   DEBUG_ASSERT(this->key_types_.size() == 1);
-  DEBUG_ASSERT(key.isPlausibleInstanceOf(this->key_types_.front()->getSignature()));
+  DEBUG_ASSERT(
+      key.isPlausibleInstanceOf(this->key_types_.front()->getSignature()));
 
   if (*entry_num == 0) {
-    *entry_num = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
+    *entry_num =
+        slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
   } else if (*entry_num == std::numeric_limits<std::size_t>::max()) {
     return false;
   }
 
   while (*entry_num != 0) {
     DEBUG_ASSERT(*entry_num != std::numeric_limits<std::size_t>::max());
-    const char *bucket = static_cast<const char*>(buckets_) + (*entry_num - 1) * bucket_size_;
-    *entry_num = reinterpret_cast<const std::atomic<std::size_t>*>(bucket)->load(std::memory_order_relaxed);
-    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t*>(
+    const char *bucket =
+        static_cast<const char *>(buckets_) + (*entry_num - 1) * bucket_size_;
+    *entry_num =
+        reinterpret_cast<const std::atomic<std::size_t> *>(bucket)->load(
+            std::memory_order_relaxed);
+    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t *>(
         bucket + sizeof(std::atomic<std::size_t>));
-    if ((bucket_hash == hash_code) && key_manager_.scalarKeyCollisionCheck(key, bucket)) {
+    if ((bucket_hash == hash_code) &&
+        key_manager_.scalarKeyCollisionCheck(key, bucket)) {
       // Match located.
-      *value = reinterpret_cast<const uint8_t*>(bucket + kValueOffset);
+      *value = reinterpret_cast<const std::uint8_t *>(bucket + kValueOffset);
       if (*entry_num == 0) {
         // If this is the last bucket in the chain, prevent the next call from
         // starting over again.
@@ -1061,28 +1183,36 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-bool FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::getNextEntryForCompositeKey(const std::vector<TypedValue> &key,
-                                  const std::size_t hash_code,
-                                  const uint8_t **value,
-                                  std::size_t *entry_num) const {
+bool FastSeparateChainingHashTable<resizable,
+                                   serializable,
+                                   force_key_copy,
+                                   allow_duplicate_keys>::
+    getNextEntryForCompositeKey(const std::vector<TypedValue> &key,
+                                const std::size_t hash_code,
+                                const std::uint8_t **value,
+                                std::size_t *entry_num) const {
   DEBUG_ASSERT(this->key_types_.size() == key.size());
 
   if (*entry_num == 0) {
-    *entry_num = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
+    *entry_num =
+        slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
   } else if (*entry_num == std::numeric_limits<std::size_t>::max()) {
     return false;
   }
 
   while (*entry_num != 0) {
     DEBUG_ASSERT(*entry_num != std::numeric_limits<std::size_t>::max());
-    const char *bucket = static_cast<const char*>(buckets_) + (*entry_num - 1) * bucket_size_;
-    *entry_num = reinterpret_cast<const std::atomic<std::size_t>*>(bucket)->load(std::memory_order_relaxed);
-    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t*>(
+    const char *bucket =
+        static_cast<const char *>(buckets_) + (*entry_num - 1) * bucket_size_;
+    *entry_num =
+        reinterpret_cast<const std::atomic<std::size_t> *>(bucket)->load(
+            std::memory_order_relaxed);
+    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t *>(
         bucket + sizeof(std::atomic<std::size_t>));
-    if ((bucket_hash == hash_code) && key_manager_.compositeKeyCollisionCheck(key, bucket)) {
+    if ((bucket_hash == hash_code) &&
+        key_manager_.compositeKeyCollisionCheck(key, bucket)) {
       // Match located.
-      *value = reinterpret_cast<const uint8_t*>(bucket + kValueOffset);
+      *value = reinterpret_cast<const std::uint8_t *>(bucket + kValueOffset);
       if (*entry_num == 0) {
         // If this is the last bucket in the chain, prevent the next call from
         // starting over again.
@@ -1100,23 +1230,32 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-bool FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::hasKey(const TypedValue &key) const {
+bool FastSeparateChainingHashTable<
+    resizable,
+    serializable,
+    force_key_copy,
+    allow_duplicate_keys>::hasKey(const TypedValue &key) const {
   DEBUG_ASSERT(this->key_types_.size() == 1);
-  DEBUG_ASSERT(key.isPlausibleInstanceOf(this->key_types_.front()->getSignature()));
+  DEBUG_ASSERT(
+      key.isPlausibleInstanceOf(this->key_types_.front()->getSignature()));
 
   const std::size_t hash_code = key.getHash();
-  std::size_t bucket_ref = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
+  std::size_t bucket_ref =
+      slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
   while (bucket_ref != 0) {
     DEBUG_ASSERT(bucket_ref != std::numeric_limits<std::size_t>::max());
-    const char *bucket = static_cast<const char*>(buckets_) + (bucket_ref - 1) * bucket_size_;
-    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t*>(
+    const char *bucket =
+        static_cast<const char *>(buckets_) + (bucket_ref - 1) * bucket_size_;
+    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t *>(
         bucket + sizeof(std::atomic<std::size_t>));
-    if ((bucket_hash == hash_code) && key_manager_.scalarKeyCollisionCheck(key, bucket)) {
+    if ((bucket_hash == hash_code) &&
+        key_manager_.scalarKeyCollisionCheck(key, bucket)) {
      // Found a match.
       return true;
     }
-    bucket_ref = reinterpret_cast<const std::atomic<std::size_t>*>(bucket)->load(std::memory_order_relaxed);
+    bucket_ref =
+        reinterpret_cast<const std::atomic<std::size_t> *>(bucket)->load(
+            std::memory_order_relaxed);
   }
   return false;
 }
@@ -1125,22 +1264,31 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-bool FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::hasCompositeKey(const std::vector<TypedValue> &key) const {
+bool FastSeparateChainingHashTable<
+    resizable,
+    serializable,
+    force_key_copy,
+    allow_duplicate_keys>::hasCompositeKey(const std::vector<TypedValue> &key)
+    const {
   DEBUG_ASSERT(this->key_types_.size() == key.size());
 
   const std::size_t hash_code = this->hashCompositeKey(key);
-  std::size_t bucket_ref = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
+  std::size_t bucket_ref =
+      slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
   while (bucket_ref != 0) {
     DEBUG_ASSERT(bucket_ref != std::numeric_limits<std::size_t>::max());
-    const char *bucket = static_cast<const char*>(buckets_) + (bucket_ref - 1) * bucket_size_;
-    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t*>(
+    const char *bucket =
+        static_cast<const char *>(buckets_) + (bucket_ref - 1) * bucket_size_;
+    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t *>(
         bucket + sizeof(std::atomic<std::size_t>));
-    if ((bucket_hash == hash_code) && key_manager_.compositeKeyCollisionCheck(key, bucket)) {
+    if ((bucket_hash == hash_code) &&
+        key_manager_.compositeKeyCollisionCheck(key, bucket)) {
      // Found a match.
       return true;
     }
-    bucket_ref = reinterpret_cast<const std::atomic<std::size_t>*>(bucket)->load(std::memory_order_relaxed);
+    bucket_ref =
+        reinterpret_cast<const std::atomic<std::size_t> *>(bucket)->load(
+            std::memory_order_relaxed);
   }
   return false;
 }
@@ -1149,10 +1297,13 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-void FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::resize(const std::size_t extra_buckets,
-             const std::size_t extra_variable_storage,
-             const std::size_t retry_num) {
+void FastSeparateChainingHashTable<
+    resizable,
+    serializable,
+    force_key_copy,
+    allow_duplicate_keys>::resize(const std::size_t extra_buckets,
+                                  const std::size_t extra_variable_storage,
+                                  const std::size_t retry_num) {
   DEBUG_ASSERT(resizable);
 
   // A retry should never be necessary with this implementation of HashTable.
@@ -1178,33 +1329,36 @@ void FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allo
   // account kHashTableLoadFactor.
   std::size_t resized_num_slots = get_next_prime_number(
       (header_->num_buckets + extra_buckets / 2) * kHashTableLoadFactor * 2);
-  std::size_t variable_storage_required
-      = (resized_num_slots / kHashTableLoadFactor) * key_manager_.getEstimatedVariableKeySize();
-  const std::size_t original_variable_storage_used
-      = header_->variable_length_bytes_allocated.load(std::memory_order_relaxed);
+  std::size_t variable_storage_required =
+      (resized_num_slots / kHashTableLoadFactor) *
+      key_manager_.getEstimatedVariableKeySize();
+  const std::size_t original_variable_storage_used =
+      header_->variable_length_bytes_allocated.load(std::memory_order_relaxed);
   // If this resize was triggered by a too-large variable-length key, bump up
   // the variable-length storage requirement.
-  if ((extra_variable_storage > 0)
-      && (extra_variable_storage + original_variable_storage_used
-          > key_manager_.getVariableLengthKeyStorageSize())) {
+  if ((extra_variable_storage > 0) &&
+      (extra_variable_storage + original_variable_storage_used >
+       key_manager_.getVariableLengthKeyStorageSize())) {
     variable_storage_required += extra_variable_storage;
   }
 
-  const std::size_t resized_memory_required
-      = sizeof(Header)
-        + resized_num_slots * sizeof(std::atomic<std::size_t>)
-        + (resized_num_slots / kHashTableLoadFactor) * bucket_size_
-        + variable_storage_required;
-  const std::size_t resized_storage_slots
-      = this->storage_manager_->SlotsNeededForBytes(resized_memory_required);
+  const std::size_t resized_memory_required =
+      sizeof(Header) + resized_num_slots * sizeof(std::atomic<std::size_t>) +
+      (resized_num_slots / kHashTableLoadFactor) * bucket_size_ +
+      variable_storage_required;
+  const std::size_t resized_storage_slots =
+      this->storage_manager_->SlotsNeededForBytes(resized_memory_required);
   if (resized_storage_slots == 0) {
-    FATAL_ERROR("Storage requirement for resized SeparateChainingHashTable "
-                "exceeds maximum allocation size.");
+    FATAL_ERROR(
+        "Storage requirement for resized SeparateChainingHashTable "
+        "exceeds maximum allocation size.");
   }
 
   // Get a new StorageBlob to hold the resized hash table.
-  const block_id resized_blob_id = this->storage_manager_->createBlob(resized_storage_slots);
-  MutableBlobReference resized_blob = this->storage_manager_->getBlobMutable(resized_blob_id);
+  const block_id resized_blob_id =
+      this->storage_manager_->createBlob(resized_storage_slots);
+  MutableBlobReference resized_blob =
+      this->storage_manager_->getBlobMutable(resized_blob_id);
 
   // Locate data structures inside the new StorageBlob.
   void *aligned_memory_start = resized_blob->getMemoryMutable();
@@ -1212,12 +1366,12 @@ void FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allo
   if (align(alignof(Header),
             sizeof(Header),
             aligned_memory_start,
-            available_memory)
-          == nullptr) {
+            available_memory) == nullptr) {
     // Should be impossible, as noted in constructor.
-    FATAL_ERROR("StorageBlob used to hold resized SeparateChainingHashTable "
-                "is too small to meet alignment requirements of "
-                "LinearOpenAddressingHashTable::Header.");
+    FATAL_ERROR(
+        "StorageBlob used to hold resized SeparateChainingHashTable "
+        "is too small to meet alignment requirements of "
+        "LinearOpenAddressingHashTable::Header.");
   } else if (aligned_memory_start != resized_blob->getMemoryMutable()) {
     // Again, should be impossible.
     DEV_WARNING("In SeparateChainingHashTable::resize(), StorageBlob "
@@ -1227,59 +1381,63 @@ void FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allo
                 << "LinearOpenAddressingHashTable::Header.");
   }
 
-  Header *resized_header = static_cast<Header*>(aligned_memory_start);
-  aligned_memory_start = static_cast<char*>(aligned_memory_start) + sizeof(Header);
+  Header *resized_header = static_cast<Header *>(aligned_memory_start);
+  aligned_memory_start =
+      static_cast<char *>(aligned_memory_start) + sizeof(Header);
   available_memory -= sizeof(Header);
 
   // As in constructor, recompute the number of slots and buckets using the
   // actual available memory.
-  std::size_t resized_num_buckets
-      = (available_memory - extra_variable_storage)
-        / (kHashTableLoadFactor * sizeof(std::atomic<std::size_t>)
-           + bucket_size_
-           + key_manager_.getEstimatedVariableKeySize());
-  resized_num_slots = get_previous_prime_number(resized_num_buckets * kHashTableLoadFactor);
+  std::size_t resized_num_buckets =
+      (available_memory - extra_variable_storage) /
+      (kHashTableLoadFactor * sizeof(std::atomic<std::size_t>) + bucket_size_ +
+       key_manager_.getEstimatedVariableKeySize());
+  resized_num_slots =
+      get_previous_prime_number(resized_num_buckets * kHashTableLoadFactor);
   resized_num_buckets = resized_num_slots / kHashTableLoadFactor;
 
   // Locate slot array.
-  std::atomic<std::size_t> *resized_slots = static_cast<std::atomic<std::size_t>*>(aligned_memory_start);
-  aligned_memory_start = static_cast<char*>(aligned_memory_start)
-                         + sizeof(std::atomic<std::size_t>) * resized_num_slots;
+  std::atomic<std::size_t> *resized_slots =
+      static_cast<std::atomic<std::size_t> *>(aligned_memory_start);
+  aligned_memory_start = static_cast<char *>(aligned_memory_start) +
+                         sizeof(std::atomic<std::size_t>) * resized_num_slots;
   available_memory -= sizeof(std::atomic<std::size_t>) * resized_num_slots;
 
   // As in constructor, we will be extra paranoid and use align() to locate the
   // start of the array of buckets, as well.
   void *resized_buckets = aligned_memory_start;
-  if (align(kBucketAlignment,
-            bucket_size_,
-            resized_buckets,
-            available_memory)
-          == nullptr) {
-    FATAL_ERROR("StorageBlob used to hold resized SeparateChainingHashTable "
-                "is too small to meet alignment requirements of buckets.");
+  if (align(
+          kBucketAlignment, bucket_size_, resized_buckets, available_memory) ==
+      nullptr) {
+    FATAL_ERROR(
+        "StorageBlob used to hold resized SeparateChainingHashTable "
+        "is too small to meet alignment requirements of buckets.");
   } else if (resized_buckets != aligned_memory_start) {
-    DEV_WARNING("Bucket array start position adjusted to meet alignment "
-                "requirement for SeparateChainingHashTable's value type.");
-    if (resized_num_buckets * bucket_size_ + variable_storage_required > available_memory) {
+    DEV_WARNING(
+        "Bucket array start position adjusted to meet alignment "
+        "requirement for SeparateChainingHashTable's value type.");
+    if (resized_num_buckets * bucket_size_ + variable_storage_required >
+        available_memory) {
       --resized_num_buckets;
     }
   }
-  aligned_memory_start = static_cast<char*>(aligned_memory_start)
-                         + resized_num_buckets * bucket_size_;
+  aligned_memory_start = static_cast<char *>(aligned_memory_start) +
+                         resized_num_buckets * bucket_size_;
   available_memory -= resized_num_buckets * bucket_size_;
 
   void *resized_variable_length_key_storage = aligned_memory_start;
   const std::size_t resized_variable_length_key_storage_size = available_memory;
 
-  const std::size_t original_buckets_used = header_->buckets_allocated.load(std::memory_order_relaxed);
+  const std::size_t original_buckets_used =
+      header_->buckets_allocated.load(std::memory_order_relaxed);
 
   // Initialize the header.
   resized_header->num_slots = resized_num_slots;
   resized_header->num_buckets = resized_num_buckets;
-  resized_header->buckets_allocated.store(original_buckets_used, std::memory_order_relaxed);
+  resized_header->buckets_allocated.store(original_buckets_used,
+                                          std::memory_order_relaxed);
   resized_header->variable_length_bytes_allocated.store(
-      original_variable_storage_used,
-      std::memory_order_relaxed);
+      original_variable_storage_used, std::memory_order_relaxed);
 
   // Bulk-copy buckets. This is safe because:
   //     1. The "next" pointers will be adjusted when rebuilding chains below.
@@ -1298,30 +1456,34 @@ void FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allo
   // GCC 4.8.3, so we assume we need to invoke ValueT's copy or move
   // constructor, even though the plain memcpy above could suffice for many
   // possible ValueTs.
-  void *current_value_original = static_cast<char*>(buckets_) + kValueOffset;
-  void *current_value_resized = static_cast<char*>(resized_buckets) + kValueOffset;
-  for (std::size_t bucket_num = 0; bucket_num < original_buckets_used; ++bucket_num) {
+  void *current_value_original = static_cast<char *>(buckets_) + kValueOffset;
+  void *current_value_resized =
+      static_cast<char *>(resized_buckets) + kValueOffset;
+  for (std::size_t bucket_num = 0; bucket_num < original_buckets_used;
+       ++bucket_num) {
     // Use a move constructor if available to avoid a deep-copy, since resizes
     // always succeed.
-    new (current_value_resized) uint8_t(std::move(*static_cast<uint8_t*>(current_value_original)));
-    current_value_original = static_cast<char*>(current_value_original) + bucket_size_;
-    current_value_resized = static_cast<char*>(current_value_resized) + bucket_size_;
+    new (current_value_resized) std::uint8_t(
+        std::move(*static_cast<std::uint8_t *>(current_value_original)));
+    current_value_original =
+        static_cast<char *>(current_value_original) + bucket_size_;
+    current_value_resized =
+        static_cast<char *>(current_value_resized) + bucket_size_;
   }
 
   // Copy over variable-length key components, if any.
   if (original_variable_storage_used > 0) {
-    DEBUG_ASSERT(original_variable_storage_used
-                 == key_manager_.getNextVariableLengthKeyOffset());
-    DEBUG_ASSERT(original_variable_storage_used <= resized_variable_length_key_storage_size);
+    DEBUG_ASSERT(original_variable_storage_used ==
+                 key_manager_.getNextVariableLengthKeyOffset());
+    DEBUG_ASSERT(original_variable_storage_used <=
+                 resized_variable_length_key_storage_size);
     std::memcpy(resized_variable_length_key_storage,
                 key_manager_.getVariableLengthKeyStorage(),
                 original_variable_storage_used);
   }
 
   // Destroy values in the original hash table, if necessary.
-  DestroyValues(buckets_,
-                original_buckets_used,
-                bucket_size_);
+  DestroyValues(buckets_, original_buckets_used, bucket_size_);
 
   // Make resized structures active.
   std::swap(this->blob_, resized_blob);
@@ -1340,17 +1502,18 @@ void FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allo
 
   // Rebuild chains.
   void *current_bucket = buckets_;
-  for (std::size_t bucket_num = 0; bucket_num < original_buckets_used; ++bucket_num) {
-    std::atomic<std::size_t> *next_ptr
-        = static_cast<std::atomic<std::size_t>*>(current_bucket);
-    const std::size_t hash_code = *reinterpret_cast<const std::size_t*>(
-        static_cast<const char*>(current_bucket) + sizeof(std::atomic<std::size_t>));
+  for (std::size_t bucket_num = 0; bucket_num < original_buckets_used;
+       ++bucket_num) {
+    std::atomic<std::size_t> *next_ptr =
+        static_cast<std::atomic<std::size_t> *>(current_bucket);
+    const std::size_t hash_code = *reinterpret_cast<const std::size_t *>(
+        static_cast<const char *>(current_bucket) +
+        sizeof(std::atomic<std::size_t>));
 
     const std::size_t slot_number = hash_code % header_->num_slots;
     std::size_t slot_ptr_value = 0;
-    if (slots_[slot_number].compare_exchange_strong(slot_ptr_value,
-                                                    bucket_num + 1,
-                                                    std::memory_order_relaxed)) {
+    if (slots_[slot_number].compare_exchange_strong(
+            slot_ptr_value, bucket_num + 1, std::memory_order_relaxed)) {
       // This bucket is the first in the chain for this block, so reset its
       // next pointer to 0.
       next_ptr->store(0, std::memory_order_relaxed);
@@ -1360,7 +1523,7 @@ void FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allo
       next_ptr->store(slot_ptr_value, std::memory_order_relaxed);
       slots_[slot_number].store(bucket_num + 1, std::memory_order_relaxed);
     }
-    current_bucket = static_cast<char*>(current_bucket) + bucket_size_;
+    current_bucket = static_cast<char *>(current_bucket) + bucket_size_;
   }
 }
 
@@ -1368,10 +1531,13 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-bool FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::preallocateForBulkInsert(const std::size_t total_entries,
-                               const std::size_t total_variable_key_size,
-                               HashTablePreallocationState *prealloc_state) {
+bool FastSeparateChainingHashTable<resizable,
+                                   serializable,
+                                   force_key_copy,
+                                   allow_duplicate_keys>::
+    preallocateForBulkInsert(const std::size_t total_entries,
+                             const std::size_t total_variable_key_size,
+                             HashTablePreallocationState *prealloc_state) {
   DEBUG_ASSERT(allow_duplicate_keys);
   if (!key_manager_.allocateVariableLengthKeyStorage(total_variable_key_size)) {
     return false;
@@ -1382,12 +1548,15 @@ bool FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allo
   // than one bucket and exceed 'header_->num_buckets', their respective
   // rollbacks might happen in such an order that some bucket ranges get
   // skipped, while others might get double-allocated later.
-  std::size_t original_buckets_allocated = header_->buckets_allocated.load(std::memory_order_relaxed);
-  std::size_t buckets_post_allocation = original_buckets_allocated + total_entries;
-  while ((buckets_post_allocation <= header_->num_buckets)
-         && !header_->buckets_allocated.compare_exchange_weak(original_buckets_allocated,
-                                                              buckets_post_allocation,
-                                                              std::memory_order_relaxed)) {
+  std::size_t original_buckets_allocated =
+      header_->buckets_allocated.load(std::memory_order_relaxed);
+  std::size_t buckets_post_allocation =
+      original_buckets_allocated + total_entries;
+  while ((buckets_post_allocation <= header_->num_buckets) &&
+         !header_->buckets_allocated.compare_exchange_weak(
+             original_buckets_allocated,
+             buckets_post_allocation,
+             std::memory_order_relaxed)) {
     buckets_post_allocation = original_buckets_allocated + total_entries;
   }
 
@@ -1398,8 +1567,9 @@ bool FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allo
 
   prealloc_state->bucket_position = original_buckets_allocated;
   if (total_variable_key_size != 0) {
-    prealloc_state->variable_length_key_position
-        = key_manager_.incrementNextVariableLengthKeyOffset(total_variable_key_size);
+    prealloc_state->variable_length_key_position =
+        key_manager_.incrementNextVariableLengthKeyOffset(
+            total_variable_key_size);
   }
   return true;
 }
@@ -1408,17 +1578,18 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-void FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::DestroyValues(void *hash_buckets,
-                    const std::size_t num_buckets,
-                    const std::size_t bucket_size) {
-  if (!std::is_trivially_destructible<uint8_t>::value) {
-    void *value_ptr = static_cast<char*>(hash_buckets) + kValueOffset;
-    for (std::size_t bucket_num = 0;
-         bucket_num < num_buckets;
-         ++bucket_num) {
-     static_cast<uint8_t*>(value_ptr)->~uint8_t();
-      value_ptr = static_cast<char*>(value_ptr) + bucket_size;
+void FastSeparateChainingHashTable<
+    resizable,
+    serializable,
+    force_key_copy,
+    allow_duplicate_keys>::DestroyValues(void *hash_buckets,
+                                         const std::size_t num_buckets,
+                                         const std::size_t bucket_size) {
+  if (!std::is_trivially_destructible<std::uint8_t>::value) {
+    void *value_ptr = static_cast<char *>(hash_buckets) + kValueOffset;
+    for (std::size_t bucket_num = 0; bucket_num < num_buckets; ++bucket_num) {
+      static_cast<std::uint8_t *>(value_ptr)->~uint8_t();
+      value_ptr = static_cast<char *>(value_ptr) + bucket_size;
     }
   }
 }
@@ -1427,39 +1598,45 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-inline bool FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::locateBucketForInsertion(const std::size_t hash_code,
-                               const std::size_t variable_key_allocation_required,
-                               void **bucket,
-                               std::atomic<std::size_t> **pending_chain_ptr,
-                               std::size_t *pending_chain_ptr_finish_value,
-                               HashTablePreallocationState *prealloc_state) {
+inline bool FastSeparateChainingHashTable<resizable,
+                                          serializable,
+                                          force_key_copy,
+                                          allow_duplicate_keys>::
+    locateBucketForInsertion(const std::size_t hash_code,
+                             const std::size_t variable_key_allocation_required,
+                             void **bucket,
+                             std::atomic<std::size_t> **pending_chain_ptr,
+                             std::size_t *pending_chain_ptr_finish_value,
+                             HashTablePreallocationState *prealloc_state) {
   DEBUG_ASSERT((prealloc_state == nullptr) || allow_duplicate_keys);
   if (*bucket == nullptr) {
     *pending_chain_ptr = &(slots_[hash_code % header_->num_slots]);
   } else {
-    *pending_chain_ptr = static_cast<std::atomic<std::size_t>*>(*bucket);
+    *pending_chain_ptr = static_cast<std::atomic<std::size_t> *>(*bucket);
   }
   for (;;) {
     std::size_t existing_chain_ptr = 0;
-    if ((*pending_chain_ptr)->compare_exchange_strong(existing_chain_ptr,
-                        

<TRUNCATED>
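For readers skimming the resize() hunks above: the chain-rebuild loop works because every bucket caches the hash of its key, so chains can be relinked into a larger slot array without re-reading keys or values. Below is a minimal, self-contained C++ sketch of that idea (not part of this commit); Bucket and rebuild_chains are illustrative names only, and the real code operates on raw bucket memory with 1-based bucket references, where 0 means "empty slot" or "end of chain".

// Sketch only: relink separate-chaining heads after buckets have been
// bulk-copied, mirroring the loop at the end of resize() above.
#include <atomic>
#include <cstddef>
#include <vector>

struct Bucket {
  std::atomic<std::size_t> next{0};  // 1-based index of the next bucket; 0 ends the chain.
  std::size_t hash = 0;              // cached hash of the key stored in this bucket.
};

void rebuild_chains(std::vector<Bucket> &buckets,
                    std::vector<std::atomic<std::size_t>> &slots) {
  for (std::size_t i = 0; i < buckets.size(); ++i) {
    const std::size_t slot = buckets[i].hash % slots.size();
    std::size_t head = 0;
    if (slots[slot].compare_exchange_strong(head, i + 1,
                                            std::memory_order_relaxed)) {
      // First bucket seen for this slot: it terminates the chain.
      buckets[i].next.store(0, std::memory_order_relaxed);
    } else {
      // Slot already has a chain: splice this bucket in at the front.
      buckets[i].next.store(head, std::memory_order_relaxed);
      slots[slot].store(i + 1, std::memory_order_relaxed);
    }
  }
}

int main() {
  std::vector<Bucket> buckets(4);
  for (std::size_t i = 0; i < buckets.size(); ++i) buckets[i].hash = i * 7;
  std::vector<std::atomic<std::size_t>> slots(3);  // all heads start at 0 (empty).
  rebuild_chains(buckets, slots);
  return 0;
}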


[6/7] incubator-quickstep git commit: Modified Aggregation unit test. Ran clang-format.

Posted by ra...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/dad7d6f3/expressions/aggregation/AggregationHandleMin.hpp
----------------------------------------------------------------------
diff --git a/expressions/aggregation/AggregationHandleMin.hpp b/expressions/aggregation/AggregationHandleMin.hpp
index 119102b..cb46189 100644
--- a/expressions/aggregation/AggregationHandleMin.hpp
+++ b/expressions/aggregation/AggregationHandleMin.hpp
@@ -28,8 +28,8 @@
 #include "catalog/CatalogTypedefs.hpp"
 #include "expressions/aggregation/AggregationConcreteHandle.hpp"
 #include "expressions/aggregation/AggregationHandle.hpp"
-#include "storage/HashTableBase.hpp"
 #include "storage/FastHashTable.hpp"
+#include "storage/HashTableBase.hpp"
 #include "threading/SpinMutex.hpp"
 #include "types/Type.hpp"
 #include "types/TypedValue.hpp"
@@ -56,19 +56,18 @@ class AggregationStateMin : public AggregationState {
   /**
    * @brief Copy constructor (ignores mutex).
    */
-  AggregationStateMin(const AggregationStateMin &orig)
-      : min_(orig.min_) {
-  }
+  AggregationStateMin(const AggregationStateMin &orig) : min_(orig.min_) {}
 
   /**
    * @brief Destructor.
    */
   ~AggregationStateMin() override {}
 
-  size_t getPayloadSize() const {
-     return sizeof(TypedValue);
-  }
+  std::size_t getPayloadSize() const { return sizeof(TypedValue); }
 
+  const std::uint8_t *getPayloadAddress() const {
+    return reinterpret_cast<const uint8_t *>(&min_);
+  }
 
  private:
   friend class AggregationHandleMin;
@@ -76,9 +75,7 @@ class AggregationStateMin : public AggregationState {
   explicit AggregationStateMin(const Type &type)
       : min_(type.getNullableVersion().makeNullValue()) {}
 
-  explicit AggregationStateMin(TypedValue &&value)
-      : min_(std::move(value)) {
-  }
+  explicit AggregationStateMin(TypedValue &&value) : min_(std::move(value)) {}
 
   TypedValue min_;
   SpinMutex mutex_;
@@ -89,8 +86,7 @@ class AggregationStateMin : public AggregationState {
  **/
 class AggregationHandleMin : public AggregationConcreteHandle {
  public:
-  ~AggregationHandleMin() override {
-  }
+  ~AggregationHandleMin() override {}
 
   AggregationState* createInitialState() const override {
     return new AggregationStateMin(type_);
@@ -98,45 +94,46 @@ class AggregationHandleMin : public AggregationConcreteHandle {
 
   AggregationStateHashTableBase* createGroupByHashTable(
       const HashTableImplType hash_table_impl,
-      const std::vector<const Type*> &group_by_types,
+      const std::vector<const Type *> &group_by_types,
       const std::size_t estimated_num_groups,
       StorageManager *storage_manager) const override;
 
   /**
    * @brief Iterate with min aggregation state.
    */
-  inline void iterateUnaryInl(AggregationStateMin *state, const TypedValue &value) const {
+  inline void iterateUnaryInl(AggregationStateMin *state,
+                              const TypedValue &value) const {
     DCHECK(value.isPlausibleInstanceOf(type_.getSignature()));
     compareAndUpdate(state, value);
   }
 
-  inline void iterateUnaryInlFast(const TypedValue &value, uint8_t *byte_ptr) const {
-      DCHECK(value.isPlausibleInstanceOf(type_.getSignature()));
-      TypedValue *min_ptr = reinterpret_cast<TypedValue *>(byte_ptr);
-      compareAndUpdateFast(min_ptr, value);
+  inline void iterateUnaryInlFast(const TypedValue &value,
+                                  std::uint8_t *byte_ptr) const {
+    DCHECK(value.isPlausibleInstanceOf(type_.getSignature()));
+    TypedValue *min_ptr = reinterpret_cast<TypedValue *>(byte_ptr);
+    compareAndUpdateFast(min_ptr, value);
   }
 
-  inline void iterateInlFast(const std::vector<TypedValue> &arguments, uint8_t *byte_ptr) const override {
-    if (block_update) return;
-    iterateUnaryInlFast(arguments.front(), byte_ptr);
+  inline void updateState(const std::vector<TypedValue> &arguments,
+                          std::uint8_t *byte_ptr) const override {
+    if (!block_update_) {
+      iterateUnaryInlFast(arguments.front(), byte_ptr);
+    }
   }
 
-  void BlockUpdate() override {
-      block_update = true;
-  }
+  void blockUpdate() override { block_update_ = true; }
 
-  void AllowUpdate() override {
-      block_update = false;
-  }
+  void allowUpdate() override { block_update_ = false; }
 
-  void initPayload(uint8_t *byte_ptr) const override {
+  void initPayload(std::uint8_t *byte_ptr) const override {
     TypedValue *min_ptr = reinterpret_cast<TypedValue *>(byte_ptr);
     TypedValue t1 = (type_.getNullableVersion().makeNullValue());
     *min_ptr = t1;
   }
 
   AggregationState* accumulateColumnVectors(
-      const std::vector<std::unique_ptr<ColumnVector>> &column_vectors) const override;
+      const std::vector<std::unique_ptr<ColumnVector>> &column_vectors)
+      const override;
 
 #ifdef QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
   AggregationState* accumulateValueAccessor(
@@ -153,18 +150,20 @@ class AggregationHandleMin : public AggregationConcreteHandle {
   void mergeStates(const AggregationState &source,
                    AggregationState *destination) const override;
 
-  void mergeStatesFast(const uint8_t *source,
-                   uint8_t *destination) const override;
+  void mergeStatesFast(const std::uint8_t *source,
+                       std::uint8_t *destination) const override;
 
   TypedValue finalize(const AggregationState &state) const override {
-    return static_cast<const AggregationStateMin&>(state).min_;
+    return static_cast<const AggregationStateMin &>(state).min_;
   }
 
-  inline TypedValue finalizeHashTableEntry(const AggregationState &state) const {
-    return static_cast<const AggregationStateMin&>(state).min_;
+  inline TypedValue finalizeHashTableEntry(
+      const AggregationState &state) const {
+    return static_cast<const AggregationStateMin &>(state).min_;
   }
 
-  inline TypedValue finalizeHashTableEntryFast(const std::uint8_t *byte_ptr) const {
+  inline TypedValue finalizeHashTableEntryFast(
+      const std::uint8_t *byte_ptr) const {
     const TypedValue *min_ptr = reinterpret_cast<const TypedValue *>(byte_ptr);
     return TypedValue(*min_ptr);
   }
@@ -175,24 +174,25 @@ class AggregationHandleMin : public AggregationConcreteHandle {
       int index) const override;
 
   /**
-   * @brief Implementation of AggregationHandle::aggregateOnDistinctifyHashTableForSingle()
+   * @brief Implementation of
+   * AggregationHandle::aggregateOnDistinctifyHashTableForSingle()
    *        for MIN aggregation.
    */
   AggregationState* aggregateOnDistinctifyHashTableForSingle(
-      const AggregationStateHashTableBase &distinctify_hash_table) const override;
+      const AggregationStateHashTableBase &distinctify_hash_table)
+      const override;
 
   /**
-   * @brief Implementation of AggregationHandle::aggregateOnDistinctifyHashTableForGroupBy()
+   * @brief Implementation of
+   * AggregationHandle::aggregateOnDistinctifyHashTableForGroupBy()
    *        for MIN aggregation.
    */
   void aggregateOnDistinctifyHashTableForGroupBy(
       const AggregationStateHashTableBase &distinctify_hash_table,
       AggregationStateHashTableBase *aggregation_hash_table,
-      int index) const override;
+      std::size_t index) const override;
 
-  size_t getPayloadSize() const override {
-      return sizeof(TypedValue);
-  }
+  std::size_t getPayloadSize() const override { return sizeof(TypedValue); }
 
  private:
   friend class AggregateFunctionMin;
@@ -205,23 +205,28 @@ class AggregationHandleMin : public AggregationConcreteHandle {
   explicit AggregationHandleMin(const Type &type);
 
   /**
-   * @brief compare the value with min_ and update it if the value is smaller than
+   * @brief compare the value with min_ and update it if the value is smaller
+   *than
    *        current minimum. NULLs are ignored.
    *
    * @param value A TypedValue to compare.
    **/
-  inline void compareAndUpdate(AggregationStateMin *state, const TypedValue &value) const {
+  inline void compareAndUpdate(AggregationStateMin *state,
+                               const TypedValue &value) const {
     if (value.isNull()) return;
 
     SpinMutexLock lock(state->mutex_);
-    if (state->min_.isNull() || fast_comparator_->compareTypedValues(value, state->min_)) {
+    if (state->min_.isNull() ||
+        fast_comparator_->compareTypedValues(value, state->min_)) {
       state->min_ = value;
     }
   }
 
-  inline void compareAndUpdateFast(TypedValue *min_ptr, const TypedValue &value) const {
+  inline void compareAndUpdateFast(TypedValue *min_ptr,
+                                   const TypedValue &value) const {
     if (value.isNull()) return;
-    if (min_ptr->isNull() || fast_comparator_->compareTypedValues(value, *min_ptr)) {
+    if (min_ptr->isNull() ||
+        fast_comparator_->compareTypedValues(value, *min_ptr)) {
       *min_ptr = value;
     }
   }
@@ -229,7 +234,7 @@ class AggregationHandleMin : public AggregationConcreteHandle {
   const Type &type_;
   std::unique_ptr<UncheckedComparator> fast_comparator_;
 
-  bool block_update;
+  bool block_update_;
 
   DISALLOW_COPY_AND_ASSIGN(AggregationHandleMin);
 };
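The AggregationHandleMin diff above switches the handle to a payload-style interface: initPayload() writes an initial state into a raw byte region inside a hash-table bucket, updateState() folds one tuple's value into it (unless updates are blocked), and finalizeHashTableEntryFast() reads the result back out. A standalone sketch of that shape, under simplified assumptions: the payload is a plain std::int64_t minimum rather than a TypedValue, and MinHandleSketch is a hypothetical stand-in, not the real class.

// Sketch only: a MIN "handle" operating on a raw byte payload.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <limits>

struct MinHandleSketch {
  static constexpr std::size_t kPayloadSize = sizeof(std::int64_t);

  void initPayload(std::uint8_t *byte_ptr) const {
    // Start from the largest representable value so any input lowers it.
    const std::int64_t init = std::numeric_limits<std::int64_t>::max();
    std::memcpy(byte_ptr, &init, sizeof(init));
  }

  void updateState(std::int64_t value, std::uint8_t *byte_ptr) const {
    std::int64_t current;
    std::memcpy(&current, byte_ptr, sizeof(current));
    current = std::min(current, value);
    std::memcpy(byte_ptr, &current, sizeof(current));
  }

  std::int64_t finalize(const std::uint8_t *byte_ptr) const {
    std::int64_t result;
    std::memcpy(&result, byte_ptr, sizeof(result));
    return result;
  }
};

int main() {
  std::uint8_t payload[MinHandleSketch::kPayloadSize];
  MinHandleSketch handle;
  handle.initPayload(payload);
  for (std::int64_t v : {7, -3, 12}) handle.updateState(v, payload);
  std::cout << handle.finalize(payload) << "\n";  // prints -3
  return 0;
}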

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/dad7d6f3/expressions/aggregation/AggregationHandleSum.cpp
----------------------------------------------------------------------
diff --git a/expressions/aggregation/AggregationHandleSum.cpp b/expressions/aggregation/AggregationHandleSum.cpp
index e625fb1..e981200 100644
--- a/expressions/aggregation/AggregationHandleSum.cpp
+++ b/expressions/aggregation/AggregationHandleSum.cpp
@@ -43,7 +43,7 @@ namespace quickstep {
 class StorageManager;
 
 AggregationHandleSum::AggregationHandleSum(const Type &type)
-    : argument_type_(type), block_update(false) {
+    : argument_type_(type), block_update_(false) {
   // We sum Int as Long and Float as Double so that we have more headroom when
   // adding many values.
   TypeID type_precision_id;
@@ -66,11 +66,13 @@ AggregationHandleSum::AggregationHandleSum(const Type &type)
 
   // Make operators to do arithmetic:
   // Add operator for summing argument values.
-  fast_operator_.reset(BinaryOperationFactory::GetBinaryOperation(BinaryOperationID::kAdd)
-                       .makeUncheckedBinaryOperatorForTypes(sum_type, argument_type_));
+  fast_operator_.reset(
+      BinaryOperationFactory::GetBinaryOperation(BinaryOperationID::kAdd)
+          .makeUncheckedBinaryOperatorForTypes(sum_type, argument_type_));
   // Add operator for merging states.
-  merge_operator_.reset(BinaryOperationFactory::GetBinaryOperation(BinaryOperationID::kAdd)
-                        .makeUncheckedBinaryOperatorForTypes(sum_type, sum_type));
+  merge_operator_.reset(
+      BinaryOperationFactory::GetBinaryOperation(BinaryOperationID::kAdd)
+          .makeUncheckedBinaryOperatorForTypes(sum_type, sum_type));
 
   // Result is nullable, because SUM() over 0 values (or all NULL values) is
   // NULL.
@@ -79,14 +81,11 @@ AggregationHandleSum::AggregationHandleSum(const Type &type)
 
 AggregationStateHashTableBase* AggregationHandleSum::createGroupByHashTable(
     const HashTableImplType hash_table_impl,
-    const std::vector<const Type*> &group_by_types,
+    const std::vector<const Type *> &group_by_types,
     const std::size_t estimated_num_groups,
     StorageManager *storage_manager) const {
   return AggregationStateHashTableFactory<AggregationStateSum>::CreateResizable(
-      hash_table_impl,
-      group_by_types,
-      estimated_num_groups,
-      storage_manager);
+      hash_table_impl, group_by_types, estimated_num_groups, storage_manager);
 }
 
 AggregationState* AggregationHandleSum::accumulateColumnVectors(
@@ -95,9 +94,7 @@ AggregationState* AggregationHandleSum::accumulateColumnVectors(
       << "Got wrong number of ColumnVectors for SUM: " << column_vectors.size();
   std::size_t num_tuples = 0;
   TypedValue cv_sum = fast_operator_->accumulateColumnVector(
-      blank_state_.sum_,
-      *column_vectors.front(),
-      &num_tuples);
+      blank_state_.sum_, *column_vectors.front(), &num_tuples);
   return new AggregationStateSum(std::move(cv_sum), num_tuples == 0);
 }
 
@@ -110,10 +107,7 @@ AggregationState* AggregationHandleSum::accumulateValueAccessor(
 
   std::size_t num_tuples = 0;
   TypedValue va_sum = fast_operator_->accumulateValueAccessor(
-      blank_state_.sum_,
-      accessor,
-      accessor_ids.front(),
-      &num_tuples);
+      blank_state_.sum_, accessor, accessor_ids.front(), &num_tuples);
   return new AggregationStateSum(std::move(va_sum), num_tuples == 0);
 }
 #endif
@@ -127,31 +121,37 @@ void AggregationHandleSum::aggregateValueAccessorIntoHashTable(
       << "Got wrong number of arguments for SUM: " << argument_ids.size();
 }
 
-void AggregationHandleSum::mergeStates(
-    const AggregationState &source,
-    AggregationState *destination) const {
-  const AggregationStateSum &sum_source = static_cast<const AggregationStateSum&>(source);
-  AggregationStateSum *sum_destination = static_cast<AggregationStateSum*>(destination);
+void AggregationHandleSum::mergeStates(const AggregationState &source,
+                                       AggregationState *destination) const {
+  const AggregationStateSum &sum_source =
+      static_cast<const AggregationStateSum &>(source);
+  AggregationStateSum *sum_destination =
+      static_cast<AggregationStateSum *>(destination);
 
   SpinMutexLock lock(sum_destination->mutex_);
-  sum_destination->sum_ = merge_operator_->applyToTypedValues(sum_destination->sum_,
-                                                              sum_source.sum_);
+  sum_destination->sum_ = merge_operator_->applyToTypedValues(
+      sum_destination->sum_, sum_source.sum_);
   sum_destination->null_ = sum_destination->null_ && sum_source.null_;
 }
 
-void AggregationHandleSum::mergeStatesFast(
-    const uint8_t *source,
-    uint8_t *destination) const {
-    const TypedValue *src_sum_ptr = reinterpret_cast<const TypedValue *>(source+blank_state_.sum_offset);
-    const bool *src_null_ptr = reinterpret_cast<const bool *>(source+blank_state_.null_offset);
-    TypedValue *dst_sum_ptr = reinterpret_cast<TypedValue *>(destination+blank_state_.sum_offset);
-    bool *dst_null_ptr = reinterpret_cast<bool *>(destination+blank_state_.null_offset);
-    *dst_sum_ptr = merge_operator_->applyToTypedValues(*dst_sum_ptr, *src_sum_ptr);
-    *dst_null_ptr = (*dst_null_ptr) && (*src_null_ptr);
+void AggregationHandleSum::mergeStatesFast(const std::uint8_t *source,
+                                           std::uint8_t *destination) const {
+  const TypedValue *src_sum_ptr =
+      reinterpret_cast<const TypedValue *>(source + blank_state_.sum_offset_);
+  const bool *src_null_ptr =
+      reinterpret_cast<const bool *>(source + blank_state_.null_offset_);
+  TypedValue *dst_sum_ptr =
+      reinterpret_cast<TypedValue *>(destination + blank_state_.sum_offset_);
+  bool *dst_null_ptr =
+      reinterpret_cast<bool *>(destination + blank_state_.null_offset_);
+  *dst_sum_ptr =
+      merge_operator_->applyToTypedValues(*dst_sum_ptr, *src_sum_ptr);
+  *dst_null_ptr = (*dst_null_ptr) && (*src_null_ptr);
 }
 
 TypedValue AggregationHandleSum::finalize(const AggregationState &state) const {
-  const AggregationStateSum &agg_state = static_cast<const AggregationStateSum&>(state);
+  const AggregationStateSum &agg_state =
+      static_cast<const AggregationStateSum &>(state);
   if (agg_state.null_) {
     // SUM() over no values is NULL.
     return result_type_->makeNullValue();
@@ -165,31 +165,26 @@ ColumnVector* AggregationHandleSum::finalizeHashTable(
     std::vector<std::vector<TypedValue>> *group_by_keys,
     int index) const {
   return finalizeHashTableHelperFast<AggregationHandleSum,
-                                 AggregationStateFastHashTable>(
-      *result_type_,
-      hash_table,
-      group_by_keys,
-      index);
+                                     AggregationStateFastHashTable>(
+      *result_type_, hash_table, group_by_keys, index);
 }
 
-AggregationState* AggregationHandleSum::aggregateOnDistinctifyHashTableForSingle(
+AggregationState*
+AggregationHandleSum::aggregateOnDistinctifyHashTableForSingle(
     const AggregationStateHashTableBase &distinctify_hash_table) const {
   return aggregateOnDistinctifyHashTableForSingleUnaryHelperFast<
       AggregationHandleSum,
-      AggregationStateSum>(
-          distinctify_hash_table);
+      AggregationStateSum>(distinctify_hash_table);
 }
 
 void AggregationHandleSum::aggregateOnDistinctifyHashTableForGroupBy(
     const AggregationStateHashTableBase &distinctify_hash_table,
     AggregationStateHashTableBase *aggregation_hash_table,
-    int index) const {
+    std::size_t index) const {
   aggregateOnDistinctifyHashTableForGroupByUnaryHelperFast<
       AggregationHandleSum,
       AggregationStateFastHashTable>(
-          distinctify_hash_table,
-          aggregation_hash_table,
-          index);
+      distinctify_hash_table, aggregation_hash_table, index);
 }
 
 }  // namespace quickstep
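mergeStatesFast() in the diff above works because the blank state records the byte offsets of its members, so two raw payloads that share that layout can be merged without constructing AggregationState objects. A simplified, self-contained sketch of that pattern, using offsetof on a plain struct (the real code derives the offsets by pointer arithmetic on a blank instance) and double/bool in place of TypedValue:

// Sketch only: merge two sum payloads through recorded member offsets.
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>

struct SumPayload {
  double sum_;
  bool null_;
};

struct SumHandleSketch {
  // Byte offsets of the members inside the payload.
  std::size_t sum_offset_ = offsetof(SumPayload, sum_);
  std::size_t null_offset_ = offsetof(SumPayload, null_);

  void mergeStatesFast(const std::uint8_t *source,
                       std::uint8_t *destination) const {
    double src_sum, dst_sum;
    bool src_null, dst_null;
    std::memcpy(&src_sum, source + sum_offset_, sizeof(src_sum));
    std::memcpy(&src_null, source + null_offset_, sizeof(src_null));
    std::memcpy(&dst_sum, destination + sum_offset_, sizeof(dst_sum));
    std::memcpy(&dst_null, destination + null_offset_, sizeof(dst_null));
    dst_sum += src_sum;                 // merge the running sums.
    dst_null = dst_null && dst_null && src_null;  // NULL only if both were NULL.
    std::memcpy(destination + sum_offset_, &dst_sum, sizeof(dst_sum));
    std::memcpy(destination + null_offset_, &dst_null, sizeof(dst_null));
  }
};

int main() {
  SumPayload a{10.0, false}, b{2.5, false};
  SumHandleSketch handle;
  handle.mergeStatesFast(reinterpret_cast<const std::uint8_t *>(&a),
                         reinterpret_cast<std::uint8_t *>(&b));
  std::cout << b.sum_ << " " << b.null_ << "\n";  // prints 12.5 0
  return 0;
}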

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/dad7d6f3/expressions/aggregation/AggregationHandleSum.hpp
----------------------------------------------------------------------
diff --git a/expressions/aggregation/AggregationHandleSum.hpp b/expressions/aggregation/AggregationHandleSum.hpp
index bf0eab0..35b9f48 100644
--- a/expressions/aggregation/AggregationHandleSum.hpp
+++ b/expressions/aggregation/AggregationHandleSum.hpp
@@ -28,8 +28,8 @@
 #include "catalog/CatalogTypedefs.hpp"
 #include "expressions/aggregation/AggregationConcreteHandle.hpp"
 #include "expressions/aggregation/AggregationHandle.hpp"
-#include "storage/HashTableBase.hpp"
 #include "storage/FastHashTable.hpp"
+#include "storage/HashTableBase.hpp"
 #include "threading/SpinMutex.hpp"
 #include "types/Type.hpp"
 #include "types/TypedValue.hpp"
@@ -59,27 +59,31 @@ class AggregationStateSum : public AggregationState {
   AggregationStateSum(const AggregationStateSum &orig)
       : sum_(orig.sum_),
         null_(orig.null_),
-        sum_offset(orig.sum_offset),
-        null_offset(orig.null_offset) {
+        sum_offset_(orig.sum_offset_),
+        null_offset_(orig.null_offset_) {}
+
+  std::size_t getPayloadSize() const {
+    std::size_t p1 = reinterpret_cast<std::size_t>(&sum_);
+    std::size_t p2 = reinterpret_cast<std::size_t>(&mutex_);
+    return (p2 - p1);
+  }
+
+  const std::uint8_t* getPayloadAddress() const {
+    return reinterpret_cast<const uint8_t *>(&sum_);
   }
 
  private:
   friend class AggregationHandleSum;
 
   AggregationStateSum()
-      : sum_(0), null_(true), sum_offset(0),
-        null_offset(reinterpret_cast<uint8_t *>(&null_)-reinterpret_cast<uint8_t *>(&sum_)) {
-  }
+      : sum_(0),
+        null_(true),
+        sum_offset_(0),
+        null_offset_(reinterpret_cast<std::uint8_t *>(&null_) -
+                     reinterpret_cast<std::uint8_t *>(&sum_)) {}
 
   AggregationStateSum(TypedValue &&sum, const bool is_null)
-      : sum_(std::move(sum)), null_(is_null) {
-  }
-
-  size_t getPayloadSize() const {
-     size_t p1 = reinterpret_cast<size_t>(&sum_);
-     size_t p2 = reinterpret_cast<size_t>(&mutex_);
-     return (p2-p1);
-  }
+      : sum_(std::move(sum)), null_(is_null) {}
 
   // TODO(shoban): We might want to specialize sum_ to use atomics for int types
   // similar to in AggregationStateCount.
@@ -87,17 +91,15 @@ class AggregationStateSum : public AggregationState {
   bool null_;
   SpinMutex mutex_;
 
-  int sum_offset, null_offset;
+  int sum_offset_, null_offset_;
 };
 
-
 /**
  * @brief An aggregation handle for SUM.
  **/
 class AggregationHandleSum : public AggregationConcreteHandle {
  public:
-  ~AggregationHandleSum() override {
-  }
+  ~AggregationHandleSum() override {}
 
   AggregationState* createInitialState() const override {
     return new AggregationStateSum(blank_state_);
@@ -105,11 +107,12 @@ class AggregationHandleSum : public AggregationConcreteHandle {
 
   AggregationStateHashTableBase* createGroupByHashTable(
       const HashTableImplType hash_table_impl,
-      const std::vector<const Type*> &group_by_types,
+      const std::vector<const Type *> &group_by_types,
       const std::size_t estimated_num_groups,
       StorageManager *storage_manager) const override;
 
-  inline void iterateUnaryInl(AggregationStateSum *state, const TypedValue &value) const {
+  inline void iterateUnaryInl(AggregationStateSum *state,
+                              const TypedValue &value) const {
     DCHECK(value.isPlausibleInstanceOf(argument_type_.getSignature()));
     if (value.isNull()) return;
 
@@ -118,37 +121,41 @@ class AggregationHandleSum : public AggregationConcreteHandle {
     state->null_ = false;
   }
 
-  inline void iterateUnaryInlFast(const TypedValue &value, uint8_t *byte_ptr) const {
+  inline void iterateUnaryInlFast(const TypedValue &value,
+                                  std::uint8_t *byte_ptr) const {
     DCHECK(value.isPlausibleInstanceOf(argument_type_.getSignature()));
     if (value.isNull()) return;
-    TypedValue *sum_ptr = reinterpret_cast<TypedValue *>(byte_ptr + blank_state_.sum_offset);
-    bool *null_ptr = reinterpret_cast<bool *>(byte_ptr + blank_state_.null_offset);
+    TypedValue *sum_ptr =
+        reinterpret_cast<TypedValue *>(byte_ptr + blank_state_.sum_offset_);
+    bool *null_ptr =
+        reinterpret_cast<bool *>(byte_ptr + blank_state_.null_offset_);
     *sum_ptr = fast_operator_->applyToTypedValues(*sum_ptr, value);
     *null_ptr = false;
   }
 
-  inline void iterateInlFast(const std::vector<TypedValue> &arguments, uint8_t *byte_ptr) const override {
-     if (block_update) return;
-     iterateUnaryInlFast(arguments.front(), byte_ptr);
+  inline void updateState(const std::vector<TypedValue> &arguments,
+                          std::uint8_t *byte_ptr) const override {
+    if (!block_update_) {
+      iterateUnaryInlFast(arguments.front(), byte_ptr);
+    }
   }
 
-  void BlockUpdate() override {
-      block_update = true;
-  }
+  void blockUpdate() override { block_update_ = true; }
 
-  void AllowUpdate() override {
-      block_update = false;
-  }
+  void allowUpdate() override { block_update_ = false; }
 
-  void initPayload(uint8_t *byte_ptr) const override {
-    TypedValue *sum_ptr = reinterpret_cast<TypedValue *>(byte_ptr + blank_state_.sum_offset);
-    bool *null_ptr = reinterpret_cast<bool *>(byte_ptr + blank_state_.null_offset);
+  void initPayload(std::uint8_t *byte_ptr) const override {
+    TypedValue *sum_ptr =
+        reinterpret_cast<TypedValue *>(byte_ptr + blank_state_.sum_offset_);
+    bool *null_ptr =
+        reinterpret_cast<bool *>(byte_ptr + blank_state_.null_offset_);
     *sum_ptr = blank_state_.sum_;
     *null_ptr = true;
   }
 
   AggregationState* accumulateColumnVectors(
-      const std::vector<std::unique_ptr<ColumnVector>> &column_vectors) const override;
+      const std::vector<std::unique_ptr<ColumnVector>> &column_vectors)
+      const override;
 
 #ifdef QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
   AggregationState* accumulateValueAccessor(
@@ -165,18 +172,21 @@ class AggregationHandleSum : public AggregationConcreteHandle {
   void mergeStates(const AggregationState &source,
                    AggregationState *destination) const override;
 
-  void mergeStatesFast(const uint8_t *source,
-                   uint8_t *destination) const override;
+  void mergeStatesFast(const std::uint8_t *source,
+                       std::uint8_t *destination) const override;
 
   TypedValue finalize(const AggregationState &state) const override;
 
-  inline TypedValue finalizeHashTableEntry(const AggregationState &state) const {
-    return static_cast<const AggregationStateSum&>(state).sum_;
+  inline TypedValue finalizeHashTableEntry(
+      const AggregationState &state) const {
+    return static_cast<const AggregationStateSum &>(state).sum_;
   }
 
-  inline TypedValue finalizeHashTableEntryFast(const uint8_t *byte_ptr) const {
-    uint8_t *value_ptr = const_cast<uint8_t*>(byte_ptr);
-    TypedValue *sum_ptr = reinterpret_cast<TypedValue *>(value_ptr + blank_state_.sum_offset);
+  inline TypedValue finalizeHashTableEntryFast(
+      const std::uint8_t *byte_ptr) const {
+    std::uint8_t *value_ptr = const_cast<std::uint8_t *>(byte_ptr);
+    TypedValue *sum_ptr =
+        reinterpret_cast<TypedValue *>(value_ptr + blank_state_.sum_offset_);
     return *sum_ptr;
   }
 
@@ -186,23 +196,26 @@ class AggregationHandleSum : public AggregationConcreteHandle {
       int index) const override;
 
   /**
-   * @brief Implementation of AggregationHandle::aggregateOnDistinctifyHashTableForSingle()
+   * @brief Implementation of
+   * AggregationHandle::aggregateOnDistinctifyHashTableForSingle()
    *        for SUM aggregation.
    */
   AggregationState* aggregateOnDistinctifyHashTableForSingle(
-      const AggregationStateHashTableBase &distinctify_hash_table) const override;
+      const AggregationStateHashTableBase &distinctify_hash_table)
+      const override;
 
   /**
-   * @brief Implementation of AggregationHandle::aggregateOnDistinctifyHashTableForGroupBy()
+   * @brief Implementation of
+   * AggregationHandle::aggregateOnDistinctifyHashTableForGroupBy()
    *        for SUM aggregation.
    */
   void aggregateOnDistinctifyHashTableForGroupBy(
       const AggregationStateHashTableBase &distinctify_hash_table,
       AggregationStateHashTableBase *aggregation_hash_table,
-      int index) const override;
+      std::size_t index) const override;
 
-  size_t getPayloadSize() const override {
-      return blank_state_.getPayloadSize();
+  std::size_t getPayloadSize() const override {
+    return blank_state_.getPayloadSize();
   }
 
  private:
@@ -221,7 +234,7 @@ class AggregationHandleSum : public AggregationConcreteHandle {
   std::unique_ptr<UncheckedBinaryOperator> fast_operator_;
   std::unique_ptr<UncheckedBinaryOperator> merge_operator_;
 
-  bool block_update;
+  bool block_update_;
 
   DISALLOW_COPY_AND_ASSIGN(AggregationHandleSum);
 };
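The getPayloadSize()/getPayloadAddress() pair added to AggregationStateSum treats everything from sum_ up to (but not including) mutex_ as the bytes that need to live inside a hash-table bucket, so the size is simply the distance between the two member addresses. A hypothetical stand-in illustrating that computation (long and std::mutex are placeholders for TypedValue and SpinMutex; the pointer arithmetic mirrors the code above):

// Sketch only: payload size as the span from the first payload member to the mutex.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <mutex>

struct SumStateLayout {
  long sum_ = 0;      // stand-in for the TypedValue sum_
  bool null_ = true;  // NULL flag, part of the payload
  std::mutex mutex_;  // stand-in for SpinMutex; deliberately excluded from the payload

  std::size_t payloadSize() const {
    return static_cast<std::size_t>(
        reinterpret_cast<const std::uint8_t *>(&mutex_) -
        reinterpret_cast<const std::uint8_t *>(&sum_));
  }

  const std::uint8_t* payloadAddress() const {
    return reinterpret_cast<const std::uint8_t *>(&sum_);
  }
};

int main() {
  SumStateLayout state;
  // Typically sizeof(long) + sizeof(bool) rounded up to the mutex's alignment.
  std::cout << state.payloadSize() << "\n";
  return 0;
}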

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/dad7d6f3/expressions/aggregation/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/expressions/aggregation/CMakeLists.txt b/expressions/aggregation/CMakeLists.txt
index 33ce6e9..8e3a912 100644
--- a/expressions/aggregation/CMakeLists.txt
+++ b/expressions/aggregation/CMakeLists.txt
@@ -278,45 +278,46 @@ target_link_libraries(quickstep_expressions_aggregation
 # Tests:
 
 # Unified executable to amortize the cost of linking.
-# add_executable(AggregationHandle_tests
-#               "${CMAKE_CURRENT_SOURCE_DIR}/tests/AggregationHandleAvg_unittest.cpp"
-#               "${CMAKE_CURRENT_SOURCE_DIR}/tests/AggregationHandleCount_unittest.cpp"
-#               "${CMAKE_CURRENT_SOURCE_DIR}/tests/AggregationHandleMax_unittest.cpp"
-#               "${CMAKE_CURRENT_SOURCE_DIR}/tests/AggregationHandleMin_unittest.cpp"
-#               "${CMAKE_CURRENT_SOURCE_DIR}/tests/AggregationHandleSum_unittest.cpp")
-# target_link_libraries(AggregationHandle_tests
-#                      gtest
-#                      gtest_main
-#                      quickstep_catalog_CatalogTypedefs
-#                      quickstep_expressions_aggregation_AggregateFunction
-#                      quickstep_expressions_aggregation_AggregateFunctionFactory
-#                      quickstep_expressions_aggregation_AggregationHandle
-#                      quickstep_expressions_aggregation_AggregationHandleAvg
-#                      quickstep_expressions_aggregation_AggregationHandleCount
-#                      quickstep_expressions_aggregation_AggregationHandleMax
-#                      quickstep_expressions_aggregation_AggregationHandleMin
-#                      quickstep_expressions_aggregation_AggregationHandleSum
-#                      quickstep_expressions_aggregation_AggregationID
-#                      quickstep_storage_HashTableBase
-#                      quickstep_storage_StorageManager
-#                      quickstep_types_CharType
-#                      quickstep_types_DateOperatorOverloads
-#                      quickstep_types_DatetimeIntervalType
-#                      quickstep_types_DatetimeType
-#                      quickstep_types_DoubleType
-#                      quickstep_types_FloatType
-#                      quickstep_types_IntType
-#                      quickstep_types_IntervalLit
-#                      quickstep_types_LongType
-#                      quickstep_types_Type
-#                      quickstep_types_TypeFactory
-#                      quickstep_types_TypeID
-#                      quickstep_types_TypedValue
-#                      quickstep_types_VarCharType
-#                      quickstep_types_YearMonthIntervalType
-#                      quickstep_types_containers_ColumnVector
-#                      quickstep_types_containers_ColumnVectorsValueAccessor
-#                      quickstep_types_operations_comparisons_Comparison
-#                      quickstep_types_operations_comparisons_ComparisonFactory
-#                      quickstep_types_operations_comparisons_ComparisonID)
-#add_test(AggregationHandle_tests AggregationHandle_tests)
+add_executable(AggregationHandle_tests
+               "${CMAKE_CURRENT_SOURCE_DIR}/tests/AggregationHandleAvg_unittest.cpp"
+               "${CMAKE_CURRENT_SOURCE_DIR}/tests/AggregationHandleCount_unittest.cpp"
+               "${CMAKE_CURRENT_SOURCE_DIR}/tests/AggregationHandleMax_unittest.cpp"
+               "${CMAKE_CURRENT_SOURCE_DIR}/tests/AggregationHandleMin_unittest.cpp"
+               "${CMAKE_CURRENT_SOURCE_DIR}/tests/AggregationHandleSum_unittest.cpp")
+target_link_libraries(AggregationHandle_tests
+                      gtest
+                      gtest_main
+                      quickstep_catalog_CatalogTypedefs
+                      quickstep_expressions_aggregation_AggregateFunction
+                      quickstep_expressions_aggregation_AggregateFunctionFactory
+                      quickstep_expressions_aggregation_AggregationHandle
+                      quickstep_expressions_aggregation_AggregationHandleAvg
+                      quickstep_expressions_aggregation_AggregationHandleCount
+                      quickstep_expressions_aggregation_AggregationHandleMax
+                      quickstep_expressions_aggregation_AggregationHandleMin
+                      quickstep_expressions_aggregation_AggregationHandleSum
+                      quickstep_expressions_aggregation_AggregationID
+                      quickstep_storage_AggregationOperationState
+                      quickstep_storage_HashTableBase
+                      quickstep_storage_StorageManager
+                      quickstep_types_CharType
+                      quickstep_types_DateOperatorOverloads
+                      quickstep_types_DatetimeIntervalType
+                      quickstep_types_DatetimeType
+                      quickstep_types_DoubleType
+                      quickstep_types_FloatType
+                      quickstep_types_IntType
+                      quickstep_types_IntervalLit
+                      quickstep_types_LongType
+                      quickstep_types_Type
+                      quickstep_types_TypeFactory
+                      quickstep_types_TypeID
+                      quickstep_types_TypedValue
+                      quickstep_types_VarCharType
+                      quickstep_types_YearMonthIntervalType
+                      quickstep_types_containers_ColumnVector
+                      quickstep_types_containers_ColumnVectorsValueAccessor
+                      quickstep_types_operations_comparisons_Comparison
+                      quickstep_types_operations_comparisons_ComparisonFactory
+                      quickstep_types_operations_comparisons_ComparisonID)
+add_test(AggregationHandle_tests AggregationHandle_tests)

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/dad7d6f3/expressions/aggregation/tests/AggregationHandleAvg_unittest.cpp
----------------------------------------------------------------------
diff --git a/expressions/aggregation/tests/AggregationHandleAvg_unittest.cpp b/expressions/aggregation/tests/AggregationHandleAvg_unittest.cpp
index fd82cba..b616652 100644
--- a/expressions/aggregation/tests/AggregationHandleAvg_unittest.cpp
+++ b/expressions/aggregation/tests/AggregationHandleAvg_unittest.cpp
@@ -26,6 +26,8 @@
 #include "expressions/aggregation/AggregationHandle.hpp"
 #include "expressions/aggregation/AggregationHandleAvg.hpp"
 #include "expressions/aggregation/AggregationID.hpp"
+#include "storage/AggregationOperationState.hpp"
+#include "storage/FastHashTableFactory.hpp"
 #include "storage/StorageManager.hpp"
 #include "types/CharType.hpp"
 #include "types/DateOperatorOverloads.hpp"
@@ -51,51 +53,56 @@
 
 namespace quickstep {
 
-class AggregationHandleAvgTest : public::testing::Test {
+class AggregationHandleAvgTest : public ::testing::Test {
  protected:
   static const int kNumSamples = 100;
 
   // Helper method that calls AggregationHandleAvg::iterateUnaryInl() to
   // aggregate 'value' into '*state'.
   void iterateHandle(AggregationState *state, const TypedValue &value) {
-    static_cast<const AggregationHandleAvg&>(*aggregation_handle_avg_).iterateUnaryInl(
-        static_cast<AggregationStateAvg*>(state),
-        value);
+    static_cast<const AggregationHandleAvg &>(*aggregation_handle_avg_)
+        .iterateUnaryInl(static_cast<AggregationStateAvg *>(state), value);
   }
 
   void initializeHandle(const Type &type) {
     aggregation_handle_avg_.reset(
-        AggregateFunctionFactory::Get(AggregationID::kAvg).createHandle(
-            std::vector<const Type*>(1, &type)));
+        AggregateFunctionFactory::Get(AggregationID::kAvg)
+            .createHandle(std::vector<const Type *>(1, &type)));
     aggregation_handle_avg_state_.reset(
         aggregation_handle_avg_->createInitialState());
   }
 
   static bool ApplyToTypesTest(TypeID typeID) {
-    const Type &type = (typeID == kChar || typeID == kVarChar) ?
-        TypeFactory::GetType(typeID, static_cast<std::size_t>(10)) :
-        TypeFactory::GetType(typeID);
+    const Type &type =
+        (typeID == kChar || typeID == kVarChar)
+            ? TypeFactory::GetType(typeID, static_cast<std::size_t>(10))
+            : TypeFactory::GetType(typeID);
 
-    return AggregateFunctionFactory::Get(AggregationID::kAvg).canApplyToTypes(
-        std::vector<const Type*>(1, &type));
+    return AggregateFunctionFactory::Get(AggregationID::kAvg)
+        .canApplyToTypes(std::vector<const Type *>(1, &type));
   }
 
   static bool ResultTypeForArgumentTypeTest(TypeID input_type_id,
                                             TypeID output_type_id) {
-    const Type *result_type
-        = AggregateFunctionFactory::Get(AggregationID::kAvg).resultTypeForArgumentTypes(
-            std::vector<const Type*>(1, &TypeFactory::GetType(input_type_id)));
+    const Type *result_type =
+        AggregateFunctionFactory::Get(AggregationID::kAvg)
+            .resultTypeForArgumentTypes(std::vector<const Type *>(
+                1, &TypeFactory::GetType(input_type_id)));
     return (result_type->getTypeID() == output_type_id);
   }
 
   template <typename CppType>
-  static void CheckAvgValue(
-      CppType expected,
-      const AggregationHandle &handle,
-      const AggregationState &state) {
+  static void CheckAvgValue(CppType expected,
+                            const AggregationHandle &handle,
+                            const AggregationState &state) {
     EXPECT_EQ(expected, handle.finalize(state).getLiteral<CppType>());
   }
 
+  template <typename CppType>
+  static void CheckAvgValue(CppType expected, const TypedValue &value) {
+    EXPECT_EQ(expected, value.getLiteral<CppType>());
+  }
+
   // Static templated method to set a meaningful value for each data type.
   template <typename CppType>
   static void SetDataType(int value, CppType *data) {
@@ -106,7 +113,9 @@ class AggregationHandleAvgTest : public::testing::Test {
   void checkAggregationAvgGeneric() {
     const GenericType &type = GenericType::Instance(true);
     initializeHandle(type);
-    EXPECT_TRUE(aggregation_handle_avg_->finalize(*aggregation_handle_avg_state_).isNull());
+    EXPECT_TRUE(
+        aggregation_handle_avg_->finalize(*aggregation_handle_avg_state_)
+            .isNull());
 
     typename GenericType::cpptype val;
     typename GenericType::cpptype sum;
@@ -117,15 +126,16 @@ class AggregationHandleAvgTest : public::testing::Test {
       if (type.getTypeID() == kInt || type.getTypeID() == kLong) {
         SetDataType(i - 10, &val);
       } else {
-        SetDataType(static_cast<float>(i - 10)/10, &val);
+        SetDataType(static_cast<float>(i - 10) / 10, &val);
       }
       iterateHandle(aggregation_handle_avg_state_.get(), type.makeValue(&val));
       sum += val;
     }
     iterateHandle(aggregation_handle_avg_state_.get(), type.makeNullValue());
-    CheckAvgValue<typename OutputType::cpptype>(static_cast<typename OutputType::cpptype>(sum) / kNumSamples,
-                                                *aggregation_handle_avg_,
-                                                *aggregation_handle_avg_state_);
+    CheckAvgValue<typename OutputType::cpptype>(
+        static_cast<typename OutputType::cpptype>(sum) / kNumSamples,
+        *aggregation_handle_avg_,
+        *aggregation_handle_avg_state_);
 
     // Test mergeStates().
     std::unique_ptr<AggregationState> merge_state(
@@ -138,7 +148,7 @@ class AggregationHandleAvgTest : public::testing::Test {
       if (type.getTypeID() == kInt || type.getTypeID() == kLong) {
         SetDataType(i - 10, &val);
       } else {
-        SetDataType(static_cast<float>(i - 10)/10, &val);
+        SetDataType(static_cast<float>(i - 10) / 10, &val);
       }
       iterateHandle(merge_state.get(), type.makeValue(&val));
       sum += val;
@@ -153,7 +163,8 @@ class AggregationHandleAvgTest : public::testing::Test {
   }
 
   template <typename GenericType>
-  ColumnVector *createColumnVectorGeneric(const Type &type, typename GenericType::cpptype *sum) {
+  ColumnVector* createColumnVectorGeneric(const Type &type,
+                                          typename GenericType::cpptype *sum) {
     NativeColumnVector *column = new NativeColumnVector(type, kNumSamples + 3);
 
     typename GenericType::cpptype val;
@@ -164,12 +175,12 @@ class AggregationHandleAvgTest : public::testing::Test {
       if (type.getTypeID() == kInt || type.getTypeID() == kLong) {
         SetDataType(i - 10, &val);
       } else {
-        SetDataType(static_cast<float>(i - 10)/10, &val);
+        SetDataType(static_cast<float>(i - 10) / 10, &val);
       }
       column->appendTypedValue(type.makeValue(&val));
       *sum += val;
       // One NULL in the middle.
-      if (i == kNumSamples/2) {
+      if (i == kNumSamples / 2) {
         column->appendTypedValue(type.makeNullValue());
       }
     }
@@ -182,12 +193,15 @@ class AggregationHandleAvgTest : public::testing::Test {
   void checkAggregationAvgGenericColumnVector() {
     const GenericType &type = GenericType::Instance(true);
     initializeHandle(type);
-    EXPECT_TRUE(aggregation_handle_avg_->finalize(*aggregation_handle_avg_state_).isNull());
+    EXPECT_TRUE(
+        aggregation_handle_avg_->finalize(*aggregation_handle_avg_state_)
+            .isNull());
 
     typename GenericType::cpptype sum;
     SetDataType(0, &sum);
     std::vector<std::unique_ptr<ColumnVector>> column_vectors;
-    column_vectors.emplace_back(createColumnVectorGeneric<GenericType>(type, &sum));
+    column_vectors.emplace_back(
+        createColumnVectorGeneric<GenericType>(type, &sum));
 
     std::unique_ptr<AggregationState> cv_state(
         aggregation_handle_avg_->accumulateColumnVectors(column_vectors));
@@ -199,7 +213,8 @@ class AggregationHandleAvgTest : public::testing::Test {
         *aggregation_handle_avg_,
         *cv_state);
 
-    aggregation_handle_avg_->mergeStates(*cv_state, aggregation_handle_avg_state_.get());
+    aggregation_handle_avg_->mergeStates(*cv_state,
+                                         aggregation_handle_avg_state_.get());
     CheckAvgValue<typename OutputType::cpptype>(
         static_cast<typename OutputType::cpptype>(sum) / kNumSamples,
         *aggregation_handle_avg_,
@@ -211,16 +226,19 @@ class AggregationHandleAvgTest : public::testing::Test {
   void checkAggregationAvgGenericValueAccessor() {
     const GenericType &type = GenericType::Instance(true);
     initializeHandle(type);
-    EXPECT_TRUE(aggregation_handle_avg_->finalize(*aggregation_handle_avg_state_).isNull());
+    EXPECT_TRUE(
+        aggregation_handle_avg_->finalize(*aggregation_handle_avg_state_)
+            .isNull());
 
     typename GenericType::cpptype sum;
     SetDataType(0, &sum);
-    std::unique_ptr<ColumnVectorsValueAccessor> accessor(new ColumnVectorsValueAccessor());
+    std::unique_ptr<ColumnVectorsValueAccessor> accessor(
+        new ColumnVectorsValueAccessor());
     accessor->addColumn(createColumnVectorGeneric<GenericType>(type, &sum));
 
     std::unique_ptr<AggregationState> va_state(
-        aggregation_handle_avg_->accumulateValueAccessor(accessor.get(),
-                                                         std::vector<attribute_id>(1, 0)));
+        aggregation_handle_avg_->accumulateValueAccessor(
+            accessor.get(), std::vector<attribute_id>(1, 0)));
 
     // Test the state generated directly by accumulateValueAccessor(), and also
     // test after merging back.
@@ -229,7 +247,8 @@ class AggregationHandleAvgTest : public::testing::Test {
         *aggregation_handle_avg_,
         *va_state);
 
-    aggregation_handle_avg_->mergeStates(*va_state, aggregation_handle_avg_state_.get());
+    aggregation_handle_avg_->mergeStates(*va_state,
+                                         aggregation_handle_avg_state_.get());
     CheckAvgValue<typename OutputType::cpptype>(
         static_cast<typename OutputType::cpptype>(sum) / kNumSamples,
         *aggregation_handle_avg_,
@@ -253,12 +272,14 @@ void AggregationHandleAvgTest::CheckAvgValue<double>(
 }
 
 template <>
-void AggregationHandleAvgTest::SetDataType<DatetimeIntervalLit>(int value, DatetimeIntervalLit *data) {
+void AggregationHandleAvgTest::SetDataType<DatetimeIntervalLit>(
+    int value, DatetimeIntervalLit *data) {
   data->interval_ticks = value;
 }
 
 template <>
-void AggregationHandleAvgTest::SetDataType<YearMonthIntervalLit>(int value, YearMonthIntervalLit *data) {
+void AggregationHandleAvgTest::SetDataType<YearMonthIntervalLit>(
+    int value, YearMonthIntervalLit *data) {
   data->months = value;
 }
 
@@ -305,11 +326,13 @@ TEST_F(AggregationHandleAvgTest, DoubleTypeColumnVectorTest) {
 }
 
 TEST_F(AggregationHandleAvgTest, DatetimeIntervalTypeColumnVectorTest) {
-  checkAggregationAvgGenericColumnVector<DatetimeIntervalType, DatetimeIntervalType>();
+  checkAggregationAvgGenericColumnVector<DatetimeIntervalType,
+                                         DatetimeIntervalType>();
 }
 
 TEST_F(AggregationHandleAvgTest, YearMonthIntervalTypeColumnVectorTest) {
-  checkAggregationAvgGenericColumnVector<YearMonthIntervalType, YearMonthIntervalType>();
+  checkAggregationAvgGenericColumnVector<YearMonthIntervalType,
+                                         YearMonthIntervalType>();
 }
 
 #ifdef QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
@@ -330,11 +353,13 @@ TEST_F(AggregationHandleAvgTest, DoubleTypeValueAccessorTest) {
 }
 
 TEST_F(AggregationHandleAvgTest, DatetimeIntervalTypeValueAccessorTest) {
-  checkAggregationAvgGenericValueAccessor<DatetimeIntervalType, DatetimeIntervalType>();
+  checkAggregationAvgGenericValueAccessor<DatetimeIntervalType,
+                                          DatetimeIntervalType>();
 }
 
 TEST_F(AggregationHandleAvgTest, YearMonthIntervalTypeValueAccessorTest) {
-  checkAggregationAvgGenericValueAccessor<YearMonthIntervalType, YearMonthIntervalType>();
+  checkAggregationAvgGenericValueAccessor<YearMonthIntervalType,
+                                          YearMonthIntervalType>();
 }
 #endif  // QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
 
@@ -363,38 +388,53 @@ TEST_F(AggregationHandleAvgDeathTest, WrongTypeTest) {
   double double_val = 0;
   float float_val = 0;
 
-  iterateHandle(aggregation_handle_avg_state_.get(), int_non_null_type.makeValue(&int_val));
+  iterateHandle(aggregation_handle_avg_state_.get(),
+                int_non_null_type.makeValue(&int_val));
 
-  EXPECT_DEATH(iterateHandle(aggregation_handle_avg_state_.get(), long_type.makeValue(&long_val)), "");
-  EXPECT_DEATH(iterateHandle(aggregation_handle_avg_state_.get(), double_type.makeValue(&double_val)), "");
-  EXPECT_DEATH(iterateHandle(aggregation_handle_avg_state_.get(), float_type.makeValue(&float_val)), "");
-  EXPECT_DEATH(iterateHandle(aggregation_handle_avg_state_.get(), char_type.makeValue("asdf", 5)), "");
-  EXPECT_DEATH(iterateHandle(aggregation_handle_avg_state_.get(), varchar_type.makeValue("asdf", 5)), "");
+  EXPECT_DEATH(iterateHandle(aggregation_handle_avg_state_.get(),
+                             long_type.makeValue(&long_val)),
+               "");
+  EXPECT_DEATH(iterateHandle(aggregation_handle_avg_state_.get(),
+                             double_type.makeValue(&double_val)),
+               "");
+  EXPECT_DEATH(iterateHandle(aggregation_handle_avg_state_.get(),
+                             float_type.makeValue(&float_val)),
+               "");
+  EXPECT_DEATH(iterateHandle(aggregation_handle_avg_state_.get(),
+                             char_type.makeValue("asdf", 5)),
+               "");
+  EXPECT_DEATH(iterateHandle(aggregation_handle_avg_state_.get(),
+                             varchar_type.makeValue("asdf", 5)),
+               "");
 
   // Test mergeStates() with incorrectly typed handles.
   std::unique_ptr<AggregationHandle> aggregation_handle_avg_double(
-      AggregateFunctionFactory::Get(AggregationID::kAvg).createHandle(
-          std::vector<const Type*>(1, &double_type)));
+      AggregateFunctionFactory::Get(AggregationID::kAvg)
+          .createHandle(std::vector<const Type *>(1, &double_type)));
   std::unique_ptr<AggregationState> aggregation_state_avg_merge_double(
       aggregation_handle_avg_double->createInitialState());
-  static_cast<const AggregationHandleAvg&>(*aggregation_handle_avg_double).iterateUnaryInl(
-      static_cast<AggregationStateAvg*>(aggregation_state_avg_merge_double.get()),
-      double_type.makeValue(&double_val));
-  EXPECT_DEATH(aggregation_handle_avg_->mergeStates(*aggregation_state_avg_merge_double,
-                                                    aggregation_handle_avg_state_.get()),
-               "");
+  static_cast<const AggregationHandleAvg &>(*aggregation_handle_avg_double)
+      .iterateUnaryInl(static_cast<AggregationStateAvg *>(
+                           aggregation_state_avg_merge_double.get()),
+                       double_type.makeValue(&double_val));
+  EXPECT_DEATH(
+      aggregation_handle_avg_->mergeStates(*aggregation_state_avg_merge_double,
+                                           aggregation_handle_avg_state_.get()),
+      "");
 
   std::unique_ptr<AggregationHandle> aggregation_handle_avg_float(
-      AggregateFunctionFactory::Get(AggregationID::kAvg).createHandle(
-          std::vector<const Type*>(1, &float_type)));
+      AggregateFunctionFactory::Get(AggregationID::kAvg)
+          .createHandle(std::vector<const Type *>(1, &float_type)));
   std::unique_ptr<AggregationState> aggregation_state_avg_merge_float(
       aggregation_handle_avg_float->createInitialState());
-  static_cast<const AggregationHandleAvg&>(*aggregation_handle_avg_float).iterateUnaryInl(
-      static_cast<AggregationStateAvg*>(aggregation_state_avg_merge_float.get()),
-      float_type.makeValue(&float_val));
-  EXPECT_DEATH(aggregation_handle_avg_->mergeStates(*aggregation_state_avg_merge_float,
-                                                    aggregation_handle_avg_state_.get()),
-               "");
+  static_cast<const AggregationHandleAvg &>(*aggregation_handle_avg_float)
+      .iterateUnaryInl(static_cast<AggregationStateAvg *>(
+                           aggregation_state_avg_merge_float.get()),
+                       float_type.makeValue(&float_val));
+  EXPECT_DEATH(
+      aggregation_handle_avg_->mergeStates(*aggregation_state_avg_merge_float,
+                                           aggregation_handle_avg_state_.get()),
+      "");
 }
 #endif
 
@@ -415,8 +455,10 @@ TEST_F(AggregationHandleAvgTest, ResultTypeForArgumentTypeTest) {
   EXPECT_TRUE(ResultTypeForArgumentTypeTest(kLong, kDouble));
   EXPECT_TRUE(ResultTypeForArgumentTypeTest(kFloat, kDouble));
   EXPECT_TRUE(ResultTypeForArgumentTypeTest(kDouble, kDouble));
-  EXPECT_TRUE(ResultTypeForArgumentTypeTest(kDatetimeInterval, kDatetimeInterval));
-  EXPECT_TRUE(ResultTypeForArgumentTypeTest(kYearMonthInterval, kYearMonthInterval));
+  EXPECT_TRUE(
+      ResultTypeForArgumentTypeTest(kDatetimeInterval, kDatetimeInterval));
+  EXPECT_TRUE(
+      ResultTypeForArgumentTypeTest(kYearMonthInterval, kYearMonthInterval));
 }
 
 TEST_F(AggregationHandleAvgTest, GroupByTableMergeTestAvg) {
@@ -424,25 +466,28 @@ TEST_F(AggregationHandleAvgTest, GroupByTableMergeTestAvg) {
   initializeHandle(long_non_null_type);
   storage_manager_.reset(new StorageManager("./test_avg_data"));
   std::unique_ptr<AggregationStateHashTableBase> source_hash_table(
-      aggregation_handle_avg_->createGroupByHashTable(
-          HashTableImplType::kSimpleScalarSeparateChaining,
+      AggregationStateFastHashTableFactory::CreateResizable(
+          HashTableImplType::kSeparateChaining,
           std::vector<const Type *>(1, &long_non_null_type),
           10,
+          {aggregation_handle_avg_.get()->getPayloadSize()},
+          {aggregation_handle_avg_.get()},
           storage_manager_.get()));
   std::unique_ptr<AggregationStateHashTableBase> destination_hash_table(
-      aggregation_handle_avg_->createGroupByHashTable(
-          HashTableImplType::kSimpleScalarSeparateChaining,
+      AggregationStateFastHashTableFactory::CreateResizable(
+          HashTableImplType::kSeparateChaining,
           std::vector<const Type *>(1, &long_non_null_type),
           10,
+          {aggregation_handle_avg_.get()->getPayloadSize()},
+          {aggregation_handle_avg_.get()},
           storage_manager_.get()));
 
-  AggregationStateHashTable<AggregationStateAvg> *destination_hash_table_derived =
-      static_cast<AggregationStateHashTable<AggregationStateAvg> *>(
+  AggregationStateFastHashTable *destination_hash_table_derived =
+      static_cast<AggregationStateFastHashTable *>(
           destination_hash_table.get());
 
-  AggregationStateHashTable<AggregationStateAvg> *source_hash_table_derived =
-      static_cast<AggregationStateHashTable<AggregationStateAvg> *>(
-          source_hash_table.get());
+  AggregationStateFastHashTable *source_hash_table_derived =
+      static_cast<AggregationStateFastHashTable *>(source_hash_table.get());
 
   AggregationHandleAvg *aggregation_handle_avg_derived =
       static_cast<AggregationHandleAvg *>(aggregation_handle_avg_.get());
@@ -494,36 +539,56 @@ TEST_F(AggregationHandleAvgTest, GroupByTableMergeTestAvg) {
       exclusive_key_source_state.get(), exclusive_key_source_avg_val);
 
   // Add the key-state pairs to the hash tables.
-  source_hash_table_derived->putCompositeKey(common_key,
-                                             *common_key_source_state);
-  destination_hash_table_derived->putCompositeKey(
-      common_key, *common_key_destination_state);
-  source_hash_table_derived->putCompositeKey(exclusive_source_key,
-                                             *exclusive_key_source_state);
-  destination_hash_table_derived->putCompositeKey(
-      exclusive_destination_key, *exclusive_key_destination_state);
+  unsigned char buffer[100];
+  buffer[0] = '\0';
+  memcpy(buffer + 1,
+         common_key_source_state.get()->getPayloadAddress(),
+         aggregation_handle_avg_.get()->getPayloadSize());
+  source_hash_table_derived->putCompositeKeyFast(common_key, buffer);
+
+  memcpy(buffer + 1,
+         common_key_destination_state.get()->getPayloadAddress(),
+         aggregation_handle_avg_.get()->getPayloadSize());
+  destination_hash_table_derived->putCompositeKeyFast(common_key, buffer);
+
+  memcpy(buffer + 1,
+         exclusive_key_source_state.get()->getPayloadAddress(),
+         aggregation_handle_avg_.get()->getPayloadSize());
+  source_hash_table_derived->putCompositeKeyFast(exclusive_source_key, buffer);
+
+  memcpy(buffer + 1,
+         exclusive_key_destination_state.get()->getPayloadAddress(),
+         aggregation_handle_avg_.get()->getPayloadSize());
+  destination_hash_table_derived->putCompositeKeyFast(exclusive_destination_key,
+                                                      buffer);
 
   EXPECT_EQ(2u, destination_hash_table_derived->numEntries());
   EXPECT_EQ(2u, source_hash_table_derived->numEntries());
 
-  aggregation_handle_avg_->mergeGroupByHashTables(*source_hash_table,
-                                                  destination_hash_table.get());
+  AggregationOperationState::mergeGroupByHashTables(
+      source_hash_table.get(), destination_hash_table.get());
 
   EXPECT_EQ(3u, destination_hash_table_derived->numEntries());
 
   CheckAvgValue<double>(
       (common_key_destination_avg_val.getLiteral<std::int64_t>() +
-          common_key_source_avg_val.getLiteral<std::int64_t>()) / static_cast<double>(2),
-      *aggregation_handle_avg_derived,
-      *(destination_hash_table_derived->getSingleCompositeKey(common_key)));
-  CheckAvgValue<double>(exclusive_key_destination_avg_val.getLiteral<std::int64_t>(),
-                  *aggregation_handle_avg_derived,
-                  *(destination_hash_table_derived->getSingleCompositeKey(
-                      exclusive_destination_key)));
-  CheckAvgValue<double>(exclusive_key_source_avg_val.getLiteral<std::int64_t>(),
-                  *aggregation_handle_avg_derived,
-                  *(source_hash_table_derived->getSingleCompositeKey(
-                      exclusive_source_key)));
+       common_key_source_avg_val.getLiteral<std::int64_t>()) /
+          static_cast<double>(2),
+      aggregation_handle_avg_derived->finalizeHashTableEntryFast(
+          destination_hash_table_derived->getSingleCompositeKey(common_key) +
+          1));
+  CheckAvgValue<double>(
+      exclusive_key_destination_avg_val.getLiteral<std::int64_t>(),
+      aggregation_handle_avg_derived->finalizeHashTableEntryFast(
+          destination_hash_table_derived->getSingleCompositeKey(
+              exclusive_destination_key) +
+          1));
+  CheckAvgValue<double>(
+      exclusive_key_source_avg_val.getLiteral<std::int64_t>(),
+      aggregation_handle_avg_derived->finalizeHashTableEntryFast(
+          source_hash_table_derived->getSingleCompositeKey(
+              exclusive_source_key) +
+          1));
 }
 
 }  // namespace quickstep
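
The reworked merge test above no longer hands AggregationState objects to the
hash table; it copies each state's raw payload (getPayloadSize() bytes) into a
byte buffer behind a leading byte (the test writes '\0' there), inserts that
buffer with putCompositeKeyFast(), and skips the leading byte again when
finalizing (hence the "+ 1" in the finalizeHashTableEntryFast() calls). The
following is a minimal, self-contained sketch of that buffer layout; the
SumCountPayload struct and its fields are illustrative stand-ins, not
Quickstep's actual AggregationStateAvg.

#include <cstdint>
#include <cstring>
#include <iostream>

// Illustrative stand-in for an aggregation payload (not the real
// AggregationStateAvg): a running sum and a tuple count.
struct SumCountPayload {
  double sum_;
  std::int64_t count_;
};

int main() {
  const SumCountPayload state{42.5, 5};

  // Pack the state the way the test packs it: one leading byte, then the
  // raw payload bytes.
  unsigned char buffer[1 + sizeof(SumCountPayload)];
  buffer[0] = '\0';
  std::memcpy(buffer + 1, &state, sizeof(SumCountPayload));

  // Reading the payload back skips the leading byte, mirroring
  // finalizeHashTableEntryFast(getSingleCompositeKey(key) + 1) in the test.
  SumCountPayload unpacked;
  std::memcpy(&unpacked, buffer + 1, sizeof(SumCountPayload));
  std::cout << "avg = " << unpacked.sum_ / unpacked.count_ << std::endl;  // 8.5
  return 0;
}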

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/dad7d6f3/expressions/aggregation/tests/AggregationHandleCount_unittest.cpp
----------------------------------------------------------------------
diff --git a/expressions/aggregation/tests/AggregationHandleCount_unittest.cpp b/expressions/aggregation/tests/AggregationHandleCount_unittest.cpp
index bf02523..96ead8a 100644
--- a/expressions/aggregation/tests/AggregationHandleCount_unittest.cpp
+++ b/expressions/aggregation/tests/AggregationHandleCount_unittest.cpp
@@ -27,6 +27,8 @@
 #include "expressions/aggregation/AggregationHandle.hpp"
 #include "expressions/aggregation/AggregationHandleCount.hpp"
 #include "expressions/aggregation/AggregationID.hpp"
+#include "storage/AggregationOperationState.hpp"
+#include "storage/FastHashTableFactory.hpp"
 #include "storage/StorageManager.hpp"
 #include "types/CharType.hpp"
 #include "types/DoubleType.hpp"
@@ -48,85 +50,94 @@
 
 namespace quickstep {
 
-class AggregationHandleCountTest : public::testing::Test {
+class AggregationHandleCountTest : public ::testing::Test {
  protected:
   const Type &dummy_type = TypeFactory::GetType(kInt);
 
   void iterateHandleNullary(AggregationState *state) {
-    static_cast<const AggregationHandleCount<true, false>&>(
-        *aggregation_handle_count_).iterateNullaryInl(
-            static_cast<AggregationStateCount*>(state));
+    static_cast<const AggregationHandleCount<true, false> &>(
+        *aggregation_handle_count_)
+        .iterateNullaryInl(static_cast<AggregationStateCount *>(state));
   }
 
   // Helper method that calls AggregationHandleCount::iterateUnaryInl() to
   // aggregate 'value' into '*state'.
   void iterateHandle(AggregationState *state, const TypedValue &value) {
-    static_cast<const AggregationHandleCount<false, true>&>(
-        *aggregation_handle_count_).iterateUnaryInl(
-            static_cast<AggregationStateCount*>(state),
-            value);
+    static_cast<const AggregationHandleCount<false, true> &>(
+        *aggregation_handle_count_)
+        .iterateUnaryInl(static_cast<AggregationStateCount *>(state), value);
   }
 
   void initializeHandle(const Type *argument_type) {
     if (argument_type == nullptr) {
       aggregation_handle_count_.reset(
-          AggregateFunctionFactory::Get(AggregationID::kCount).createHandle(
-              std::vector<const Type*>()));
+          AggregateFunctionFactory::Get(AggregationID::kCount)
+              .createHandle(std::vector<const Type *>()));
     } else {
       aggregation_handle_count_.reset(
-          AggregateFunctionFactory::Get(AggregationID::kCount).createHandle(
-              std::vector<const Type*>(1, argument_type)));
+          AggregateFunctionFactory::Get(AggregationID::kCount)
+              .createHandle(std::vector<const Type *>(1, argument_type)));
     }
     aggregation_handle_count_state_.reset(
         aggregation_handle_count_->createInitialState());
   }
 
   static bool ApplyToTypesTest(TypeID typeID) {
-    const Type &type = (typeID == kChar || typeID == kVarChar) ?
-        TypeFactory::GetType(typeID, static_cast<std::size_t>(10)) :
-        TypeFactory::GetType(typeID);
+    const Type &type =
+        (typeID == kChar || typeID == kVarChar)
+            ? TypeFactory::GetType(typeID, static_cast<std::size_t>(10))
+            : TypeFactory::GetType(typeID);
 
-    return AggregateFunctionFactory::Get(AggregationID::kCount).canApplyToTypes(
-        std::vector<const Type*>(1, &type));
+    return AggregateFunctionFactory::Get(AggregationID::kCount)
+        .canApplyToTypes(std::vector<const Type *>(1, &type));
   }
 
   static bool ResultTypeForArgumentTypeTest(TypeID input_type_id,
                                             TypeID output_type_id) {
-    const Type *result_type
-        = AggregateFunctionFactory::Get(AggregationID::kCount).resultTypeForArgumentTypes(
-            std::vector<const Type*>(1, &TypeFactory::GetType(input_type_id)));
+    const Type *result_type =
+        AggregateFunctionFactory::Get(AggregationID::kCount)
+            .resultTypeForArgumentTypes(std::vector<const Type *>(
+                1, &TypeFactory::GetType(input_type_id)));
     return (result_type->getTypeID() == output_type_id);
   }
 
-  static void CheckCountValue(
-      std::int64_t expected,
-      const AggregationHandle &handle,
-      const AggregationState &state) {
+  static void CheckCountValue(std::int64_t expected,
+                              const AggregationHandle &handle,
+                              const AggregationState &state) {
     EXPECT_EQ(expected, handle.finalize(state).getLiteral<std::int64_t>());
   }
 
+  static void CheckCountValue(std::int64_t expected, const TypedValue &value) {
+    EXPECT_EQ(expected, value.getLiteral<std::int64_t>());
+  }
+
   void checkAggregationCountNullary(int test_count) {
     initializeHandle(nullptr);
-    CheckCountValue(0, *aggregation_handle_count_, *aggregation_handle_count_state_);
+    CheckCountValue(
+        0, *aggregation_handle_count_, *aggregation_handle_count_state_);
 
     for (int i = 0; i < test_count; ++i) {
       iterateHandleNullary(aggregation_handle_count_state_.get());
     }
-    CheckCountValue(test_count, *aggregation_handle_count_, *aggregation_handle_count_state_);
+    CheckCountValue(test_count,
+                    *aggregation_handle_count_,
+                    *aggregation_handle_count_state_);
 
     // Test mergeStates.
     std::unique_ptr<AggregationState> merge_state(
         aggregation_handle_count_->createInitialState());
-    aggregation_handle_count_->mergeStates(*merge_state,
-                                           aggregation_handle_count_state_.get());
+    aggregation_handle_count_->mergeStates(
+        *merge_state, aggregation_handle_count_state_.get());
 
     for (int i = 0; i < test_count; ++i) {
       iterateHandleNullary(merge_state.get());
     }
 
-    aggregation_handle_count_->mergeStates(*merge_state,
-                                           aggregation_handle_count_state_.get());
-    CheckCountValue(2 * test_count, *aggregation_handle_count_, *aggregation_handle_count_state_);
+    aggregation_handle_count_->mergeStates(
+        *merge_state, aggregation_handle_count_state_.get());
+    CheckCountValue(2 * test_count,
+                    *aggregation_handle_count_,
+                    *aggregation_handle_count_state_);
   }
 
   void checkAggregationCountNullaryAccumulate(int test_count) {
@@ -137,12 +148,10 @@ class AggregationHandleCountTest : public::testing::Test {
 
     // Test the state generated directly by accumulateNullary(), and also test
     // after merging back.
-    CheckCountValue(test_count,
-                    *aggregation_handle_count_,
-                    *accumulated_state);
+    CheckCountValue(test_count, *aggregation_handle_count_, *accumulated_state);
 
-    aggregation_handle_count_->mergeStates(*accumulated_state,
-                                           aggregation_handle_count_state_.get());
+    aggregation_handle_count_->mergeStates(
+        *accumulated_state, aggregation_handle_count_state_.get());
     CheckCountValue(test_count,
                     *aggregation_handle_count_,
                     *aggregation_handle_count_state_);
@@ -152,24 +161,27 @@ class AggregationHandleCountTest : public::testing::Test {
   void checkAggregationCountNumeric(int test_count) {
     const NumericType &type = NumericType::Instance(true);
     initializeHandle(&type);
-    CheckCountValue(0, *aggregation_handle_count_, *aggregation_handle_count_state_);
+    CheckCountValue(
+        0, *aggregation_handle_count_, *aggregation_handle_count_state_);
 
     typename NumericType::cpptype val = 0;
     int count = 0;
 
     iterateHandle(aggregation_handle_count_state_.get(), type.makeNullValue());
     for (int i = 0; i < test_count; ++i) {
-      iterateHandle(aggregation_handle_count_state_.get(), type.makeValue(&val));
+      iterateHandle(aggregation_handle_count_state_.get(),
+                    type.makeValue(&val));
       ++count;
     }
     iterateHandle(aggregation_handle_count_state_.get(), type.makeNullValue());
-    CheckCountValue(count, *aggregation_handle_count_, *aggregation_handle_count_state_);
+    CheckCountValue(
+        count, *aggregation_handle_count_, *aggregation_handle_count_state_);
 
     // Test mergeStates.
     std::unique_ptr<AggregationState> merge_state(
         aggregation_handle_count_->createInitialState());
-    aggregation_handle_count_->mergeStates(*merge_state,
-                                           aggregation_handle_count_state_.get());
+    aggregation_handle_count_->mergeStates(
+        *merge_state, aggregation_handle_count_state_.get());
 
     iterateHandle(merge_state.get(), type.makeNullValue());
     for (int i = 0; i < test_count; ++i) {
@@ -178,13 +190,14 @@ class AggregationHandleCountTest : public::testing::Test {
     }
     iterateHandle(merge_state.get(), type.makeNullValue());
 
-    aggregation_handle_count_->mergeStates(*merge_state,
-                                           aggregation_handle_count_state_.get());
-    CheckCountValue(count, *aggregation_handle_count_, *aggregation_handle_count_state_);
+    aggregation_handle_count_->mergeStates(
+        *merge_state, aggregation_handle_count_state_.get());
+    CheckCountValue(
+        count, *aggregation_handle_count_, *aggregation_handle_count_state_);
   }
 
   template <typename NumericType>
-  ColumnVector *createColumnVectorNumeric(const Type &type, int test_count) {
+  ColumnVector* createColumnVectorNumeric(const Type &type, int test_count) {
     NativeColumnVector *column = new NativeColumnVector(type, test_count + 3);
 
     typename NumericType::cpptype val = 0;
@@ -192,7 +205,7 @@ class AggregationHandleCountTest : public::testing::Test {
     for (int i = 0; i < test_count; ++i) {
       column->appendTypedValue(type.makeValue(&val));
       // One NULL in the middle.
-      if (i == test_count/2) {
+      if (i == test_count / 2) {
         column->appendTypedValue(type.makeNullValue());
       }
     }
@@ -204,21 +217,22 @@ class AggregationHandleCountTest : public::testing::Test {
   void checkAggregationCountNumericColumnVector(int test_count) {
     const NumericType &type = NumericType::Instance(true);
     initializeHandle(&type);
-    CheckCountValue(0, *aggregation_handle_count_, *aggregation_handle_count_state_);
+    CheckCountValue(
+        0, *aggregation_handle_count_, *aggregation_handle_count_state_);
 
     std::vector<std::unique_ptr<ColumnVector>> column_vectors;
-    column_vectors.emplace_back(createColumnVectorNumeric<NumericType>(type, test_count));
+    column_vectors.emplace_back(
+        createColumnVectorNumeric<NumericType>(type, test_count));
 
     std::unique_ptr<AggregationState> cv_state(
         aggregation_handle_count_->accumulateColumnVectors(column_vectors));
 
     // Test the state generated directly by accumulateColumnVectors(), and also
     // test after merging back.
-    CheckCountValue(test_count,
-                    *aggregation_handle_count_,
-                    *cv_state);
+    CheckCountValue(test_count, *aggregation_handle_count_, *cv_state);
 
-    aggregation_handle_count_->mergeStates(*cv_state, aggregation_handle_count_state_.get());
+    aggregation_handle_count_->mergeStates(
+        *cv_state, aggregation_handle_count_state_.get());
     CheckCountValue(test_count,
                     *aggregation_handle_count_,
                     *aggregation_handle_count_state_);
@@ -229,22 +243,24 @@ class AggregationHandleCountTest : public::testing::Test {
   void checkAggregationCountNumericValueAccessor(int test_count) {
     const NumericType &type = NumericType::Instance(true);
     initializeHandle(&type);
-    CheckCountValue(0, *aggregation_handle_count_, *aggregation_handle_count_state_);
+    CheckCountValue(
+        0, *aggregation_handle_count_, *aggregation_handle_count_state_);
 
-    std::unique_ptr<ColumnVectorsValueAccessor> accessor(new ColumnVectorsValueAccessor());
-    accessor->addColumn(createColumnVectorNumeric<NumericType>(type, test_count));
+    std::unique_ptr<ColumnVectorsValueAccessor> accessor(
+        new ColumnVectorsValueAccessor());
+    accessor->addColumn(
+        createColumnVectorNumeric<NumericType>(type, test_count));
 
     std::unique_ptr<AggregationState> va_state(
-        aggregation_handle_count_->accumulateValueAccessor(accessor.get(),
-                                                           std::vector<attribute_id>(1, 0)));
+        aggregation_handle_count_->accumulateValueAccessor(
+            accessor.get(), std::vector<attribute_id>(1, 0)));
 
     // Test the state generated directly by accumulateValueAccessor(), and also
     // test after merging back.
-    CheckCountValue(test_count,
-                    *aggregation_handle_count_,
-                    *va_state);
+    CheckCountValue(test_count, *aggregation_handle_count_, *va_state);
 
-    aggregation_handle_count_->mergeStates(*va_state, aggregation_handle_count_state_.get());
+    aggregation_handle_count_->mergeStates(
+        *va_state, aggregation_handle_count_state_.get());
     CheckCountValue(test_count,
                     *aggregation_handle_count_,
                     *aggregation_handle_count_state_);
@@ -255,7 +271,8 @@ class AggregationHandleCountTest : public::testing::Test {
   void checkAggregationCountString(int test_count) {
     const StringType &type = StringType::Instance(10, true);
     initializeHandle(&type);
-    CheckCountValue(0, *aggregation_handle_count_, *aggregation_handle_count_state_);
+    CheckCountValue(
+        0, *aggregation_handle_count_, *aggregation_handle_count_state_);
 
     std::string string_literal = "test_str";
     int count = 0;
@@ -267,7 +284,8 @@ class AggregationHandleCountTest : public::testing::Test {
       ++count;
     }
     iterateHandle(aggregation_handle_count_state_.get(), type.makeNullValue());
-    CheckCountValue(count, *aggregation_handle_count_, *aggregation_handle_count_state_);
+    CheckCountValue(
+        count, *aggregation_handle_count_, *aggregation_handle_count_state_);
 
     // Test mergeStates().
     std::unique_ptr<AggregationState> merge_state(
@@ -275,18 +293,20 @@ class AggregationHandleCountTest : public::testing::Test {
 
     iterateHandle(merge_state.get(), type.makeNullValue());
     for (int i = 0; i < test_count; ++i) {
-      iterateHandle(merge_state.get(), type.makeValue(string_literal.c_str(), 10));
+      iterateHandle(merge_state.get(),
+                    type.makeValue(string_literal.c_str(), 10));
       ++count;
     }
     iterateHandle(merge_state.get(), type.makeNullValue());
 
-    aggregation_handle_count_->mergeStates(*merge_state,
-                                           aggregation_handle_count_state_.get());
-    CheckCountValue(count, *aggregation_handle_count_, *aggregation_handle_count_state_);
+    aggregation_handle_count_->mergeStates(
+        *merge_state, aggregation_handle_count_state_.get());
+    CheckCountValue(
+        count, *aggregation_handle_count_, *aggregation_handle_count_state_);
   }
 
   template <typename ColumnVectorType>
-  ColumnVector *createColumnVectorString(const Type &type, int test_count) {
+  ColumnVector* createColumnVectorString(const Type &type, int test_count) {
     ColumnVectorType *column = new ColumnVectorType(type, test_count + 3);
 
     std::string string_literal = "test_str";
@@ -294,7 +314,7 @@ class AggregationHandleCountTest : public::testing::Test {
     for (int i = 0; i < test_count; ++i) {
       column->appendTypedValue(type.makeValue(string_literal.c_str(), 10));
       // One NULL in the middle.
-      if (i == test_count/2) {
+      if (i == test_count / 2) {
         column->appendTypedValue(type.makeNullValue());
       }
     }
@@ -307,21 +327,22 @@ class AggregationHandleCountTest : public::testing::Test {
   void checkAggregationCountStringColumnVector(int test_count) {
     const StringType &type = StringType::Instance(10, true);
     initializeHandle(&type);
-    CheckCountValue(0, *aggregation_handle_count_, *aggregation_handle_count_state_);
+    CheckCountValue(
+        0, *aggregation_handle_count_, *aggregation_handle_count_state_);
 
     std::vector<std::unique_ptr<ColumnVector>> column_vectors;
-    column_vectors.emplace_back(createColumnVectorString<ColumnVectorType>(type, test_count));
+    column_vectors.emplace_back(
+        createColumnVectorString<ColumnVectorType>(type, test_count));
 
     std::unique_ptr<AggregationState> cv_state(
         aggregation_handle_count_->accumulateColumnVectors(column_vectors));
 
     // Test the state generated directly by accumulateColumnVectors(), and also
     // test after merging back.
-    CheckCountValue(test_count,
-                    *aggregation_handle_count_,
-                    *cv_state);
+    CheckCountValue(test_count, *aggregation_handle_count_, *cv_state);
 
-    aggregation_handle_count_->mergeStates(*cv_state, aggregation_handle_count_state_.get());
+    aggregation_handle_count_->mergeStates(
+        *cv_state, aggregation_handle_count_state_.get());
     CheckCountValue(test_count,
                     *aggregation_handle_count_,
                     *aggregation_handle_count_state_);
@@ -332,22 +353,24 @@ class AggregationHandleCountTest : public::testing::Test {
   void checkAggregationCountStringValueAccessor(int test_count) {
     const StringType &type = StringType::Instance(10, true);
     initializeHandle(&type);
-    CheckCountValue(0, *aggregation_handle_count_, *aggregation_handle_count_state_);
+    CheckCountValue(
+        0, *aggregation_handle_count_, *aggregation_handle_count_state_);
 
-    std::unique_ptr<ColumnVectorsValueAccessor> accessor(new ColumnVectorsValueAccessor());
-    accessor->addColumn(createColumnVectorString<ColumnVectorType>(type, test_count));
+    std::unique_ptr<ColumnVectorsValueAccessor> accessor(
+        new ColumnVectorsValueAccessor());
+    accessor->addColumn(
+        createColumnVectorString<ColumnVectorType>(type, test_count));
 
     std::unique_ptr<AggregationState> va_state(
-        aggregation_handle_count_->accumulateValueAccessor(accessor.get(),
-                                                           std::vector<attribute_id>(1, 0)));
+        aggregation_handle_count_->accumulateValueAccessor(
+            accessor.get(), std::vector<attribute_id>(1, 0)));
 
     // Test the state generated directly by accumulateValueAccessor(), and also
     // test after merging back.
-    CheckCountValue(test_count,
-                    *aggregation_handle_count_,
-                    *va_state);
+    CheckCountValue(test_count, *aggregation_handle_count_, *va_state);
 
-    aggregation_handle_count_->mergeStates(*va_state, aggregation_handle_count_state_.get());
+    aggregation_handle_count_->mergeStates(
+        *va_state, aggregation_handle_count_state_.get());
     CheckCountValue(test_count,
                     *aggregation_handle_count_,
                     *aggregation_handle_count_state_);
@@ -362,13 +385,12 @@ class AggregationHandleCountTest : public::testing::Test {
 typedef AggregationHandleCountTest AggregationHandleCountDeathTest;
 
 TEST_F(AggregationHandleCountTest, CountStarTest) {
-  checkAggregationCountNullary(0),
-  checkAggregationCountNullary(10000);
+  checkAggregationCountNullary(0), checkAggregationCountNullary(10000);
 }
 
 TEST_F(AggregationHandleCountTest, CountStarAccumulateTest) {
   checkAggregationCountNullaryAccumulate(0),
-  checkAggregationCountNullaryAccumulate(10000);
+      checkAggregationCountNullaryAccumulate(10000);
 }
 
 TEST_F(AggregationHandleCountTest, IntTypeTest) {
@@ -428,7 +450,8 @@ TEST_F(AggregationHandleCountTest, CharTypeColumnVectorTest) {
 
 TEST_F(AggregationHandleCountTest, VarCharTypeColumnVectorTest) {
   checkAggregationCountStringColumnVector<VarCharType, IndirectColumnVector>(0);
-  checkAggregationCountStringColumnVector<VarCharType, IndirectColumnVector>(10000);
+  checkAggregationCountStringColumnVector<VarCharType, IndirectColumnVector>(
+      10000);
 }
 
 #ifdef QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
@@ -458,8 +481,10 @@ TEST_F(AggregationHandleCountTest, CharTypeValueAccessorTest) {
 }
 
 TEST_F(AggregationHandleCountTest, VarCharTypeValueAccessorTest) {
-  checkAggregationCountStringValueAccessor<VarCharType, IndirectColumnVector>(0);
-  checkAggregationCountStringValueAccessor<VarCharType, IndirectColumnVector>(10000);
+  checkAggregationCountStringValueAccessor<VarCharType, IndirectColumnVector>(
+      0);
+  checkAggregationCountStringValueAccessor<VarCharType, IndirectColumnVector>(
+      10000);
 }
 #endif  // QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
 
@@ -484,25 +509,28 @@ TEST_F(AggregationHandleCountTest, GroupByTableMergeTestCount) {
   initializeHandle(&long_non_null_type);
   storage_manager_.reset(new StorageManager("./test_count_data"));
   std::unique_ptr<AggregationStateHashTableBase> source_hash_table(
-      aggregation_handle_count_->createGroupByHashTable(
-          HashTableImplType::kSimpleScalarSeparateChaining,
+      AggregationStateFastHashTableFactory::CreateResizable(
+          HashTableImplType::kSeparateChaining,
           std::vector<const Type *>(1, &long_non_null_type),
           10,
+          {aggregation_handle_count_.get()->getPayloadSize()},
+          {aggregation_handle_count_.get()},
           storage_manager_.get()));
   std::unique_ptr<AggregationStateHashTableBase> destination_hash_table(
-      aggregation_handle_count_->createGroupByHashTable(
-          HashTableImplType::kSimpleScalarSeparateChaining,
+      AggregationStateFastHashTableFactory::CreateResizable(
+          HashTableImplType::kSeparateChaining,
           std::vector<const Type *>(1, &long_non_null_type),
           10,
+          {aggregation_handle_count_.get()->getPayloadSize()},
+          {aggregation_handle_count_.get()},
           storage_manager_.get()));
 
-  AggregationStateHashTable<AggregationStateCount> *destination_hash_table_derived =
-      static_cast<AggregationStateHashTable<AggregationStateCount> *>(
+  AggregationStateFastHashTable *destination_hash_table_derived =
+      static_cast<AggregationStateFastHashTable *>(
           destination_hash_table.get());
 
-  AggregationStateHashTable<AggregationStateCount> *source_hash_table_derived =
-      static_cast<AggregationStateHashTable<AggregationStateCount> *>(
-          source_hash_table.get());
+  AggregationStateFastHashTable *source_hash_table_derived =
+      static_cast<AggregationStateFastHashTable *>(source_hash_table.get());
 
   // TODO(harshad) - Use TemplateUtil::CreateBoolInstantiatedInstance to
   // generate all the combinations of the bool template arguments and test them.
@@ -528,7 +556,8 @@ TEST_F(AggregationHandleCountTest, GroupByTableMergeTestCount) {
   TypedValue exclusive_key_source_count_val(exclusive_key_source_count);
 
   const std::int64_t exclusive_key_destination_count = 1;
-  TypedValue exclusive_key_destination_count_val(exclusive_key_destination_count);
+  TypedValue exclusive_key_destination_count_val(
+      exclusive_key_destination_count);
 
   std::unique_ptr<AggregationStateCount> common_key_source_state(
       static_cast<AggregationStateCount *>(
@@ -544,62 +573,86 @@ TEST_F(AggregationHandleCountTest, GroupByTableMergeTestCount) {
           aggregation_handle_count_->createInitialState()));
 
   // Create count value states for keys.
-  aggregation_handle_count_derived->iterateUnaryInl(common_key_source_state.get(),
-                                                  common_key_source_count_val);
-  std::int64_t actual_val = aggregation_handle_count_->finalize(*common_key_source_state)
-                       .getLiteral<std::int64_t>();
+  aggregation_handle_count_derived->iterateUnaryInl(
+      common_key_source_state.get(), common_key_source_count_val);
+  std::int64_t actual_val =
+      aggregation_handle_count_->finalize(*common_key_source_state)
+          .getLiteral<std::int64_t>();
   EXPECT_EQ(common_key_source_count_val.getLiteral<std::int64_t>(), actual_val);
 
   aggregation_handle_count_derived->iterateUnaryInl(
       common_key_destination_state.get(), common_key_destination_count_val);
-  actual_val = aggregation_handle_count_->finalize(*common_key_destination_state)
-                   .getLiteral<std::int64_t>();
-  EXPECT_EQ(common_key_destination_count_val.getLiteral<std::int64_t>(), actual_val);
+  actual_val =
+      aggregation_handle_count_->finalize(*common_key_destination_state)
+          .getLiteral<std::int64_t>();
+  EXPECT_EQ(common_key_destination_count_val.getLiteral<std::int64_t>(),
+            actual_val);
 
   aggregation_handle_count_derived->iterateUnaryInl(
-      exclusive_key_destination_state.get(), exclusive_key_destination_count_val);
+      exclusive_key_destination_state.get(),
+      exclusive_key_destination_count_val);
   actual_val =
       aggregation_handle_count_->finalize(*exclusive_key_destination_state)
           .getLiteral<std::int64_t>();
-  EXPECT_EQ(exclusive_key_destination_count_val.getLiteral<std::int64_t>(), actual_val);
+  EXPECT_EQ(exclusive_key_destination_count_val.getLiteral<std::int64_t>(),
+            actual_val);
 
   aggregation_handle_count_derived->iterateUnaryInl(
       exclusive_key_source_state.get(), exclusive_key_source_count_val);
   actual_val = aggregation_handle_count_->finalize(*exclusive_key_source_state)
                    .getLiteral<std::int64_t>();
-  EXPECT_EQ(exclusive_key_source_count_val.getLiteral<std::int64_t>(), actual_val);
+  EXPECT_EQ(exclusive_key_source_count_val.getLiteral<std::int64_t>(),
+            actual_val);
 
   // Add the key-state pairs to the hash tables.
-  source_hash_table_derived->putCompositeKey(common_key,
-                                             *common_key_source_state);
-  destination_hash_table_derived->putCompositeKey(
-      common_key, *common_key_destination_state);
-  source_hash_table_derived->putCompositeKey(exclusive_source_key,
-                                             *exclusive_key_source_state);
-  destination_hash_table_derived->putCompositeKey(
-      exclusive_destination_key, *exclusive_key_destination_state);
+  unsigned char buffer[100];
+  buffer[0] = '\0';
+  memcpy(buffer + 1,
+         common_key_source_state.get()->getPayloadAddress(),
+         aggregation_handle_count_.get()->getPayloadSize());
+  source_hash_table_derived->putCompositeKeyFast(common_key, buffer);
+
+  memcpy(buffer + 1,
+         common_key_destination_state.get()->getPayloadAddress(),
+         aggregation_handle_count_.get()->getPayloadSize());
+  destination_hash_table_derived->putCompositeKeyFast(common_key, buffer);
+
+  memcpy(buffer + 1,
+         exclusive_key_source_state.get()->getPayloadAddress(),
+         aggregation_handle_count_.get()->getPayloadSize());
+  source_hash_table_derived->putCompositeKeyFast(exclusive_source_key, buffer);
+
+  memcpy(buffer + 1,
+         exclusive_key_destination_state.get()->getPayloadAddress(),
+         aggregation_handle_count_.get()->getPayloadSize());
+  destination_hash_table_derived->putCompositeKeyFast(exclusive_destination_key,
+                                                      buffer);
 
   EXPECT_EQ(2u, destination_hash_table_derived->numEntries());
   EXPECT_EQ(2u, source_hash_table_derived->numEntries());
 
-  aggregation_handle_count_->mergeGroupByHashTables(*source_hash_table,
-                                                  destination_hash_table.get());
+  AggregationOperationState::mergeGroupByHashTables(
+      source_hash_table.get(), destination_hash_table.get());
 
   EXPECT_EQ(3u, destination_hash_table_derived->numEntries());
 
   CheckCountValue(
       common_key_destination_count_val.getLiteral<std::int64_t>() +
           common_key_source_count_val.getLiteral<std::int64_t>(),
-      *aggregation_handle_count_derived,
-      *(destination_hash_table_derived->getSingleCompositeKey(common_key)));
-  CheckCountValue(exclusive_key_destination_count_val.getLiteral<std::int64_t>(),
-                  *aggregation_handle_count_derived,
-                  *(destination_hash_table_derived->getSingleCompositeKey(
-                      exclusive_destination_key)));
+      aggregation_handle_count_derived->finalizeHashTableEntryFast(
+          destination_hash_table_derived->getSingleCompositeKey(common_key) +
+          1));
+  CheckCountValue(
+      exclusive_key_destination_count_val.getLiteral<std::int64_t>(),
+      aggregation_handle_count_derived->finalizeHashTableEntryFast(
+          destination_hash_table_derived->getSingleCompositeKey(
+              exclusive_destination_key) +
+          1));
   CheckCountValue(exclusive_key_source_count_val.getLiteral<std::int64_t>(),
-                  *aggregation_handle_count_derived,
-                  *(source_hash_table_derived->getSingleCompositeKey(
-                      exclusive_source_key)));
+                  aggregation_handle_count_derived->finalizeHashTableEntryFast(
+                      source_hash_table_derived->getSingleCompositeKey(
+                          exclusive_source_key) +
+                      1));
 }
 
 }  // namespace quickstep
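
Note that both merge tests now go through the static
AggregationOperationState::mergeGroupByHashTables(source, destination) entry
point rather than a per-handle mergeGroupByHashTables() call. The behaviour the
assertions check is the usual group-by merge: a key present in both tables has
its payloads combined, and a key present only in the source is inserted into
the destination (2 + 2 entries merge into 3). Below is a minimal sketch of that
semantics over a plain std::unordered_map, purely illustrative and not the
actual AggregationStateFastHashTable implementation.

#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>

// Toy "group-by hash table": key -> running COUNT payload.
using ToyTable = std::unordered_map<std::string, std::int64_t>;

// Sketch of the merge semantics asserted by the tests: common keys are
// combined (counts added here), source-only keys are inserted.
void MergeGroupByTables(const ToyTable &source, ToyTable *destination) {
  for (const auto &entry : source) {
    (*destination)[entry.first] += entry.second;  // creates a 0 entry if absent
  }
}

int main() {
  const ToyTable source{{"common", 1}, {"source_only", 1}};
  ToyTable destination{{"common", 1}, {"destination_only", 1}};
  MergeGroupByTables(source, &destination);
  std::cout << destination.size() << " entries, common = "
            << destination["common"] << std::endl;  // 3 entries, common = 2
  return 0;
}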


[3/7] incubator-quickstep git commit: Modified Aggregation unit test. Ran clang-format.

Posted by ra...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/dad7d6f3/storage/FastHashTable.hpp
----------------------------------------------------------------------
diff --git a/storage/FastHashTable.hpp b/storage/FastHashTable.hpp
index 909fcc0..9b67734 100644
--- a/storage/FastHashTable.hpp
+++ b/storage/FastHashTable.hpp
@@ -35,8 +35,8 @@
 #include "storage/TupleReference.hpp"
 #include "storage/ValueAccessor.hpp"
 #include "storage/ValueAccessorUtil.hpp"
-#include "threading/SpinSharedMutex.hpp"
 #include "threading/SpinMutex.hpp"
+#include "threading/SpinSharedMutex.hpp"
 #include "types/Type.hpp"
 #include "types/TypedValue.hpp"
 #include "utility/BloomFilter.hpp"
@@ -115,9 +115,9 @@ template <bool resizable,
           bool force_key_copy,
           bool allow_duplicate_keys>
 class FastHashTable : public HashTableBase<resizable,
-                                       serializable,
-                                       force_key_copy,
-                                       allow_duplicate_keys> {
+                                           serializable,
+                                           force_key_copy,
+                                           allow_duplicate_keys> {
   static_assert(!(serializable && resizable && !force_key_copy),
                 "A HashTable must have force_key_copy=true when serializable "
                 "and resizable are both true.");
@@ -129,7 +129,7 @@ class FastHashTable : public HashTableBase<resizable,
 
  public:
   // Shadow template parameters. This is useful for shared test harnesses.
-//  typedef ValueT value_type;
+  //  typedef ValueT value_type;
   static constexpr bool template_resizable = resizable;
   static constexpr bool template_serializable = serializable;
   static constexpr bool template_force_key_copy = force_key_copy;
@@ -162,8 +162,9 @@ class FastHashTable : public HashTableBase<resizable,
     if (resizable) {
       if (blob_.valid()) {
         if (serializable) {
-          DEV_WARNING("Destroying a resizable serializable HashTable's underlying "
-                      "StorageBlob.");
+          DEV_WARNING(
+              "Destroying a resizable serializable HashTable's underlying "
+              "StorageBlob.");
         }
         const block_id blob_id = blob_->getID();
         blob_.release();
@@ -212,8 +213,7 @@ class FastHashTable : public HashTableBase<resizable,
    *         resizable is false and storage space for the hash table has been
    *         exhausted.
    **/
-  HashTablePutResult put(const TypedValue &key,
-                         const uint8_t &value);
+  HashTablePutResult put(const TypedValue &key, const std::uint8_t &value);
 
   /**
    * @brief Add a new entry into the hash table (composite key version).
@@ -238,7 +238,7 @@ class FastHashTable : public HashTableBase<resizable,
    **/
 
   HashTablePutResult putCompositeKeyFast(const std::vector<TypedValue> &key,
-                                     const uint8_t *value_ptr);
+                                         const std::uint8_t *value_ptr);
 
   /**
    * @brief Add (multiple) new entries into the hash table from a
@@ -378,7 +378,7 @@ class FastHashTable : public HashTableBase<resizable,
    **/
   template <typename FunctorT>
   bool upsert(const TypedValue &key,
-              const uint8_t *initial_value_ptr,
+              const std::uint8_t *initial_value_ptr,
               FunctorT *functor);
 
   /**
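
The upsert() signature above reads naturally as "initialize the entry from
initial_value_ptr if the key is new, then let the functor update the stored
value in place", with the bool presumably reporting whether the (possibly
non-resizable) table had room. A toy sketch of that reading follows; the
ToyStore/ToyUpsert names and the byte-vector payload are illustrative
assumptions, not the actual FastHashTable internals.

#include <cstdint>
#include <cstring>
#include <iostream>
#include <unordered_map>
#include <vector>

// Toy payload store: key -> raw payload bytes.
using ToyStore = std::unordered_map<std::int64_t, std::vector<std::uint8_t>>;

// Sketch of the upsert contract: create the entry from the initial bytes if
// the key is new, then let the functor mutate the stored payload in place.
template <typename FunctorT>
bool ToyUpsert(ToyStore *store,
               const std::int64_t key,
               const std::uint8_t *initial_value_ptr,
               const std::size_t payload_size,
               FunctorT *functor) {
  auto it = store->find(key);
  if (it == store->end()) {
    it = store->emplace(key,
                        std::vector<std::uint8_t>(
                            initial_value_ptr,
                            initial_value_ptr + payload_size)).first;
  }
  (*functor)(it->second.data());
  return true;  // a fixed-size table would return false when out of space
}

int main() {
  ToyStore store;
  const std::int64_t zero = 0;
  const auto add_one = [](std::uint8_t *payload) {
    std::int64_t count;
    std::memcpy(&count, payload, sizeof(count));
    ++count;
    std::memcpy(payload, &count, sizeof(count));
  };
  for (int i = 0; i < 2; ++i) {
    ToyUpsert(&store, 7, reinterpret_cast<const std::uint8_t *>(&zero),
              sizeof(zero), &add_one);
  }
  std::int64_t result;
  std::memcpy(&result, store[7].data(), sizeof(result));
  std::cout << result << std::endl;  // 2
  return 0;
}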
@@ -421,18 +421,18 @@ class FastHashTable : public HashTableBase<resizable,
    **/
   template <typename FunctorT>
   bool upsertCompositeKeyFast(const std::vector<TypedValue> &key,
-                          const uint8_t *init_value_ptr,
-                          FunctorT *functor);
+                              const std::uint8_t *init_value_ptr,
+                              FunctorT *functor);
 
   template <typename FunctorT>
   bool upsertCompositeKeyFast(const std::vector<TypedValue> &key,
-                          const uint8_t *init_value_ptr,
-                          FunctorT *functor,
-                          int index);
+                              const std::uint8_t *init_value_ptr,
+                              FunctorT *functor,
+                              int index);
 
   bool upsertCompositeKeyFast(const std::vector<TypedValue> &key,
-                          const uint8_t *init_value_ptr,
-                          const uint8_t *source_state);
+                              const std::uint8_t *init_value_ptr,
+                              const std::uint8_t *source_state);
 
   /**
    * @brief Apply a functor to (multiple) entries in this hash table, with keys
@@ -481,10 +481,11 @@ class FastHashTable : public HashTableBase<resizable,
    *         accessor's iteration will be left on the first tuple which could
    *         not be inserted).
    **/
-  bool upsertValueAccessorFast(const std::vector<std::vector<attribute_id>> &argument_ids,
-                           ValueAccessor *accessor,
-                           const attribute_id key_attr_id,
-                           const bool check_for_null_keys);
+  bool upsertValueAccessorFast(
+      const std::vector<std::vector<attribute_id>> &argument_ids,
+      ValueAccessor *accessor,
+      const attribute_id key_attr_id,
+      const bool check_for_null_keys);
 
   /**
    * @brief Apply a functor to (multiple) entries in this hash table, with keys
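
upsertValueAccessorFast() reads as the bulk version of the same idea: it walks
the rows exposed by a ValueAccessor, reads the group-by key from key_attr_id
(optionally skipping NULL keys), and upserts each group's payload using the
attribute columns listed in argument_ids. A rough, self-contained sketch of
that row loop for a single COUNT-like aggregate follows, using plain vectors
as stand-ins for the accessor's columns; the column layout and NULL handling
here are illustrative assumptions.

#include <cstdint>
#include <iostream>
#include <optional>
#include <unordered_map>
#include <vector>

// Toy stand-in for one accessor column: nullopt models a NULL value.
using ToyColumn = std::vector<std::optional<std::int64_t>>;

// Sketch of the per-row upsert loop for a COUNT(argument)-style aggregate:
// one key column, one argument column, NULL keys skipped on request.
std::unordered_map<std::int64_t, std::int64_t> GroupCountNonNull(
    const ToyColumn &key_column,
    const ToyColumn &argument_column,
    const bool check_for_null_keys) {
  std::unordered_map<std::int64_t, std::int64_t> groups;
  for (std::size_t row = 0; row < key_column.size(); ++row) {
    if (check_for_null_keys && !key_column[row].has_value()) {
      continue;  // a NULL key contributes to no group
    }
    if (argument_column[row].has_value()) {
      ++groups[*key_column[row]];  // "upsert": create-or-update the payload
    }
  }
  return groups;
}

int main() {
  const ToyColumn keys{1, 1, std::nullopt, 2};
  const ToyColumn args{10, std::nullopt, 3, 7};
  const auto groups = GroupCountNonNull(keys, args, true);
  std::cout << groups.at(1) << " " << groups.at(2) << std::endl;  // 1 1
  return 0;
}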
@@ -582,7 +583,7 @@ class FastHashTable : public HashTableBase<resizable,
    * @return The value of a matched entry if a matching key is found.
    *         Otherwise, return NULL.
    **/
-  virtual const uint8_t* getSingle(const TypedValue &key) const = 0;
+  virtual const std::uint8_t* getSingle(const TypedValue &key) const = 0;
 
   /**
    * @brief Lookup a composite key against this hash table to find a matching
@@ -607,9 +608,10 @@ class FastHashTable : public HashTableBase<resizable,
    * @return The value of a matched entry if a matching key is found.
    *         Otherwise, return NULL.
    **/
-  virtual const uint8_t* getSingleCompositeKey(const std::vector<TypedValue> &key) const = 0;
-  virtual const uint8_t* getSingleCompositeKey(const std::vector<TypedValue> &key,
-                                               int index) const = 0;
+  virtual const std::uint8_t* getSingleCompositeKey(
+      const std::vector<TypedValue> &key) const = 0;
+  virtual const std::uint8_t *getSingleCompositeKey(
+      const std::vector<TypedValue> &key, int index) const = 0;
 
   /**
    * @brief Lookup a key against this hash table to find matching entries.
@@ -634,7 +636,8 @@ class FastHashTable : public HashTableBase<resizable,
    * @param values A vector to hold values of all matching entries. Matches
    *        will be appended to the vector.
    **/
-  virtual void getAll(const TypedValue &key, std::vector<const uint8_t*> *values) const = 0;
+  virtual void getAll(const TypedValue &key,
+                      std::vector<const std::uint8_t *> *values) const = 0;
 
   /**
    * @brief Lookup a composite key against this hash table to find matching
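
The lookup interface above splits into two shapes: getSingle() /
getSingleCompositeKey() return a pointer to the one matching payload (or NULL
when the key is absent), while getAll() / getAllCompositeKey() append every
matching payload pointer to a caller-supplied vector, which is the shape a
table that allows duplicate keys needs. A small sketch of that split over a
toy multimap, illustrative only:

#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

// Toy table that allows duplicate keys, standing in for the real hash table.
using ToyMultiTable = std::unordered_multimap<std::string, std::uint8_t>;

// getSingle()-style lookup: a pointer to one matching payload, or nullptr.
const std::uint8_t* GetSingle(const ToyMultiTable &table,
                              const std::string &key) {
  const auto it = table.find(key);
  return (it == table.end()) ? nullptr : &it->second;
}

// getAll()-style lookup: append every matching payload pointer to *values.
void GetAll(const ToyMultiTable &table,
            const std::string &key,
            std::vector<const std::uint8_t *> *values) {
  const auto range = table.equal_range(key);
  for (auto it = range.first; it != range.second; ++it) {
    values->push_back(&it->second);
  }
}

int main() {
  const ToyMultiTable table{{"k", 1}, {"k", 2}, {"other", 3}};
  std::vector<const std::uint8_t *> values;
  GetAll(table, "k", &values);
  std::cout << values.size() << " matches; single lookup of \"other\" = "
            << static_cast<int>(*GetSingle(table, "other")) << std::endl;
  return 0;
}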
@@ -659,8 +662,9 @@ class FastHashTable : public HashTableBase<resizable,
    * @param values A vector to hold values of all matching entries. Matches
    *        will be appended to the vector.
    **/
-  virtual void getAllCompositeKey(const std::vector<TypedValue> &key,
-                                  std::vector<const uint8_t*> *values) const = 0;
+  virtual void getAllCompositeKey(
+      const std::vector<TypedValue> &key,
+      std::vector<const std::uint8_t *> *values) const = 0;
 
   /**
    * @brief Lookup (multiple) keys from a ValueAccessor and apply a functor to
@@ -726,7 +730,8 @@ class FastHashTable : public HashTableBase<resizable,
    *        set to true if some of the keys that will be read from accessor may
    *        be null.
    * @param functor A pointer to a functor, which should provide two functions:
-   *        1) An operator that takes 2 arguments: const ValueAccessor& (or better
+   *        1) An operator that takes 2 arguments: const ValueAccessor&
+   *        (or better
    *        yet, a templated call operator which takes a const reference to
    *        some subclass of ValueAccessor as its first argument) and
    *        const ValueT&. The operator will be invoked once for each pair of a
@@ -767,7 +772,8 @@ class FastHashTable : public HashTableBase<resizable,
    *        set to true if some of the keys that will be read from accessor may
    *        be null.
    * @param functor A pointer to a functor, which should provide two functions:
-   *        1) An operator that takes 2 arguments: const ValueAccessor& (or better
+   *        1) An operator that takes 2 arguments: const ValueAccessor&
+   *        (or better
    *        yet, a templated call operator which takes a const reference to
    *        some subclass of ValueAccessor as its first argument) and
    *        const ValueT&. The operator will be invoked once for each pair of a
@@ -817,10 +823,11 @@ class FastHashTable : public HashTableBase<resizable,
    *        key taken from accessor and matching value.
    **/
   template <typename FunctorT>
-  void getAllFromValueAccessorCompositeKey(ValueAccessor *accessor,
-                                           const std::vector<attribute_id> &key_attr_ids,
-                                           const bool check_for_null_keys,
-                                           FunctorT *functor) const;
+  void getAllFromValueAccessorCompositeKey(
+      ValueAccessor *accessor,
+      const std::vector<attribute_id> &key_attr_ids,
+      const bool check_for_null_keys,
+      FunctorT *functor) const;
 
   /**
    * @brief Apply the functor to each key with a match in the hash table.
@@ -842,10 +849,8 @@ class FastHashTable : public HashTableBase<resizable,
                                                 const attribute_id key_attr_id,
                                                 const bool check_for_null_keys,
                                                 FunctorT *functor) const {
-    return runOverKeysFromValueAccessor<true>(accessor,
-                                              key_attr_id,
-                                              check_for_null_keys,
-                                              functor);
+    return runOverKeysFromValueAccessor<true>(
+        accessor, key_attr_id, check_for_null_keys, functor);
   }
 
   /**
@@ -869,10 +874,8 @@ class FastHashTable : public HashTableBase<resizable,
       const std::vector<attribute_id> &key_attr_ids,
       const bool check_for_null_keys,
       FunctorT *functor) const {
-    return runOverKeysFromValueAccessorCompositeKey<true>(accessor,
-                                                          key_attr_ids,
-                                                          check_for_null_keys,
-                                                          functor);
+    return runOverKeysFromValueAccessorCompositeKey<true>(
+        accessor, key_attr_ids, check_for_null_keys, functor);
   }
 
   /**
@@ -896,10 +899,8 @@ class FastHashTable : public HashTableBase<resizable,
       const attribute_id key_attr_id,
       const bool check_for_null_keys,
       FunctorT *functor) const {
-    return runOverKeysFromValueAccessor<false>(accessor,
-                                               key_attr_id,
-                                               check_for_null_keys,
-                                               functor);
+    return runOverKeysFromValueAccessor<false>(
+        accessor, key_attr_id, check_for_null_keys, functor);
   }
 
   /**
@@ -923,10 +924,8 @@ class FastHashTable : public HashTableBase<resizable,
       const std::vector<attribute_id> &key_attr_ids,
       const bool check_for_null_keys,
       FunctorT *functor) const {
-    return runOverKeysFromValueAccessorCompositeKey<false>(accessor,
-                                                           key_attr_ids,
-                                                           check_for_null_keys,
-                                                           functor);
+    return runOverKeysFromValueAccessorCompositeKey<false>(
+        accessor, key_attr_ids, check_for_null_keys, functor);
   }
 
   /**
@@ -983,8 +982,7 @@ class FastHashTable : public HashTableBase<resizable,
   std::size_t forEachCompositeKeyFast(FunctorT *functor) const;
 
   template <typename FunctorT>
-  std::size_t forEachCompositeKeyFast(FunctorT *functor,
-                                      int index) const;
+  std::size_t forEachCompositeKeyFast(FunctorT *functor, int index) const;
 
   /**
    * @brief A call to this function will cause a bloom filter to be built
@@ -1037,7 +1035,8 @@ class FastHashTable : public HashTableBase<resizable,
    * @param probe_attribute_ids The vector of attribute ids to use for probing
    *        the bloom filter.
    **/
-  inline void addProbeSideAttributeIds(std::vector<attribute_id> &&probe_attribute_ids) {
+  inline void addProbeSideAttributeIds(
+      std::vector<attribute_id> &&probe_attribute_ids) {
     probe_attribute_ids_.push_back(probe_attribute_ids);
   }
 
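A brief usage sketch; the attribute ids and the hash_table variable are made up. The method simply records which probe-side attributes correspond to this table's keys for later bloom-filter probing.

    // Hypothetical: attributes 0 and 2 of the probe relation line up with the
    // build-side key attributes of this hash table.
    std::vector<attribute_id> probe_attrs = {0, 2};
    hash_table.addProbeSideAttributeIds(std::move(probe_attrs));
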
@@ -1065,30 +1064,32 @@ class FastHashTable : public HashTableBase<resizable,
    *        pass when bulk-inserting entries. If false, resources are allocated
    *        on the fly for each entry.
    **/
-  FastHashTable(const std::vector<const Type*> &key_types,
-            const std::size_t num_entries,
-            const std::vector<AggregationHandle *> &handles,
-            const std::vector<std::size_t> &payload_sizes,
-            StorageManager *storage_manager,
-            const bool adjust_hashes,
-            const bool use_scalar_literal_hash,
-            const bool preallocate_supported)
-        : key_types_(key_types),
-          scalar_key_inline_(true),
-          key_inline_(nullptr),
-          adjust_hashes_(adjust_hashes),
-          use_scalar_literal_hash_(use_scalar_literal_hash),
-          preallocate_supported_(preallocate_supported),
-          handles_(handles),
-          total_payload_size_(std::accumulate(payload_sizes.begin(), payload_sizes.end(), sizeof(SpinMutex))),
-          storage_manager_(storage_manager),
-          hash_table_memory_(nullptr),
-          hash_table_memory_size_(0) {
+  FastHashTable(const std::vector<const Type *> &key_types,
+                const std::size_t num_entries,
+                const std::vector<AggregationHandle *> &handles,
+                const std::vector<std::size_t> &payload_sizes,
+                StorageManager *storage_manager,
+                const bool adjust_hashes,
+                const bool use_scalar_literal_hash,
+                const bool preallocate_supported)
+      : key_types_(key_types),
+        scalar_key_inline_(true),
+        key_inline_(nullptr),
+        adjust_hashes_(adjust_hashes),
+        use_scalar_literal_hash_(use_scalar_literal_hash),
+        preallocate_supported_(preallocate_supported),
+        handles_(handles),
+        num_handles_(handles.size()),
+        total_payload_size_(std::accumulate(
+            payload_sizes.begin(), payload_sizes.end(), sizeof(SpinMutex))),
+        storage_manager_(storage_manager),
+        hash_table_memory_(nullptr),
+        hash_table_memory_size_(0) {
     DEBUG_ASSERT(resizable);
     std::size_t running_sum = sizeof(SpinMutex);
     for (auto size : payload_sizes) {
-        payload_offsets_.emplace_back(running_sum);
-        running_sum+=size;
+      payload_offsets_.emplace_back(running_sum);
+      running_sum += size;
     }
   }
 
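To make the payload layout concrete, the following self-contained sketch mirrors the accumulation in the constructor above. The handle state sizes and the 8-byte SpinMutex are illustrative assumptions only; actual sizes depend on the platform and on the aggregation handles in use.

    #include <cassert>
    #include <cstddef>
    #include <numeric>
    #include <vector>

    int main() {
      // Assumed figures: two aggregate handles whose states occupy 16 and 24
      // bytes, and a SpinMutex taken to be 8 bytes for illustration.
      const std::size_t mutex_size = 8;
      const std::vector<std::size_t> payload_sizes = {16, 24};

      // Mirrors total_payload_size_: 8 + 16 + 24 = 48 bytes per entry.
      const std::size_t total_payload_size = std::accumulate(
          payload_sizes.begin(), payload_sizes.end(), mutex_size);
      assert(total_payload_size == 48);

      // Mirrors payload_offsets_: each handle's state starts after the mutex
      // and after all preceding handles' states, i.e. {8, 24}.
      std::vector<std::size_t> payload_offsets;
      std::size_t running_sum = mutex_size;
      for (const std::size_t size : payload_sizes) {
        payload_offsets.emplace_back(running_sum);
        running_sum += size;
      }
      assert(payload_offsets[0] == 8 && payload_offsets[1] == 24);
      return 0;
    }
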
@@ -1122,14 +1123,14 @@ class FastHashTable : public HashTableBase<resizable,
    *        pass when bulk-inserting entries. If false, resources are allocated
    *        on the fly for each entry.
    **/
-  FastHashTable(const std::vector<const Type*> &key_types,
-            void *hash_table_memory,
-            const std::size_t hash_table_memory_size,
-            const bool new_hash_table,
-            const bool hash_table_memory_zeroed,
-            const bool adjust_hashes,
-            const bool use_scalar_literal_hash,
-            const bool preallocate_supported)
+  FastHashTable(const std::vector<const Type *> &key_types,
+                void *hash_table_memory,
+                const std::size_t hash_table_memory_size,
+                const bool new_hash_table,
+                const bool hash_table_memory_zeroed,
+                const bool adjust_hashes,
+                const bool use_scalar_literal_hash,
+                const bool preallocate_supported)
       : key_types_(key_types),
         scalar_key_inline_(true),
         key_inline_(nullptr),
@@ -1169,16 +1170,17 @@ class FastHashTable : public HashTableBase<resizable,
 
   // Helpers for put. If this HashTable is resizable, 'resize_shared_mutex_'
   // should be locked in shared mode before calling either of these methods.
-  virtual HashTablePutResult putInternal(const TypedValue &key,
-                                         const std::size_t variable_key_size,
-                                         const uint8_t &value,
-                                         HashTablePreallocationState *prealloc_state) = 0;
-
-  virtual HashTablePutResult putCompositeKeyInternalFast(const std::vector<TypedValue> &key,
-                                                     const std::size_t variable_key_size,
-                                                     const std::uint8_t *init_value_ptr,
-                                                     HashTablePreallocationState *prealloc_state) = 0;
-
+  virtual HashTablePutResult putInternal(
+      const TypedValue &key,
+      const std::size_t variable_key_size,
+      const std::uint8_t &value,
+      HashTablePreallocationState *prealloc_state) = 0;
+
+  virtual HashTablePutResult putCompositeKeyInternalFast(
+      const std::vector<TypedValue> &key,
+      const std::size_t variable_key_size,
+      const std::uint8_t *init_value_ptr,
+      HashTablePreallocationState *prealloc_state) = 0;
 
   // Helpers for upsert. Both return a pointer to the value corresponding to
   // 'key'. If this HashTable is resizable, 'resize_shared_mutex_' should be
@@ -1186,13 +1188,15 @@ class FastHashTable : public HashTableBase<resizable,
   // return NULL if there is not enough space to insert a new key, in which
   // case a resizable HashTable should release the 'resize_shared_mutex_' and
   // call resize(), then try again.
-  virtual uint8_t* upsertInternalFast(const TypedValue &key,
-                                 const std::size_t variable_key_size,
-                                 const std::uint8_t *init_value_ptr) = 0;
+  virtual std::uint8_t *upsertInternalFast(
+      const TypedValue &key,
+      const std::size_t variable_key_size,
+      const std::uint8_t *init_value_ptr) = 0;
 
-  virtual uint8_t* upsertCompositeKeyInternalFast(const std::vector<TypedValue> &key,
-                                                  const std::uint8_t *init_value_ptr,
-                                                  const std::size_t variable_key_size) = 0;
+  virtual std::uint8_t *upsertCompositeKeyInternalFast(
+      const std::vector<TypedValue> &key,
+      const std::uint8_t *init_value_ptr,
+      const std::size_t variable_key_size) = 0;
 
   // Helpers for forEach. Each returns true on success, false if no more entries
   // exist to iterate over. After a successful call, '*key' is overwritten with
@@ -1200,10 +1204,10 @@ class FastHashTable : public HashTableBase<resizable,
   // '*entry_num' is incremented to the next (implementation defined) entry to
   // check ('*entry_num' should initially be set to zero).
   virtual bool getNextEntry(TypedValue *key,
-                            const uint8_t **value,
+                            const std::uint8_t **value,
                             std::size_t *entry_num) const = 0;
   virtual bool getNextEntryCompositeKey(std::vector<TypedValue> *key,
-                                        const uint8_t **value,
+                                        const std::uint8_t **value,
                                         std::size_t *entry_num) const = 0;
 
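The '*entry_num' protocol these virtuals share can be pictured with a short sketch. They are protected and pure virtual, so this is only an illustration of how forEach-style code in a concrete subclass might drive them, not callable client code:

    // Illustration of the iteration protocol: start at entry 0 and keep
    // calling until false is returned; each successful call fills in the key
    // and payload pointer and advances *entry_num.
    TypedValue key;
    const std::uint8_t *value;
    std::size_t entry_num = 0;
    while (getNextEntry(&key, &value, &entry_num)) {
      // ... inspect 'key' and the payload pointed to by 'value' ...
    }
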
   // Helpers for getAllFromValueAccessor. Each return true on success, false if
@@ -1213,11 +1217,11 @@ class FastHashTable : public HashTableBase<resizable,
   // initially be set to zero).
   virtual bool getNextEntryForKey(const TypedValue &key,
                                   const std::size_t hash_code,
-                                  const uint8_t **value,
+                                  const std::uint8_t **value,
                                   std::size_t *entry_num) const = 0;
   virtual bool getNextEntryForCompositeKey(const std::vector<TypedValue> &key,
                                            const std::size_t hash_code,
-                                           const uint8_t **value,
+                                           const std::uint8_t **value,
                                            std::size_t *entry_num) const = 0;
 
   // Return true if key exists in the hash table.
@@ -1250,15 +1254,17 @@ class FastHashTable : public HashTableBase<resizable,
   // method is intended to support that. Returns true and fills in
   // '*prealloc_state' if pre-allocation was successful. Returns false if a
   // resize() is needed.
-  virtual bool preallocateForBulkInsert(const std::size_t total_entries,
-                                        const std::size_t total_variable_key_size,
-                                        HashTablePreallocationState *prealloc_state) {
-    FATAL_ERROR("Called HashTable::preallocateForBulkInsert() on a HashTable "
-                "implementation that does not support preallocation.");
+  virtual bool preallocateForBulkInsert(
+      const std::size_t total_entries,
+      const std::size_t total_variable_key_size,
+      HashTablePreallocationState *prealloc_state) {
+    FATAL_ERROR(
+        "Called HashTable::preallocateForBulkInsert() on a HashTable "
+        "implementation that does not support preallocation.");
   }
 
   // Type(s) of keys.
-  const std::vector<const Type*> key_types_;
+  const std::vector<const Type *> key_types_;
 
   // Information about whether key components are stored inline or in a
   // separate variable-length storage region. This is usually determined by a
@@ -1275,6 +1281,7 @@ class FastHashTable : public HashTableBase<resizable,
   const bool preallocate_supported_;
 
   const std::vector<AggregationHandle *> handles_;
+  const unsigned int num_handles_;
   const std::size_t total_payload_size_;
   std::vector<std::size_t> payload_offsets_;
 
@@ -1341,12 +1348,11 @@ class FastHashTable : public HashTableBase<resizable,
   bool has_build_side_bloom_filter_ = false;
   bool has_probe_side_bloom_filter_ = false;
   BloomFilter *build_bloom_filter_;
-  std::vector<const BloomFilter*> probe_bloom_filters_;
+  std::vector<const BloomFilter *> probe_bloom_filters_;
   std::vector<std::vector<attribute_id>> probe_attribute_ids_;
   DISALLOW_COPY_AND_ASSIGN(FastHashTable);
 };
 
-
 /**
  * @brief An instantiation of the HashTable template for use in aggregations.
  * @note This has force_key_copy = true, so that we don't have dangling pointers
@@ -1363,11 +1369,11 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-HashTablePutResult FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::put(const TypedValue &key,
-          const uint8_t &value) {
-  const std::size_t variable_size = (force_key_copy && !scalar_key_inline_) ? key.getDataSize()
-                                                                            : 0;
+HashTablePutResult
+FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>::
+    put(const TypedValue &key, const std::uint8_t &value) {
+  const std::size_t variable_size =
+      (force_key_copy && !scalar_key_inline_) ? key.getDataSize() : 0;
   if (resizable) {
     HashTablePutResult result = HashTablePutResult::kOutOfSpace;
     while (result == HashTablePutResult::kOutOfSpace) {
@@ -1389,16 +1395,19 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-HashTablePutResult FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::putCompositeKeyFast(const std::vector<TypedValue> &key,
-                      const std::uint8_t* init_value_ptr) {
-  const std::size_t variable_size = calculateVariableLengthCompositeKeyCopySize(key);
+HashTablePutResult
+FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>::
+    putCompositeKeyFast(const std::vector<TypedValue> &key,
+                        const std::uint8_t *init_value_ptr) {
+  const std::size_t variable_size =
+      calculateVariableLengthCompositeKeyCopySize(key);
   if (resizable) {
     HashTablePutResult result = HashTablePutResult::kOutOfSpace;
     while (result == HashTablePutResult::kOutOfSpace) {
       {
         SpinSharedMutexSharedLock<true> lock(resize_shared_mutex_);
-        result = putCompositeKeyInternalFast(key, variable_size, init_value_ptr, nullptr);
+        result = putCompositeKeyInternalFast(
+            key, variable_size, init_value_ptr, nullptr);
       }
       if (result == HashTablePutResult::kOutOfSpace) {
         resize(0, variable_size);
@@ -1406,21 +1415,22 @@ HashTablePutResult FastHashTable<resizable, serializable, force_key_copy, allow_
     }
     return result;
   } else {
-    return putCompositeKeyInternalFast(key, variable_size, init_value_ptr, nullptr);
+    return putCompositeKeyInternalFast(
+        key, variable_size, init_value_ptr, nullptr);
   }
 }
 
-
 template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
 template <typename FunctorT>
-HashTablePutResult FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::putValueAccessor(ValueAccessor *accessor,
-                       const attribute_id key_attr_id,
-                       const bool check_for_null_keys,
-                       FunctorT *functor) {
+HashTablePutResult
+FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>::
+    putValueAccessor(ValueAccessor *accessor,
+                     const attribute_id key_attr_id,
+                     const bool check_for_null_keys,
+                     FunctorT *functor) {
   HashTablePutResult result = HashTablePutResult::kOutOfSpace;
   std::size_t variable_size;
   HashTablePreallocationState prealloc_state;
@@ -1428,111 +1438,120 @@ HashTablePutResult FastHashTable<resizable, serializable, force_key_copy, allow_
   return InvokeOnAnyValueAccessor(
       accessor,
       [&](auto *accessor) -> HashTablePutResult {  // NOLINT(build/c++11)
-    if (using_prealloc) {
-      std::size_t total_entries = 0;
-      std::size_t total_variable_key_size = 0;
-      if (check_for_null_keys || (force_key_copy && !scalar_key_inline_)) {
-        // If we need to filter out nulls OR make variable copies, make a
-        // prepass over the ValueAccessor.
-        while (accessor->next()) {
-          TypedValue key = accessor->getTypedValue(key_attr_id);
-          if (check_for_null_keys && key.isNull()) {
-            continue;
-          }
-          ++total_entries;
-          total_variable_key_size += (force_key_copy && !scalar_key_inline_) ? key.getDataSize() : 0;
-        }
-        accessor->beginIteration();
-      } else {
-        total_entries = accessor->getNumTuples();
-      }
-      if (resizable) {
-        bool prealloc_succeeded = false;
-        while (!prealloc_succeeded) {
-          {
-            SpinSharedMutexSharedLock<true> lock(resize_shared_mutex_);
-            prealloc_succeeded = this->preallocateForBulkInsert(total_entries,
-                                                                total_variable_key_size,
-                                                                &prealloc_state);
+        if (using_prealloc) {
+          std::size_t total_entries = 0;
+          std::size_t total_variable_key_size = 0;
+          if (check_for_null_keys || (force_key_copy && !scalar_key_inline_)) {
+            // If we need to filter out nulls OR make variable copies, make a
+            // prepass over the ValueAccessor.
+            while (accessor->next()) {
+              TypedValue key = accessor->getTypedValue(key_attr_id);
+              if (check_for_null_keys && key.isNull()) {
+                continue;
+              }
+              ++total_entries;
+              total_variable_key_size += (force_key_copy && !scalar_key_inline_)
+                                             ? key.getDataSize()
+                                             : 0;
+            }
+            accessor->beginIteration();
+          } else {
+            total_entries = accessor->getNumTuples();
           }
-          if (!prealloc_succeeded) {
-            this->resize(total_entries, total_variable_key_size);
+          if (resizable) {
+            bool prealloc_succeeded = false;
+            while (!prealloc_succeeded) {
+              {
+                SpinSharedMutexSharedLock<true> lock(resize_shared_mutex_);
+                prealloc_succeeded = this->preallocateForBulkInsert(
+                    total_entries, total_variable_key_size, &prealloc_state);
+              }
+              if (!prealloc_succeeded) {
+                this->resize(total_entries, total_variable_key_size);
+              }
+            }
+          } else {
+            using_prealloc = this->preallocateForBulkInsert(
+                total_entries, total_variable_key_size, &prealloc_state);
           }
         }
-      } else {
-        using_prealloc = this->preallocateForBulkInsert(total_entries,
-                                                        total_variable_key_size,
-                                                        &prealloc_state);
-      }
-    }
-    std::unique_ptr<BloomFilter> thread_local_bloom_filter;
-    if (has_build_side_bloom_filter_) {
-      thread_local_bloom_filter.reset(new BloomFilter(build_bloom_filter_->getRandomSeed(),
-                                                      build_bloom_filter_->getNumberOfHashes(),
-                                                      build_bloom_filter_->getBitArraySize()));
-    }
-    if (resizable) {
-      while (result == HashTablePutResult::kOutOfSpace) {
-        {
-          result = HashTablePutResult::kOK;
-          SpinSharedMutexSharedLock<true> lock(resize_shared_mutex_);
+        std::unique_ptr<BloomFilter> thread_local_bloom_filter;
+        if (has_build_side_bloom_filter_) {
+          thread_local_bloom_filter.reset(
+              new BloomFilter(build_bloom_filter_->getRandomSeed(),
+                              build_bloom_filter_->getNumberOfHashes(),
+                              build_bloom_filter_->getBitArraySize()));
+        }
+        if (resizable) {
+          while (result == HashTablePutResult::kOutOfSpace) {
+            {
+              result = HashTablePutResult::kOK;
+              SpinSharedMutexSharedLock<true> lock(resize_shared_mutex_);
+              while (accessor->next()) {
+                TypedValue key = accessor->getTypedValue(key_attr_id);
+                if (check_for_null_keys && key.isNull()) {
+                  continue;
+                }
+                variable_size = (force_key_copy && !scalar_key_inline_)
+                                    ? key.getDataSize()
+                                    : 0;
+                result = this->putInternal(
+                    key,
+                    variable_size,
+                    (*functor)(*accessor),
+                    using_prealloc ? &prealloc_state : nullptr);
+                // Insert into bloom filter, if enabled.
+                if (has_build_side_bloom_filter_) {
+                  thread_local_bloom_filter->insertUnSafe(
+                      static_cast<const std::uint8_t *>(key.getDataPtr()),
+                      key.getDataSize());
+                }
+                if (result == HashTablePutResult::kDuplicateKey) {
+                  DEBUG_ASSERT(!using_prealloc);
+                  return result;
+                } else if (result == HashTablePutResult::kOutOfSpace) {
+                  DEBUG_ASSERT(!using_prealloc);
+                  break;
+                }
+              }
+            }
+            if (result == HashTablePutResult::kOutOfSpace) {
+              this->resize(0, variable_size);
+              accessor->previous();
+            }
+          }
+        } else {
           while (accessor->next()) {
             TypedValue key = accessor->getTypedValue(key_attr_id);
             if (check_for_null_keys && key.isNull()) {
               continue;
             }
-            variable_size = (force_key_copy && !scalar_key_inline_) ? key.getDataSize() : 0;
-            result = this->putInternal(key,
-                                       variable_size,
-                                       (*functor)(*accessor),
-                                       using_prealloc ? &prealloc_state : nullptr);
+            variable_size =
+                (force_key_copy && !scalar_key_inline_) ? key.getDataSize() : 0;
+            result =
+                this->putInternal(key,
+                                  variable_size,
+                                  (*functor)(*accessor),
+                                  using_prealloc ? &prealloc_state : nullptr);
             // Insert into bloom filter, if enabled.
             if (has_build_side_bloom_filter_) {
-              thread_local_bloom_filter->insertUnSafe(static_cast<const std::uint8_t *>(key.getDataPtr()),
-                                                      key.getDataSize());
+              thread_local_bloom_filter->insertUnSafe(
+                  static_cast<const std::uint8_t *>(key.getDataPtr()),
+                  key.getDataSize());
             }
-            if (result == HashTablePutResult::kDuplicateKey) {
-              DEBUG_ASSERT(!using_prealloc);
+            if (result != HashTablePutResult::kOK) {
               return result;
-            } else if (result == HashTablePutResult::kOutOfSpace) {
-              DEBUG_ASSERT(!using_prealloc);
-              break;
             }
           }
         }
-        if (result == HashTablePutResult::kOutOfSpace) {
-          this->resize(0, variable_size);
-          accessor->previous();
-        }
-      }
-    } else {
-      while (accessor->next()) {
-        TypedValue key = accessor->getTypedValue(key_attr_id);
-        if (check_for_null_keys && key.isNull()) {
-          continue;
-        }
-        variable_size = (force_key_copy && !scalar_key_inline_) ? key.getDataSize() : 0;
-        result = this->putInternal(key,
-                                   variable_size,
-                                   (*functor)(*accessor),
-                                   using_prealloc ? &prealloc_state : nullptr);
-        // Insert into bloom filter, if enabled.
+        // Update the build-side bloom filter with the thread-local copy, if
+        // available.
         if (has_build_side_bloom_filter_) {
-          thread_local_bloom_filter->insertUnSafe(static_cast<const std::uint8_t *>(key.getDataPtr()),
-                                                  key.getDataSize());
+          build_bloom_filter_->bitwiseOr(thread_local_bloom_filter.get());
         }
-        if (result != HashTablePutResult::kOK) {
-          return result;
-        }
-      }
-    }
-    // Update the build side bloom filter with thread local copy, if available.
-    if (has_build_side_bloom_filter_) {
-      build_bloom_filter_->bitwiseOr(thread_local_bloom_filter.get());
-    }
 
-    return HashTablePutResult::kOK;
-  });
+        return HashTablePutResult::kOK;
+      });
 }
 
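As the call this->putInternal(key, variable_size, (*functor)(*accessor), ...) above indicates, the functor given to putValueAccessor() maps the positioned accessor to the byte value stored for that key. A hedged sketch follows; the struct, the constant payload, and the surrounding names are illustrative assumptions.

    // Hypothetical functor: store the same one-byte marker for every tuple.
    // A real functor would usually derive the payload from the accessor.
    struct ConstantPayload {
      template <typename AccessorT>
      std::uint8_t operator()(const AccessorT &) const {
        return 1;
      }
    };

    ConstantPayload payload_functor;
    const HashTablePutResult result = hash_table.putValueAccessor(
        &accessor, key_attr_id, true /* check_for_null_keys */, &payload_functor);
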
 template <bool resizable,
@@ -1540,11 +1559,12 @@ template <bool resizable,
           bool force_key_copy,
           bool allow_duplicate_keys>
 template <typename FunctorT>
-HashTablePutResult FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::putValueAccessorCompositeKey(ValueAccessor *accessor,
-                                   const std::vector<attribute_id> &key_attr_ids,
-                                   const bool check_for_null_keys,
-                                   FunctorT *functor) {
+HashTablePutResult
+FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>::
+    putValueAccessorCompositeKey(ValueAccessor *accessor,
+                                 const std::vector<attribute_id> &key_attr_ids,
+                                 const bool check_for_null_keys,
+                                 FunctorT *functor) {
   DEBUG_ASSERT(key_types_.size() == key_attr_ids.size());
   HashTablePutResult result = HashTablePutResult::kOutOfSpace;
   std::size_t variable_size;
@@ -1555,50 +1575,79 @@ HashTablePutResult FastHashTable<resizable, serializable, force_key_copy, allow_
   return InvokeOnAnyValueAccessor(
       accessor,
       [&](auto *accessor) -> HashTablePutResult {  // NOLINT(build/c++11)
-    if (using_prealloc) {
-      std::size_t total_entries = 0;
-      std::size_t total_variable_key_size = 0;
-      if (check_for_null_keys || force_key_copy) {
-        // If we need to filter out nulls OR make variable copies, make a
-        // prepass over the ValueAccessor.
-        while (accessor->next()) {
-          if (this->GetCompositeKeyFromValueAccessor(*accessor,
-                                                     key_attr_ids,
-                                                     check_for_null_keys,
-                                                     &key_vector)) {
-            continue;
-          }
-          ++total_entries;
-          total_variable_key_size += this->calculateVariableLengthCompositeKeyCopySize(key_vector);
-        }
-        accessor->beginIteration();
-      } else {
-        total_entries = accessor->getNumTuples();
-      }
-      if (resizable) {
-        bool prealloc_succeeded = false;
-        while (!prealloc_succeeded) {
-          {
-            SpinSharedMutexSharedLock<true> lock(resize_shared_mutex_);
-            prealloc_succeeded = this->preallocateForBulkInsert(total_entries,
-                                                                total_variable_key_size,
-                                                                &prealloc_state);
+        if (using_prealloc) {
+          std::size_t total_entries = 0;
+          std::size_t total_variable_key_size = 0;
+          if (check_for_null_keys || force_key_copy) {
+            // If we need to filter out nulls OR make variable copies, make a
+            // prepass over the ValueAccessor.
+            while (accessor->next()) {
+              if (this->GetCompositeKeyFromValueAccessor(*accessor,
+                                                         key_attr_ids,
+                                                         check_for_null_keys,
+                                                         &key_vector)) {
+                continue;
+              }
+              ++total_entries;
+              total_variable_key_size +=
+                  this->calculateVariableLengthCompositeKeyCopySize(key_vector);
+            }
+            accessor->beginIteration();
+          } else {
+            total_entries = accessor->getNumTuples();
           }
-          if (!prealloc_succeeded) {
-            this->resize(total_entries, total_variable_key_size);
+          if (resizable) {
+            bool prealloc_succeeded = false;
+            while (!prealloc_succeeded) {
+              {
+                SpinSharedMutexSharedLock<true> lock(resize_shared_mutex_);
+                prealloc_succeeded = this->preallocateForBulkInsert(
+                    total_entries, total_variable_key_size, &prealloc_state);
+              }
+              if (!prealloc_succeeded) {
+                this->resize(total_entries, total_variable_key_size);
+              }
+            }
+          } else {
+            using_prealloc = this->preallocateForBulkInsert(
+                total_entries, total_variable_key_size, &prealloc_state);
           }
         }
-      } else {
-        using_prealloc = this->preallocateForBulkInsert(total_entries,
-                                                        total_variable_key_size,
-                                                        &prealloc_state);
-      }
-    }
-    if (resizable) {
-      while (result == HashTablePutResult::kOutOfSpace) {
-        {
-          result = HashTablePutResult::kOK;
-          SpinSharedMutexSharedLock<true> lock(resize_shared_mutex_);
+        if (resizable) {
+          while (result == HashTablePutResult::kOutOfSpace) {
+            {
+              result = HashTablePutResult::kOK;
+              SpinSharedMutexSharedLock<true> lock(resize_shared_mutex_);
+              while (accessor->next()) {
+                if (this->GetCompositeKeyFromValueAccessor(*accessor,
+                                                           key_attr_ids,
+                                                           check_for_null_keys,
+                                                           &key_vector)) {
+                  continue;
+                }
+                variable_size =
+                    this->calculateVariableLengthCompositeKeyCopySize(
+                        key_vector);
+                result = this->putCompositeKeyInternal(
+                    key_vector,
+                    variable_size,
+                    (*functor)(*accessor),
+                    using_prealloc ? &prealloc_state : nullptr);
+                if (result == HashTablePutResult::kDuplicateKey) {
+                  DEBUG_ASSERT(!using_prealloc);
+                  return result;
+                } else if (result == HashTablePutResult::kOutOfSpace) {
+                  DEBUG_ASSERT(!using_prealloc);
+                  break;
+                }
+              }
+            }
+            if (result == HashTablePutResult::kOutOfSpace) {
+              this->resize(0, variable_size);
+              accessor->previous();
+            }
+          }
+        } else {
           while (accessor->next()) {
             if (this->GetCompositeKeyFromValueAccessor(*accessor,
                                                        key_attr_ids,
@@ -1606,46 +1655,21 @@ HashTablePutResult FastHashTable<resizable, serializable, force_key_copy, allow_
                                                        &key_vector)) {
               continue;
             }
-            variable_size = this->calculateVariableLengthCompositeKeyCopySize(key_vector);
-            result = this->putCompositeKeyInternal(key_vector,
-                                                   variable_size,
-                                                   (*functor)(*accessor),
-                                                   using_prealloc ? &prealloc_state : nullptr);
-            if (result == HashTablePutResult::kDuplicateKey) {
-              DEBUG_ASSERT(!using_prealloc);
+            variable_size =
+                this->calculateVariableLengthCompositeKeyCopySize(key_vector);
+            result = this->putCompositeKeyInternal(
+                key_vector,
+                variable_size,
+                (*functor)(*accessor),
+                using_prealloc ? &prealloc_state : nullptr);
+            if (result != HashTablePutResult::kOK) {
               return result;
-            } else if (result == HashTablePutResult::kOutOfSpace) {
-              DEBUG_ASSERT(!using_prealloc);
-              break;
             }
           }
         }
-        if (result == HashTablePutResult::kOutOfSpace) {
-          this->resize(0, variable_size);
-          accessor->previous();
-        }
-      }
-    } else {
-      while (accessor->next()) {
-        if (this->GetCompositeKeyFromValueAccessor(*accessor,
-                                                   key_attr_ids,
-                                                   check_for_null_keys,
-                                                   &key_vector)) {
-          continue;
-        }
-        variable_size = this->calculateVariableLengthCompositeKeyCopySize(key_vector);
-        result = this->putCompositeKeyInternal(key_vector,
-                                               variable_size,
-                                               (*functor)(*accessor),
-                                               using_prealloc ? &prealloc_state : nullptr);
-        if (result != HashTablePutResult::kOK) {
-          return result;
-        }
-      }
-    }
 
-    return HashTablePutResult::kOK;
-  });
+        return HashTablePutResult::kOK;
+      });
 }
 
 template <bool resizable,
@@ -1653,17 +1677,22 @@ template <bool resizable,
           bool force_key_copy,
           bool allow_duplicate_keys>
 template <typename FunctorT>
-bool FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::upsert(const TypedValue &key,
-             const uint8_t *initial_value_ptr,
-             FunctorT *functor) {
+bool FastHashTable<resizable,
+                   serializable,
+                   force_key_copy,
+                   allow_duplicate_keys>::upsert(const TypedValue &key,
+                                                 const std::uint8_t
+                                                     *initial_value_ptr,
+                                                 FunctorT *functor) {
   DEBUG_ASSERT(!allow_duplicate_keys);
-  const std::size_t variable_size = (force_key_copy && !scalar_key_inline_) ? key.getDataSize() : 0;
+  const std::size_t variable_size =
+      (force_key_copy && !scalar_key_inline_) ? key.getDataSize() : 0;
   if (resizable) {
     for (;;) {
       {
         SpinSharedMutexSharedLock<true> resize_lock(resize_shared_mutex_);
-        uint8_t *value = upsertInternalFast(key, variable_size, initial_value_ptr);
+        std::uint8_t *value =
+            upsertInternalFast(key, variable_size, initial_value_ptr);
         if (value != nullptr) {
           (*functor)(value);
           return true;
@@ -1672,7 +1701,8 @@ bool FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys
       resize(0, force_key_copy && !scalar_key_inline_ ? key.getDataSize() : 0);
     }
   } else {
-    uint8_t *value = upsertInternalFast(key, variable_size, initial_value_ptr);
+    std::uint8_t *value =
+        upsertInternalFast(key, variable_size, initial_value_ptr);
     if (value == nullptr) {
       return false;
     } else {
@@ -1691,8 +1721,11 @@ class HashTableMergerFast {
    * @param destination_hash_table The destination hash table into which other
    *        hash tables will be merged.
    **/
-  explicit HashTableMergerFast(AggregationStateHashTableBase *destination_hash_table)
-      : destination_hash_table_(static_cast<FastHashTable<true, false, true, false> *>(destination_hash_table)) {}
+  explicit HashTableMergerFast(
+      AggregationStateHashTableBase *destination_hash_table)
+      : destination_hash_table_(
+            static_cast<FastHashTable<true, false, true, false> *>(
+                destination_hash_table)) {}
 
   /**
    * @brief The operator for the functor.
@@ -1702,8 +1735,8 @@ class HashTableMergerFast {
    *        aggregation hash table.
    **/
   inline void operator()(const std::vector<TypedValue> &group_by_key,
-                         const uint8_t *source_state) {
-    const uint8_t *original_state =
+                         const std::uint8_t *source_state) {
+    const std::uint8_t *original_state =
         destination_hash_table_->getSingleCompositeKey(group_by_key);
     if (original_state != nullptr) {
       // The CHECK is required as upsertCompositeKey can return false if the
@@ -1722,23 +1755,27 @@ class HashTableMergerFast {
   DISALLOW_COPY_AND_ASSIGN(HashTableMergerFast);
 };
 
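A hedged sketch of how a merger like this is typically driven: iterate over a source aggregation hash table and fold each (group-by key, state) pair into the destination. The variable names are illustrative; the iteration assumes the forEachCompositeKeyFast() entry point declared earlier in this header, whose functor receives exactly the (key, state) pair this operator() expects.

    // Illustrative only.
    HashTableMergerFast merger(destination_hash_table);
    static_cast<FastHashTable<true, false, true, false> *>(source_hash_table)
        ->forEachCompositeKeyFast(&merger);
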
-
 template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
 template <typename FunctorT>
-bool FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::upsertCompositeKeyFast(const std::vector<TypedValue> &key,
-                         const std::uint8_t *init_value_ptr,
-                         FunctorT *functor) {
+bool FastHashTable<resizable,
+                   serializable,
+                   force_key_copy,
+                   allow_duplicate_keys>::
+    upsertCompositeKeyFast(const std::vector<TypedValue> &key,
+                           const std::uint8_t *init_value_ptr,
+                           FunctorT *functor) {
   DEBUG_ASSERT(!allow_duplicate_keys);
-  const std::size_t variable_size = calculateVariableLengthCompositeKeyCopySize(key);
+  const std::size_t variable_size =
+      calculateVariableLengthCompositeKeyCopySize(key);
   if (resizable) {
     for (;;) {
       {
         SpinSharedMutexSharedLock<true> resize_lock(resize_shared_mutex_);
-        uint8_t *value = upsertCompositeKeyInternalFast(key, init_value_ptr, variable_size);
+        std::uint8_t *value =
+            upsertCompositeKeyInternalFast(key, init_value_ptr, variable_size);
         if (value != nullptr) {
           (*functor)(value);
           return true;
@@ -1747,7 +1784,8 @@ bool FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys
       resize(0, variable_size);
     }
   } else {
-    uint8_t *value = upsertCompositeKeyInternalFast(key, init_value_ptr, variable_size);
+    std::uint8_t *value =
+        upsertCompositeKeyInternalFast(key, init_value_ptr, variable_size);
     if (value == nullptr) {
       return false;
     } else {
@@ -1762,70 +1800,83 @@ template <bool resizable,
           bool force_key_copy,
           bool allow_duplicate_keys>
 template <typename FunctorT>
-bool FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::upsertCompositeKeyFast(const std::vector<TypedValue> &key,
-                         const std::uint8_t *init_value_ptr,
-                         FunctorT *functor,
-                         int index) {
+bool FastHashTable<resizable,
+                   serializable,
+                   force_key_copy,
+                   allow_duplicate_keys>::
+    upsertCompositeKeyFast(const std::vector<TypedValue> &key,
+                           const std::uint8_t *init_value_ptr,
+                           FunctorT *functor,
+                           int index) {
   DEBUG_ASSERT(!allow_duplicate_keys);
-  const std::size_t variable_size = calculateVariableLengthCompositeKeyCopySize(key);
+  const std::size_t variable_size =
+      calculateVariableLengthCompositeKeyCopySize(key);
   if (resizable) {
     for (;;) {
       {
         SpinSharedMutexSharedLock<true> resize_lock(resize_shared_mutex_);
-        uint8_t *value = upsertCompositeKeyInternalFast(key, init_value_ptr, variable_size);
+        std::uint8_t *value =
+            upsertCompositeKeyInternalFast(key, init_value_ptr, variable_size);
         if (value != nullptr) {
-          (*functor)(value+payload_offsets_[index]);
+          (*functor)(value + payload_offsets_[index]);
           return true;
         }
       }
       resize(0, variable_size);
     }
   } else {
-    uint8_t *value = upsertCompositeKeyInternalFast(key, init_value_ptr, variable_size);
+    std::uint8_t *value =
+        upsertCompositeKeyInternalFast(key, init_value_ptr, variable_size);
     if (value == nullptr) {
       return false;
     } else {
-      (*functor)(value+payload_offsets_[index]);
+      (*functor)(value + payload_offsets_[index]);
       return true;
     }
   }
 }
 
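Because this overload hands the functor value + payload_offsets_[index], the functor sees only the state slot of the index-th aggregation handle rather than the whole payload. A hedged sketch; the lambda assumes, purely for illustration, that the slot holds a plain 64-bit count, and the surrounding names are made up.

    // Hypothetical: bump a COUNT-style state kept in handle slot 0.
    auto bump_count = [](std::uint8_t *state) {
      *reinterpret_cast<std::int64_t *>(state) += 1;
    };
    hash_table.upsertCompositeKeyFast(group_key, init_payload, &bump_count, 0);
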
-
 template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-bool FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::upsertCompositeKeyFast(const std::vector<TypedValue> &key,
-                         const std::uint8_t *init_value_ptr,
-                         const std::uint8_t *source_state) {
+bool FastHashTable<resizable,
+                   serializable,
+                   force_key_copy,
+                   allow_duplicate_keys>::
+    upsertCompositeKeyFast(const std::vector<TypedValue> &key,
+                           const std::uint8_t *init_value_ptr,
+                           const std::uint8_t *source_state) {
   DEBUG_ASSERT(!allow_duplicate_keys);
-  const std::size_t variable_size = calculateVariableLengthCompositeKeyCopySize(key);
+  const std::size_t variable_size =
+      calculateVariableLengthCompositeKeyCopySize(key);
   if (resizable) {
     for (;;) {
       {
         SpinSharedMutexSharedLock<true> resize_lock(resize_shared_mutex_);
-        uint8_t *value = upsertCompositeKeyInternalFast(key, init_value_ptr, variable_size);
+        std::uint8_t *value =
+            upsertCompositeKeyInternalFast(key, init_value_ptr, variable_size);
         if (value != nullptr) {
-            SpinMutexLock lock(*(reinterpret_cast<SpinMutex *>(value)));
-            for (unsigned int k = 0; k < handles_.size(); ++k) {
-                handles_[k]->mergeStatesFast(source_state + payload_offsets_[k], value + payload_offsets_[k]);
-            }
+          SpinMutexLock lock(*(reinterpret_cast<SpinMutex *>(value)));
+          for (unsigned int k = 0; k < num_handles_; ++k) {
+            handles_[k]->mergeStatesFast(source_state + payload_offsets_[k],
+                                         value + payload_offsets_[k]);
+          }
           return true;
         }
       }
       resize(0, variable_size);
     }
   } else {
-    uint8_t *value = upsertCompositeKeyInternalFast(key, init_value_ptr, variable_size);
+    std::uint8_t *value =
+        upsertCompositeKeyInternalFast(key, init_value_ptr, variable_size);
     if (value == nullptr) {
       return false;
     } else {
       SpinMutexLock lock(*(reinterpret_cast<SpinMutex *>(value)));
-      for (unsigned int k = 0; k < handles_.size(); ++k) {
-          handles_[k]->mergeStatesFast(source_state + payload_offsets_[k], value + payload_offsets_[k]);
+      for (unsigned int k = 0; k < num_handles_; ++k) {
+        handles_[k]->mergeStatesFast(source_state + payload_offsets_[k],
+                                     value + payload_offsets_[k]);
       }
       return true;
     }
@@ -1836,86 +1887,102 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-bool FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::upsertValueAccessorFast(const std::vector<std::vector<attribute_id>> &argument_ids,
-                          ValueAccessor *accessor,
-                          const attribute_id key_attr_id,
-                          const bool check_for_null_keys) {
+bool FastHashTable<resizable,
+                   serializable,
+                   force_key_copy,
+                   allow_duplicate_keys>::
+    upsertValueAccessorFast(
+        const std::vector<std::vector<attribute_id>> &argument_ids,
+        ValueAccessor *accessor,
+        const attribute_id key_attr_id,
+        const bool check_for_null_keys) {
   DEBUG_ASSERT(!allow_duplicate_keys);
   std::size_t variable_size;
   std::vector<TypedValue> local;
   return InvokeOnAnyValueAccessor(
       accessor,
       [&](auto *accessor) -> bool {  // NOLINT(build/c++11)
-    if (resizable) {
-      bool continuing = true;
-      while (continuing) {
-        {
-          continuing = false;
-          SpinSharedMutexSharedLock<true> lock(resize_shared_mutex_);
+        if (resizable) {
+          bool continuing = true;
+          while (continuing) {
+            {
+              continuing = false;
+              SpinSharedMutexSharedLock<true> lock(resize_shared_mutex_);
+              while (accessor->next()) {
+                TypedValue key = accessor->getTypedValue(key_attr_id);
+                if (check_for_null_keys && key.isNull()) {
+                  continue;
+                }
+                variable_size = (force_key_copy && !scalar_key_inline_)
+                                    ? key.getDataSize()
+                                    : 0;
+                std::uint8_t *value =
+                    this->upsertInternalFast(key, variable_size, nullptr);
+                if (value == nullptr) {
+                  continuing = true;
+                  break;
+                } else {
+                  SpinMutexLock lock(*(reinterpret_cast<SpinMutex *>(value)));
+                  for (unsigned int k = 0; k < num_handles_; ++k) {
+                    local.clear();
+                    if (argument_ids[k].size()) {
+                      local.emplace_back(
+                          accessor->getTypedValue(argument_ids[k].front()));
+                    }
+                    handles_[k]->updateState(local,
+                                             value + payload_offsets_[k]);
+                  }
+                }
+              }
+            }
+            if (continuing) {
+              this->resize(0, variable_size);
+              accessor->previous();
+            }
+          }
+        } else {
           while (accessor->next()) {
             TypedValue key = accessor->getTypedValue(key_attr_id);
             if (check_for_null_keys && key.isNull()) {
               continue;
             }
-            variable_size = (force_key_copy && !scalar_key_inline_) ? key.getDataSize() : 0;
-            uint8_t *value = this->upsertInternalFast(key, variable_size, nullptr);
+            variable_size =
+                (force_key_copy && !scalar_key_inline_) ? key.getDataSize() : 0;
+            std::uint8_t *value =
+                this->upsertInternalFast(key, variable_size, nullptr);
             if (value == nullptr) {
-              continuing = true;
-              break;
+              return false;
             } else {
               SpinMutexLock lock(*(reinterpret_cast<SpinMutex *>(value)));
-              for (unsigned int k = 0; k < handles_.size(); ++k) {
-                  local.clear();
-                  if (argument_ids[k].size()) {
-                    local.emplace_back(accessor->getTypedValue(argument_ids[k].front()));
-                  }
-                  handles_[k]->iterateInlFast(local, value + payload_offsets_[k]);
+              for (unsigned int k = 0; k < num_handles_; ++k) {
+                local.clear();
+                if (argument_ids[k].size()) {
+                  local.emplace_back(
+                      accessor->getTypedValue(argument_ids[k].front()));
+                }
+                handles_[k]->updateState(local, value + payload_offsets_[k]);
               }
             }
           }
         }
-        if (continuing) {
-          this->resize(0, variable_size);
-          accessor->previous();
-        }
-      }
-    } else {
-      while (accessor->next()) {
-        TypedValue key = accessor->getTypedValue(key_attr_id);
-        if (check_for_null_keys && key.isNull()) {
-          continue;
-        }
-        variable_size = (force_key_copy && !scalar_key_inline_) ? key.getDataSize() : 0;
-        uint8_t *value = this->upsertInternalFast(key, variable_size, nullptr);
-        if (value == nullptr) {
-          return false;
-        } else {
-          SpinMutexLock lock(*(reinterpret_cast<SpinMutex *>(value)));
-          for (unsigned int k = 0; k < handles_.size(); ++k) {
-              local.clear();
-              if (argument_ids[k].size()) {
-                 local.emplace_back(accessor->getTypedValue(argument_ids[k].front()));
-              }
-              handles_[k]->iterateInlFast(local, value + payload_offsets_[k]);
-          }
-        }
-      }
-    }
 
-    return true;
-  });
+        return true;
+      });
 }
 
 template <bool resizable,
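The argument_ids parameter carries one (possibly empty) vector per aggregation handle, in the same order as handles_, and only the first attribute of each vector is read. For example, with the hypothetical aggregates SUM(x) and COUNT(*), where x is attribute 3 of the input (all names and ids here are illustrative):

    // argument_ids[0] feeds handles_[0] (SUM(x) reads attribute 3);
    // argument_ids[1] is empty because COUNT(*) takes no argument.
    const std::vector<std::vector<attribute_id>> argument_ids = {{3}, {}};
    hash_table.upsertValueAccessorFast(
        argument_ids, &accessor, key_attr_id, true /* check_for_null_keys */);
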
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-bool FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::upsertValueAccessorCompositeKeyFast(const std::vector<std::vector<attribute_id>> &argument_ids,
-                                      ValueAccessor *accessor,
-                                      const std::vector<attribute_id> &key_attr_ids,
-                                      const bool check_for_null_keys) {
+bool FastHashTable<resizable,
+                   serializable,
+                   force_key_copy,
+                   allow_duplicate_keys>::
+    upsertValueAccessorCompositeKeyFast(
+        const std::vector<std::vector<attribute_id>> &argument_ids,
+        ValueAccessor *accessor,
+        const std::vector<attribute_id> &key_attr_ids,
+        const bool check_for_null_keys) {
   DEBUG_ASSERT(!allow_duplicate_keys);
   std::size_t variable_size;
   std::vector<TypedValue> key_vector;
@@ -1924,12 +1991,47 @@ bool FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys
   return InvokeOnAnyValueAccessor(
       accessor,
       [&](auto *accessor) -> bool {  // NOLINT(build/c++11)
-    if (resizable) {
-      bool continuing = true;
-      while (continuing) {
-        {
-          continuing = false;
-          SpinSharedMutexSharedLock<true> lock(resize_shared_mutex_);
+        if (resizable) {
+          bool continuing = true;
+          while (continuing) {
+            {
+              continuing = false;
+              SpinSharedMutexSharedLock<true> lock(resize_shared_mutex_);
+              while (accessor->next()) {
+                if (this->GetCompositeKeyFromValueAccessor(*accessor,
+                                                           key_attr_ids,
+                                                           check_for_null_keys,
+                                                           &key_vector)) {
+                  continue;
+                }
+                variable_size =
+                    this->calculateVariableLengthCompositeKeyCopySize(
+                        key_vector);
+                std::uint8_t *value = this->upsertCompositeKeyInternalFast(
+                    key_vector, nullptr, variable_size);
+                if (value == nullptr) {
+                  continuing = true;
+                  break;
+                } else {
+                  SpinMutexLock lock(*(reinterpret_cast<SpinMutex *>(value)));
+                  for (unsigned int k = 0; k < num_handles_; ++k) {
+                    local.clear();
+                    if (argument_ids[k].size()) {
+                      local.emplace_back(
+                          accessor->getTypedValue(argument_ids[k].front()));
+                    }
+                    handles_[k]->updateState(local,
+                                             value + payload_offsets_[k]);
+                  }
+                }
+              }
+            }
+            if (continuing) {
+              this->resize(0, variable_size);
+              accessor->previous();
+            }
+          }
+        } else {
           while (accessor->next()) {
             if (this->GetCompositeKeyFromValueAccessor(*accessor,
                                                        key_attr_ids,
@@ -1937,59 +2039,28 @@ bool FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys
                                                        &key_vector)) {
               continue;
             }
-            variable_size = this->calculateVariableLengthCompositeKeyCopySize(key_vector);
-            uint8_t *value = this->upsertCompositeKeyInternalFast(key_vector,
-                                                             nullptr,
-                                                             variable_size);
+            variable_size =
+                this->calculateVariableLengthCompositeKeyCopySize(key_vector);
+            std::uint8_t *value = this->upsertCompositeKeyInternalFast(
+                key_vector, nullptr, variable_size);
             if (value == nullptr) {
-              continuing = true;
-              break;
+              return false;
             } else {
               SpinMutexLock lock(*(reinterpret_cast<SpinMutex *>(value)));
-              for (unsigned int k = 0; k < handles_.size(); ++k) {
-                  local.clear();
-                  if (argument_ids[k].size()) {
-                      local.emplace_back(accessor->getTypedValue(argument_ids[k].front()));
-                  }
-                  handles_[k]->iterateInlFast(local, value + payload_offsets_[k]);
+              for (unsigned int k = 0; k < num_handles_; ++k) {
+                local.clear();
+                if (argument_ids[k].size()) {
+                  local.emplace_back(
+                      accessor->getTypedValue(argument_ids[k].front()));
+                }
+                handles_[k]->updateState(local, value + payload_offsets_[k]);
               }
             }
           }
         }
-        if (continuing) {
-          this->resize(0, variable_size);
-          accessor->previous();
-        }
-      }
-    } else {
-      while (accessor->next()) {
-        if (this->GetCompositeKeyFromValueAccessor(*accessor,
-                                                   key_attr_ids,
-                                                   check_for_null_keys,
-                                                   &key_vector)) {
-          continue;
-        }
-        variable_size = this->calculateVariableLengthCompositeKeyCopySize(key_vector);
-        uint8_t *value = this->upsertCompositeKeyInternalFast(key_vector,
-                                                         nullptr,
-                                                         variable_size);
-        if (value == nullptr) {
-          return false;
-        } else {
-          SpinMutexLock lock(*(reinterpret_cast<SpinMutex *>(value)));
-          for (unsigned int k = 0; k < handles_.size(); ++k) {
-              local.clear();
-              if (argument_ids[k].size()) {
-                 local.emplace_back(accessor->getTypedValue(argument_ids[k].front()));
-              }
-              handles_[k]->iterateInlFast(local, value + payload_offsets_[k]);
-          }
-        }
-      }
-    }
 
-    return true;
-  });
+        return true;
+      });
 }
 
 template <bool resizable,
@@ -1997,11 +2068,14 @@ template <bool resizable,
           bool force_key_copy,
           bool allow_duplicate_keys>
 template <typename FunctorT>
-void FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::getAllFromValueAccessor(ValueAccessor *accessor,
-                              const attribute_id key_attr_id,
-                              const bool check_for_null_keys,
-                              FunctorT *functor) const {
+void FastHashTable<resizable,
+                   serializable,
+                   force_key_copy,
+                   allow_duplicate_keys>::
+    getAllFromValueAccessor(ValueAccessor *accessor,
+                            const attribute_id key_attr_id,
+                            const bool check_for_null_keys,
+                            FunctorT *functor) const {
   // Pass through to method with additional template parameters for less
   // branching in inner loop.
   if (check_for_null_keys) {
@@ -2048,45 +2122,52 @@ template <bool resizable,
           bool force_key_copy,
           bool allow_duplicate_keys>
 template <typename FunctorT>
-void FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::getAllFromValueAccessorCompositeKey(ValueAccessor *accessor,
-                                          const std::vector<attribute_id> &key_attr_ids,
-                                          const bool check_for_null_keys,
-                                          FunctorT *functor) const {
+void FastHashTable<resizable,
+                   serializable,
+                   force_key_copy,
+                   allow_duplicate_keys>::
+    getAllFromValueAccessorCompositeKey(
+        ValueAccessor *accessor,
+        const std::vector<attribute_id> &key_attr_ids,
+        const bool check_for_null_keys,
+        FunctorT *functor) const {
   DEBUG_ASSERT(key_types_.size() == key_attr_ids.size());
   std::vector<TypedValue> key_vector;
   key_vector.resize(key_attr_ids.size());
   InvokeOnAnyValueAccessor(
       accessor,
       [&](auto *accessor) -> void {  // NOLINT(build/c++11)
-    while (accessor->next()) {
-      bool null_key = false;
-      for (std::vector<attribute_id>::size_type key_idx = 0;
-           key_idx < key_types_.size();
-           ++key_idx) {
-        key_vector[key_idx] = accessor->getTypedValue(key_attr_ids[key_idx]);
-        if (check_for_null_keys && key_vector[key_idx].isNull()) {
-          null_key = true;
-          break;
-        }
-      }
-      if (null_key) {
-        continue;
-      }
+        while (accessor->next()) {
+          bool null_key = false;
+          for (std::vector<attribute_id>::size_type key_idx = 0;
+               key_idx < key_types_.size();
+               ++key_idx) {
+            key_vector[key_idx] =
+                accessor->getTypedValue(key_attr_ids[key_idx]);
+            if (check_for_null_keys && key_vector[key_idx].isNull()) {
+              null_key = true;
+              break;
+            }
+          }
+          if (null_key) {
+            continue;
+          }
 
-      const std::size_t hash_code
-          = adjust_hashes_ ? this->AdjustHash(this->hashCompositeKey(key_vector))
-                           : this->hashCompositeKey(key_vector);
-      std::size_t entry_num = 0;
-      const uint8_t *value;
-      while (this->getNextEntryForCompositeKey(key_vector, hash_code, &value, &entry_num)) {
-        (*functor)(*accessor, *value);
-        if (!allow_duplicate_keys) {
-          break;
+          const std::size_t hash_code =
+              adjust_hashes_
+                  ? this->AdjustHash(this->hashCompositeKey(key_vector))
+                  : this->hashCompositeKey(key_vector);
+          std::size_t entry_num = 0;
+          const std::uint8_t *value;
+          while (this->getNextEntryForCompositeKey(
+              key_vector, hash_code, &value, &entry_num)) {
+            (*functor)(*accessor, *value);
+            if (!allow_duplicate_keys) {
+              break;
+            }
+          }
         }
-      }
-    }
-  });
+      });
 }
 
 template <bool resizable,
@@ -2095,9 +2176,9 @@ template <bool resizable,
           bool allow_duplicate_keys>
 template <typename FunctorT>
 void FastHashTable<resizable,
-               serializable,
-               force_key_copy,
-               allow_duplicate_keys>::
+                   serializable,
+                   force_key_copy,
+                   allow_duplicate_keys>::
     getAllFromValueAccessorWithExtraWorkForFirstMatch(
         ValueAccessor *accessor,
         const attribute_id key_attr_id,
@@ -2106,29 +2187,34 @@ void FastHashTable<resizable,
   InvokeOnAnyValueAccessor(
       accessor,
       [&](auto *accessor) -> void {  // NOLINT(build/c++11)
-    while (accessor->next()) {
-      TypedValue key = accessor->getTypedValue(key_attr_id);
-      if (check_for_null_keys && key.isNull()) {
-        continue;
-      }
-      const std::size_t hash_code =
-          adjust_hashes_ ? FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-                               ::AdjustHash(key.getHash())
-                         : key.getHash();
-      std::size_t entry_num = 0;
-      const uint8_t *value;
-      if (this->getNextEntryForKey(key, hash_code, &value, &entry_num)) {
-        functor->recordMatch(*accessor);
-        (*functor)(*accessor, *value);
-        if (!allow_duplicate_keys) {
-           continue;
-        }
-        while (this->getNextEntryForKey(key, hash_code, &value, &entry_num)) {
-          (*functor)(*accessor, *value);
+        while (accessor->next()) {
+          TypedValue key = accessor->getTypedValue(key_attr_id);
+          if (check_for_null_keys && key.isNull()) {
+            continue;
+          }
+          const std::size_t hash_code =
+              adjust_hashes_
+                  ? FastHashTable<
+                        resizable,
+                        serializable,
+                        force_key_copy,
+                        allow_duplicate_keys>::AdjustHash(key.getHash())
+                  : key.getHash();
+          std::size_t entry_num = 0;
+          const std::uint8_t *value;
+          if (this->getNextEntryForKey(key, hash_code, &value, &entry_num)) {
+            functor->recordMatch(*accessor);
+            (*functor)(*accessor, *value);
+            if (!allow_duplicate_keys) {
+              continue;
+            }
+            while (
+                this->getNextEntryForKey(key, hash_code, &value, &entry_num)) {
+              (*functor)(*accessor, *value);
+            }
+          }
         }
-      }
-    }
-  });  // NOLINT(whitespace/parens)
+      });  // NOLINT(whitespace/parens)
 }
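
As the calls to functor->recordMatch(*accessor) and (*functor)(*accessor, *value) above suggest, the functor passed to getAllFromValueAccessorWithExtraWorkForFirstMatch needs two members: recordMatch() runs once per probing row that finds at least one match, and operator() runs for every matching entry. A toy sketch of such a functor follows; ToyAccessor and MatchCollector are invented stand-ins for the real accessor and caller types.

// Standalone sketch of the functor shape expected by
// getAllFromValueAccessorWithExtraWorkForFirstMatch.
#include <cstdint>
#include <iostream>

struct ToyAccessor { int current_row = 0; };  // stand-in for a ValueAccessor

class MatchCollector {
 public:
  // Called once, before the first matching entry for a probing row.
  void recordMatch(const ToyAccessor &accessor) {
    std::cout << "row " << accessor.current_row << " matched at least once\n";
  }
  // Called for every matching entry (first byte of the payload shown here).
  void operator()(const ToyAccessor &accessor, const std::uint8_t &value) {
    std::cout << "  row " << accessor.current_row
              << " -> payload byte " << static_cast<int>(value) << "\n";
  }
};

int main() {
  ToyAccessor accessor;
  MatchCollector collector;
  const std::uint8_t payload = 7;
  collector.recordMatch(accessor);   // the "extra work" on the first match
  collector(accessor, payload);      // then the regular per-match call
}
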
 
 template <bool resizable,
@@ -2136,8 +2222,11 @@ template <bool resizable,
           bool force_key_copy,
           bool allow_duplicate_keys>
 template <typename FunctorT>
-void FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::getAllFromValueAccessorCompositeKeyWithExtraWorkForFirstMatch(
+void FastHashTable<resizable,
+                   serializable,
+                   force_key_copy,
+                   allow_duplicate_keys>::
+    getAllFromValueAccessorCompositeKeyWithExtraWorkForFirstMatch(
         ValueAccessor *accessor,
         const std::vector<attribute_id> &key_attr_ids,
         const bool check_for_null_keys,
@@ -2148,39 +2237,46 @@ void FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys
   InvokeOnAnyValueAccessor(
       accessor,
       [&](auto *accessor) -> void {  // NOLINT(build/c++11)
-    while (accessor->next()) {
-      bool null_key = false;
-      for (std::vector<attribute_id>::size_type key_idx = 0;
-           key_idx < key_types_.size();
-           ++key_idx) {
-        key_vector[key_idx] = accessor->getTypedValue(key_attr_ids[key_idx]);
-        if (check_for_null_keys && key_vector[key_idx].isNull()) {
-          null_key = true;
-          break;
-        }
-      }
-      if (null_key) {
-        continue;
-      }
+        while (accessor->next()) {
+          bool null_key = false;
+          for (std::vector<attribute_id>::size_type key_idx = 0;
+               key_idx < key_types_.size();
+               ++key_idx) {
+            key_vector[key_idx] =
+                accessor->getTypedValue(key_attr_ids[key_idx]);
+            if (check_for_null_keys && key_vector[key_idx].isNull()) {
+              null_key = true;
+              break;
+            }
+          }
+          if (null_key) {
+            continue;
+          }
 
-      const std::size_t hash_code =
-          adjust_hashes_ ? FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-                               ::AdjustHash(this->hashCompositeKey(key_vector))
-                         : this->hashCompositeKey(key_vector);
-      std::size_t entry_num = 0;
-      const uint8_t *value;
-      if (this->getNextEntryForCompositeKey(key_vector, hash_code, &value, &entry_num)) {
-        functor->recordMatch(*accessor);
-        (*functor)(*accessor, *value);
-        if (!allow_duplicate_keys) {
-          continue;
-        }
-        while (this->getNextEntryForCompositeKey(key_vector, hash_code, &value, &entry_num)) {
-          (*functor)(*accessor, *value);
+          const std::size_t hash_code =
+              adjust_hashes_
+                  ? FastHashTable<resizable,
+                                  serializable,
+                                  force_key_copy,
+                                  allow_duplicate_keys>::
+                        AdjustHash(this->hashCompositeKey(key_vector))
+                  : this->hashCompositeKey(key_vector);
+          std::size_t entry_num = 0;
+          const std::uint8_t *value;
+          if (this->getNextEntryForCompositeKey(
+                  key_vector, hash_code, &value, &entry_num)) {
+            functor->recordMatch(*accessor);
+            (*functor)(*accessor, *value);
+            if (!allow_duplicate_keys) {
+              continue;
+            }
+            while (this->getNextEntryForCompositeKey(
+                key_vector, hash_code, &value, &entry_num)) {
+              (*functor)(*accessor, *value);
+            }
+          }
         }
-      }
-    }
-  });  // NOLINT(whitespace/parens)
+      });  // NOLINT(whitespace/parens)
 }
 
 template <bool resizable,
@@ -2189,35 +2285,35 @@ template <bool resizable,
           bool allow_duplicate_keys>
 template <bool run_if_match_found, typename FunctorT>
 void FastHashTable<resizable,
-               serializable,
-               force_key_copy,
-               allow_duplicate_keys>::
+                   serializable,
+                   force_key_copy,
+                   allow_duplicate_keys>::
     runOverKeysFromValueAccessor(ValueAccessor *accessor,
                                  const attribute_id key_attr_id,
                                  const bool check_for_null_keys,
                                  FunctorT *functor) const {
-  InvokeOnAnyValueAccessor(
-      accessor,
-      [&](auto *accessor) -> void {  // NOLINT(build/c++11)
-    while (accessor->next()) {
-      TypedValue key = accessor->getTypedValue(key_attr_id);
-      if (check_for_null_keys && key.isNull()) {
-        if (!run_if_match_found) {
-          (*functor)(*accessor);
-          continue;
-        }
-      }
-      if (run_if_match_found) {
-        if (this->hasKey(key)) {
-          (*functor)(*accessor);
-        }
-      } else {
-        if (!this->hasKey(key)) {
-          (*functor)(*accessor);
-        }
-      }
-    }
-  });  // NOLINT(whitespace/parens)
+  InvokeOnAnyValueAccessor(accessor,
+                           [&](auto *accessor) -> void {  // NOLINT(build/c++11)
+                             while (accessor->next()) {
+                               TypedValue key =
+                                   accessor->getTypedValue(key_attr_id);
+                               if (check_for_null_keys && key.isNull()) {
+                                 if (!run_if_match_found) {
+                                   (*functor)(*accessor);
+                                   continue;
+                                 }
+                               }
+                               if (run_if_match_found) {
+                                 if (this->hasKey(key)) {
+                                   (*functor)(*accessor);
+                                 }
+                               } else {
+                                 if (!this->hasKey(key)) {
+                                   (*functor)(*accessor);
+                                 }
+                               }
+                             }
+                           });  // NOLINT(whitespace/parens)
 }
 
 template <bool resizable,
@@ -2225,44 +2321,49 @@ template <bool resizable,
           bool force_key_copy,
           bool allow_duplicate_keys>
 template <bool run_if_match_found, typename FunctorT>
-void FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::runOverKeysFromValueAccessorCompositeKey(ValueAccessor *accessor,
-                                               const std::vector<attribute_id> &key_attr_ids,
-                                               const bool check_for_null_keys,
-                                               FunctorT *functor) const {
+void FastHashTable<resizable,
+                   serializable,
+                   force_key_copy,
+                   allow_duplicate_keys>::
+    runOverKeysFromValueAccessorCompositeKey(
+        ValueAccessor *accessor,
+        const std::vector<attribute_id> &key_attr_ids,
+        const bool check_for_null_keys,
+        FunctorT *functor) const {
   DEBUG_ASSERT(key_types_.size() == key_attr_ids.size());
   std::vector<TypedValue> key_vector;
   key_vector.resize(key_attr_ids.size());
   InvokeOnAnyValueAccessor(
       accessor,
       [&](auto *accessor) -> void {  // NOLINT(build/c++11)
-    while (accessor->next()) {
-      bool null_key = false;
-      for (std::vector<attribute_id>::size_type key_idx = 0;
-           key_idx < key_types_.size();
-           ++key_idx) {
-        key_vector[key_idx] = accessor->getTypedValue(key_attr_ids[key_idx]);
-        if (check_for_null_keys && key_vector[key_idx].isNull()) {
-          null_key = true;
-          break;
-        }
-      }
-      if (null_key) {
-        if (!run_if_match_found) {
-          (*functor)(*accessor);
-          continue;
-        }
-      }
+        while (accessor->next()) {
+          bool null_key = false;
+          for (std::vector<attribute_id>::size_type key_idx = 0;
+               key_idx < key_types_.size();
+               ++key_idx) {
+            key_vector[key_idx] =
+                accessor->getTypedValue(key_attr_ids[key_idx]);
+            if (check_for_null_keys && key_vector[key_idx].isNull()) {
+              null_key = true;
+              break;
+            }
+          }
+          if (null_key) {
+            if (!run_if_match_found) {
+              (*functor)(*accessor);
+              continue;
+            }
+          }
 
-      if (run_if_match_found) {
-        if (this->hasCompositeKey(key_vector)) {
-          (*functor)(*accessor);
+          if (run_if_match_found) {
+            if (this->hasCompositeKey(key_vector)) {
+              (*functor)(*accessor);
+            }
+          } else if (!this->hasCompositeKey(key_vector)) {
+            (*functor)(*accessor);
+          }
         }
-      } else if (!this->hasCompositeKey(key_vector)) {
-        (*functor)(*accessor);
-      }
-    }
-  });  // NOLINT(whitespace/parens)
+      });  // NOLINT(whitespace/parens)
 }
 
 template <bool resizable,
@@ -2270,12 +2371,13 @@ template <bool resizable,
           bool force_key_copy,
           bool allow_duplicate_keys>
 template <typename FunctorT>
-std::size_t FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::forEach(FunctorT *functor) const {
+std::size_t
+FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>::
+    forEach(FunctorT *functor) const {
   std::size_t entries_visited = 0;
   std::size_t entry_num = 0;
   TypedValue key;
-  const uint8_t *value_ptr;
+  const std::uint8_t *value_ptr;
   while (getNextEntry(&key, &value_ptr, &entry_num)) {
     ++entries_visited;
     (*functor)(key, *value_ptr);
@@ -2288,12 +2390,13 @@ template <bool resizable,
           bool force_key_copy,
           bool allow_duplicate_keys>
 template <typename FunctorT>
-std::size_t FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::forEachCompositeKeyFast(FunctorT *functor) const {
+std::size_t
+FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>::
+    forEachCompositeKeyFast(FunctorT *functor) const {
   std::size_t entries_visited = 0;
   std::size_t entry_num = 0;
   std::vector<TypedValue> key;
-  const uint8_t *value_ptr;
+  const std::uint8_t *value_ptr;
   while (getNextEntryCompositeKey(&key, &value_ptr, &entry_num)) {
     ++entries_visited;
     (*functor)(key, value_ptr);
@@ -2302,22 +2405,21 @@ std::size_t FastHashTable<resizable, serializable, force_key_copy, allow_duplica
   return entries_visited;
 }
 
-
 template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
 template <typename FunctorT>
-std::size_t FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::forEachCompositeKeyFast(FunctorT *functor,
-                              int index) const {
+std::size_t
+FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>::
+    forEachCompositeKeyFast(FunctorT *functor, int index) const {
   std::size_t entries_visited = 0;
   std::size_t entry_num = 0;
   std::vector<TypedValue> key;
-  const uint8_t *value_ptr;
+  const std::uint8_t *value_ptr;
   while (getNextEntryCompositeKey(&key, &value_ptr, &entry_num)) {
     ++entries_visited;
-    (*functor)(key, value_ptr+payload_offsets_[index]);
+    (*functor)(key, value_ptr + payload_offsets_[index]);
     key.clear();
   }
   return entries_visited;
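
The forEachCompositeKeyFast(functor, index) overload above visits every occupied bucket but hands the functor only the payload slice at payload_offsets_[index], so a single aggregate can be read out per group without touching the other handles' state. A standalone analogue of that access pattern; Group, ForEachGroupForHandle, and the offsets vector are invented for the sketch.

// Standalone sketch: iterate grouped payloads and apply a functor to the
// byte range owned by one aggregate, selected by its offset.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

struct Group {
  std::vector<int> key;               // composite group-by key
  std::vector<std::uint8_t> payload;  // packed states of all handles
};

template <typename FunctorT>
std::size_t ForEachGroupForHandle(const std::vector<Group> &groups,
                                  const std::vector<std::size_t> &offsets,
                                  int index,
                                  FunctorT *functor) {
  std::size_t visited = 0;
  for (const Group &group : groups) {
    ++visited;
    // Hand the functor only handle `index`'s slice of the packed payload.
    (*functor)(group.key, group.payload.data() + offsets[index]);
  }
  return visited;
}

int main() {
  std::vector<Group> groups = {{{1, 2}, {0, 0, 0, 0, 9, 0, 0, 0}}};
  std::vector<std::size_t> offsets = {0, 4};  // handle 1 starts at byte 4
  auto print = [](const std::vector<int> &key, const std::uint8_t *state) {
    std::cout << "key(" << key[0] << "," << key[1]
              << ") first state byte = " << static_cast<int>(*state) << "\n";
  };
  std::cout << ForEachGroupForHandle(groups, offsets, 1, &print)
            << " groups visited\n";
}
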
@@ -2327,8 +2429,9 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-inline std::size_t FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::hashCompositeKey(const std::vector<TypedValue> &key) const {
+inline std::size_t
+FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>::
+    hashCompositeKey(const std::vector<TypedValue> &key) const {
   DEBUG_ASSERT(!key.empty());
   DEBUG_ASSERT(key.size() == key_types_.size());
   std::size_t hash = key.front().getHash();
@@ -2344,15 +2447,15 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-inline std::size_t FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::calculateVariableLengthCompositeKeyCopySize(const std::vector<TypedValue> &key) const {
+inline std::size_t
+FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>::
+    calculateVariableLengthCompositeKeyCopySize(
+        const std::vector<TypedValue> &key) const {
   DEBUG_ASSERT(!key.empty());
   DEBUG_ASSERT(key.size() == key_types_.size());
   if (force_key_copy) {
     std::size_t total = 0;
-    for (std::vector<TypedValue>::size_type idx = 0;
-         idx < key.size();
-         ++idx) {
+    for (std::vector<TypedValue>::size_type idx = 0; idx < key.size(); ++idx) {
       if (!(*key_inline_)[idx]) {
         total += key[idx].getDataSize();
       }
@@ -2371,54 +2474,62 @@ template <typename FunctorT,
           bool check_for_null_keys,
           bool adjust_hashes_template,
           bool use_scalar_literal_hash_template>
-void FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::getAllFromValueAccessorImpl(
-        ValueAccessor *accessor,
-        const attribute_id key_attr_id,
-        FunctorT *functor) const {
+void FastHashTable<resizable,
+                   serializable,
+                   force_key_copy,
+                   allow_duplicate_keys>::
+    getAllFromValueAccessorImpl(ValueAccessor *accessor,
+                                const attribute_id key_attr_id,
+                                FunctorT *functor) const {
   InvokeOnAnyValueAccessor(
       accessor,
       [&](auto *accessor) -> void {  // NOLINT(build/c++11)
-    while (accessor->next()) {
-      // Probe any bloom filters, if enabled.
-      if (has_probe_side_bloom_filter_) {
-        DCHECK_EQ(probe_bloom_filters_.size(), probe_attribute_ids_.size());
-        // Check if the key is contained in the BloomFilters or not.
-        bool bloom_miss = false;
-        for (std::size_t i = 0; i < probe_bloom_filters_.size() && !bloom_miss; ++i) {
-          const BloomFilter *bloom_filter = probe_bloom_filters_[i];
-          for (const attribute_id &attr_id : probe_attribute_ids_[i]) {
-            TypedValue bloom_key = accessor->getTypedValue(attr_id);
-            if (!bloom_filter->contains(static_cast<const std::uint8_t*>(bloom_key.getDataPtr()),
-                                        bloom_key.getDataSize())) {
-              bloom_miss = true;
-              break;
+        while (accessor->next()) {
+          // Probe any bloom filters, if enabled.
+          if (has_probe_side_bloom_filter_) {
+            DCHECK_EQ(probe_bloom_filters_.size(), probe_attribute_ids_.size());
+            // Check if the key is contained in the BloomFilters or not.
+            bool bloom_miss = false;
+            for (std::size_t i = 0;
+                 i < probe_bloom_filters_.size() && !bloom_miss;
+                 ++i) {
+              const BloomFilter *bloom_filter = probe_bloom_filters_[i];
+              for (const attribute_id &attr_id : probe_attribute_ids_[i]) {
+                TypedValue bloom_key = accessor->getTypedValue(attr_id);
+                if (!bloom_filter->contains(static_cast<const std::uint8_t *>(
+                                                bloom_key.getDataPtr()),
+                                            bloom_key.getDataSize())) {
+                  bloom_miss = true;
+                  break;
+                }
+              }
+            }
+            if (bloom_miss) {
+              continue;  

<TRUNCATED>
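
For reference, the bloom-filter probe in the truncated hunk above ("Probe any bloom filters, if enabled") follows the standard pattern: if any enabled filter reports the key as definitely absent, the row is skipped before the hash-table lookup is attempted. A self-contained sketch with a toy filter; ToyBloomFilter is not Quickstep's BloomFilter class, whose contains() takes a byte pointer and length as shown above.

// Standalone sketch: rows whose key is definitely absent from the build side
// are skipped before the (more expensive) hash-table probe.
#include <bitset>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>

class ToyBloomFilter {
 public:
  void insert(std::uint64_t key) {
    bits_.set(h1(key) % kBits);
    bits_.set(h2(key) % kBits);
  }
  // May return false positives, never false negatives.
  bool contains(std::uint64_t key) const {
    return bits_.test(h1(key) % kBits) && bits_.test(h2(key) % kBits);
  }

 private:
  static constexpr std::size_t kBits = 1024;
  static std::size_t h1(std::uint64_t k) {
    return std::hash<std::uint64_t>{}(k);
  }
  static std::size_t h2(std::uint64_t k) {
    return std::hash<std::uint64_t>{}(k * 0x9e3779b97f4a7c15ULL);
  }
  std::bitset<kBits> bits_;
};

int main() {
  ToyBloomFilter filter;
  for (std::uint64_t build_key : {3u, 5u, 8u}) filter.insert(build_key);

  std::size_t probes = 0;
  for (std::uint64_t probe_key : {1u, 3u, 4u, 5u, 9u}) {
    if (!filter.contains(probe_key)) {
      continue;  // bloom miss: the key cannot be in the hash table
    }
    ++probes;  // only these keys reach the real hash-table probe
  }
  std::cout << probes << " keys survived the bloom filter\n";
}

Only keys that pass every enabled filter fall through to the getNextEntryForKey()/getNextEntryForCompositeKey() probes, which is the behaviour the bloom_miss/continue pair above preserves.
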