Posted to commits@quickstep.apache.org by hb...@apache.org on 2016/09/20 17:56:51 UTC

[13/20] incubator-quickstep git commit: Modified Aggregation unit test. Ran clang-format.

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/c123bd49/storage/FastSeparateChainingHashTable.hpp
----------------------------------------------------------------------
diff --git a/storage/FastSeparateChainingHashTable.hpp b/storage/FastSeparateChainingHashTable.hpp
index 0670993..886a8ca 100644
--- a/storage/FastSeparateChainingHashTable.hpp
+++ b/storage/FastSeparateChainingHashTable.hpp
@@ -27,8 +27,8 @@
 #include <utility>
 #include <vector>
 
-#include "storage/HashTable.hpp"
 #include "storage/FastHashTable.hpp"
+#include "storage/HashTable.hpp"
 #include "storage/HashTableBase.hpp"
 #include "storage/HashTableKeyManager.hpp"
 #include "storage/StorageBlob.hpp"
@@ -55,43 +55,42 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-class FastSeparateChainingHashTable : public FastHashTable<resizable,
-                                                   serializable,
-                                                   force_key_copy,
-                                                   allow_duplicate_keys> {
+class FastSeparateChainingHashTable
+    : public FastHashTable<resizable,
+                           serializable,
+                           force_key_copy,
+                           allow_duplicate_keys> {
  public:
-  FastSeparateChainingHashTable(const std::vector<const Type*> &key_types,
-                            const std::size_t num_entries,
-                            const std::vector<std::size_t> &payload_sizes,
-                            const std::vector<AggregationHandle *> &handles,
-                            StorageManager *storage_manager);
-
-  FastSeparateChainingHashTable(const std::vector<const Type*> &key_types,
-                            void *hash_table_memory,
-                            const std::size_t hash_table_memory_size,
-                            const bool new_hash_table,
-                            const bool hash_table_memory_zeroed);
+  FastSeparateChainingHashTable(const std::vector<const Type *> &key_types,
+                                const std::size_t num_entries,
+                                const std::vector<std::size_t> &payload_sizes,
+                                const std::vector<AggregationHandle *> &handles,
+                                StorageManager *storage_manager);
+
+  FastSeparateChainingHashTable(const std::vector<const Type *> &key_types,
+                                void *hash_table_memory,
+                                const std::size_t hash_table_memory_size,
+                                const bool new_hash_table,
+                                const bool hash_table_memory_zeroed);
 
   // Delegating constructors for single scalar keys.
   FastSeparateChainingHashTable(const Type &key_type,
-                            const std::size_t num_entries,
-                            StorageManager *storage_manager)
-      : FastSeparateChainingHashTable(std::vector<const Type*>(1, &key_type),
-                                  num_entries,
-                                  storage_manager) {
-  }
+                                const std::size_t num_entries,
+                                StorageManager *storage_manager)
+      : FastSeparateChainingHashTable(std::vector<const Type *>(1, &key_type),
+                                      num_entries,
+                                      storage_manager) {}
 
   FastSeparateChainingHashTable(const Type &key_type,
-                            void *hash_table_memory,
-                            const std::size_t hash_table_memory_size,
-                            const bool new_hash_table,
-                            const bool hash_table_memory_zeroed)
-      : FastSeparateChainingHashTable(std::vector<const Type*>(1, &key_type),
-                                  hash_table_memory,
-                                  hash_table_memory_size,
-                                  new_hash_table,
-                                  hash_table_memory_zeroed) {
-  }
+                                void *hash_table_memory,
+                                const std::size_t hash_table_memory_size,
+                                const bool new_hash_table,
+                                const bool hash_table_memory_zeroed)
+      : FastSeparateChainingHashTable(std::vector<const Type *>(1, &key_type),
+                                      hash_table_memory,
+                                      hash_table_memory_size,
+                                      new_hash_table,
+                                      hash_table_memory_zeroed) {}
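   // (Annotation, not part of this commit -- an illustrative usage sketch:
   // a resizable table keyed on one or more columns could be built through
   // the five-argument constructor above, e.g.
   //   FastSeparateChainingHashTable<true, false, false, false>
   //       table(key_types, 1000, payload_sizes, handles, storage_manager);
   // where 'key_types', 'payload_sizes', 'handles', 'storage_manager', and
   // the template flags are assumed to come from the caller's context.)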
 
   ~FastSeparateChainingHashTable() override {
     DestroyValues(buckets_,
@@ -106,48 +105,54 @@ class FastSeparateChainingHashTable : public FastHashTable<resizable,
     return header_->buckets_allocated.load(std::memory_order_relaxed);
   }
 
-  const uint8_t* getSingle(const TypedValue &key) const override;
-  const uint8_t* getSingleCompositeKey(const std::vector<TypedValue> &key) const override;
-  const uint8_t* getSingleCompositeKey(const std::vector<TypedValue> &key, int index) const override;
+  const std::uint8_t* getSingle(const TypedValue &key) const override;
+  const std::uint8_t* getSingleCompositeKey(
+      const std::vector<TypedValue> &key) const override;
+  const std::uint8_t* getSingleCompositeKey(const std::vector<TypedValue> &key,
+                                            int index) const override;
 
   void getAll(const TypedValue &key,
-              std::vector<const uint8_t*> *values) const override;
-  void getAllCompositeKey(const std::vector<TypedValue> &key,
-                          std::vector<const uint8_t*> *values) const override;
+              std::vector<const std::uint8_t *> *values) const override;
+  void getAllCompositeKey(
+      const std::vector<TypedValue> &key,
+      std::vector<const std::uint8_t *> *values) const override;
 
  protected:
-  HashTablePutResult putInternal(const TypedValue &key,
-                                 const std::size_t variable_key_size,
-                                 const uint8_t &value,
-                                 HashTablePreallocationState *prealloc_state) override;
-
-  HashTablePutResult putCompositeKeyInternalFast(const std::vector<TypedValue> &key,
-                                             const std::size_t variable_key_size,
-                                             const std::uint8_t *init_value_ptr,
-                                             HashTablePreallocationState *prealloc_state) override;
-
-  uint8_t* upsertInternalFast(const TypedValue &key,
-                         const std::size_t variable_key_size,
-                         const std::uint8_t *init_value_ptr) override;
-
-  uint8_t* upsertCompositeKeyInternalFast(const std::vector<TypedValue> &key,
-                                     const std::uint8_t *init_value_ptr,
-                                     const std::size_t variable_key_size) override;
+  HashTablePutResult putInternal(
+      const TypedValue &key,
+      const std::size_t variable_key_size,
+      const std::uint8_t &value,
+      HashTablePreallocationState *prealloc_state) override;
+
+  HashTablePutResult putCompositeKeyInternalFast(
+      const std::vector<TypedValue> &key,
+      const std::size_t variable_key_size,
+      const std::uint8_t *init_value_ptr,
+      HashTablePreallocationState *prealloc_state) override;
+
+  std::uint8_t* upsertInternalFast(const TypedValue &key,
+                                   const std::size_t variable_key_size,
+                                   const std::uint8_t *init_value_ptr) override;
+
+  std::uint8_t* upsertCompositeKeyInternalFast(
+      const std::vector<TypedValue> &key,
+      const std::uint8_t *init_value_ptr,
+      const std::size_t variable_key_size) override;
 
   bool getNextEntry(TypedValue *key,
-                    const uint8_t **value,
+                    const std::uint8_t **value,
                     std::size_t *entry_num) const override;
   bool getNextEntryCompositeKey(std::vector<TypedValue> *key,
-                                const uint8_t **value,
+                                const std::uint8_t **value,
                                 std::size_t *entry_num) const override;
 
   bool getNextEntryForKey(const TypedValue &key,
                           const std::size_t hash_code,
-                          const uint8_t **value,
+                          const std::uint8_t **value,
                           std::size_t *entry_num) const override;
   bool getNextEntryForCompositeKey(const std::vector<TypedValue> &key,
                                    const std::size_t hash_code,
-                                   const uint8_t **value,
+                                   const std::uint8_t **value,
                                    std::size_t *entry_num) const override;
 
   bool hasKey(const TypedValue &key) const override;
@@ -157,15 +162,16 @@ class FastSeparateChainingHashTable : public FastHashTable<resizable,
               const std::size_t extra_variable_storage,
               const std::size_t retry_num = 0) override;
 
-  bool preallocateForBulkInsert(const std::size_t total_entries,
-                                const std::size_t total_variable_key_size,
-                                HashTablePreallocationState *prealloc_state) override;
+  bool preallocateForBulkInsert(
+      const std::size_t total_entries,
+      const std::size_t total_variable_key_size,
+      HashTablePreallocationState *prealloc_state) override;
+
  private:
   struct Header {
     std::size_t num_slots;
     std::size_t num_buckets;
-    alignas(kCacheLineBytes)
-        std::atomic<std::size_t> buckets_allocated;
+    alignas(kCacheLineBytes) std::atomic<std::size_t> buckets_allocated;
     alignas(kCacheLineBytes)
         std::atomic<std::size_t> variable_length_bytes_allocated;
   };
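   // (Annotation, not in the original source: the alignas(kCacheLineBytes)
   // markers above place the two contended atomic counters on separate cache
   // lines, presumably so concurrent inserters bumping 'buckets_allocated'
   // and 'variable_length_bytes_allocated' do not false-share.)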
@@ -179,16 +185,18 @@ class FastSeparateChainingHashTable : public FastHashTable<resizable,
 
   // Round bucket size up to a multiple of kBucketAlignment.
   constexpr std::size_t ComputeBucketSize(const std::size_t fixed_key_size) {
-    return (((kValueOffset + this->total_payload_size_ + fixed_key_size - 1) / kBucketAlignment) + 1)
-           * kBucketAlignment;
+    return (((kValueOffset + this->total_payload_size_ + fixed_key_size - 1) /
+             kBucketAlignment) +
+            1) *
+           kBucketAlignment;
   }
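   // (Annotation, not in the original source -- a worked example with
   // made-up sizes: kValueOffset = 16, total_payload_size_ = 24,
   // fixed_key_size = 8, and kBucketAlignment = 8 give
   // (((16 + 24 + 8 - 1) / 8) + 1) * 8 = (5 + 1) * 8 = 48 bytes per bucket.)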
   // If ValueT is not trivially destructible, invoke its destructor for all
   // values held in the specified buckets (including those in "empty" buckets
   // that were default constructed). If ValueT is trivially destructible, this
   // is a no-op.
   void DestroyValues(void *buckets,
-                            const std::size_t num_buckets,
-                            const std::size_t bucket_size);
+                     const std::size_t num_buckets,
+                     const std::size_t bucket_size);
 
   // Attempt to find an empty bucket to insert 'hash_code' into, starting after
   // '*bucket' in the chain (or, if '*bucket' is NULL, starting from the slot
@@ -201,30 +209,33 @@ class FastSeparateChainingHashTable : public FastHashTable<resizable,
   // attempt to allocate storage for a variable-length key BEFORE allocating a
   // bucket, so that no bucket number below 'header_->num_buckets' is ever
   // deallocated after being allocated.
-  inline bool locateBucketForInsertion(const std::size_t hash_code,
-                                       const std::size_t variable_key_allocation_required,
-                                       void **bucket,
-                                       std::atomic<std::size_t> **pending_chain_ptr,
-                                       std::size_t *pending_chain_ptr_finish_value,
-                                       HashTablePreallocationState *prealloc_state);
+  inline bool locateBucketForInsertion(
+      const std::size_t hash_code,
+      const std::size_t variable_key_allocation_required,
+      void **bucket,
+      std::atomic<std::size_t> **pending_chain_ptr,
+      std::size_t *pending_chain_ptr_finish_value,
+      HashTablePreallocationState *prealloc_state);
 
   // Write a scalar 'key' and its 'hash_code' into the '*bucket', which was
   // found by locateBucketForInsertion(). Assumes that storage for a
   // variable-length key copy (if any) was already allocated by a successful
   // call to allocateVariableLengthKeyStorage().
-  inline void writeScalarKeyToBucket(const TypedValue &key,
-                                     const std::size_t hash_code,
-                                     void *bucket,
-                                     HashTablePreallocationState *prealloc_state);
+  inline void writeScalarKeyToBucket(
+      const TypedValue &key,
+      const std::size_t hash_code,
+      void *bucket,
+      HashTablePreallocationState *prealloc_state);
 
   // Write a composite 'key' and its 'hash_code' into the '*bucket', which was
   // found by locateBucketForInsertion(). Assumes that storage for
   // variable-length key copies (if any) was already allocated by a successful
   // call to allocateVariableLengthKeyStorage().
-  inline void writeCompositeKeyToBucket(const std::vector<TypedValue> &key,
-                                        const std::size_t hash_code,
-                                        void *bucket,
-                                        HashTablePreallocationState *prealloc_state);
+  inline void writeCompositeKeyToBucket(
+      const std::vector<TypedValue> &key,
+      const std::size_t hash_code,
+      void *bucket,
+      HashTablePreallocationState *prealloc_state);
 
   // Determine whether it is actually necessary to resize this hash table.
   // Checks that there is at least one unallocated bucket, and that there is
@@ -275,30 +286,37 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::FastSeparateChainingHashTable(const std::vector<const Type*> &key_types,
-                                const std::size_t num_entries,
-                                const std::vector<std::size_t> &payload_sizes,
-                                const std::vector<AggregationHandle *> &handles,
-                                StorageManager *storage_manager)
-        : FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>(
-              key_types,
-              num_entries,
-              handles,
-              payload_sizes,
-              storage_manager,
-              false,
-              false,
-              true),
-          kBucketAlignment(alignof(std::atomic<std::size_t>)),
-          kValueOffset(sizeof(std::atomic<std::size_t>) + sizeof(std::size_t)),
-          key_manager_(this->key_types_, kValueOffset + this->total_payload_size_),
-          bucket_size_(ComputeBucketSize(key_manager_.getFixedKeySize())) {
-  init_payload_ = static_cast<std::uint8_t *>(calloc(this->total_payload_size_, 1));
+FastSeparateChainingHashTable<resizable,
+                              serializable,
+                              force_key_copy,
+                              allow_duplicate_keys>::
+    FastSeparateChainingHashTable(
+        const std::vector<const Type *> &key_types,
+        const std::size_t num_entries,
+        const std::vector<std::size_t> &payload_sizes,
+        const std::vector<AggregationHandle *> &handles,
+        StorageManager *storage_manager)
+    : FastHashTable<resizable,
+                    serializable,
+                    force_key_copy,
+                    allow_duplicate_keys>(key_types,
+                                          num_entries,
+                                          handles,
+                                          payload_sizes,
+                                          storage_manager,
+                                          false,
+                                          false,
+                                          true),
+      kBucketAlignment(alignof(std::atomic<std::size_t>)),
+      kValueOffset(sizeof(std::atomic<std::size_t>) + sizeof(std::size_t)),
+      key_manager_(this->key_types_, kValueOffset + this->total_payload_size_),
+      bucket_size_(ComputeBucketSize(key_manager_.getFixedKeySize())) {
+  init_payload_ =
+      static_cast<std::uint8_t *>(calloc(this->total_payload_size_, 1));
   int k = 0;
   for (auto handle : handles) {
-      handle->initPayload(init_payload_+this->payload_offsets_[k]);
-      k++;
+    handle->initPayload(init_payload_ + this->payload_offsets_[k]);
+    k++;
   }
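   // (Annotation, not in the original source: each AggregationHandle has now
   // written its blank starting state -- e.g. a zeroed sum or count -- at its
   // offset within 'init_payload_', which upserts later memcpy into freshly
   // allocated buckets.)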
   // Bucket size always rounds up to the alignment requirement of the atomic
   // size_t "next" pointer at the front or a ValueT, whichever is larger.
@@ -308,19 +326,23 @@ FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_dup
   this->setKeyInline(key_manager_.getKeyInline());
 
   // Pick out a prime number of slots and calculate storage requirements.
-  std::size_t num_slots_tmp = get_next_prime_number(num_entries * kHashTableLoadFactor);
-  std::size_t required_memory = sizeof(Header)
-                                + num_slots_tmp * sizeof(std::atomic<std::size_t>)
-                                + (num_slots_tmp / kHashTableLoadFactor)
-                                    * (bucket_size_ + key_manager_.getEstimatedVariableKeySize());
-  std::size_t num_storage_slots = this->storage_manager_->SlotsNeededForBytes(required_memory);
+  std::size_t num_slots_tmp =
+      get_next_prime_number(num_entries * kHashTableLoadFactor);
+  std::size_t required_memory =
+      sizeof(Header) + num_slots_tmp * sizeof(std::atomic<std::size_t>) +
+      (num_slots_tmp / kHashTableLoadFactor) *
+          (bucket_size_ + key_manager_.getEstimatedVariableKeySize());
+  std::size_t num_storage_slots =
+      this->storage_manager_->SlotsNeededForBytes(required_memory);
   if (num_storage_slots == 0) {
-    FATAL_ERROR("Storage requirement for SeparateChainingHashTable "
-                "exceeds maximum allocation size.");
+    FATAL_ERROR(
+        "Storage requirement for SeparateChainingHashTable "
+        "exceeds maximum allocation size.");
   }
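   // (Annotation, not in the original source -- rough arithmetic with an
   // illustrative load factor of 10: num_entries = 1000 would make
   // num_slots_tmp the next prime at or above 10000, so required_memory
   // covers the Header, ~10000 atomic slot words, and ~1000 buckets plus
   // their estimated variable-length key storage.)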
 
   // Get a StorageBlob to hold the hash table.
-  const block_id blob_id = this->storage_manager_->createBlob(num_storage_slots);
+  const block_id blob_id =
+      this->storage_manager_->createBlob(num_storage_slots);
   this->blob_ = this->storage_manager_->getBlobMutable(blob_id);
 
   void *aligned_memory_start = this->blob_->getMemoryMutable();
@@ -328,14 +350,14 @@ FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_dup
   if (align(alignof(Header),
             sizeof(Header),
             aligned_memory_start,
-            available_memory)
-          == nullptr) {
+            available_memory) == nullptr) {
     // With current values from StorageConstants.hpp, this should be
     // impossible. A blob is at least 1 MB, while a Header has alignment
     // requirement of just kCacheLineBytes (64 bytes).
-    FATAL_ERROR("StorageBlob used to hold resizable "
-                "SeparateChainingHashTable is too small to meet alignment "
-                "requirements of SeparateChainingHashTable::Header.");
+    FATAL_ERROR(
+        "StorageBlob used to hold resizable "
+        "SeparateChainingHashTable is too small to meet alignment "
+        "requirements of SeparateChainingHashTable::Header.");
   } else if (aligned_memory_start != this->blob_->getMemoryMutable()) {
     // This should also be impossible, since the StorageManager allocates slots
     // aligned to kCacheLineBytes.
@@ -346,8 +368,9 @@ FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_dup
   }
 
   // Locate the header.
-  header_ = static_cast<Header*>(aligned_memory_start);
-  aligned_memory_start = static_cast<char*>(aligned_memory_start) + sizeof(Header);
+  header_ = static_cast<Header *>(aligned_memory_start);
+  aligned_memory_start =
+      static_cast<char *>(aligned_memory_start) + sizeof(Header);
   available_memory -= sizeof(Header);
 
   // Recompute the number of slots & buckets using the actual available memory.
@@ -355,19 +378,20 @@ FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_dup
   // the storage blob's size. It's also possible (though very unlikely) that we
   // will wind up with fewer buckets than we initially wanted because of screwy
   // alignment requirements for ValueT.
-  std::size_t num_buckets_tmp
-      = available_memory / (kHashTableLoadFactor * sizeof(std::atomic<std::size_t>)
-                            + bucket_size_
-                            + key_manager_.getEstimatedVariableKeySize());
-  num_slots_tmp = get_previous_prime_number(num_buckets_tmp * kHashTableLoadFactor);
+  std::size_t num_buckets_tmp =
+      available_memory /
+      (kHashTableLoadFactor * sizeof(std::atomic<std::size_t>) + bucket_size_ +
+       key_manager_.getEstimatedVariableKeySize());
+  num_slots_tmp =
+      get_previous_prime_number(num_buckets_tmp * kHashTableLoadFactor);
   num_buckets_tmp = num_slots_tmp / kHashTableLoadFactor;
   DEBUG_ASSERT(num_slots_tmp > 0);
   DEBUG_ASSERT(num_buckets_tmp > 0);
 
   // Locate the slot array.
-  slots_ = static_cast<std::atomic<std::size_t>*>(aligned_memory_start);
-  aligned_memory_start = static_cast<char*>(aligned_memory_start)
-                         + sizeof(std::atomic<std::size_t>) * num_slots_tmp;
+  slots_ = static_cast<std::atomic<std::size_t> *>(aligned_memory_start);
+  aligned_memory_start = static_cast<char *>(aligned_memory_start) +
+                         sizeof(std::atomic<std::size_t>) * num_slots_tmp;
   available_memory -= sizeof(std::atomic<std::size_t>) * num_slots_tmp;
 
   // Locate the buckets.
@@ -375,17 +399,16 @@ FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_dup
   // Extra-paranoid: If ValueT has an alignment requirement greater than that
   // of std::atomic<std::size_t>, we may need to adjust the start of the bucket
   // array.
-  if (align(kBucketAlignment,
-            bucket_size_,
-            buckets_,
-            available_memory)
-          == nullptr) {
-    FATAL_ERROR("StorageBlob used to hold resizable "
-                "SeparateChainingHashTable is too small to meet "
-                "alignment requirements of buckets.");
+  if (align(kBucketAlignment, bucket_size_, buckets_, available_memory) ==
+      nullptr) {
+    FATAL_ERROR(
+        "StorageBlob used to hold resizable "
+        "SeparateChainingHashTable is too small to meet "
+        "alignment requirements of buckets.");
   } else if (buckets_ != aligned_memory_start) {
-    DEV_WARNING("Bucket array start position adjusted to meet alignment "
-                "requirement for SeparateChainingHashTable's value type.");
+    DEV_WARNING(
+        "Bucket array start position adjusted to meet alignment "
+        "requirement for SeparateChainingHashTable's value type.");
     if (num_buckets_tmp * bucket_size_ > available_memory) {
       --num_buckets_tmp;
     }
@@ -401,7 +424,7 @@ FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_dup
   // Locate variable-length key storage region, and give it all the remaining
   // bytes in the blob.
   key_manager_.setVariableLengthStorageInfo(
-      static_cast<char*>(buckets_) + header_->num_buckets * bucket_size_,
+      static_cast<char *>(buckets_) + header_->num_buckets * bucket_size_,
       available_memory,
       &(header_->variable_length_bytes_allocated));
 }
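 // (Annotation, not in the original source: the blob laid out above reads,
 // left to right, Header | atomic slot array | bucket array |
 // variable-length key storage region.)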
@@ -410,36 +433,43 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::FastSeparateChainingHashTable(const std::vector<const Type*> &key_types,
-                                void *hash_table_memory,
-                                const std::size_t hash_table_memory_size,
-                                const bool new_hash_table,
-                                const bool hash_table_memory_zeroed)
-        : FastHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>(
-              key_types,
-              hash_table_memory,
-              hash_table_memory_size,
-              new_hash_table,
-              hash_table_memory_zeroed,
-              false,
-              false,
-              true),
-          kBucketAlignment(alignof(std::atomic<std::size_t>) < alignof(uint8_t) ? alignof(uint8_t)
-                                                  : alignof(std::atomic<std::size_t>)),
-          kValueOffset(sizeof(std::atomic<std::size_t>) + sizeof(std::size_t)),
-          key_manager_(this->key_types_, kValueOffset + sizeof(uint8_t)),
-          bucket_size_(ComputeBucketSize(key_manager_.getFixedKeySize())) {
+FastSeparateChainingHashTable<resizable,
+                              serializable,
+                              force_key_copy,
+                              allow_duplicate_keys>::
+    FastSeparateChainingHashTable(const std::vector<const Type *> &key_types,
+                                  void *hash_table_memory,
+                                  const std::size_t hash_table_memory_size,
+                                  const bool new_hash_table,
+                                  const bool hash_table_memory_zeroed)
+    : FastHashTable<resizable,
+                    serializable,
+                    force_key_copy,
+                    allow_duplicate_keys>(key_types,
+                                          hash_table_memory,
+                                          hash_table_memory_size,
+                                          new_hash_table,
+                                          hash_table_memory_zeroed,
+                                          false,
+                                          false,
+                                          true),
+      kBucketAlignment(alignof(std::atomic<std::size_t>) < alignof(std::uint8_t)
+                           ? alignof(std::uint8_t)
+                           : alignof(std::atomic<std::size_t>)),
+      kValueOffset(sizeof(std::atomic<std::size_t>) + sizeof(std::size_t)),
+      key_manager_(this->key_types_, kValueOffset + sizeof(std::uint8_t)),
+      bucket_size_(ComputeBucketSize(key_manager_.getFixedKeySize())) {
   // Bucket size always rounds up to the alignment requirement of the atomic
   // size_t "next" pointer at the front or a ValueT, whichever is larger.
   //
   // Make sure that the larger of the two alignment requirements also satisfies
   // the smaller.
-  static_assert(alignof(std::atomic<std::size_t>) < alignof(uint8_t)
-                    ? alignof(uint8_t) % alignof(std::atomic<std::size_t>) == 0
-                    : alignof(std::atomic<std::size_t>) % alignof(uint8_t) == 0,
-                "Alignment requirement of std::atomic<std::size_t> does not "
-                "evenly divide with alignment requirement of ValueT.");
+  static_assert(
+      alignof(std::atomic<std::size_t>) < alignof(std::uint8_t)
+          ? alignof(std::uint8_t) % alignof(std::atomic<std::size_t>) == 0
+          : alignof(std::atomic<std::size_t>) % alignof(std::uint8_t) == 0,
+      "Alignment requirement of std::atomic<std::size_t> does not "
+      "evenly divide with alignment requirement of ValueT.");
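   // (Annotation, not in the original source: on a typical LP64 platform
   // alignof(std::atomic<std::size_t>) == 8 and alignof(std::uint8_t) == 1,
   // so the assertion reduces to 8 % 1 == 0 and holds.)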
 
   // Give base HashTable information about what key components are stored
   // inline from 'key_manager_'.
@@ -460,12 +490,13 @@ FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_dup
   if (align(alignof(Header),
             sizeof(Header),
             aligned_memory_start,
-            available_memory)
-          == nullptr) {
+            available_memory) == nullptr) {
     FATAL_ERROR("Attempted to create a non-resizable "
                 << "SeparateChainingHashTable with "
-                << available_memory << " bytes of memory at "
-                << aligned_memory_start << " which either can not fit a "
+                << available_memory
+                << " bytes of memory at "
+                << aligned_memory_start
+                << " which either can not fit a "
                 << "SeparateChainingHashTable::Header or meet its alignement "
                 << "requirement.");
   } else if (aligned_memory_start != this->hash_table_memory_) {
@@ -477,32 +508,36 @@ FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_dup
                 << "SeparateChainingHashTable::Header.");
   }
 
-  header_ = static_cast<Header*>(aligned_memory_start);
-  aligned_memory_start = static_cast<char*>(aligned_memory_start) + sizeof(Header);
+  header_ = static_cast<Header *>(aligned_memory_start);
+  aligned_memory_start =
+      static_cast<char *>(aligned_memory_start) + sizeof(Header);
   available_memory -= sizeof(Header);
 
   if (new_hash_table) {
-    std::size_t estimated_bucket_capacity
-        = available_memory / (kHashTableLoadFactor * sizeof(std::atomic<std::size_t>)
-                              + bucket_size_
-                              + key_manager_.getEstimatedVariableKeySize());
-    std::size_t num_slots = get_previous_prime_number(estimated_bucket_capacity * kHashTableLoadFactor);
+    std::size_t estimated_bucket_capacity =
+        available_memory /
+        (kHashTableLoadFactor * sizeof(std::atomic<std::size_t>) +
+         bucket_size_ + key_manager_.getEstimatedVariableKeySize());
+    std::size_t num_slots = get_previous_prime_number(
+        estimated_bucket_capacity * kHashTableLoadFactor);
 
     // Fill in the header.
     header_->num_slots = num_slots;
     header_->num_buckets = num_slots / kHashTableLoadFactor;
     header_->buckets_allocated.store(0, std::memory_order_relaxed);
-    header_->variable_length_bytes_allocated.store(0, std::memory_order_relaxed);
+    header_->variable_length_bytes_allocated.store(0,
+                                                   std::memory_order_relaxed);
   }
 
   // Locate the slot array.
-  slots_ = static_cast<std::atomic<std::size_t>*>(aligned_memory_start);
-  aligned_memory_start = static_cast<char*>(aligned_memory_start)
-                         + sizeof(std::atomic<std::size_t>) * header_->num_slots;
+  slots_ = static_cast<std::atomic<std::size_t> *>(aligned_memory_start);
+  aligned_memory_start = static_cast<char *>(aligned_memory_start) +
+                         sizeof(std::atomic<std::size_t>) * header_->num_slots;
   available_memory -= sizeof(std::atomic<std::size_t>) * header_->num_slots;
 
   if (new_hash_table && !hash_table_memory_zeroed) {
-    std::memset(slots_, 0x0, sizeof(std::atomic<std::size_t>) * header_->num_slots);
+    std::memset(
+        slots_, 0x0, sizeof(std::atomic<std::size_t>) * header_->num_slots);
   }
 
   // Locate the buckets.
@@ -510,20 +545,20 @@ FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_dup
   // Extra-paranoid: sizeof(Header) should almost certainly be a multiple of
   // kBucketAlignment, unless ValueT has some members with seriously big
   // (> kCacheLineBytes) alignment requirements specified using alignas().
-  if (align(kBucketAlignment,
-            bucket_size_,
-            buckets_,
-            available_memory)
-          == nullptr) {
+  if (align(kBucketAlignment, bucket_size_, buckets_, available_memory) ==
+      nullptr) {
     FATAL_ERROR("Attempted to create a non-resizable "
                 << "SeparateChainingHashTable with "
-                << this->hash_table_memory_size_ << " bytes of memory at "
-                << this->hash_table_memory_ << ", which can hold an aligned "
+                << this->hash_table_memory_size_
+                << " bytes of memory at "
+                << this->hash_table_memory_
+                << ", which can hold an aligned "
                 << "SeparateChainingHashTable::Header but does not have "
                 << "enough remaining space for even a single hash bucket.");
   } else if (buckets_ != aligned_memory_start) {
-    DEV_WARNING("Bucket array start position adjusted to meet alignment "
-                "requirement for SeparateChainingHashTable's value type.");
+    DEV_WARNING(
+        "Bucket array start position adjusted to meet alignment "
+        "requirement for SeparateChainingHashTable's value type.");
     if (header_->num_buckets * bucket_size_ > available_memory) {
       DEBUG_ASSERT(new_hash_table);
       --(header_->num_buckets);
@@ -538,7 +573,7 @@ FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_dup
 
   // Locate variable-length key storage region.
   key_manager_.setVariableLengthStorageInfo(
-      static_cast<char*>(buckets_) + header_->num_buckets * bucket_size_,
+      static_cast<char *>(buckets_) + header_->num_buckets * bucket_size_,
       available_memory,
       &(header_->variable_length_bytes_allocated));
 }
@@ -547,16 +582,18 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-void FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::clear() {
-  const std::size_t used_buckets = header_->buckets_allocated.load(std::memory_order_relaxed);
+void FastSeparateChainingHashTable<resizable,
+                                   serializable,
+                                   force_key_copy,
+                                   allow_duplicate_keys>::clear() {
+  const std::size_t used_buckets =
+      header_->buckets_allocated.load(std::memory_order_relaxed);
   // Destroy existing values, if necessary.
-  DestroyValues(buckets_,
-                used_buckets,
-                bucket_size_);
+  DestroyValues(buckets_, used_buckets, bucket_size_);
 
   // Zero-out slot array.
-  std::memset(slots_, 0x0, sizeof(std::atomic<std::size_t>) * header_->num_slots);
+  std::memset(
+      slots_, 0x0, sizeof(std::atomic<std::size_t>) * header_->num_slots);
 
   // Zero-out used buckets.
   std::memset(buckets_, 0x0, used_buckets * bucket_size_);
@@ -570,24 +607,33 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-const uint8_t* FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::getSingle(const TypedValue &key) const {
+const std::uint8_t* FastSeparateChainingHashTable<
+    resizable,
+    serializable,
+    force_key_copy,
+    allow_duplicate_keys>::getSingle(const TypedValue &key) const {
   DEBUG_ASSERT(!allow_duplicate_keys);
   DEBUG_ASSERT(this->key_types_.size() == 1);
-  DEBUG_ASSERT(key.isPlausibleInstanceOf(this->key_types_.front()->getSignature()));
+  DEBUG_ASSERT(
+      key.isPlausibleInstanceOf(this->key_types_.front()->getSignature()));
 
   const std::size_t hash_code = key.getHash();
-  std::size_t bucket_ref = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
+  std::size_t bucket_ref =
+      slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
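   // (Annotation, not in the original source: a slot or 'next' value of 0
   // terminates the chain, while a nonzero value k names bucket k - 1 --
   // hence the 'bucket_ref - 1' arithmetic below.)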
   while (bucket_ref != 0) {
     DEBUG_ASSERT(bucket_ref != std::numeric_limits<std::size_t>::max());
-    const char *bucket = static_cast<const char*>(buckets_) + (bucket_ref - 1) * bucket_size_;
-    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t*>(
+    const char *bucket =
+        static_cast<const char *>(buckets_) + (bucket_ref - 1) * bucket_size_;
+    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t *>(
         bucket + sizeof(std::atomic<std::size_t>));
-    if ((bucket_hash == hash_code) && key_manager_.scalarKeyCollisionCheck(key, bucket)) {
+    if ((bucket_hash == hash_code) &&
+        key_manager_.scalarKeyCollisionCheck(key, bucket)) {
       // Match located.
-      return reinterpret_cast<const uint8_t*>(bucket + kValueOffset);
+      return reinterpret_cast<const std::uint8_t *>(bucket + kValueOffset);
     }
-    bucket_ref = reinterpret_cast<const std::atomic<std::size_t>*>(bucket)->load(std::memory_order_relaxed);
+    bucket_ref =
+        reinterpret_cast<const std::atomic<std::size_t> *>(bucket)->load(
+            std::memory_order_relaxed);
   }
 
   // Reached the end of the chain and didn't find a match.
@@ -598,23 +644,31 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-const uint8_t* FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::getSingleCompositeKey(const std::vector<TypedValue> &key) const {
+const std::uint8_t* FastSeparateChainingHashTable<resizable,
+                                                  serializable,
+                                                  force_key_copy,
+                                                  allow_duplicate_keys>::
+    getSingleCompositeKey(const std::vector<TypedValue> &key) const {
   DEBUG_ASSERT(!allow_duplicate_keys);
   DEBUG_ASSERT(this->key_types_.size() == key.size());
 
   const std::size_t hash_code = this->hashCompositeKey(key);
-  std::size_t bucket_ref = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
+  std::size_t bucket_ref =
+      slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
   while (bucket_ref != 0) {
     DEBUG_ASSERT(bucket_ref != std::numeric_limits<std::size_t>::max());
-    const char *bucket = static_cast<const char*>(buckets_) + (bucket_ref - 1) * bucket_size_;
-    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t*>(
+    const char *bucket =
+        static_cast<const char *>(buckets_) + (bucket_ref - 1) * bucket_size_;
+    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t *>(
         bucket + sizeof(std::atomic<std::size_t>));
-    if ((bucket_hash == hash_code) && key_manager_.compositeKeyCollisionCheck(key, bucket)) {
+    if ((bucket_hash == hash_code) &&
+        key_manager_.compositeKeyCollisionCheck(key, bucket)) {
       // Match located.
-      return reinterpret_cast<const uint8_t*>(bucket + kValueOffset);
+      return reinterpret_cast<const std::uint8_t *>(bucket + kValueOffset);
     }
-    bucket_ref = reinterpret_cast<const std::atomic<std::size_t>*>(bucket)->load(std::memory_order_relaxed);
+    bucket_ref =
+        reinterpret_cast<const std::atomic<std::size_t> *>(bucket)->load(
+            std::memory_order_relaxed);
   }
 
   // Reached the end of the chain and didn't find a match.
@@ -625,23 +679,32 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-const uint8_t* FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::getSingleCompositeKey(const std::vector<TypedValue> &key, int index) const {
+const std::uint8_t* FastSeparateChainingHashTable<resizable,
+                                                  serializable,
+                                                  force_key_copy,
+                                                  allow_duplicate_keys>::
+    getSingleCompositeKey(const std::vector<TypedValue> &key, int index) const {
   DEBUG_ASSERT(!allow_duplicate_keys);
   DEBUG_ASSERT(this->key_types_.size() == key.size());
 
   const std::size_t hash_code = this->hashCompositeKey(key);
-  std::size_t bucket_ref = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
+  std::size_t bucket_ref =
+      slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
   while (bucket_ref != 0) {
     DEBUG_ASSERT(bucket_ref != std::numeric_limits<std::size_t>::max());
-    const char *bucket = static_cast<const char*>(buckets_) + (bucket_ref - 1) * bucket_size_;
-    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t*>(
+    const char *bucket =
+        static_cast<const char *>(buckets_) + (bucket_ref - 1) * bucket_size_;
+    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t *>(
         bucket + sizeof(std::atomic<std::size_t>));
-    if ((bucket_hash == hash_code) && key_manager_.compositeKeyCollisionCheck(key, bucket)) {
+    if ((bucket_hash == hash_code) &&
+        key_manager_.compositeKeyCollisionCheck(key, bucket)) {
       // Match located.
-      return reinterpret_cast<const uint8_t*>(bucket + kValueOffset)+this->payload_offsets_[index];
+      return reinterpret_cast<const std::uint8_t *>(bucket + kValueOffset) +
+             this->payload_offsets_[index];
     }
-    bucket_ref = reinterpret_cast<const std::atomic<std::size_t>*>(bucket)->load(std::memory_order_relaxed);
+    bucket_ref =
+        reinterpret_cast<const std::atomic<std::size_t> *>(bucket)->load(
+            std::memory_order_relaxed);
   }
 
   // Reached the end of the chain and didn't find a match.
@@ -652,26 +715,38 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-void FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::getAll(const TypedValue &key, std::vector<const uint8_t*> *values) const {
+void FastSeparateChainingHashTable<
+    resizable,
+    serializable,
+    force_key_copy,
+    allow_duplicate_keys>::getAll(const TypedValue &key,
+                                  std::vector<const std::uint8_t *> *values)
+    const {
   DEBUG_ASSERT(this->key_types_.size() == 1);
-  DEBUG_ASSERT(key.isPlausibleInstanceOf(this->key_types_.front()->getSignature()));
+  DEBUG_ASSERT(
+      key.isPlausibleInstanceOf(this->key_types_.front()->getSignature()));
 
   const std::size_t hash_code = key.getHash();
-  std::size_t bucket_ref = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
+  std::size_t bucket_ref =
+      slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
   while (bucket_ref != 0) {
     DEBUG_ASSERT(bucket_ref != std::numeric_limits<std::size_t>::max());
-    const char *bucket = static_cast<const char*>(buckets_) + (bucket_ref - 1) * bucket_size_;
-    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t*>(
+    const char *bucket =
+        static_cast<const char *>(buckets_) + (bucket_ref - 1) * bucket_size_;
+    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t *>(
         bucket + sizeof(std::atomic<std::size_t>));
-    if ((bucket_hash == hash_code) && key_manager_.scalarKeyCollisionCheck(key, bucket)) {
+    if ((bucket_hash == hash_code) &&
+        key_manager_.scalarKeyCollisionCheck(key, bucket)) {
       // Match located.
-      values->push_back(reinterpret_cast<const uint8_t*>(bucket + kValueOffset));
+      values->push_back(
+          reinterpret_cast<const std::uint8_t *>(bucket + kValueOffset));
       if (!allow_duplicate_keys) {
         return;
       }
     }
-    bucket_ref = reinterpret_cast<const std::atomic<std::size_t>*>(bucket)->load(std::memory_order_relaxed);
+    bucket_ref =
+        reinterpret_cast<const std::atomic<std::size_t> *>(bucket)->load(
+            std::memory_order_relaxed);
   }
 }
 
@@ -679,25 +754,35 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-void FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::getAllCompositeKey(const std::vector<TypedValue> &key, std::vector<const uint8_t*> *values) const {
+void FastSeparateChainingHashTable<resizable,
+                                   serializable,
+                                   force_key_copy,
+                                   allow_duplicate_keys>::
+    getAllCompositeKey(const std::vector<TypedValue> &key,
+                       std::vector<const std::uint8_t *> *values) const {
   DEBUG_ASSERT(this->key_types_.size() == key.size());
 
   const std::size_t hash_code = this->hashCompositeKey(key);
-  std::size_t bucket_ref = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
+  std::size_t bucket_ref =
+      slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
   while (bucket_ref != 0) {
     DEBUG_ASSERT(bucket_ref != std::numeric_limits<std::size_t>::max());
-    const char *bucket = static_cast<const char*>(buckets_) + (bucket_ref - 1) * bucket_size_;
-    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t*>(
+    const char *bucket =
+        static_cast<const char *>(buckets_) + (bucket_ref - 1) * bucket_size_;
+    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t *>(
         bucket + sizeof(std::atomic<std::size_t>));
-    if ((bucket_hash == hash_code) && key_manager_.compositeKeyCollisionCheck(key, bucket)) {
+    if ((bucket_hash == hash_code) &&
+        key_manager_.compositeKeyCollisionCheck(key, bucket)) {
       // Match located.
-      values->push_back(reinterpret_cast<const uint8_t*>(bucket + kValueOffset));
+      values->push_back(
+          reinterpret_cast<const std::uint8_t *>(bucket + kValueOffset));
       if (!allow_duplicate_keys) {
         return;
       }
     }
-    bucket_ref = reinterpret_cast<const std::atomic<std::size_t>*>(bucket)->load(std::memory_order_relaxed);
+    bucket_ref =
+        reinterpret_cast<const std::atomic<std::size_t> *>(bucket)->load(
+            std::memory_order_relaxed);
   }
 }
 
@@ -705,18 +790,22 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-HashTablePutResult
-    FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-        ::putInternal(const TypedValue &key,
-                      const std::size_t variable_key_size,
-                      const uint8_t &value,
-                      HashTablePreallocationState *prealloc_state) {
+HashTablePutResult FastSeparateChainingHashTable<resizable,
+                                                 serializable,
+                                                 force_key_copy,
+                                                 allow_duplicate_keys>::
+    putInternal(const TypedValue &key,
+                const std::size_t variable_key_size,
+                const std::uint8_t &value,
+                HashTablePreallocationState *prealloc_state) {
   DEBUG_ASSERT(this->key_types_.size() == 1);
-  DEBUG_ASSERT(key.isPlausibleInstanceOf(this->key_types_.front()->getSignature()));
+  DEBUG_ASSERT(
+      key.isPlausibleInstanceOf(this->key_types_.front()->getSignature()));
 
   if (prealloc_state == nullptr) {
     // Early check for a free bucket.
-    if (header_->buckets_allocated.load(std::memory_order_relaxed) >= header_->num_buckets) {
+    if (header_->buckets_allocated.load(std::memory_order_relaxed) >=
+        header_->num_buckets) {
       return HashTablePutResult::kOutOfSpace;
     }
 
@@ -763,10 +852,11 @@ HashTablePutResult
   writeScalarKeyToBucket(key, hash_code, bucket, prealloc_state);
 
   // Store the value by using placement new with ValueT's copy constructor.
-  new(static_cast<char*>(bucket) + kValueOffset) uint8_t(value);
+  new (static_cast<char *>(bucket) + kValueOffset) std::uint8_t(value);
 
   // Update the previous chain pointer to point to the new bucket.
-  pending_chain_ptr->store(pending_chain_ptr_finish_value, std::memory_order_release);
+  pending_chain_ptr->store(pending_chain_ptr_finish_value,
+                           std::memory_order_release);
 
   // We're all done.
   return HashTablePutResult::kOK;
@@ -776,17 +866,20 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-HashTablePutResult
-    FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-        ::putCompositeKeyInternalFast(const std::vector<TypedValue> &key,
-                                  const std::size_t variable_key_size,
-                                  const uint8_t *init_value_ptr,
-                                  HashTablePreallocationState *prealloc_state) {
+HashTablePutResult FastSeparateChainingHashTable<resizable,
+                                                 serializable,
+                                                 force_key_copy,
+                                                 allow_duplicate_keys>::
+    putCompositeKeyInternalFast(const std::vector<TypedValue> &key,
+                                const std::size_t variable_key_size,
+                                const std::uint8_t *init_value_ptr,
+                                HashTablePreallocationState *prealloc_state) {
   DEBUG_ASSERT(this->key_types_.size() == key.size());
 
   if (prealloc_state == nullptr) {
     // Early check for a free bucket.
-    if (header_->buckets_allocated.load(std::memory_order_relaxed) >= header_->num_buckets) {
+    if (header_->buckets_allocated.load(std::memory_order_relaxed) >=
+        header_->num_buckets) {
       return HashTablePutResult::kOutOfSpace;
     }
 
@@ -832,12 +925,11 @@ HashTablePutResult
   // Write the key and hash.
   writeCompositeKeyToBucket(key, hash_code, bucket, prealloc_state);
 
-  // Store the value by using placement new with ValueT's copy constructor.
-//  new(static_cast<char*>(bucket) + kValueOffset) uint8_t(value);
-    uint8_t *value = static_cast<uint8_t*>(bucket) + kValueOffset;
-        memcpy(value, init_value_ptr, this->total_payload_size_);
+  std::uint8_t *value = static_cast<std::uint8_t *>(bucket) + kValueOffset;
+  memcpy(value, init_value_ptr, this->total_payload_size_);
   // Update the previous chain pointer to point to the new bucket.
-  pending_chain_ptr->store(pending_chain_ptr_finish_value, std::memory_order_release);
+  pending_chain_ptr->store(pending_chain_ptr_finish_value,
+                           std::memory_order_release);
 
   // We're all done.
   return HashTablePutResult::kOK;
@@ -847,13 +939,17 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-uint8_t* FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::upsertInternalFast(const TypedValue &key,
-                     const std::size_t variable_key_size,
-                     const std::uint8_t *init_value_ptr) {
+std::uint8_t* FastSeparateChainingHashTable<resizable,
+                                            serializable,
+                                            force_key_copy,
+                                            allow_duplicate_keys>::
+    upsertInternalFast(const TypedValue &key,
+                       const std::size_t variable_key_size,
+                       const std::uint8_t *init_value_ptr) {
   DEBUG_ASSERT(!allow_duplicate_keys);
   DEBUG_ASSERT(this->key_types_.size() == 1);
-  DEBUG_ASSERT(key.isPlausibleInstanceOf(this->key_types_.front()->getSignature()));
+  DEBUG_ASSERT(
+      key.isPlausibleInstanceOf(this->key_types_.front()->getSignature()));
 
   if (variable_key_size > 0) {
     // Don't allocate yet, since the key may already be present. However, we
@@ -861,9 +957,11 @@ uint8_t* FastSeparateChainingHashTable<resizable, serializable, force_key_copy,
     // space is big enough to hold the key (at least one must be true: either
     // the key is already present and allocated, or we need to be able to
     // allocate enough space for it).
-    std::size_t allocated_bytes = header_->variable_length_bytes_allocated.load(std::memory_order_relaxed);
-    if ((allocated_bytes < variable_key_size)
-        && (allocated_bytes + variable_key_size > key_manager_.getVariableLengthKeyStorageSize())) {
+    std::size_t allocated_bytes = header_->variable_length_bytes_allocated.load(
+        std::memory_order_relaxed);
+    if ((allocated_bytes < variable_key_size) &&
+        (allocated_bytes + variable_key_size >
+         key_manager_.getVariableLengthKeyStorageSize())) {
       return nullptr;
     }
   }
@@ -886,7 +984,8 @@ uint8_t* FastSeparateChainingHashTable<resizable, serializable, force_key_copy,
       return nullptr;
     } else if (key_manager_.scalarKeyCollisionCheck(key, bucket)) {
       // Found an already-existing entry for this key.
-      return reinterpret_cast<uint8_t*>(static_cast<char*>(bucket) + kValueOffset);
+      return reinterpret_cast<std::uint8_t *>(static_cast<char *>(bucket) +
+                                              kValueOffset);
     }
   }
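   // (Annotation, not in the original source: upsert semantics in brief --
   // an existing key returns its current payload pointer so the caller can
   // aggregate in place; otherwise a new bucket is keyed and seeded below.)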
 
@@ -895,16 +994,15 @@ uint8_t* FastSeparateChainingHashTable<resizable, serializable, force_key_copy,
   writeScalarKeyToBucket(key, hash_code, bucket, nullptr);
 
   // Copy the supplied 'initial_value' into place.
-//  uint8_t *value = new(static_cast<char*>(bucket) + kValueOffset) uint8_t(initial_value);
-
-    uint8_t *value = static_cast<unsigned char*>(bucket) + kValueOffset;
-    if (init_value_ptr == nullptr)
-        memcpy(value, init_payload_, this->total_payload_size_);
-    else
-        memcpy(value, init_value_ptr, this->total_payload_size_);
+  std::uint8_t *value = static_cast<unsigned char *>(bucket) + kValueOffset;
+  if (init_value_ptr == nullptr)
+    memcpy(value, init_payload_, this->total_payload_size_);
+  else
+    memcpy(value, init_value_ptr, this->total_payload_size_);
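   // (Annotation, not in the original source: when init_value_ptr is null,
   // the blank per-handle payload prepared in the constructor,
   // 'init_payload_', seeds the new bucket instead.)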
 
   // Update the previous chain pointer to point to the new bucket.
-  pending_chain_ptr->store(pending_chain_ptr_finish_value, std::memory_order_release);
+  pending_chain_ptr->store(pending_chain_ptr_finish_value,
+                           std::memory_order_release);
 
   // Return the value.
   return value;
@@ -914,10 +1012,13 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-uint8_t* FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::upsertCompositeKeyInternalFast(const std::vector<TypedValue> &key,
-                                 const std::uint8_t *init_value_ptr,
-                                 const std::size_t variable_key_size) {
+std::uint8_t* FastSeparateChainingHashTable<resizable,
+                                            serializable,
+                                            force_key_copy,
+                                            allow_duplicate_keys>::
+    upsertCompositeKeyInternalFast(const std::vector<TypedValue> &key,
+                                   const std::uint8_t *init_value_ptr,
+                                   const std::size_t variable_key_size) {
   DEBUG_ASSERT(!allow_duplicate_keys);
   DEBUG_ASSERT(this->key_types_.size() == key.size());
 
@@ -927,9 +1028,11 @@ uint8_t* FastSeparateChainingHashTable<resizable, serializable, force_key_copy,
     // space is big enough to hold the key (at least one must be true: either
     // the key is already present and allocated, or we need to be able to
     // allocate enough space for it).
-    std::size_t allocated_bytes = header_->variable_length_bytes_allocated.load(std::memory_order_relaxed);
-    if ((allocated_bytes < variable_key_size)
-        && (allocated_bytes + variable_key_size > key_manager_.getVariableLengthKeyStorageSize())) {
+    std::size_t allocated_bytes = header_->variable_length_bytes_allocated.load(
+        std::memory_order_relaxed);
+    if ((allocated_bytes < variable_key_size) &&
+        (allocated_bytes + variable_key_size >
+         key_manager_.getVariableLengthKeyStorageSize())) {
       return nullptr;
     }
   }
@@ -952,7 +1055,8 @@ uint8_t* FastSeparateChainingHashTable<resizable, serializable, force_key_copy,
       return nullptr;
     } else if (key_manager_.compositeKeyCollisionCheck(key, bucket)) {
       // Found an already-existing entry for this key.
-      return reinterpret_cast<uint8_t*>(static_cast<char*>(bucket) + kValueOffset);
+      return reinterpret_cast<std::uint8_t *>(static_cast<char *>(bucket) +
+                                              kValueOffset);
     }
   }
 
@@ -960,17 +1064,16 @@ uint8_t* FastSeparateChainingHashTable<resizable, serializable, force_key_copy,
   // Write the key and hash.
   writeCompositeKeyToBucket(key, hash_code, bucket, nullptr);
 
-//  uint8_t *value;
-//  value = static_cast<unsigned char*>(bucket) + kValueOffset;
-    uint8_t *value = static_cast<unsigned char*>(bucket) + kValueOffset;
-    if (init_value_ptr == nullptr) {
-        memcpy(value, init_payload_, this->total_payload_size_);
-    } else {
-        memcpy(value, init_value_ptr, this->total_payload_size_);
-    }
+  std::uint8_t *value = static_cast<unsigned char *>(bucket) + kValueOffset;
+  if (init_value_ptr == nullptr) {
+    memcpy(value, init_payload_, this->total_payload_size_);
+  } else {
+    memcpy(value, init_value_ptr, this->total_payload_size_);
+  }
 
   // Update the previous chain pointer to point to the new bucket.
-  pending_chain_ptr->store(pending_chain_ptr_finish_value, std::memory_order_release);
+  pending_chain_ptr->store(pending_chain_ptr_finish_value,
+                           std::memory_order_release);
 
   // Return the value.
   return value;
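
Both upsert paths end with the same publication idiom: the key and payload are
written with ordinary stores, and the final store of the chain pointer uses
std::memory_order_release, so a concurrent reader that observes the new chain
value also observes a fully written bucket. Below is a hedged, self-contained
sketch of that writer/reader pairing; the names are hypothetical, and note
that the table above pairs its release store with relaxed loads on the read
side, which presumes additional synchronization between build and probe
phases.

// Hypothetical sketch of release-store publication as used after the
// bucket is filled in above.
#include <atomic>
#include <cstring>

struct Node {
  char key[8];
  char payload[16];
};

std::atomic<Node*> chain_head{nullptr};

void Publish(Node *node, const char *key, const char *payload) {
  std::memcpy(node->key, key, sizeof node->key);          // plain writes...
  std::memcpy(node->payload, payload, sizeof node->payload);
  chain_head.store(node, std::memory_order_release);      // ...published here
}

const char* Observe() {
  // An acquire load is the conventional partner of the release store; it
  // guarantees the Node contents are visible once the pointer is.
  const Node *node = chain_head.load(std::memory_order_acquire);
  return (node == nullptr) ? nullptr : node->payload;
}
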
@@ -980,13 +1083,19 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-bool FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::getNextEntry(TypedValue *key, const uint8_t **value, std::size_t *entry_num) const {
+bool FastSeparateChainingHashTable<
+    resizable,
+    serializable,
+    force_key_copy,
+    allow_duplicate_keys>::getNextEntry(TypedValue *key,
+                                        const std::uint8_t **value,
+                                        std::size_t *entry_num) const {
   DEBUG_ASSERT(this->key_types_.size() == 1);
   if (*entry_num < header_->buckets_allocated.load(std::memory_order_relaxed)) {
-    const char *bucket = static_cast<const char*>(buckets_) + (*entry_num) * bucket_size_;
+    const char *bucket =
+        static_cast<const char *>(buckets_) + (*entry_num) * bucket_size_;
     *key = key_manager_.getKeyComponentTyped(bucket, 0);
-    *value = reinterpret_cast<const uint8_t*>(bucket + kValueOffset);
+    *value = reinterpret_cast<const std::uint8_t *>(bucket + kValueOffset);
     ++(*entry_num);
     return true;
   } else {
@@ -998,18 +1107,22 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-bool FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::getNextEntryCompositeKey(std::vector<TypedValue> *key,
-                               const uint8_t **value,
-                               std::size_t *entry_num) const {
+bool FastSeparateChainingHashTable<resizable,
+                                   serializable,
+                                   force_key_copy,
+                                   allow_duplicate_keys>::
+    getNextEntryCompositeKey(std::vector<TypedValue> *key,
+                             const std::uint8_t **value,
+                             std::size_t *entry_num) const {
   if (*entry_num < header_->buckets_allocated.load(std::memory_order_relaxed)) {
-    const char *bucket = static_cast<const char*>(buckets_) + (*entry_num) * bucket_size_;
-    for (std::vector<const Type*>::size_type key_idx = 0;
+    const char *bucket =
+        static_cast<const char *>(buckets_) + (*entry_num) * bucket_size_;
+    for (std::vector<const Type *>::size_type key_idx = 0;
          key_idx < this->key_types_.size();
          ++key_idx) {
       key->emplace_back(key_manager_.getKeyComponentTyped(bucket, key_idx));
     }
-    *value = reinterpret_cast<const uint8_t*>(bucket + kValueOffset);
+    *value = reinterpret_cast<const std::uint8_t *>(bucket + kValueOffset);
     ++(*entry_num);
     return true;
   } else {
@@ -1021,29 +1134,38 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-bool FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::getNextEntryForKey(const TypedValue &key,
-                         const std::size_t hash_code,
-                         const uint8_t **value,
-                         std::size_t *entry_num) const {
+bool FastSeparateChainingHashTable<
+    resizable,
+    serializable,
+    force_key_copy,
+    allow_duplicate_keys>::getNextEntryForKey(const TypedValue &key,
+                                              const std::size_t hash_code,
+                                              const std::uint8_t **value,
+                                              std::size_t *entry_num) const {
   DEBUG_ASSERT(this->key_types_.size() == 1);
-  DEBUG_ASSERT(key.isPlausibleInstanceOf(this->key_types_.front()->getSignature()));
+  DEBUG_ASSERT(
+      key.isPlausibleInstanceOf(this->key_types_.front()->getSignature()));
 
   if (*entry_num == 0) {
-    *entry_num = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
+    *entry_num =
+        slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
   } else if (*entry_num == std::numeric_limits<std::size_t>::max()) {
     return false;
   }
 
   while (*entry_num != 0) {
     DEBUG_ASSERT(*entry_num != std::numeric_limits<std::size_t>::max());
-    const char *bucket = static_cast<const char*>(buckets_) + (*entry_num - 1) * bucket_size_;
-    *entry_num = reinterpret_cast<const std::atomic<std::size_t>*>(bucket)->load(std::memory_order_relaxed);
-    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t*>(
+    const char *bucket =
+        static_cast<const char *>(buckets_) + (*entry_num - 1) * bucket_size_;
+    *entry_num =
+        reinterpret_cast<const std::atomic<std::size_t> *>(bucket)->load(
+            std::memory_order_relaxed);
+    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t *>(
         bucket + sizeof(std::atomic<std::size_t>));
-    if ((bucket_hash == hash_code) && key_manager_.scalarKeyCollisionCheck(key, bucket)) {
+    if ((bucket_hash == hash_code) &&
+        key_manager_.scalarKeyCollisionCheck(key, bucket)) {
       // Match located.
-      *value = reinterpret_cast<const uint8_t*>(bucket + kValueOffset);
+      *value = reinterpret_cast<const std::uint8_t *>(bucket + kValueOffset);
       if (*entry_num == 0) {
         // If this is the last bucket in the chain, prevent the next call from
         // starting over again.
@@ -1061,28 +1183,36 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-bool FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::getNextEntryForCompositeKey(const std::vector<TypedValue> &key,
-                                  const std::size_t hash_code,
-                                  const uint8_t **value,
-                                  std::size_t *entry_num) const {
+bool FastSeparateChainingHashTable<resizable,
+                                   serializable,
+                                   force_key_copy,
+                                   allow_duplicate_keys>::
+    getNextEntryForCompositeKey(const std::vector<TypedValue> &key,
+                                const std::size_t hash_code,
+                                const std::uint8_t **value,
+                                std::size_t *entry_num) const {
   DEBUG_ASSERT(this->key_types_.size() == key.size());
 
   if (*entry_num == 0) {
-    *entry_num = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
+    *entry_num =
+        slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
   } else if (*entry_num == std::numeric_limits<std::size_t>::max()) {
     return false;
   }
 
   while (*entry_num != 0) {
     DEBUG_ASSERT(*entry_num != std::numeric_limits<std::size_t>::max());
-    const char *bucket = static_cast<const char*>(buckets_) + (*entry_num - 1) * bucket_size_;
-    *entry_num = reinterpret_cast<const std::atomic<std::size_t>*>(bucket)->load(std::memory_order_relaxed);
-    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t*>(
+    const char *bucket =
+        static_cast<const char *>(buckets_) + (*entry_num - 1) * bucket_size_;
+    *entry_num =
+        reinterpret_cast<const std::atomic<std::size_t> *>(bucket)->load(
+            std::memory_order_relaxed);
+    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t *>(
         bucket + sizeof(std::atomic<std::size_t>));
-    if ((bucket_hash == hash_code) && key_manager_.compositeKeyCollisionCheck(key, bucket)) {
+    if ((bucket_hash == hash_code) &&
+        key_manager_.compositeKeyCollisionCheck(key, bucket)) {
       // Match located.
-      *value = reinterpret_cast<const uint8_t*>(bucket + kValueOffset);
+      *value = reinterpret_cast<const std::uint8_t *>(bucket + kValueOffset);
       if (*entry_num == 0) {
         // If this is the last bucket in the chain, prevent the next call from
         // starting over again.
@@ -1100,23 +1230,32 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-bool FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::hasKey(const TypedValue &key) const {
+bool FastSeparateChainingHashTable<
+    resizable,
+    serializable,
+    force_key_copy,
+    allow_duplicate_keys>::hasKey(const TypedValue &key) const {
   DEBUG_ASSERT(this->key_types_.size() == 1);
-  DEBUG_ASSERT(key.isPlausibleInstanceOf(this->key_types_.front()->getSignature()));
+  DEBUG_ASSERT(
+      key.isPlausibleInstanceOf(this->key_types_.front()->getSignature()));
 
   const std::size_t hash_code = key.getHash();
-  std::size_t bucket_ref = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
+  std::size_t bucket_ref =
+      slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
   while (bucket_ref != 0) {
     DEBUG_ASSERT(bucket_ref != std::numeric_limits<std::size_t>::max());
-    const char *bucket = static_cast<const char*>(buckets_) + (bucket_ref - 1) * bucket_size_;
-    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t*>(
+    const char *bucket =
+        static_cast<const char *>(buckets_) + (bucket_ref - 1) * bucket_size_;
+    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t *>(
         bucket + sizeof(std::atomic<std::size_t>));
-    if ((bucket_hash == hash_code) && key_manager_.scalarKeyCollisionCheck(key, bucket)) {
+    if ((bucket_hash == hash_code) &&
+        key_manager_.scalarKeyCollisionCheck(key, bucket)) {
      // Found a match.
       return true;
     }
-    bucket_ref = reinterpret_cast<const std::atomic<std::size_t>*>(bucket)->load(std::memory_order_relaxed);
+    bucket_ref =
+        reinterpret_cast<const std::atomic<std::size_t> *>(bucket)->load(
+            std::memory_order_relaxed);
   }
   return false;
 }
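
The probe loop in hasKey() encodes two conventions worth calling out: chain
references are 1-based so that 0 can double as the end-of-chain sentinel, and
each bucket begins with an atomic "next" reference followed immediately by the
cached hash code. The helpers below restate that layout arithmetic; they
mirror the expressions above, but the names are invented for illustration.

// Hedged restatement of the bucket addressing used in hasKey() above.
#include <atomic>
#include <cstddef>

// 'bucket_ref' is 1-based and must be nonzero; 0 means end of chain.
inline const char* BucketAt(const void *buckets,
                            const std::size_t bucket_ref,
                            const std::size_t bucket_size) {
  return static_cast<const char*>(buckets) + (bucket_ref - 1) * bucket_size;
}

// The first field of every bucket is the atomic next-reference.
inline std::size_t NextRef(const char *bucket) {
  return reinterpret_cast<const std::atomic<std::size_t>*>(bucket)
      ->load(std::memory_order_relaxed);
}

// The cached hash code sits directly after the next-reference.
inline std::size_t BucketHash(const char *bucket) {
  return *reinterpret_cast<const std::size_t*>(
      bucket + sizeof(std::atomic<std::size_t>));
}
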
@@ -1125,22 +1264,31 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-bool FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::hasCompositeKey(const std::vector<TypedValue> &key) const {
+bool FastSeparateChainingHashTable<
+    resizable,
+    serializable,
+    force_key_copy,
+    allow_duplicate_keys>::hasCompositeKey(const std::vector<TypedValue> &key)
+    const {
   DEBUG_ASSERT(this->key_types_.size() == key.size());
 
   const std::size_t hash_code = this->hashCompositeKey(key);
-  std::size_t bucket_ref = slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
+  std::size_t bucket_ref =
+      slots_[hash_code % header_->num_slots].load(std::memory_order_relaxed);
   while (bucket_ref != 0) {
     DEBUG_ASSERT(bucket_ref != std::numeric_limits<std::size_t>::max());
-    const char *bucket = static_cast<const char*>(buckets_) + (bucket_ref - 1) * bucket_size_;
-    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t*>(
+    const char *bucket =
+        static_cast<const char *>(buckets_) + (bucket_ref - 1) * bucket_size_;
+    const std::size_t bucket_hash = *reinterpret_cast<const std::size_t *>(
         bucket + sizeof(std::atomic<std::size_t>));
-    if ((bucket_hash == hash_code) && key_manager_.compositeKeyCollisionCheck(key, bucket)) {
+    if ((bucket_hash == hash_code) &&
+        key_manager_.compositeKeyCollisionCheck(key, bucket)) {
      // Found a match.
       return true;
     }
-    bucket_ref = reinterpret_cast<const std::atomic<std::size_t>*>(bucket)->load(std::memory_order_relaxed);
+    bucket_ref =
+        reinterpret_cast<const std::atomic<std::size_t> *>(bucket)->load(
+            std::memory_order_relaxed);
   }
   return false;
 }
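
hasCompositeKey() delegates hashing to this->hashCompositeKey(), whose
definition lives outside this diff. For readers unfamiliar with the idea, a
typical way to fold per-component hashes into a single code is a hash-combine
loop like the one below; this is a generic sketch in the style of
boost::hash_combine, not necessarily the formula Quickstep uses.

// Generic hash combiner, shown only to illustrate what a composite-key
// hash must provide: one std::size_t derived from all components.
#include <cstddef>
#include <vector>

std::size_t CombineHashes(const std::vector<std::size_t> &component_hashes) {
  std::size_t seed = 0;
  for (const std::size_t h : component_hashes) {
    seed ^= h + 0x9e3779b97f4a7c15ULL + (seed << 6) + (seed >> 2);
  }
  return seed;
}
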
@@ -1149,10 +1297,13 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-void FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::resize(const std::size_t extra_buckets,
-             const std::size_t extra_variable_storage,
-             const std::size_t retry_num) {
+void FastSeparateChainingHashTable<
+    resizable,
+    serializable,
+    force_key_copy,
+    allow_duplicate_keys>::resize(const std::size_t extra_buckets,
+                                  const std::size_t extra_variable_storage,
+                                  const std::size_t retry_num) {
   DEBUG_ASSERT(resizable);
 
   // A retry should never be necessary with this implementation of HashTable.
@@ -1178,33 +1329,36 @@ void FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allo
   // account kHashTableLoadFactor.
   std::size_t resized_num_slots = get_next_prime_number(
       (header_->num_buckets + extra_buckets / 2) * kHashTableLoadFactor * 2);
-  std::size_t variable_storage_required
-      = (resized_num_slots / kHashTableLoadFactor) * key_manager_.getEstimatedVariableKeySize();
-  const std::size_t original_variable_storage_used
-      = header_->variable_length_bytes_allocated.load(std::memory_order_relaxed);
+  std::size_t variable_storage_required =
+      (resized_num_slots / kHashTableLoadFactor) *
+      key_manager_.getEstimatedVariableKeySize();
+  const std::size_t original_variable_storage_used =
+      header_->variable_length_bytes_allocated.load(std::memory_order_relaxed);
   // If this resize was triggered by a too-large variable-length key, bump up
   // the variable-length storage requirement.
-  if ((extra_variable_storage > 0)
-      && (extra_variable_storage + original_variable_storage_used
-          > key_manager_.getVariableLengthKeyStorageSize())) {
+  if ((extra_variable_storage > 0) &&
+      (extra_variable_storage + original_variable_storage_used >
+       key_manager_.getVariableLengthKeyStorageSize())) {
     variable_storage_required += extra_variable_storage;
   }
 
-  const std::size_t resized_memory_required
-      = sizeof(Header)
-        + resized_num_slots * sizeof(std::atomic<std::size_t>)
-        + (resized_num_slots / kHashTableLoadFactor) * bucket_size_
-        + variable_storage_required;
-  const std::size_t resized_storage_slots
-      = this->storage_manager_->SlotsNeededForBytes(resized_memory_required);
+  const std::size_t resized_memory_required =
+      sizeof(Header) + resized_num_slots * sizeof(std::atomic<std::size_t>) +
+      (resized_num_slots / kHashTableLoadFactor) * bucket_size_ +
+      variable_storage_required;
+  const std::size_t resized_storage_slots =
+      this->storage_manager_->SlotsNeededForBytes(resized_memory_required);
   if (resized_storage_slots == 0) {
-    FATAL_ERROR("Storage requirement for resized SeparateChainingHashTable "
-                "exceeds maximum allocation size.");
+    FATAL_ERROR(
+        "Storage requirement for resized SeparateChainingHashTable "
+        "exceeds maximum allocation size.");
   }
 
   // Get a new StorageBlob to hold the resized hash table.
-  const block_id resized_blob_id = this->storage_manager_->createBlob(resized_storage_slots);
-  MutableBlobReference resized_blob = this->storage_manager_->getBlobMutable(resized_blob_id);
+  const block_id resized_blob_id =
+      this->storage_manager_->createBlob(resized_storage_slots);
+  MutableBlobReference resized_blob =
+      this->storage_manager_->getBlobMutable(resized_blob_id);
 
   // Locate data structures inside the new StorageBlob.
   void *aligned_memory_start = resized_blob->getMemoryMutable();
@@ -1212,12 +1366,12 @@ void FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allo
   if (align(alignof(Header),
             sizeof(Header),
             aligned_memory_start,
-            available_memory)
-          == nullptr) {
+            available_memory) == nullptr) {
     // Should be impossible, as noted in constructor.
-    FATAL_ERROR("StorageBlob used to hold resized SeparateChainingHashTable "
-                "is too small to meet alignment requirements of "
-                "LinearOpenAddressingHashTable::Header.");
+    FATAL_ERROR(
+        "StorageBlob used to hold resized SeparateChainingHashTable "
+        "is too small to meet alignment requirements of "
+        "LinearOpenAddressingHashTable::Header.");
   } else if (aligned_memory_start != resized_blob->getMemoryMutable()) {
     // Again, should be impossible.
     DEV_WARNING("In SeparateChainingHashTable::resize(), StorageBlob "
@@ -1227,59 +1381,63 @@ void FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allo
                 << "LinearOpenAddressingHashTable::Header.");
   }
 
-  Header *resized_header = static_cast<Header*>(aligned_memory_start);
-  aligned_memory_start = static_cast<char*>(aligned_memory_start) + sizeof(Header);
+  Header *resized_header = static_cast<Header *>(aligned_memory_start);
+  aligned_memory_start =
+      static_cast<char *>(aligned_memory_start) + sizeof(Header);
   available_memory -= sizeof(Header);
 
   // As in constructor, recompute the number of slots and buckets using the
   // actual available memory.
-  std::size_t resized_num_buckets
-      = (available_memory - extra_variable_storage)
-        / (kHashTableLoadFactor * sizeof(std::atomic<std::size_t>)
-           + bucket_size_
-           + key_manager_.getEstimatedVariableKeySize());
-  resized_num_slots = get_previous_prime_number(resized_num_buckets * kHashTableLoadFactor);
+  std::size_t resized_num_buckets =
+      (available_memory - extra_variable_storage) /
+      (kHashTableLoadFactor * sizeof(std::atomic<std::size_t>) + bucket_size_ +
+       key_manager_.getEstimatedVariableKeySize());
+  resized_num_slots =
+      get_previous_prime_number(resized_num_buckets * kHashTableLoadFactor);
   resized_num_buckets = resized_num_slots / kHashTableLoadFactor;
 
   // Locate slot array.
-  std::atomic<std::size_t> *resized_slots = static_cast<std::atomic<std::size_t>*>(aligned_memory_start);
-  aligned_memory_start = static_cast<char*>(aligned_memory_start)
-                         + sizeof(std::atomic<std::size_t>) * resized_num_slots;
+  std::atomic<std::size_t> *resized_slots =
+      static_cast<std::atomic<std::size_t> *>(aligned_memory_start);
+  aligned_memory_start = static_cast<char *>(aligned_memory_start) +
+                         sizeof(std::atomic<std::size_t>) * resized_num_slots;
   available_memory -= sizeof(std::atomic<std::size_t>) * resized_num_slots;
 
   // As in constructor, we will be extra paranoid and use align() to locate the
   // start of the array of buckets, as well.
   void *resized_buckets = aligned_memory_start;
-  if (align(kBucketAlignment,
-            bucket_size_,
-            resized_buckets,
-            available_memory)
-          == nullptr) {
-    FATAL_ERROR("StorageBlob used to hold resized SeparateChainingHashTable "
-                "is too small to meet alignment requirements of buckets.");
+  if (align(
+          kBucketAlignment, bucket_size_, resized_buckets, available_memory) ==
+      nullptr) {
+    FATAL_ERROR(
+        "StorageBlob used to hold resized SeparateChainingHashTable "
+        "is too small to meet alignment requirements of buckets.");
   } else if (resized_buckets != aligned_memory_start) {
-    DEV_WARNING("Bucket array start position adjusted to meet alignment "
-                "requirement for SeparateChainingHashTable's value type.");
-    if (resized_num_buckets * bucket_size_ + variable_storage_required > available_memory) {
+    DEV_WARNING(
+        "Bucket array start position adjusted to meet alignment "
+        "requirement for SeparateChainingHashTable's value type.");
+    if (resized_num_buckets * bucket_size_ + variable_storage_required >
+        available_memory) {
       --resized_num_buckets;
     }
   }
-  aligned_memory_start = static_cast<char*>(aligned_memory_start)
-                         + resized_num_buckets * bucket_size_;
+  aligned_memory_start = static_cast<char *>(aligned_memory_start) +
+                         resized_num_buckets * bucket_size_;
   available_memory -= resized_num_buckets * bucket_size_;
 
   void *resized_variable_length_key_storage = aligned_memory_start;
   const std::size_t resized_variable_length_key_storage_size = available_memory;
 
-  const std::size_t original_buckets_used = header_->buckets_allocated.load(std::memory_order_relaxed);
+  const std::size_t original_buckets_used =
+      header_->buckets_allocated.load(std::memory_order_relaxed);
 
   // Initialize the header.
   resized_header->num_slots = resized_num_slots;
   resized_header->num_buckets = resized_num_buckets;
-  resized_header->buckets_allocated.store(original_buckets_used, std::memory_order_relaxed);
+  resized_header->buckets_allocated.store(original_buckets_used,
+                                          std::memory_order_relaxed);
   resized_header->variable_length_bytes_allocated.store(
-      original_variable_storage_used,
-      std::memory_order_relaxed);
+      original_variable_storage_used, std::memory_order_relaxed);
 
   // Bulk-copy buckets. This is safe because:
   //     1. The "next" pointers will be adjusted when rebuilding chains below.
@@ -1298,30 +1456,34 @@ void FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allo
   // GCC 4.8.3, so we assume we need to invoke ValueT's copy or move
   // constructor, even though the plain memcpy above could suffice for many
   // possible ValueTs.
-  void *current_value_original = static_cast<char*>(buckets_) + kValueOffset;
-  void *current_value_resized = static_cast<char*>(resized_buckets) + kValueOffset;
-  for (std::size_t bucket_num = 0; bucket_num < original_buckets_used; ++bucket_num) {
+  void *current_value_original = static_cast<char *>(buckets_) + kValueOffset;
+  void *current_value_resized =
+      static_cast<char *>(resized_buckets) + kValueOffset;
+  for (std::size_t bucket_num = 0; bucket_num < original_buckets_used;
+       ++bucket_num) {
     // Use a move constructor if available to avoid a deep-copy, since resizes
     // always succeed.
-    new (current_value_resized) uint8_t(std::move(*static_cast<uint8_t*>(current_value_original)));
-    current_value_original = static_cast<char*>(current_value_original) + bucket_size_;
-    current_value_resized = static_cast<char*>(current_value_resized) + bucket_size_;
+    new (current_value_resized) std::uint8_t(
+        std::move(*static_cast<std::uint8_t *>(current_value_original)));
+    current_value_original =
+        static_cast<char *>(current_value_original) + bucket_size_;
+    current_value_resized =
+        static_cast<char *>(current_value_resized) + bucket_size_;
   }
 
   // Copy over variable-length key components, if any.
   if (original_variable_storage_used > 0) {
-    DEBUG_ASSERT(original_variable_storage_used
-                 == key_manager_.getNextVariableLengthKeyOffset());
-    DEBUG_ASSERT(original_variable_storage_used <= resized_variable_length_key_storage_size);
+    DEBUG_ASSERT(original_variable_storage_used ==
+                 key_manager_.getNextVariableLengthKeyOffset());
+    DEBUG_ASSERT(original_variable_storage_used <=
+                 resized_variable_length_key_storage_size);
     std::memcpy(resized_variable_length_key_storage,
                 key_manager_.getVariableLengthKeyStorage(),
                 original_variable_storage_used);
   }
 
   // Destroy values in the original hash table, if necessary.
-  DestroyValues(buckets_,
-                original_buckets_used,
-                bucket_size_);
+  DestroyValues(buckets_, original_buckets_used, bucket_size_);
 
   // Make resized structures active.
   std::swap(this->blob_, resized_blob);
@@ -1340,17 +1502,18 @@ void FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allo
 
   // Rebuild chains.
   void *current_bucket = buckets_;
-  for (std::size_t bucket_num = 0; bucket_num < original_buckets_used; ++bucket_num) {
-    std::atomic<std::size_t> *next_ptr
-        = static_cast<std::atomic<std::size_t>*>(current_bucket);
-    const std::size_t hash_code = *reinterpret_cast<const std::size_t*>(
-        static_cast<const char*>(current_bucket) + sizeof(std::atomic<std::size_t>));
+  for (std::size_t bucket_num = 0; bucket_num < original_buckets_used;
+       ++bucket_num) {
+    std::atomic<std::size_t> *next_ptr =
+        static_cast<std::atomic<std::size_t> *>(current_bucket);
+    const std::size_t hash_code = *reinterpret_cast<const std::size_t *>(
+        static_cast<const char *>(current_bucket) +
+        sizeof(std::atomic<std::size_t>));
 
     const std::size_t slot_number = hash_code % header_->num_slots;
     std::size_t slot_ptr_value = 0;
-    if (slots_[slot_number].compare_exchange_strong(slot_ptr_value,
-                                                    bucket_num + 1,
-                                                    std::memory_order_relaxed)) {
+    if (slots_[slot_number].compare_exchange_strong(
+            slot_ptr_value, bucket_num + 1, std::memory_order_relaxed)) {
      // This bucket is the first in the chain for this slot, so reset its
       // next pointer to 0.
       next_ptr->store(0, std::memory_order_relaxed);
@@ -1360,7 +1523,7 @@ void FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allo
       next_ptr->store(slot_ptr_value, std::memory_order_relaxed);
       slots_[slot_number].store(bucket_num + 1, std::memory_order_relaxed);
     }
-    current_bucket = static_cast<char*>(current_bucket) + bucket_size_;
+    current_bucket = static_cast<char *>(current_bucket) + bucket_size_;
   }
 }
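
The chain-rebuild loop that closes resize() simply pushes every copied bucket
onto the front of its slot's chain; the compare_exchange_strong collapses to a
plain prepend here because resize() runs with exclusive access, which is also
why relaxed ordering suffices throughout. A hedged sketch of the net effect,
with invented names:

// Hypothetical restatement of the rebuild-chains step: each bucket is
// prepended to the chain for the slot selected by its cached hash code.
#include <atomic>
#include <cstddef>
#include <vector>

void RebuildChains(std::vector<std::atomic<std::size_t>> *slots,
                   std::vector<std::size_t> *next_refs,  // one per bucket
                   const std::vector<std::size_t> &bucket_hashes) {
  for (std::size_t b = 0; b < bucket_hashes.size(); ++b) {
    const std::size_t slot = bucket_hashes[b] % slots->size();
    // Prepend bucket b, stored in the slot as the 1-based reference b + 1.
    (*next_refs)[b] = (*slots)[slot].load(std::memory_order_relaxed);
    (*slots)[slot].store(b + 1, std::memory_order_relaxed);
  }
}
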
 
@@ -1368,10 +1531,13 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-bool FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::preallocateForBulkInsert(const std::size_t total_entries,
-                               const std::size_t total_variable_key_size,
-                               HashTablePreallocationState *prealloc_state) {
+bool FastSeparateChainingHashTable<resizable,
+                                   serializable,
+                                   force_key_copy,
+                                   allow_duplicate_keys>::
+    preallocateForBulkInsert(const std::size_t total_entries,
+                             const std::size_t total_variable_key_size,
+                             HashTablePreallocationState *prealloc_state) {
   DEBUG_ASSERT(allow_duplicate_keys);
   if (!key_manager_.allocateVariableLengthKeyStorage(total_variable_key_size)) {
     return false;
@@ -1382,12 +1548,15 @@ bool FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allo
   // than one bucket and exceed 'header_->num_buckets', their respective
   // rollbacks might happen in such an order that some bucket ranges get
   // skipped, while others might get double-allocated later.
-  std::size_t original_buckets_allocated = header_->buckets_allocated.load(std::memory_order_relaxed);
-  std::size_t buckets_post_allocation = original_buckets_allocated + total_entries;
-  while ((buckets_post_allocation <= header_->num_buckets)
-         && !header_->buckets_allocated.compare_exchange_weak(original_buckets_allocated,
-                                                              buckets_post_allocation,
-                                                              std::memory_order_relaxed)) {
+  std::size_t original_buckets_allocated =
+      header_->buckets_allocated.load(std::memory_order_relaxed);
+  std::size_t buckets_post_allocation =
+      original_buckets_allocated + total_entries;
+  while ((buckets_post_allocation <= header_->num_buckets) &&
+         !header_->buckets_allocated.compare_exchange_weak(
+             original_buckets_allocated,
+             buckets_post_allocation,
+             std::memory_order_relaxed)) {
     buckets_post_allocation = original_buckets_allocated + total_entries;
   }
 
@@ -1398,8 +1567,9 @@ bool FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allo
 
   prealloc_state->bucket_position = original_buckets_allocated;
   if (total_variable_key_size != 0) {
-    prealloc_state->variable_length_key_position
-        = key_manager_.incrementNextVariableLengthKeyOffset(total_variable_key_size);
+    prealloc_state->variable_length_key_position =
+        key_manager_.incrementNextVariableLengthKeyOffset(
+            total_variable_key_size);
   }
   return true;
 }
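
The reservation loop in preallocateForBulkInsert() is an all-or-nothing claim
on a contiguous range of buckets, exactly as the rollback comment above
explains: the counter only moves if the whole range fits. A self-contained
sketch of that claim follows; the names are hypothetical, and the final
overflow check corresponds to code elided between the hunks above.

// Hedged sketch of the CAS loop above: try to advance 'buckets_allocated'
// by 'total_entries', but only if the claim fits; on failure the counter
// is untouched, so no rollback is ever needed.
#include <atomic>
#include <cstddef>

bool ReserveBuckets(std::atomic<std::size_t> *buckets_allocated,
                    const std::size_t num_buckets,
                    const std::size_t total_entries,
                    std::size_t *first_reserved_bucket) {
  std::size_t original = buckets_allocated->load(std::memory_order_relaxed);
  std::size_t post_allocation = original + total_entries;
  while ((post_allocation <= num_buckets) &&
         !buckets_allocated->compare_exchange_weak(
             original, post_allocation, std::memory_order_relaxed)) {
    // A failed CAS refreshed 'original'; recompute the target and retry.
    post_allocation = original + total_entries;
  }
  if (post_allocation > num_buckets) {
    return false;  // Not enough buckets remain; nothing was claimed.
  }
  *first_reserved_bucket = original;
  return true;
}
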
@@ -1408,17 +1578,18 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-void FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::DestroyValues(void *hash_buckets,
-                    const std::size_t num_buckets,
-                    const std::size_t bucket_size) {
-  if (!std::is_trivially_destructible<uint8_t>::value) {
-    void *value_ptr = static_cast<char*>(hash_buckets) + kValueOffset;
-    for (std::size_t bucket_num = 0;
-         bucket_num < num_buckets;
-         ++bucket_num) {
-     static_cast<uint8_t*>(value_ptr)->~uint8_t();
-      value_ptr = static_cast<char*>(value_ptr) + bucket_size;
+void FastSeparateChainingHashTable<
+    resizable,
+    serializable,
+    force_key_copy,
+    allow_duplicate_keys>::DestroyValues(void *hash_buckets,
+                                         const std::size_t num_buckets,
+                                         const std::size_t bucket_size) {
+  if (!std::is_trivially_destructible<std::uint8_t>::value) {
+    void *value_ptr = static_cast<char *>(hash_buckets) + kValueOffset;
+    for (std::size_t bucket_num = 0; bucket_num < num_buckets; ++bucket_num) {
+      static_cast<std::uint8_t *>(value_ptr)->~uint8_t();
+      value_ptr = static_cast<char *>(value_ptr) + bucket_size;
     }
   }
 }
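
Because std::uint8_t is trivially destructible, the
std::is_trivially_destructible test in DestroyValues() is a compile-time
constant and the destructor loop compiles away entirely; the guard only
matters in the templated HashTable this class descends from, where the value
type can be nontrivial. A generic restatement of the same guard, as a sketch
only:

// Hypothetical generalization of DestroyValues() over an arbitrary value
// type; for trivially destructible types the loop is eliminated at
// compile time.
#include <cstddef>
#include <type_traits>

template <typename ValueT>
void DestroyValuesSketch(void *hash_buckets,
                         const std::size_t num_buckets,
                         const std::size_t bucket_size,
                         const std::size_t value_offset) {
  if (!std::is_trivially_destructible<ValueT>::value) {
    char *value_ptr = static_cast<char*>(hash_buckets) + value_offset;
    for (std::size_t bucket_num = 0; bucket_num < num_buckets; ++bucket_num) {
      reinterpret_cast<ValueT*>(value_ptr)->~ValueT();
      value_ptr += bucket_size;
    }
  }
}
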
@@ -1427,39 +1598,45 @@ template <bool resizable,
           bool serializable,
           bool force_key_copy,
           bool allow_duplicate_keys>
-inline bool FastSeparateChainingHashTable<resizable, serializable, force_key_copy, allow_duplicate_keys>
-    ::locateBucketForInsertion(const std::size_t hash_code,
-                               const std::size_t variable_key_allocation_required,
-                               void **bucket,
-                               std::atomic<std::size_t> **pending_chain_ptr,
-                               std::size_t *pending_chain_ptr_finish_value,
-                               HashTablePreallocationState *prealloc_state) {
+inline bool FastSeparateChainingHashTable<resizable,
+                                          serializable,
+                                          force_key_copy,
+                                          allow_duplicate_keys>::
+    locateBucketForInsertion(const std::size_t hash_code,
+                             const std::size_t variable_key_allocation_required,
+                             void **bucket,
+                             std::atomic<std::size_t> **pending_chain_ptr,
+                             std::size_t *pending_chain_ptr_finish_value,
+                             HashTablePreallocationState *prealloc_state) {
   DEBUG_ASSERT((prealloc_state == nullptr) || allow_duplicate_keys);
   if (*bucket == nullptr) {
     *pending_chain_ptr = &(slots_[hash_code % header_->num_slots]);
   } else {
-    *pending_chain_ptr = static_cast<std::atomic<std::size_t>*>(*bucket);
+    *pending_chain_ptr = static_cast<std::atomic<std::size_t> *>(*bucket);
   }
   for (;;) {
     std::size_t existing_chain_ptr = 0;
-    if ((*pending_chain_ptr)->compare_exchange_strong(existing_chain_ptr,
-                        

<TRUNCATED>