Posted to commits@quickstep.apache.org by zu...@apache.org on 2016/06/09 04:42:05 UTC

[1/3] incubator-quickstep git commit: Fix Clang problems in Travis. [Forced Update!]

Repository: incubator-quickstep
Updated Branches:
  refs/heads/storage-fix 66ccceed7 -> d894e43ce (forced update)


Fix Clang problems in Travis.


Project: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/commit/2d39b8ec
Tree: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/tree/2d39b8ec
Diff: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/diff/2d39b8ec

Branch: refs/heads/storage-fix
Commit: 2d39b8ecd8b1ca3fb42ff2505a664a94b67ab9e3
Parents: eab1c9a
Author: Navneet Potti <na...@gmail.com>
Authored: Wed Jun 8 18:15:38 2016 -0500
Committer: Zuyu Zhang <zz...@pivotal.io>
Committed: Wed Jun 8 20:27:44 2016 -0700

----------------------------------------------------------------------
 .travis.yml | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/2d39b8ec/.travis.yml
----------------------------------------------------------------------
diff --git a/.travis.yml b/.travis.yml
index 08d6f38..df39fb0 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -12,7 +12,7 @@ cache: ccache
 
 compiler:
   - gcc
-  # clang
+  - clang
 
 env:
   - BUILD_TYPE=Debug VECTOR_COPY_ELISION_LEVEL=joinwithbinaryexpressions
@@ -20,6 +20,19 @@ env:
   - BUILD_TYPE=Debug VECTOR_COPY_ELISION_LEVEL=none
   - BUILD_TYPE=Release VECTOR_COPY_ELISION_LEVEL=none
 
+before_install:
+  - LLVM_VERSION=3.7.1
+  - LLVM_ARCHIVE_PATH=$HOME/clang+llvm.tar.xz
+  - if [[ $CC = "clang" ]]; then
+      wget http://llvm.org/releases/$LLVM_VERSION/clang+llvm-$LLVM_VERSION-x86_64-linux-gnu-ubuntu-14.04.tar.xz -O $LLVM_ARCHIVE_PATH;
+      mkdir -p $HOME/clang-$LLVM_VERSION;
+      tar xf $LLVM_ARCHIVE_PATH -C $HOME/clang-$LLVM_VERSION --strip-components 1;
+      ln -sf $HOME/clang-$LLVM_VERSION/bin/clang++ $HOME/clang-$LLVM_VERSION/bin/clang++-3.7;
+      export PATH=$HOME/clang-$LLVM_VERSION/bin:$PATH;
+      export CPPFLAGS="-I $HOME/clang-$LLVM_VERSION/include/c++/v1";
+      echo "Using clang at " `which $CC-3.7` " and $CXX at " `which $CXX-3.7`;
+    fi
+
 install:
   - if [ "$VECTOR_COPY_ELISION_LEVEL" = "joinwithbinaryexpressions" ] && [ "$CC" = "gcc" ]; then
       export MAKE_JOBS=1;
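
The before_install step added above downloads a prebuilt LLVM/Clang 3.7.1 toolchain, creates a clang++-3.7 symlink, and prepends the toolchain to PATH, so Clang jobs build against a pinned compiler version instead of whatever the Travis image ships. As a purely illustrative aid (not part of this commit), a CI job could compile and run a probe like the following to record which compiler front-end was actually picked up; only the predefined compiler macros are standard, everything else is made up for the sketch:

    // compiler_probe.cpp -- hypothetical sanity check. Compile with the
    // toolchain under test (e.g. clang++-3.7 compiler_probe.cpp) and run it
    // so the CI log shows the compiler that was really used.
    #include <cstdio>

    int main() {
    #if defined(__clang__)
      std::printf("clang %d.%d.%d\n",
                  __clang_major__, __clang_minor__, __clang_patchlevel__);
    #elif defined(__GNUC__)
      std::printf("gcc %d.%d\n", __GNUC__, __GNUC_MINOR__);
    #else
      std::printf("unknown compiler\n");
    #endif
      return 0;
    }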


[2/3] incubator-quickstep git commit: Minor Improvements in Storage.

Posted by zu...@apache.org.
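
The storage diffs below share one theme: Quickstep's homegrown FATAL_ERROR and DEBUG_ASSERT macros from utility/Macros.hpp are replaced by glog's LOG(FATAL), CHECK, and DCHECK, which stream their messages and, in the two-argument forms, log both operands on failure. A minimal sketch of the idiom, assuming glog is installed (the function and values are illustrative, not Quickstep code):

    // glog_style.cpp -- sketch of the FATAL_ERROR/DEBUG_ASSERT -> glog
    // migration applied throughout the diffs below.
    #include <glog/logging.h>

    int CodeForByteLength(const int byte_length) {
      switch (byte_length) {
        case 1: return 0;
        case 2: return 1;
        case 4: return 2;
        default:
          // Streams a message, then aborts; replaces FATAL_ERROR(...).
          LOG(FATAL) << "Unexpected byte-length (not 1, 2, or 4): " << byte_length;
      }
    }

    int main(int argc, char **argv) {
      google::InitGoogleLogging(argv[0]);
      // CHECK fires in every build mode; DCHECK compiles away in optimized
      // (NDEBUG) builds, matching the old DEBUG_ASSERT semantics.
      CHECK(argc >= 1) << "argc can never be less than 1";
      DCHECK(argc >= 1);
      return CodeForByteLength(4);
    }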
http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/d894e43c/storage/ColumnStoreUtil.hpp
----------------------------------------------------------------------
diff --git a/storage/ColumnStoreUtil.hpp b/storage/ColumnStoreUtil.hpp
index eec452c..8ff9837 100644
--- a/storage/ColumnStoreUtil.hpp
+++ b/storage/ColumnStoreUtil.hpp
@@ -1,6 +1,6 @@
 /**
  *   Copyright 2011-2015 Quickstep Technologies LLC.
- *   Copyright 2015 Pivotal Software, Inc.
+ *   Copyright 2015-2016 Pivotal Software, Inc.
  *
  *   Licensed under the Apache License, Version 2.0 (the "License");
  *   you may not use this file except in compliance with the License.
@@ -30,7 +30,6 @@ namespace quickstep {
 class BasicColumnStoreTupleStorageSubBlock;
 class CatalogRelationSchema;
 class ComparisonPredicate;
-class CompressedColumnStoreTupleStorageSubBlock;
 class TupleIdSequence;
 
 namespace column_store_util {
@@ -41,8 +40,7 @@ namespace column_store_util {
 
 /**
  * @brief An iterator over the values in a column stripe. Used internally by
- *        BasicColumnStoreTupleStorageSubBlock and
- *        CompressedColumnStoreTupleStorageSubBlock.
+ *        BasicColumnStoreTupleStorageSubBlock.
  **/
 class ColumnStripeIterator : public std::iterator<std::random_access_iterator_tag, void*> {
  public:
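
A side note on ColumnStripeIterator above: it derives from std::iterator for its member typedefs, a pattern deprecated in C++17. A sketch of the equivalent typedefs written out directly (illustrative, not part of this commit):

    // stripe_iterator_sketch.hpp -- the five typedefs that
    // std::iterator<std::random_access_iterator_tag, void*> provides,
    // spelled out explicitly.
    #include <cstddef>
    #include <iterator>

    class ColumnStripeIteratorSketch {
     public:
      using iterator_category = std::random_access_iterator_tag;
      using value_type = void*;
      using difference_type = std::ptrdiff_t;
      using pointer = void**;
      using reference = void*&;
      // Dereference, arithmetic, and comparison operators would follow here.
    };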

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/d894e43c/storage/CompressedColumnStoreTupleStorageSubBlock.cpp
----------------------------------------------------------------------
diff --git a/storage/CompressedColumnStoreTupleStorageSubBlock.cpp b/storage/CompressedColumnStoreTupleStorageSubBlock.cpp
index 1173a84..cf14367 100644
--- a/storage/CompressedColumnStoreTupleStorageSubBlock.cpp
+++ b/storage/CompressedColumnStoreTupleStorageSubBlock.cpp
@@ -1,6 +1,6 @@
 /**
  *   Copyright 2011-2015 Quickstep Technologies LLC.
- *   Copyright 2015 Pivotal Software, Inc.
+ *   Copyright 2015-2016 Pivotal Software, Inc.
  *
  *   Licensed under the Apache License, Version 2.0 (the "License");
  *   you may not use this file except in compliance with the License.
@@ -44,9 +44,10 @@
 #include "types/operations/comparisons/ComparisonFactory.hpp"
 #include "types/operations/comparisons/ComparisonID.hpp"
 #include "utility/BitVector.hpp"
-#include "utility/Macros.hpp"
 #include "utility/PtrVector.hpp"
 
+#include "glog/logging.h"
+
 using std::equal_to;
 using std::greater;
 using std::less_equal;
@@ -78,10 +79,9 @@ CompressedColumnStoreTupleStorageSubBlock::CompressedColumnStoreTupleStorageSubB
                                      sub_block_memory,
                                      sub_block_memory_size),
       uncompressed_nulls_in_sort_column_(0) {
-  if (!DescriptionIsValid(relation_, description_)) {
-    FATAL_ERROR("Attempted to construct a CompressedColumnStoreTupleStorageSubBlock "
-                "from an invalid description.");
-  }
+  CHECK(DescriptionIsValid(relation_, description_))
+      << "Attempted to construct a CompressedColumnStoreTupleStorageSubBlock from an invalid description:\n"
+      << description_.DebugString();
 
   sort_column_id_ = description_.GetExtension(
       CompressedColumnStoreTupleStorageSubBlockDescription::sort_attribute_id);
@@ -163,7 +163,7 @@ bool CompressedColumnStoreTupleStorageSubBlock::DescriptionIsValid(
 std::size_t CompressedColumnStoreTupleStorageSubBlock::EstimateBytesPerTuple(
     const CatalogRelationSchema &relation,
     const TupleStorageSubBlockDescription &description) {
-  DEBUG_ASSERT(DescriptionIsValid(relation, description));
+  DCHECK(DescriptionIsValid(relation, description));
 
   std::unordered_set<attribute_id> compressed_attributes;
   for (int compressed_attribute_num = 0;
@@ -201,8 +201,8 @@ std::size_t CompressedColumnStoreTupleStorageSubBlock::EstimateBytesPerTuple(
 const void* CompressedColumnStoreTupleStorageSubBlock::getAttributeValue(
     const tuple_id tuple,
     const attribute_id attr) const {
-  DEBUG_ASSERT(hasTupleWithID(tuple));
-  DEBUG_ASSERT(supportsUntypedGetAttributeValue(attr));
+  DCHECK(hasTupleWithID(tuple));
+  DCHECK(supportsUntypedGetAttributeValue(attr));
 
   if (dictionary_coded_attributes_[attr]) {
     return dictionaries_.atUnchecked(attr).getUntypedValueForCode<true>(
@@ -215,7 +215,7 @@ const void* CompressedColumnStoreTupleStorageSubBlock::getAttributeValue(
 TypedValue CompressedColumnStoreTupleStorageSubBlock::getAttributeValueTyped(
     const tuple_id tuple,
     const attribute_id attr) const {
-  DEBUG_ASSERT(hasTupleWithID(tuple));
+  DCHECK(hasTupleWithID(tuple));
 
   if (dictionary_coded_attributes_[attr]) {
     return dictionaries_.atUnchecked(attr).getTypedValueForCode(
@@ -258,7 +258,7 @@ ValueAccessor* CompressedColumnStoreTupleStorageSubBlock::createValueAccessor(
 }
 
 bool CompressedColumnStoreTupleStorageSubBlock::deleteTuple(const tuple_id tuple) {
-  DEBUG_ASSERT(hasTupleWithID(tuple));
+  DCHECK(hasTupleWithID(tuple));
 
   if (compression_info_.uncompressed_attribute_has_nulls(sort_column_id_)
       && uncompressed_column_null_bitmaps_[sort_column_id_].getBit(tuple)) {
@@ -408,8 +408,8 @@ void CompressedColumnStoreTupleStorageSubBlock::rebuild() {
 std::uint32_t CompressedColumnStoreTupleStorageSubBlock::compressedGetCode(
     const tuple_id tid,
     const attribute_id attr_id) const {
-  DEBUG_ASSERT(hasTupleWithID(tid));
-  DEBUG_ASSERT((dictionary_coded_attributes_[attr_id]) || (truncated_attributes_[attr_id]));
+  DCHECK(hasTupleWithID(tid));
+  DCHECK((dictionary_coded_attributes_[attr_id]) || (truncated_attributes_[attr_id]));
   const void *code_location = getAttributePtr<false>(tid, attr_id);
   switch (compression_info_.attribute_size(attr_id)) {
     case 1:
@@ -419,9 +419,8 @@ std::uint32_t CompressedColumnStoreTupleStorageSubBlock::compressedGetCode(
     case 4:
       return *static_cast<const std::uint32_t*>(code_location);
     default:
-      FATAL_ERROR("Unexpected byte-length (not 1, 2, or 4) for compressed "
-                  "attribute ID " << attr_id
-                  << " in CompressedColumnStoreTupleStorageSubBlock::compressedGetCode()");
+      LOG(FATAL) << "Unexpected byte-length (not 1, 2, or 4) for compressed attribute ID " << attr_id
+                 << " in CompressedColumnStoreTupleStorageSubBlock::compressedGetCode()";
   }
 }
 
@@ -561,9 +560,8 @@ TupleIdSequence* CompressedColumnStoreTupleStorageSubBlock::getNotEqualCodesExcl
           }
           break;
         default:
-          FATAL_ERROR("Unexpected byte-length (not 1, 2, or 4) for compressed "
-                      "attribute ID " << attr_id
-                      << " in CompressedColumnStoreTupleStorageSubBlock::getNotEqualCodesExcludingNull()");
+          LOG(FATAL) << "Unexpected byte-length (not 1, 2, or 4) for compressed attribute ID " << attr_id
+                     << " in CompressedColumnStoreTupleStorageSubBlock::getNotEqualCodesExcludingNull()";
       }
       if (filter != nullptr) {
         matches->intersectWith(*filter);
@@ -601,9 +599,8 @@ TupleIdSequence* CompressedColumnStoreTupleStorageSubBlock::getNotEqualCodesExcl
           }
           break;
         default:
-          FATAL_ERROR("Unexpected byte-length (not 1, 2, or 4) for compressed "
-                      "attribute ID " << attr_id
-                      << " in CompressedColumnStoreTupleStorageSubBlock::getNotEqualCodesExcludingNull()");
+          LOG(FATAL) << "Unexpected byte-length (not 1, 2, or 4) for compressed attribute ID " << attr_id
+                     << " in CompressedColumnStoreTupleStorageSubBlock::getNotEqualCodesExcludingNull()";
       }
     }
     return matches;
@@ -700,9 +697,8 @@ TupleIdSequence* CompressedColumnStoreTupleStorageSubBlock::getCodesInRange(
           }
           break;
         default:
-          FATAL_ERROR("Unexpected byte-length (not 1, 2, or 4) for compressed "
-                      "attribute ID " << attr_id
-                      << " in CompressedColumnStoreTupleStorageSubBlock::getCodesInRange()");
+          LOG(FATAL) << "Unexpected byte-length (not 1, 2, or 4) for compressed attribute ID " << attr_id
+                     << " in CompressedColumnStoreTupleStorageSubBlock::getCodesInRange()";
       }
       if (filter != nullptr) {
         matches->intersectWith(*filter);
@@ -740,9 +736,8 @@ TupleIdSequence* CompressedColumnStoreTupleStorageSubBlock::getCodesInRange(
           }
           break;
         default:
-          FATAL_ERROR("Unexpected byte-length (not 1, 2, or 4) for compressed "
-                      "attribute ID " << attr_id
-                      << " in CompressedColumnStoreTupleStorageSubBlock::getCodesInRange()");
+          LOG(FATAL) << "Unexpected byte-length (not 1, 2, or 4) for compressed attribute ID " << attr_id
+                     << " in CompressedColumnStoreTupleStorageSubBlock::getCodesInRange()";
       }
     }
   }
@@ -831,7 +826,7 @@ void CompressedColumnStoreTupleStorageSubBlock::shiftUncompressedNullBitmaps(
 
 std::pair<tuple_id, tuple_id> CompressedColumnStoreTupleStorageSubBlock::getCompressedSortColumnRange(
     const std::pair<std::uint32_t, std::uint32_t> code_range) const {
-  DEBUG_ASSERT(dictionary_coded_attributes_[sort_column_id_] || truncated_attributes_[sort_column_id_]);
+  DCHECK(dictionary_coded_attributes_[sort_column_id_] || truncated_attributes_[sort_column_id_]);
 
   const void *attr_stripe = column_stripes_[sort_column_id_];
   pair<tuple_id, tuple_id> tuple_range;
@@ -861,9 +856,8 @@ std::pair<tuple_id, tuple_id> CompressedColumnStoreTupleStorageSubBlock::getComp
                             - static_cast<const uint32_t*>(attr_stripe);
         break;
       default:
-        FATAL_ERROR("Unexpected byte-length (not 1, 2, or 4) for compressed "
-                    "attribute ID " << sort_column_id_
-                    << " in CompressedColumnStoreTupleStorageSubBlock::getCompressedSortColumnRange()");
+        LOG(FATAL) << "Unexpected byte-length (not 1, 2, or 4) for compressed attribute ID " << sort_column_id_
+                   << " in CompressedColumnStoreTupleStorageSubBlock::getCompressedSortColumnRange()";
     }
   }
 
@@ -893,9 +887,8 @@ std::pair<tuple_id, tuple_id> CompressedColumnStoreTupleStorageSubBlock::getComp
                              - static_cast<const uint32_t*>(attr_stripe);
         break;
       default:
-        FATAL_ERROR("Unexpected byte-length (not 1, 2, or 4) for compressed "
-                    "attribute ID " << sort_column_id_
-                    << " in CompressedColumnStoreTupleStorageSubBlock::getCompressedSortColumnRange()");
+        LOG(FATAL) << "Unexpected byte-length (not 1, 2, or 4) for compressed attribute ID " << sort_column_id_
+                   << " in CompressedColumnStoreTupleStorageSubBlock::getCompressedSortColumnRange()";
     }
   }
 
@@ -945,9 +938,8 @@ TupleIdSequence* CompressedColumnStoreTupleStorageSubBlock::getCodesSatisfyingCo
         }
         break;
       default:
-        FATAL_ERROR("Unexpected byte-length (not 1, 2, or 4) for compressed "
-                    "attribute ID " << attr_id
-                    << " in CompressedColumnStoreTupleStorageSubBlock::getCodesSatisfyingComparison()");
+        LOG(FATAL) << "Unexpected byte-length (not 1, 2, or 4) for compressed attribute ID " << attr_id
+                   << " in CompressedColumnStoreTupleStorageSubBlock::getCodesSatisfyingComparison()";
     }
     if (filter != nullptr) {
       matches->intersectWith(*filter);
@@ -982,9 +974,8 @@ TupleIdSequence* CompressedColumnStoreTupleStorageSubBlock::getCodesSatisfyingCo
         }
         break;
       default:
-        FATAL_ERROR("Unexpected byte-length (not 1, 2, or 4) for compressed "
-                    "attribute ID " << attr_id
-                    << " in CompressedColumnStoreTupleStorageSubBlock::getCodesSatisfyingComparison()");
+        LOG(FATAL) << "Unexpected byte-length (not 1, 2, or 4) for compressed attribute ID " << attr_id
+                   << " in CompressedColumnStoreTupleStorageSubBlock::getCodesSatisfyingComparison()";
     }
   }
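
compressedGetCode() above dispatches on the stored byte-length of a compressed code. A standalone sketch of that 1/2/4-byte read, widened to 32 bits (names are illustrative, not Quickstep's API):

    // read_code.cpp -- illustrative version of the byte-length switch used
    // by compressedGetCode(); codes are stored as 1-, 2-, or 4-byte
    // unsigned integers.
    #include <cstddef>
    #include <cstdint>

    #include <glog/logging.h>

    std::uint32_t ReadCode(const void *code_location,
                           const std::size_t byte_length) {
      switch (byte_length) {
        case 1:
          return *static_cast<const std::uint8_t*>(code_location);
        case 2:
          return *static_cast<const std::uint16_t*>(code_location);
        case 4:
          return *static_cast<const std::uint32_t*>(code_location);
        default:
          LOG(FATAL) << "Unexpected byte-length (not 1, 2, or 4): " << byte_length;
      }
    }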
 

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/d894e43c/storage/CompressedPackedRowStoreTupleStorageSubBlock.cpp
----------------------------------------------------------------------
diff --git a/storage/CompressedPackedRowStoreTupleStorageSubBlock.cpp b/storage/CompressedPackedRowStoreTupleStorageSubBlock.cpp
index 163e7df..504c15d 100644
--- a/storage/CompressedPackedRowStoreTupleStorageSubBlock.cpp
+++ b/storage/CompressedPackedRowStoreTupleStorageSubBlock.cpp
@@ -1,6 +1,6 @@
 /**
  *   Copyright 2011-2015 Quickstep Technologies LLC.
- *   Copyright 2015 Pivotal Software, Inc.
+ *   Copyright 2015-2016 Pivotal Software, Inc.
  *
  *   Licensed under the Apache License, Version 2.0 (the "License");
  *   you may not use this file except in compliance with the License.
@@ -42,7 +42,8 @@
 #include "types/operations/comparisons/ComparisonFactory.hpp"
 #include "types/operations/comparisons/ComparisonID.hpp"
 #include "utility/BitVector.hpp"
-#include "utility/Macros.hpp"
+
+#include "glog/logging.h"
 
 using std::equal_to;
 using std::greater;
@@ -71,10 +72,9 @@ CompressedPackedRowStoreTupleStorageSubBlock::CompressedPackedRowStoreTupleStora
                                      sub_block_memory,
                                      sub_block_memory_size),
       num_uncompressed_attributes_with_nulls_(0) {
-  if (!DescriptionIsValid(relation_, description_)) {
-    FATAL_ERROR("Attempted to construct a CompressedPackedRowStoreTupleStorageSubBlock "
-                "from an invalid description.");
-  }
+  CHECK(DescriptionIsValid(relation_, description_))
+      << "Attempted to construct a CompressedPackedRowStoreTupleStorageSubBlock from an invalid description:\n"
+      << description_.DebugString();
 
   if ((!new_block) && (*static_cast<tuple_id*>(sub_block_memory_) != 0)) {
     initialize();
@@ -138,7 +138,7 @@ bool CompressedPackedRowStoreTupleStorageSubBlock::DescriptionIsValid(
 std::size_t CompressedPackedRowStoreTupleStorageSubBlock::EstimateBytesPerTuple(
     const CatalogRelationSchema &relation,
     const TupleStorageSubBlockDescription &description) {
-  DEBUG_ASSERT(DescriptionIsValid(relation, description));
+  DCHECK(DescriptionIsValid(relation, description));
 
   std::unordered_set<attribute_id> compressed_attributes;
   for (int compressed_attribute_num = 0;
@@ -176,8 +176,8 @@ std::size_t CompressedPackedRowStoreTupleStorageSubBlock::EstimateBytesPerTuple(
 const void* CompressedPackedRowStoreTupleStorageSubBlock::getAttributeValue(
     const tuple_id tuple,
     const attribute_id attr) const {
-  DEBUG_ASSERT(hasTupleWithID(tuple));
-  DEBUG_ASSERT(supportsUntypedGetAttributeValue(attr));
+  DCHECK(hasTupleWithID(tuple));
+  DCHECK(supportsUntypedGetAttributeValue(attr));
 
   if (dictionary_coded_attributes_[attr]) {
     return dictionaries_.atUnchecked(attr).getUntypedValueForCode<true>(
@@ -190,7 +190,7 @@ const void* CompressedPackedRowStoreTupleStorageSubBlock::getAttributeValue(
 TypedValue CompressedPackedRowStoreTupleStorageSubBlock::getAttributeValueTyped(
     const tuple_id tuple,
     const attribute_id attr) const {
-  DEBUG_ASSERT(hasTupleWithID(tuple));
+  DCHECK(hasTupleWithID(tuple));
 
   if (dictionary_coded_attributes_[attr]) {
     return dictionaries_.atUnchecked(attr).getTypedValueForCode(
@@ -237,7 +237,7 @@ ValueAccessor* CompressedPackedRowStoreTupleStorageSubBlock::createValueAccessor
 }
 
 bool CompressedPackedRowStoreTupleStorageSubBlock::deleteTuple(const tuple_id tuple) {
-  DEBUG_ASSERT(hasTupleWithID(tuple));
+  DCHECK(hasTupleWithID(tuple));
 
   if (tuple == *static_cast<const tuple_id*>(sub_block_memory_) - 1) {
     // Simply truncate if only removing the last tuple.
@@ -347,8 +347,8 @@ void CompressedPackedRowStoreTupleStorageSubBlock::rebuild() {
 std::uint32_t CompressedPackedRowStoreTupleStorageSubBlock::compressedGetCode(
     const tuple_id tid,
     const attribute_id attr_id) const {
-  DEBUG_ASSERT(hasTupleWithID(tid));
-  DEBUG_ASSERT((dictionary_coded_attributes_[attr_id]) || (truncated_attributes_[attr_id]));
+  DCHECK(hasTupleWithID(tid));
+  DCHECK((dictionary_coded_attributes_[attr_id]) || (truncated_attributes_[attr_id]));
   const void *code_location = static_cast<const char*>(tuple_storage_)
                               + tid * tuple_length_bytes_
                               + attribute_offsets_[attr_id];
@@ -360,9 +360,8 @@ std::uint32_t CompressedPackedRowStoreTupleStorageSubBlock::compressedGetCode(
     case 4:
       return *static_cast<const std::uint32_t*>(code_location);
     default:
-      FATAL_ERROR("Unexpected byte-length (not 1, 2, or 4) for compressed "
-                  "attribute ID " << attr_id
-                  << " in CompressedPackedRowStoreTupleStorageSubBlock::compressedGetCodeInl()");
+      LOG(FATAL) << "Unexpected byte-length (not 1, 2, or 4) for compressed attribute ID " << attr_id
+                 << " in CompressedPackedRowStoreTupleStorageSubBlock::compressedGetCodeInl()";
   }
 }
 
@@ -426,9 +425,8 @@ TupleIdSequence* CompressedPackedRowStoreTupleStorageSubBlock::getNotEqualCodesE
         }
         break;
       default:
-        FATAL_ERROR("Unexpected byte-length (not 1, 2, or 4) for compressed "
-                    "attribute ID " << attr_id
-                    << " in CompressedPackedRowStoreTupleStorageSubBlock::getNotEqualCodesExcludingNull()");
+        LOG(FATAL) << "Unexpected byte-length (not 1, 2, or 4) for compressed attribute ID " << attr_id
+                   << " in CompressedPackedRowStoreTupleStorageSubBlock::getNotEqualCodesExcludingNull()";
     }
     if (filter != nullptr) {
       matches->intersectWith(*filter);
@@ -469,9 +467,8 @@ TupleIdSequence* CompressedPackedRowStoreTupleStorageSubBlock::getNotEqualCodesE
         }
         break;
       default:
-        FATAL_ERROR("Unexpected byte-length (not 1, 2, or 4) for compressed "
-                    "attribute ID " << attr_id
-                    << " in CompressedPackedRowStoreTupleStorageSubBlock::getNotEqualCodesExcludingNull()");
+        LOG(FATAL) << "Unexpected byte-length (not 1, 2, or 4) for compressed attribute ID " << attr_id
+                   << " in CompressedPackedRowStoreTupleStorageSubBlock::getNotEqualCodesExcludingNull()";
     }
   }
   return matches;
@@ -536,9 +533,8 @@ TupleIdSequence* CompressedPackedRowStoreTupleStorageSubBlock::getCodesInRange(
         }
         break;
       default:
-        FATAL_ERROR("Unexpected byte-length (not 1, 2, or 4) for compressed "
-                    "attribute ID " << attr_id
-                    << " in CompressedPackedRowStoreTupleStorageSubBlock::getCodesInRange()");
+        LOG(FATAL) << "Unexpected byte-length (not 1, 2, or 4) for compressed attribute ID " << attr_id
+                   << " in CompressedPackedRowStoreTupleStorageSubBlock::getCodesInRange()";
     }
     if (filter != nullptr) {
       matches->intersectWith(*filter);
@@ -579,9 +575,8 @@ TupleIdSequence* CompressedPackedRowStoreTupleStorageSubBlock::getCodesInRange(
         }
         break;
       default:
-        FATAL_ERROR("Unexpected byte-length (not 1, 2, or 4) for compressed "
-                    "attribute ID " << attr_id
-                    << " in CompressedPackedRowStoreTupleStorageSubBlock::getCodesInRange()");
+        LOG(FATAL) << "Unexpected byte-length (not 1, 2, or 4) for compressed attribute ID " << attr_id
+                   << " in CompressedPackedRowStoreTupleStorageSubBlock::getCodesInRange()";
     }
   }
   return matches;
@@ -659,9 +654,8 @@ TupleIdSequence* CompressedPackedRowStoreTupleStorageSubBlock::getCodesSatisfyin
         }
         break;
       default:
-        FATAL_ERROR("Unexpected byte-length (not 1, 2, or 4) for compressed "
-                    "attribute ID " << attr_id
-                    << " in CompressedPackedRowStoreTupleStorageSubBlock::getCodesSatisfyingComparison()");
+        LOG(FATAL) << "Unexpected byte-length (not 1, 2, or 4) for compressed attribute ID " << attr_id
+                   << " in CompressedPackedRowStoreTupleStorageSubBlock::getCodesSatisfyingComparison()";
     }
     if (filter != nullptr) {
       matches->intersectWith(*filter);
@@ -699,9 +693,8 @@ TupleIdSequence* CompressedPackedRowStoreTupleStorageSubBlock::getCodesSatisfyin
         }
         break;
       default:
-        FATAL_ERROR("Unexpected byte-length (not 1, 2, or 4) for compressed "
-                    "attribute ID " << attr_id
-                    << " in CompressedPackedRowStoreTupleStorageSubBlock::getCodesSatisfyingComparison()");
+        LOG(FATAL) << "Unexpected byte-length (not 1, 2, or 4) for compressed attribute ID " << attr_id
+                   << " in CompressedPackedRowStoreTupleStorageSubBlock::getCodesSatisfyingComparison()";
     }
   }
   return matches;
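
The compressedGetCode() hunk above locates a value at tuple_storage_ + tid * tuple_length_bytes_ + attribute_offsets_[attr_id]. A self-contained sketch of that fixed-length row addressing (names and layout are illustrative):

    // row_addressing.cpp -- with fixed-length rows, finding an attribute is
    // pure pointer arithmetic: row base plus per-attribute offset.
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    const char* AttributePtr(const char *tuple_storage,
                             const std::size_t tuple_length_bytes,
                             const std::vector<std::size_t> &attribute_offsets,
                             const std::size_t tid,
                             const std::size_t attr_id) {
      return tuple_storage + tid * tuple_length_bytes + attribute_offsets[attr_id];
    }

    int main() {
      // Two 8-byte rows; attribute 1 begins 4 bytes into each row.
      char storage[16] = {};
      storage[12] = 42;  // Row 1, attribute 1.
      const std::vector<std::size_t> offsets = {0, 4};
      std::printf("%d\n", *AttributePtr(storage, 8, offsets, 1, 1));  // Prints 42.
      return 0;
    }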

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/d894e43c/storage/CompressedTupleStorageSubBlock.cpp
----------------------------------------------------------------------
diff --git a/storage/CompressedTupleStorageSubBlock.cpp b/storage/CompressedTupleStorageSubBlock.cpp
index 53379f6..bd29a4e 100644
--- a/storage/CompressedTupleStorageSubBlock.cpp
+++ b/storage/CompressedTupleStorageSubBlock.cpp
@@ -1,6 +1,6 @@
 /**
  *   Copyright 2011-2015 Quickstep Technologies LLC.
- *   Copyright 2015 Pivotal Software, Inc.
+ *   Copyright 2015-2016 Pivotal Software, Inc.
  *
  *   Licensed under the Apache License, Version 2.0 (the "License");
  *   you may not use this file except in compliance with the License.
@@ -24,12 +24,11 @@
 #include <utility>
 #include <vector>
 
+#include "catalog/CatalogAttribute.hpp"
 #include "catalog/CatalogRelationSchema.hpp"
 #include "catalog/CatalogTypedefs.hpp"
 #include "compression/CompressionDictionary.hpp"
-#include "compression/CompressionDictionaryLite.hpp"
 #include "expressions/predicate/ComparisonPredicate.hpp"
-#include "expressions/predicate/Predicate.hpp"
 #include "expressions/scalar/Scalar.hpp"
 #include "expressions/scalar/ScalarAttribute.hpp"
 #include "storage/CompressedBlockBuilder.hpp"
@@ -39,10 +38,11 @@
 #include "storage/TupleIdSequence.hpp"
 #include "storage/ValueAccessor.hpp"
 #include "storage/ValueAccessorUtil.hpp"
-#include "types/TypedValue.hpp"
-#include "types/containers/Tuple.hpp"
+#include "types/Type.hpp"
+#include "types/TypeID.hpp"
 #include "types/operations/comparisons/ComparisonID.hpp"
-#include "utility/Macros.hpp"
+
+#include "glog/logging.h"
 
 using std::ceil;
 using std::floor;
@@ -158,93 +158,90 @@ tuple_id CompressedTupleStorageSubBlock::bulkInsertTuplesWithRemappedAttributes(
 TupleIdSequence* CompressedTupleStorageSubBlock::getMatchesForPredicate(
     const ComparisonPredicate &predicate,
     const TupleIdSequence *filter) const {
-  DEBUG_ASSERT(builder_.get() == nullptr);
+  DCHECK(builder_.get() == nullptr);
 
   // Determine if the predicate is a comparison of a compressed attribute with
   // a literal.
-  if (predicate.isAttributeLiteralComparisonPredicate()) {
-    const CatalogAttribute *comparison_attribute = NULL;
-    if (predicate.getLeftOperand().hasStaticValue()) {
-      DEBUG_ASSERT(predicate.getRightOperand().getDataSource() == Scalar::kAttribute);
-      comparison_attribute
-          = &(static_cast<const ScalarAttribute&>(predicate.getRightOperand()).getAttribute());
-    } else {
-      DEBUG_ASSERT(predicate.getLeftOperand().getDataSource() == Scalar::kAttribute);
-      comparison_attribute
-          = &(static_cast<const ScalarAttribute&>(predicate.getLeftOperand()).getAttribute());
-    }
-    const attribute_id comparison_attribute_id = comparison_attribute->getID();
-
-    DEBUG_ASSERT(comparison_attribute->getParent().getID() == relation_.getID());
-    if (dictionary_coded_attributes_[comparison_attribute_id]
-        || truncated_attributes_[comparison_attribute_id]) {
-      const CompressionDictionary *dictionary = nullptr;
-      if (dictionary_coded_attributes_[comparison_attribute_id]) {
-        dictionary = static_cast<const CompressionDictionary*>(
-            dictionaries_.find(comparison_attribute_id)->second);
+  CHECK(predicate.isAttributeLiteralComparisonPredicate())
+      << "Called CompressedTupleStorageSubBlock::getMatchesForPredicate()"
+      << " with a predicate that can only be evaluated with a simple scan.";
+
+  const CatalogAttribute *comparison_attribute = nullptr;
+  if (predicate.getLeftOperand().hasStaticValue()) {
+    DCHECK_EQ(Scalar::kAttribute, predicate.getRightOperand().getDataSource());
+    comparison_attribute
+        = &(static_cast<const ScalarAttribute&>(predicate.getRightOperand()).getAttribute());
+  } else {
+    DCHECK_EQ(Scalar::kAttribute, predicate.getLeftOperand().getDataSource());
+    comparison_attribute
+        = &(static_cast<const ScalarAttribute&>(predicate.getLeftOperand()).getAttribute());
+  }
+  const attribute_id comparison_attribute_id = comparison_attribute->getID();
+
+  DCHECK_EQ(relation_.getID(), comparison_attribute->getParent().getID());
+  DCHECK(dictionary_coded_attributes_[comparison_attribute_id] ||
+         truncated_attributes_[comparison_attribute_id])
+      << "Called CompressedTupleStorageSubBlock::getMatchesForPredicate()"
+      << " with a predicate that can only be evaluated with a simple scan.";
+
+  const CompressionDictionary *dictionary = nullptr;
+  if (dictionary_coded_attributes_[comparison_attribute_id]) {
+    dictionary = static_cast<const CompressionDictionary*>(
+        dictionaries_.find(comparison_attribute_id)->second);
+  }
+  PredicateTransformResult result
+      = CompressedAttributePredicateTransformer::TransformPredicateOnCompressedAttribute(
+          relation_,
+          predicate,
+          dictionary,
+          GetMaxTruncatedValue(compression_info_.attribute_size(comparison_attribute_id)));
+
+  pair<uint32_t, uint32_t> match_range;
+  switch (result.type) {
+    case PredicateTransformResultType::kAll:
+      if (filter == nullptr) {
+        return getExistenceMap();
+      } else {
+        TupleIdSequence *filter_copy = new TupleIdSequence(filter->length());
+        filter_copy->assignFrom(*filter);
+        return filter_copy;
       }
-      PredicateTransformResult result
-          = CompressedAttributePredicateTransformer::TransformPredicateOnCompressedAttribute(
-              relation_,
-              predicate,
-              dictionary,
-              GetMaxTruncatedValue(compression_info_.attribute_size(comparison_attribute_id)));
-
-      pair<uint32_t, uint32_t> match_range;
-      switch (result.type) {
-        case PredicateTransformResultType::kAll:
-          if (filter == nullptr) {
-            return getExistenceMap();
+      // Pass through to base version to get all tuples.
+      return TupleStorageSubBlock::getMatchesForPredicate(predicate, filter);
+    case PredicateTransformResultType::kNone:
+      // No matches.
+      return new TupleIdSequence(*static_cast<const tuple_id*>(sub_block_memory_));
+    case PredicateTransformResultType::kBasicComparison:
+      switch (result.comp) {
+        case ComparisonID::kEqual:
+          return getEqualCodes(comparison_attribute_id, result.first_literal, filter);
+        case ComparisonID::kNotEqual:
+          if (result.exclude_nulls) {
+            return getNotEqualCodesExcludingNull(comparison_attribute_id,
+                                                 result.first_literal,
+                                                 result.second_literal,
+                                                 filter);
           } else {
-            TupleIdSequence *filter_copy = new TupleIdSequence(filter->length());
-            filter_copy->assignFrom(*filter);
-            return filter_copy;
+            return getNotEqualCodes(comparison_attribute_id,
+                                    result.first_literal,
+                                    filter);
           }
-          // Pass through to base version to get all tuples.
-          return TupleStorageSubBlock::getMatchesForPredicate(predicate, filter);
-        case PredicateTransformResultType::kNone:
-          // No matches.
-          return new TupleIdSequence(*static_cast<const tuple_id*>(sub_block_memory_));
-        case PredicateTransformResultType::kBasicComparison:
-          switch (result.comp) {
-            case ComparisonID::kEqual:
-              return getEqualCodes(comparison_attribute_id, result.first_literal, filter);
-            case ComparisonID::kNotEqual:
-              if (result.exclude_nulls) {
-                return getNotEqualCodesExcludingNull(comparison_attribute_id,
-                                                     result.first_literal,
-                                                     result.second_literal,
-                                                     filter);
-              } else {
-                return getNotEqualCodes(comparison_attribute_id,
+        case ComparisonID::kLess:
+          return getLessCodes(comparison_attribute_id, result.first_literal, filter);
+        case ComparisonID::kGreaterOrEqual:
+          return getGreaterOrEqualCodes(comparison_attribute_id,
                                         result.first_literal,
                                         filter);
-              }
-            case ComparisonID::kLess:
-              return getLessCodes(comparison_attribute_id, result.first_literal, filter);
-            case ComparisonID::kGreaterOrEqual:
-              return getGreaterOrEqualCodes(comparison_attribute_id,
-                                            result.first_literal,
-                                            filter);
-            default:
-              FATAL_ERROR("Unexpected ComparisonID in CompressedTupleStorageSubBlock::"
-                          "getMatchesForPredicate()");
-          }
-        case PredicateTransformResultType::kRangeComparison:
-          match_range.first = result.first_literal;
-          match_range.second = result.second_literal;
-          return getCodesInRange(comparison_attribute_id, match_range, filter);
         default:
-          FATAL_ERROR("Unexpected PredicateTransformResultType in CompressedTupleStorageSubBlock::"
-                      "getMatchesForPredicate()");
+          LOG(FATAL) << "Unexpected ComparisonID in CompressedTupleStorageSubBlock::getMatchesForPredicate()";
       }
-    } else {
-      FATAL_ERROR("Called CompressedTupleStorageSubBlock::getMatchesForPredicate() "
-                  "with a predicate that can only be evaluated with a simple scan.");
-    }
-  } else {
-    FATAL_ERROR("Called CompressedTupleStorageSubBlock::getMatchesForPredicate() "
-                "with a predicate that can only be evaluated with a simple scan.");
+    case PredicateTransformResultType::kRangeComparison:
+      match_range.first = result.first_literal;
+      match_range.second = result.second_literal;
+      return getCodesInRange(comparison_attribute_id, match_range, filter);
+    default:
+      LOG(FATAL)
+          << "Unexpected PredicateTransformResultType in CompressedTupleStorageSubBlock::getMatchesForPredicate()";
   }
 }
 
@@ -253,7 +250,7 @@ bool CompressedTupleStorageSubBlock::compressedComparisonIsAlwaysTrueForTruncate
     const attribute_id left_attr_id,
     const TypedValue &right_literal,
     const Type &right_literal_type) const {
-  DEBUG_ASSERT(truncated_attributes_[left_attr_id]);
+  DCHECK(truncated_attributes_[left_attr_id]);
 
   return CompressedAttributePredicateTransformer::CompressedComparisonIsAlwaysTrueForTruncatedAttribute(
       comp,
@@ -267,7 +264,7 @@ bool CompressedTupleStorageSubBlock::compressedComparisonIsAlwaysFalseForTruncat
     const attribute_id left_attr_id,
     const TypedValue &right_literal,
     const Type &right_literal_type) const {
-  DEBUG_ASSERT(truncated_attributes_[left_attr_id]);
+  DCHECK(truncated_attributes_[left_attr_id]);
 
   return CompressedAttributePredicateTransformer::CompressedComparisonIsAlwaysFalseForTruncatedAttribute(
       comp,
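
Besides the macro swap, the getMatchesForPredicate() rewrite above flattens deeply nested if/else blocks into up-front CHECK guards followed by a single switch. A sketch of the shape of that refactoring (the enum and return values are illustrative):

    // guard_clauses.cpp -- assert preconditions first, then dispatch flat,
    // instead of nesting the whole function body inside if/else blocks.
    #include <glog/logging.h>

    enum class TransformResult { kAll, kNone, kBasicComparison, kRangeComparison };

    int Dispatch(const bool predicate_is_simple, const TransformResult type) {
      // A violated precondition is a caller bug: abort loudly.
      CHECK(predicate_is_simple)
          << "Called with a predicate that can only be evaluated with a simple scan.";
      switch (type) {
        case TransformResult::kAll:              return 1;
        case TransformResult::kNone:             return 0;
        case TransformResult::kBasicComparison:  return 2;
        case TransformResult::kRangeComparison:  return 3;
      }
      LOG(FATAL) << "Unexpected TransformResult";
    }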

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/d894e43c/storage/CompressedTupleStorageSubBlock.hpp
----------------------------------------------------------------------
diff --git a/storage/CompressedTupleStorageSubBlock.hpp b/storage/CompressedTupleStorageSubBlock.hpp
index 19ebc7f..a5a0c5a 100644
--- a/storage/CompressedTupleStorageSubBlock.hpp
+++ b/storage/CompressedTupleStorageSubBlock.hpp
@@ -1,6 +1,6 @@
 /**
  *   Copyright 2011-2015 Quickstep Technologies LLC.
- *   Copyright 2015 Pivotal Software, Inc.
+ *   Copyright 2015-2016 Pivotal Software, Inc.
  *
  *   Licensed under the Apache License, Version 2.0 (the "License");
  *   you may not use this file except in compliance with the License.
@@ -30,21 +30,23 @@
 #include "compression/CompressionDictionary.hpp"
 #include "compression/CompressionDictionaryLite.hpp"
 #include "storage/CompressedBlockBuilder.hpp"
+#include "storage/StorageBlockInfo.hpp"
 #include "storage/StorageBlockLayout.pb.h"
-#include "storage/StorageErrors.hpp"
-#include "storage/TupleIdSequence.hpp"
 #include "storage/TupleStorageSubBlock.hpp"
 #include "types/operations/comparisons/ComparisonID.hpp"
-#include "types/TypedValue.hpp"
 #include "utility/Macros.hpp"
 #include "utility/PtrMap.hpp"
 
+#include "glog/logging.h"
+
 namespace quickstep {
 
 class CatalogRelationSchema;
 class ComparisonPredicate;
 class Tuple;
-class TupleStorageSubBlockDescription;
+class TupleIdSequence;
+class Type;
+class TypedValue;
 class ValueAccessor;
 
 /** \addtogroup Storage
@@ -141,8 +143,8 @@ class CompressedTupleStorageSubBlock : public TupleStorageSubBlock {
   void setAttributeValueInPlaceTyped(const tuple_id tuple,
                                      const attribute_id attr,
                                      const TypedValue &value) override {
-    FATAL_ERROR("Called CompressedTupleStorageSubBlock::setAttributeValueInPlaceTyped(), "
-                "which is not supported.");
+    LOG(FATAL) << "Called CompressedTupleStorageSubBlock::setAttributeValueInPlaceTyped(), "
+               << "which is not supported.";
   }
 
   // This override can more efficiently evaluate comparisons between a
@@ -184,7 +186,7 @@ class CompressedTupleStorageSubBlock : public TupleStorageSubBlock {
    *         attempted.
    **/
   bool compressedUnbuiltBlockAttributeMayBeCompressed(const attribute_id attr_id) const {
-    DEBUG_ASSERT(builder_.get() != nullptr);
+    DCHECK(builder_.get() != nullptr);
     return builder_->attributeMayBeCompressed(attr_id);
   }
 
@@ -199,7 +201,7 @@ class CompressedTupleStorageSubBlock : public TupleStorageSubBlock {
    *         compressed, false otherwise.
    **/
   inline bool compressedAttributeIsDictionaryCompressed(const attribute_id attr_id) const {
-    DEBUG_ASSERT(builder_.get() == nullptr);
+    DCHECK(builder_.get() == nullptr);
     return dictionary_coded_attributes_[attr_id];
   }
 
@@ -213,7 +215,7 @@ class CompressedTupleStorageSubBlock : public TupleStorageSubBlock {
    *         is truncated, false otherwise.
    **/
   inline bool compressedAttributeIsTruncationCompressed(const attribute_id attr_id) const {
-    DEBUG_ASSERT(builder_.get() == nullptr);
+    DCHECK(builder_.get() == nullptr);
     return truncated_attributes_[attr_id];
   }
 
@@ -231,8 +233,8 @@ class CompressedTupleStorageSubBlock : public TupleStorageSubBlock {
    *         is a Long.
    **/
   inline bool compressedTruncatedAttributeIsInt(const attribute_id attr_id) const {
-    DEBUG_ASSERT(builder_.get() == nullptr);
-    DEBUG_ASSERT(truncated_attributes_[attr_id]);
+    DCHECK(builder_.get() == nullptr);
+    DCHECK(truncated_attributes_[attr_id]);
     return truncated_attribute_is_int_[attr_id];
   }
 
@@ -247,7 +249,7 @@ class CompressedTupleStorageSubBlock : public TupleStorageSubBlock {
    *         uncompressed).
    **/
   std::size_t compressedGetCompressedAttributeSize(const attribute_id attr_id) const {
-    DEBUG_ASSERT(builder_.get() == nullptr);
+    DCHECK(builder_.get() == nullptr);
     return compression_info_.attribute_size(attr_id);
   }
 
@@ -262,15 +264,13 @@ class CompressedTupleStorageSubBlock : public TupleStorageSubBlock {
    *         by attr_id.
    **/
   const CompressionDictionary& compressedGetDictionary(const attribute_id attr_id) const {
-    DEBUG_ASSERT(builder_.get() == nullptr);
+    DCHECK(builder_.get() == nullptr);
     PtrMap<attribute_id, CompressionDictionaryLite>::const_iterator dict_it
         = dictionaries_.find(attr_id);
-    if (dict_it == dictionaries_.end()) {
-      FATAL_ERROR("Called CompressedTupleStorageSubBlock::getCompressionDictionary() "
-                  "for an attribute which is not dictionary-compressed.");
-    } else {
-      return static_cast<const CompressionDictionary&>(*(dict_it->second));
-    }
+    DCHECK(dict_it != dictionaries_.end())
+        << "Called CompressedTupleStorageSubBlock::getCompressionDictionary() "
+        << "for an attribute which is not dictionary-compressed.";
+    return static_cast<const CompressionDictionary&>(*(dict_it->second));
   }
 
   /**
@@ -340,8 +340,8 @@ class CompressedTupleStorageSubBlock : public TupleStorageSubBlock {
       case 4:
         return std::numeric_limits<std::uint32_t>::max() - 1;
       default:
-        FATAL_ERROR("Unexpected byte_length for truncated value in "
-                    "CompressedTupleStorageSubBlock::GetMaxTruncatedValue()");
+        LOG(FATAL) << "Unexpected byte_length for truncated value in "
+                   << "CompressedTupleStorageSubBlock::GetMaxTruncatedValue()";
     }
   }
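
The header hunk above also tightens include hygiene: full includes of StorageErrors.hpp, TupleIdSequence.hpp, and TypedValue.hpp are dropped in favor of forward declarations, since this header only names those types by pointer or reference. A minimal sketch of the idiom (class names are illustrative):

    // forward_decl_sketch.hpp -- a header that uses a type only by pointer
    // or reference need not include the type's full definition.
    #ifndef FORWARD_DECL_SKETCH_HPP_
    #define FORWARD_DECL_SKETCH_HPP_

    class TupleIdSequence;  // Forward declarations replace whole includes.
    class TypedValue;

    class ExampleSubBlock {
     public:
      // Declarations involving incomplete types compile fine; the matching
      // .cpp file includes the real headers at the point of use.
      TupleIdSequence* getMatches(const TypedValue &literal) const;
    };

    #endif  // FORWARD_DECL_SKETCH_HPP_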
 

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/d894e43c/storage/PackedRowStoreTupleStorageSubBlock.cpp
----------------------------------------------------------------------
diff --git a/storage/PackedRowStoreTupleStorageSubBlock.cpp b/storage/PackedRowStoreTupleStorageSubBlock.cpp
index ef83a29..a2fe7ca 100644
--- a/storage/PackedRowStoreTupleStorageSubBlock.cpp
+++ b/storage/PackedRowStoreTupleStorageSubBlock.cpp
@@ -1,6 +1,6 @@
 /**
  *   Copyright 2011-2015 Quickstep Technologies LLC.
- *   Copyright 2015 Pivotal Software, Inc.
+ *   Copyright 2015-2016 Pivotal Software, Inc.
  *   Copyright 2016, Quickstep Research Group, Computer Sciences Department,
 *     University of Wisconsin—Madison.
  *
@@ -40,6 +40,8 @@
 #include "utility/BitVector.hpp"
 #include "utility/Macros.hpp"
 
+#include "glog/logging.h"
+
 using std::vector;
 using std::memcpy;
 using std::size_t;
@@ -61,9 +63,9 @@ PackedRowStoreTupleStorageSubBlock::PackedRowStoreTupleStorageSubBlock(
                            sub_block_memory_size),
       header_(static_cast<PackedRowStoreHeader*>(sub_block_memory)),
       null_bitmap_bytes_(0) {
-  if (!DescriptionIsValid(relation_, description_)) {
-    FATAL_ERROR("Attempted to construct a PackedRowStoreTupleStorageSubBlock from an invalid description.");
-  }
+  CHECK(DescriptionIsValid(relation_, description_))
+      << "Attempted to construct a PackedRowStoreTupleStorageSubBlock from an invalid description:\n"
+      << description_.DebugString();
 
   if (sub_block_memory_size < sizeof(PackedRowStoreHeader)) {
     throw BlockMemoryTooSmall("PackedRowStoreTupleStorageSubBlock", sub_block_memory_size);
@@ -128,7 +130,7 @@ bool PackedRowStoreTupleStorageSubBlock::DescriptionIsValid(
 std::size_t PackedRowStoreTupleStorageSubBlock::EstimateBytesPerTuple(
     const CatalogRelationSchema &relation,
     const TupleStorageSubBlockDescription &description) {
-  DEBUG_ASSERT(DescriptionIsValid(relation, description));
+  DCHECK(DescriptionIsValid(relation, description));
 
   // NOTE(chasseur): We round-up the number of bytes needed in the NULL bitmap
   // to avoid estimating 0 bytes needed for a relation with less than 8
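
The NOTE above refers to round-up division when sizing the NULL bitmap: n nullable attributes need ceil(n / 8) bytes, so a relation with fewer than 8 of them still pays at least one byte per tuple. A tiny sketch of that arithmetic:

    // bitmap_bytes.cpp -- round-up byte count for an n-bit NULL bitmap.
    #include <cstddef>
    #include <cstdio>

    std::size_t BitmapBytesNeeded(const std::size_t num_bits) {
      return (num_bits + 7) / 8;  // Integer ceiling of num_bits / 8.
    }

    int main() {
      std::printf("%zu %zu %zu\n",
                  BitmapBytesNeeded(1),   // 1
                  BitmapBytesNeeded(8),   // 1
                  BitmapBytesNeeded(9));  // 2
      return 0;
    }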

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/d894e43c/storage/SplitRowStoreTupleStorageSubBlock.cpp
----------------------------------------------------------------------
diff --git a/storage/SplitRowStoreTupleStorageSubBlock.cpp b/storage/SplitRowStoreTupleStorageSubBlock.cpp
index 6c70d0f..747e20d 100644
--- a/storage/SplitRowStoreTupleStorageSubBlock.cpp
+++ b/storage/SplitRowStoreTupleStorageSubBlock.cpp
@@ -1,6 +1,6 @@
 /**
  *   Copyright 2011-2015 Quickstep Technologies LLC.
- *   Copyright 2015 Pivotal Software, Inc.
+ *   Copyright 2015-2016 Pivotal Software, Inc.
  *
  *   Licensed under the Apache License, Version 2.0 (the "License");
  *   you may not use this file except in compliance with the License.
@@ -35,6 +35,8 @@
 #include "utility/Macros.hpp"
 #include "utility/ScopedBuffer.hpp"
 
+#include "glog/logging.h"
+
 namespace quickstep {
 
 QUICKSTEP_REGISTER_TUPLE_STORE(SplitRowStoreTupleStorageSubBlock, SPLIT_ROW_STORE);
@@ -100,9 +102,9 @@ SplitRowStoreTupleStorageSubBlock::SplitRowStoreTupleStorageSubBlock(
                            sub_block_memory,
                            sub_block_memory_size),
       header_(static_cast<Header*>(sub_block_memory)) {
-  if (!DescriptionIsValid(relation_, description_)) {
-    FATAL_ERROR("Attempted to construct a SplitRowStoreTupleStorageSubBlock from an invalid description.");
-  }
+  CHECK(DescriptionIsValid(relation_, description_))
+      << "Attempted to construct a SplitRowStoreTupleStorageSubBlock from an invalid description."
+      << description_.DebugString();
 
   if (sub_block_memory_size < sizeof(Header)) {
     throw BlockMemoryTooSmall("SplitRowStoreTupleStorageSubBlock", sub_block_memory_size);
@@ -166,7 +168,7 @@ bool SplitRowStoreTupleStorageSubBlock::DescriptionIsValid(
 std::size_t SplitRowStoreTupleStorageSubBlock::EstimateBytesPerTuple(
     const CatalogRelationSchema &relation,
     const TupleStorageSubBlockDescription &description) {
-  DEBUG_ASSERT(DescriptionIsValid(relation, description));
+  DCHECK(DescriptionIsValid(relation, description));
 
   return relation.getFixedByteLength()                                           // Fixed-length attrs
          + BitVector<true>::BytesNeeded(relation.numNullableAttributes())        // Null bitmap

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/d894e43c/storage/StorageBlock.cpp
----------------------------------------------------------------------
diff --git a/storage/StorageBlock.cpp b/storage/StorageBlock.cpp
index fdd438d..6e9b12f 100644
--- a/storage/StorageBlock.cpp
+++ b/storage/StorageBlock.cpp
@@ -19,13 +19,13 @@
 
 #include "storage/StorageBlock.hpp"
 
-#include <climits>
 #include <memory>
 #include <type_traits>
 #include <unordered_map>
 #include <utility>
 #include <vector>
 
+#include "catalog/CatalogAttribute.hpp"
 #include "catalog/CatalogRelationSchema.hpp"
 #include "catalog/CatalogTypedefs.hpp"
 #include "expressions/aggregation/AggregationHandle.hpp"
@@ -54,21 +54,21 @@
 #include "storage/TupleStorageSubBlock.hpp"
 #include "storage/ValueAccessor.hpp"
 #include "storage/ValueAccessorUtil.hpp"
+
+#ifdef QUICKSTEP_HAVE_BITWEAVING
+#include "storage/bitweaving/BitWeavingIndexSubBlock.hpp"
+#include "storage/bitweaving/BitWeavingHIndexSubBlock.hpp"
+#include "storage/bitweaving/BitWeavingVIndexSubBlock.hpp"
+#endif
+
 #include "types/TypedValue.hpp"
 #include "types/containers/ColumnVector.hpp"
 #include "types/containers/ColumnVectorsValueAccessor.hpp"
 #include "types/containers/Tuple.hpp"
 #include "types/operations/comparisons/ComparisonUtil.hpp"
-#include "utility/Macros.hpp"
 
 #include "glog/logging.h"
 
-#ifdef QUICKSTEP_HAVE_BITWEAVING
-#include "storage/bitweaving/BitWeavingIndexSubBlock.hpp"
-#include "storage/bitweaving/BitWeavingHIndexSubBlock.hpp"
-#include "storage/bitweaving/BitWeavingVIndexSubBlock.hpp"
-#endif
-
 using std::make_pair;
 using std::pair;
 using std::size_t;
@@ -91,57 +91,30 @@ StorageBlock::StorageBlock(const CatalogRelationSchema &relation,
       all_indices_inconsistent_(false),
       relation_(relation) {
   if (new_block) {
-    if (block_memory_size_ < layout.getBlockHeaderSize()) {
-      throw BlockMemoryTooSmall("StorageBlock", block_memory_size_);
-    }
-
-    layout.copyHeaderTo(block_memory_);
-    DEBUG_ASSERT(*static_cast<const int*>(block_memory_) > 0);
-
-    if (!block_header_.ParseFromArray(static_cast<char*>(block_memory_) + sizeof(int),
-                                      *static_cast<const int*>(block_memory_))) {
-      FATAL_ERROR("A StorageBlockLayout created a malformed StorageBlockHeader.");
-    }
-
     // We mark a newly-created block as dirty, so that in the rare case that a
     // block is evicted before anything is inserted into it, we still write it
     // (and the header plus any sub-block specific fixed data structures) back
     // to disk.
     dirty_ = true;
 
-    DEBUG_ASSERT(block_header_.IsInitialized());
-    DEBUG_ASSERT(StorageBlockLayout::DescriptionIsValid(relation_, block_header_.layout()));
-    DEBUG_ASSERT(block_header_.index_size_size() == block_header_.layout().index_description_size());
-    DEBUG_ASSERT(block_header_.index_size_size() == block_header_.index_consistent_size());
-  } else {
-    if (block_memory_size < sizeof(int)) {
-      throw MalformedBlock();
-    }
-    if (*static_cast<const int*>(block_memory_) <= 0) {
-      throw MalformedBlock();
-    }
-    if (*static_cast<const int*>(block_memory_) + sizeof(int) > block_memory_size_) {
-      throw MalformedBlock();
-    }
+    DCHECK_GE(block_memory_size_, layout.getBlockHeaderSize())
+        << "BlockMemoryTooSmall: " << block_memory_size_ << " bytes is too small for StorageBlock";
 
-    if (!block_header_.ParseFromArray(static_cast<char*>(block_memory_) + sizeof(int),
-                                      *static_cast<const int*>(block_memory_))) {
-      throw MalformedBlock();
-    }
-    if (!block_header_.IsInitialized()) {
-      throw MalformedBlock();
-    }
-    if (!StorageBlockLayout::DescriptionIsValid(relation_, block_header_.layout())) {
-      throw MalformedBlock();
-    }
-    if (block_header_.index_size_size() != block_header_.layout().index_description_size()) {
-      throw MalformedBlock();
-    }
-    if (block_header_.index_size_size() != block_header_.index_consistent_size()) {
-      throw MalformedBlock();
-    }
+    layout.copyHeaderTo(block_memory_);
+  } else {
+    DCHECK_GT(*static_cast<const int*>(block_memory_), 0);
+    DCHECK_LE(*static_cast<const int*>(block_memory_) + sizeof(int), block_memory_size_);
   }
 
+  CHECK(block_header_.ParseFromArray(static_cast<char*>(block_memory_) + sizeof(int),
+                                     *static_cast<const int*>(block_memory_)))
+      << "A StorageBlockLayout created a malformed StorageBlockHeader.";
+
+  CHECK(block_header_.IsInitialized());
+  CHECK(StorageBlockLayout::DescriptionIsValid(relation_, block_header_.layout()));
+  CHECK_EQ(block_header_.index_size_size(), block_header_.layout().index_description_size());
+  CHECK_EQ(block_header_.index_size_size(), block_header_.index_consistent_size());
+
   size_t block_size_from_metadata = *static_cast<const int*>(block_memory_) + sizeof(int);
   block_size_from_metadata += block_header_.tuple_store_size();
   for (int index_num = 0;
@@ -204,7 +177,7 @@ bool StorageBlock::insertTuple(const Tuple &tuple) {
 
   TupleStorageSubBlock::InsertResult tuple_store_insert_result = tuple_store_->insertTuple(tuple);
   if (tuple_store_insert_result.inserted_id < 0) {
-    DEBUG_ASSERT(tuple_store_insert_result.ids_mutated == false);
+    DCHECK(!tuple_store_insert_result.ids_mutated);
     if (empty_before) {
       throw TupleTooLargeForBlock(tuple.getByteSize());
     } else {
@@ -218,11 +191,10 @@ bool StorageBlock::insertTuple(const Tuple &tuple) {
     update_succeeded = rebuildIndexes(true);
     if (!update_succeeded) {
       tuple_store_->deleteTuple(tuple_store_insert_result.inserted_id);
-      if (!rebuildIndexes(true)) {
-        // It should always be possible to rebuild an index with the tuples
-        // which it originally contained.
-        FATAL_ERROR("Rebuilding an IndexSubBlock failed after removing tuples.");
-      }
+      // It should always be possible to rebuild an index with the tuples which
+      // it originally contained.
+      CHECK(rebuildIndexes(true))
+          << "Rebuilding an IndexSubBlock failed after removing tuples.";
     }
   } else {
     update_succeeded = insertEntryInIndexes(tuple_store_insert_result.inserted_id);
@@ -588,10 +560,9 @@ StorageBlock::UpdateResult StorageBlock::update(
     const unordered_map<attribute_id, unique_ptr<const Scalar>> &assignments,
     const Predicate *predicate,
     InsertDestinationInterface *relocation_destination) {
-  if (relation_.getID() != relocation_destination->getRelation().getID()) {
-    FATAL_ERROR("StorageBlock::update() called with a relocation_destination "
-                "that does not belong to the same relation.");
-  }
+  CHECK_EQ(relation_.getID(), relocation_destination->getRelation().getID())
+      << "StorageBlock::update() called with a relocation_destination "
+      << "that does not belong to the same relation.";
 
   UpdateResult retval;
   // TODO(chasseur): Be smarter and only update indexes that need to be updated.
@@ -651,13 +622,11 @@ StorageBlock::UpdateResult StorageBlock::update(
     } else {
       // Make a copy of the tuple with the updated values.
       std::vector<TypedValue> updated_tuple_values;
-      for (CatalogRelationSchema::const_iterator attr_it = relation_.begin();
-           attr_it != relation_.end();
-           ++attr_it) {
+      for (const CatalogAttribute &attr : relation_) {
         std::unordered_map<attribute_id, TypedValue>::iterator update_it
-            = updated_values->find(attr_it->getID());
+            = updated_values->find(attr.getID());
         if (update_it == updated_values->end()) {
-          updated_tuple_values.emplace_back(tuple_store_->getAttributeValueTyped(*match_it, attr_it->getID()));
+          updated_tuple_values.emplace_back(tuple_store_->getAttributeValueTyped(*match_it, attr.getID()));
           updated_tuple_values.back().ensureNotReference();
         } else {
           updated_tuple_values.emplace_back(std::move(update_it->second));
@@ -889,9 +858,9 @@ void StorageBlock::sort(const PtrVector<Scalar> &order_by,  // NOLINT(build/incl
   // the method used. Average-case asymptotics is definitely better in the
   // later. Need to do an analysis of the two methods.
 
-  DEBUG_ASSERT(order_by.size() == sort_is_ascending.size());
-  DEBUG_ASSERT(order_by.size() == null_first.size());
-  DEBUG_ASSERT(order_by.size() > 0);
+  DCHECK_EQ(order_by.size(), sort_is_ascending.size());
+  DCHECK_EQ(order_by.size(), null_first.size());
+  DCHECK_GT(order_by.size(), 0u);
 
   // TODO(shoban): We should use reverse_iterator in conjunction with rbegin()
   // and rend() for better readability, if PtrVector supports it.
@@ -958,16 +927,14 @@ void StorageBlock::deleteTuples(const Predicate *predicate) {
     // Delete tuples from the TupleStorageSubBlock.
     if (tuple_store_->bulkDeleteTuples(matches.get())) {
       // If the tuple-ID sequence was mutated, rebuild all indices.
-      if (!rebuildIndexes(true)) {
-        FATAL_ERROR("Rebuilding an IndexSubBlock failed after removing tuples.");
-      }
+      CHECK(rebuildIndexes(true))
+          << "Rebuilding an IndexSubBlock failed after removing tuples.";
     } else if (rebuild_some_indices) {
       // Rebuild any remaining indices that don't support ad-hoc removal.
       for (PtrVector<IndexSubBlock>::iterator it = indices_.begin(); it != indices_.end(); ++it) {
         if (!it->supportsAdHocRemove()) {
-          if (!it->rebuild()) {
-            FATAL_ERROR("Rebuilding an IndexSubBlock failed after removing tuples.");
-          }
+          CHECK(it->rebuild())
+              << "Rebuilding an IndexSubBlock failed after removing tuples.";
         }
       }
     }
@@ -982,7 +949,7 @@ TupleStorageSubBlock* StorageBlock::CreateTupleStorageSubBlock(
     const bool new_block,
     void *sub_block_memory,
     const std::size_t sub_block_memory_size) {
-  DEBUG_ASSERT(description.IsInitialized());
+  DCHECK(description.IsInitialized());
   switch (description.sub_block_type()) {
     case TupleStorageSubBlockDescription::PACKED_ROW_STORE:
       return new PackedRowStoreTupleStorageSubBlock(relation,
@@ -1016,7 +983,7 @@ TupleStorageSubBlock* StorageBlock::CreateTupleStorageSubBlock(
                                                    sub_block_memory_size);
     default:
       if (new_block) {
-        FATAL_ERROR("A StorageBlockLayout provided an unknown TupleStorageSubBlockType.");
+        LOG(FATAL) << "A StorageBlockLayout provided an unknown TupleStorageSubBlockType.";
       } else {
         throw MalformedBlock();
       }
@@ -1029,7 +996,7 @@ IndexSubBlock* StorageBlock::CreateIndexSubBlock(
     const bool new_block,
     void *sub_block_memory,
     const std::size_t sub_block_memory_size) {
-  DEBUG_ASSERT(description.IsInitialized());
+  DCHECK(description.IsInitialized());
   switch (description.sub_block_type()) {
     case IndexSubBlockDescription::BLOOM_FILTER:
       return new BloomFilterIndexSubBlock(tuple_store,
@@ -1070,7 +1037,7 @@ IndexSubBlock* StorageBlock::CreateIndexSubBlock(
 #endif
     default:
       if (new_block) {
-        FATAL_ERROR("A StorageBlockLayout provided an unknown IndexBlockType.");
+        LOG(FATAL) << "A StorageBlockLayout provided an unknown IndexBlockType.";
       } else {
         throw MalformedBlock();
       }
@@ -1078,9 +1045,9 @@ IndexSubBlock* StorageBlock::CreateIndexSubBlock(
 }
 
 bool StorageBlock::insertEntryInIndexes(const tuple_id new_tuple) {
-  DEBUG_ASSERT(ad_hoc_insert_supported_);
-  DEBUG_ASSERT(new_tuple >= 0);
-  DEBUG_ASSERT(all_indices_consistent_);
+  DCHECK(ad_hoc_insert_supported_);
+  DCHECK_GE(new_tuple, 0);
+  DCHECK(all_indices_consistent_);
 
   for (PtrVector<IndexSubBlock>::iterator it = indices_.begin();
        it != indices_.end();
@@ -1111,9 +1078,8 @@ bool StorageBlock::insertEntryInIndexes(const tuple_id new_tuple) {
 
       if (tuple_store_->deleteTuple(new_tuple)) {
         // The tuple-ID sequence was mutated, so rebuild all indices.
-        if (!rebuildIndexes(true)) {
-          FATAL_ERROR("Rebuilding an IndexSubBlock failed after removing tuples.");
-        }
+        CHECK(rebuildIndexes(true))
+            << "Rebuilding an IndexSubBlock failed after removing tuples.";
       } else if (rebuild_some_indices) {
         // Rebuild those indices that were modified that don't support ad-hoc
         // removal.
@@ -1121,11 +1087,10 @@ bool StorageBlock::insertEntryInIndexes(const tuple_id new_tuple) {
              fixer_it != it;
              ++fixer_it) {
           if (!fixer_it->supportsAdHocRemove()) {
-            if (!fixer_it->rebuild()) {
-              // It should always be possible to rebuild an index with the
-              // tuples which it originally contained.
-              FATAL_ERROR("Rebuilding an IndexSubBlock failed after removing tuples.");
-            }
+            // It should always be possible to rebuild an index with the
+            // tuples which it originally contained.
+            CHECK(fixer_it->rebuild())
+                << "Rebuilding an IndexSubBlock failed after removing tuples.";
           }
         }
       }
@@ -1139,8 +1104,8 @@ bool StorageBlock::insertEntryInIndexes(const tuple_id new_tuple) {
 
 bool StorageBlock::bulkInsertEntriesInIndexes(TupleIdSequence *new_tuples,
                                               const bool roll_back_on_failure) {
-  DEBUG_ASSERT(ad_hoc_insert_supported_);
-  DEBUG_ASSERT(all_indices_consistent_);
+  DCHECK(ad_hoc_insert_supported_);
+  DCHECK(all_indices_consistent_);
 
   // If 'roll_back_on_failure' is false, we will allow some indices to become
   // inconsistent.
@@ -1177,9 +1142,8 @@ bool StorageBlock::bulkInsertEntriesInIndexes(TupleIdSequence *new_tuples,
 
         if (tuple_store_->bulkDeleteTuples(new_tuples)) {
           // The tuple-ID sequence was mutated, so rebuild all indices.
-          if (!rebuildIndexes(true)) {
-            FATAL_ERROR("Rebuilding an IndexSubBlock failed after removing tuples.");
-          }
+          CHECK(rebuildIndexes(true))
+              << "Rebuilding an IndexSubBlock failed after removing tuples.";
         } else if (rebuild_some_indices) {
           // Rebuild those indices that were modified that don't support ad-hoc
           // removal.
@@ -1187,11 +1151,10 @@ bool StorageBlock::bulkInsertEntriesInIndexes(TupleIdSequence *new_tuples,
                fixer_it != it;
                ++fixer_it) {
             if (!fixer_it->supportsAdHocRemove()) {
-              if (!fixer_it->rebuild()) {
-                // It should always be possible to rebuild an index with the
-                // tuples which it originally contained.
-                FATAL_ERROR("Rebuilding an IndexSubBlock failed after removing tuples.");
-              }
+              // It should always be possible to rebuild an index with the
+              // tuples which it originally contained.
+              CHECK(fixer_it->rebuild())
+                  << "Rebuilding an IndexSubBlock failed after removing tuples.";
             }
           }
         }
@@ -1338,12 +1301,11 @@ AggregationState* StorageBlock::aggregateHelperValueAccessor(
 #endif  // QUICKSTEP_ENABLE_VECTOR_COPY_ELISION_SELECTION
 
 void StorageBlock::updateHeader() {
-  DEBUG_ASSERT(*static_cast<const int*>(block_memory_) == block_header_.ByteSize());
+  DCHECK_EQ(*static_cast<const int*>(block_memory_), block_header_.ByteSize());
 
-  if (!block_header_.SerializeToArray(static_cast<char*>(block_memory_) + sizeof(int),
-                                      block_header_.ByteSize())) {
-    FATAL_ERROR("Failed to do binary serialization of StorageBlockHeader in StorageBlock::updateHeader()");
-  }
+  CHECK(block_header_.SerializeToArray(static_cast<char*>(block_memory_) + sizeof(int),
+                                       block_header_.ByteSize()))
+      << "Failed to do binary serialization of StorageBlockHeader in StorageBlock::updateHeader()";
 }
 
 void StorageBlock::invalidateAllIndexes() {

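The hunks above all follow one pattern: the old two-branch `if (!cond) { FATAL_ERROR(...); }` collapses into a single glog `CHECK(cond) << message` statement. A minimal sketch of the idiom, assuming only glog (the RebuildIndex stand-in is hypothetical, not Quickstep code):

    #include "glog/logging.h"

    // Hypothetical stand-in for IndexSubBlock::rebuild().
    static bool RebuildIndex() { return true; }

    int main(int argc, char **argv) {
      google::InitGoogleLogging(argv[0]);
      // CHECK() runs in both debug and release builds; the streamed message
      // operands are evaluated only if the condition is false, just before
      // the process aborts with a stack trace.
      CHECK(RebuildIndex())
          << "Rebuilding an IndexSubBlock failed after removing tuples.";
      return 0;
    }
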
http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/d894e43c/storage/StorageBlock.hpp
----------------------------------------------------------------------
diff --git a/storage/StorageBlock.hpp b/storage/StorageBlock.hpp
index 3ae3812..b79455a 100644
--- a/storage/StorageBlock.hpp
+++ b/storage/StorageBlock.hpp
@@ -35,6 +35,8 @@
 #include "utility/Macros.hpp"
 #include "utility/PtrVector.hpp"
 
+#include "glog/logging.h"
+
 namespace quickstep {
 
 class AggregationHandle;
@@ -182,7 +184,7 @@ class StorageBlock : public StorageBlockBase {
   }
 
   inline bool indexIsConsistent(const std::size_t index_id) const {
-    DEBUG_ASSERT(index_id < indices_consistent_.size());
+    DCHECK_LT(index_id, indices_consistent_.size());
     return indices_consistent_[index_id];
   }
 
@@ -207,7 +209,7 @@ class StorageBlock : public StorageBlockBase {
    * @return The specified IndexSubBlock in this block.
    **/
   inline const IndexSubBlock& getIndexSubBlock(const std::size_t index_id) const {
-    DEBUG_ASSERT(index_id < indices_.size());
+    DCHECK_LT(index_id, indices_.size());
     return indices_[index_id];
   }
 

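The DCHECK_LT calls in the inline accessors above compile away entirely when NDEBUG is defined, so the bounds check costs nothing in release builds. A small sketch under that assumption (the Indices wrapper class is invented for illustration):

    #include <cstddef>
    #include <vector>

    #include "glog/logging.h"

    // Invented wrapper class; only the accessor pattern matters here.
    class Indices {
     public:
      inline bool isConsistent(const std::size_t index_id) const {
        // Debug builds abort with both operand values printed if the id is
        // out of range; release builds (NDEBUG) elide the check entirely.
        DCHECK_LT(index_id, consistent_.size());
        return consistent_[index_id];
      }

     private:
      std::vector<bool> consistent_{true, true};
    };
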
http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/d894e43c/storage/StorageBlockLayout.cpp
----------------------------------------------------------------------
diff --git a/storage/StorageBlockLayout.cpp b/storage/StorageBlockLayout.cpp
index e28fc55..65beed3 100644
--- a/storage/StorageBlockLayout.cpp
+++ b/storage/StorageBlockLayout.cpp
@@ -1,6 +1,6 @@
 /**
  *   Copyright 2011-2015 Quickstep Technologies LLC.
- *   Copyright 2015 Pivotal Software, Inc.
+ *   Copyright 2015-2016 Pivotal Software, Inc.
  *   Copyright 2016, Quickstep Research Group, Computer Sciences Department,
  *     University of Wisconsin—Madison.
  *
@@ -23,8 +23,6 @@
 #include <string>
 #include <vector>
 
-#include "glog/logging.h"
-
 #include "catalog/CatalogRelationSchema.hpp"
 #include "storage/StorageBlockLayout.pb.h"
 #include "storage/StorageConstants.hpp"
@@ -32,6 +30,8 @@
 #include "storage/SubBlockTypeRegistry.hpp"
 #include "utility/Macros.hpp"
 
+#include "glog/logging.h"
+
 using std::size_t;
 using std::string;
 using std::strlen;
@@ -68,7 +68,7 @@ void StorageBlockLayout::finalize() {
     block_header_.add_index_consistent(true);
   }
 
-  DEBUG_ASSERT(block_header_.IsInitialized());
+  DCHECK(block_header_.IsInitialized());
 
   size_t header_size = getBlockHeaderSize();
   if (header_size > layout_description_.num_slots() * kSlotSizeBytes) {
@@ -131,19 +131,18 @@ void StorageBlockLayout::finalize() {
 
   block_header_.set_tuple_store_size(sub_block_space - allocated_sub_block_space);
 
-  DEBUG_ASSERT(block_header_.IsInitialized());
-  DEBUG_ASSERT(header_size == getBlockHeaderSize());
+  DCHECK(block_header_.IsInitialized());
+  DCHECK_EQ(header_size, getBlockHeaderSize());
 }
 
 void StorageBlockLayout::copyHeaderTo(void *dest) const {
-  DEBUG_ASSERT(DescriptionIsValid(relation_, layout_description_));
-  DEBUG_ASSERT(block_header_.IsInitialized());
+  DCHECK(DescriptionIsValid(relation_, layout_description_));
+  DCHECK(block_header_.IsInitialized());
 
   *static_cast<int*>(dest) = block_header_.ByteSize();
-  if (!block_header_.SerializeToArray(static_cast<char*>(dest) + sizeof(int),
-                                      block_header_.ByteSize())) {
-    FATAL_ERROR("Failed to do binary serialization of StorageBlockHeader in StorageBlockLayout::copyHeaderTo()");
-  }
+  CHECK(block_header_.SerializeToArray(static_cast<char*>(dest) + sizeof(int),
+                                       block_header_.ByteSize()))
+      << "Failed to do binary serialization of StorageBlockHeader in StorageBlockLayout::copyHeaderTo()";
 }
 
 StorageBlockLayout* StorageBlockLayout::GenerateDefaultLayout(const CatalogRelationSchema &relation,
@@ -170,7 +169,7 @@ bool StorageBlockLayout::DescriptionIsValid(const CatalogRelationSchema &relatio
 }
 
 std::size_t StorageBlockLayout::estimateTuplesPerBlock() const {
-  DEBUG_ASSERT(block_header_.IsInitialized());
+  DCHECK(block_header_.IsInitialized());
   return ((layout_description_.num_slots() * kSlotSizeBytes) - getBlockHeaderSize())
          / estimated_bytes_per_tuple_;
 }

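copyHeaderTo() and updateHeader() share a framing: a raw int holding the serialized size, followed by the protobuf bytes. A sketch of that layout, written against the generic protobuf 2.x Message interface rather than the project's generated StorageBlockHeader type:

    #include <cstddef>

    #include "google/protobuf/message.h"

    #include "glog/logging.h"

    // Writes [int size][serialized message] into 'dest', which must hold at
    // least sizeof(int) + header.ByteSize() bytes. ByteSize() is the
    // protobuf 2.x call used throughout this diff.
    void WriteLengthPrefixedHeader(const google::protobuf::Message &header,
                                   void *dest) {
      const int size = header.ByteSize();
      *static_cast<int*>(dest) = size;
      CHECK(header.SerializeToArray(static_cast<char*>(dest) + sizeof(int),
                                    size))
          << "Failed to do binary serialization of " << header.GetTypeName();
    }
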
http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/d894e43c/storage/TupleStorageSubBlock.cpp
----------------------------------------------------------------------
diff --git a/storage/TupleStorageSubBlock.cpp b/storage/TupleStorageSubBlock.cpp
index aafcd33..2e4eae4 100644
--- a/storage/TupleStorageSubBlock.cpp
+++ b/storage/TupleStorageSubBlock.cpp
@@ -1,6 +1,6 @@
 /**
  *   Copyright 2011-2015 Quickstep Technologies LLC.
- *   Copyright 2015 Pivotal Software, Inc.
+ *   Copyright 2015-2016 Pivotal Software, Inc.
  *
  *   Licensed under the Apache License, Version 2.0 (the "License");
  *   you may not use this file except in compliance with the License.
@@ -22,8 +22,6 @@
 #endif
 
 #include "storage/TupleIdSequence.hpp"
-#include "storage/ValueAccessor.hpp"
-#include "utility/Macros.hpp"
 
 #ifdef QUICKSTEP_DEBUG
 #include "catalog/CatalogAttribute.hpp"
@@ -33,6 +31,8 @@
 #include "types/containers/Tuple.hpp"
 #endif
 
+#include "glog/logging.h"
+
 namespace quickstep {
 
 tuple_id TupleStorageSubBlock::numTuples() const {
@@ -49,7 +49,7 @@ tuple_id TupleStorageSubBlock::numTuples() const {
       }
     }
     // Should have at least one tuple, otherwise isEmpty() would have been true.
-    DEBUG_ASSERT(count > 0);
+    DCHECK_GT(count, 0);
     return count;
   }
 }

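The conversion from DEBUG_ASSERT(count > 0) to DCHECK_GT(count, 0) is not purely cosmetic: the two-argument form logs both operand values when it fires. A toy illustration:

    #include "glog/logging.h"

    int main() {
      const int count = 1;  // pretend this came from the counting loop above
      // On failure glog prints output like
      //   "Check failed: count > 0 (0 vs. 0)"
      // whereas a plain boolean DCHECK can only report the expression text.
      DCHECK_GT(count, 0);
      return 0;
    }
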
http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/d894e43c/storage/TupleStorageSubBlock.hpp
----------------------------------------------------------------------
diff --git a/storage/TupleStorageSubBlock.hpp b/storage/TupleStorageSubBlock.hpp
index 6da4698..8cee3b9 100644
--- a/storage/TupleStorageSubBlock.hpp
+++ b/storage/TupleStorageSubBlock.hpp
@@ -1,6 +1,6 @@
 /**
  *   Copyright 2011-2015 Quickstep Technologies LLC.
- *   Copyright 2015 Pivotal Software, Inc.
+ *   Copyright 2015-2016 Pivotal Software, Inc.
  *
  *   Licensed under the Apache License, Version 2.0 (the "License");
  *   you may not use this file except in compliance with the License.
@@ -18,6 +18,7 @@
 #ifndef QUICKSTEP_STORAGE_TUPLE_STORAGE_SUB_BLOCK_HPP_
 #define QUICKSTEP_STORAGE_TUPLE_STORAGE_SUB_BLOCK_HPP_
 
+#include <cstddef>
 #include <unordered_map>
 #include <vector>
 
@@ -28,6 +29,8 @@
 #include "types/TypedValue.hpp"
 #include "utility/Macros.hpp"
 
+#include "glog/logging.h"
+
 namespace quickstep {
 
 class CatalogRelationSchema;
@@ -404,9 +407,9 @@ class TupleStorageSubBlock {
    **/
   virtual TupleIdSequence* getMatchesForPredicate(const ComparisonPredicate &predicate,
                                                   const TupleIdSequence *filter) const {
-    FATAL_ERROR("Called TupleStorageSubBlock::getMatchesForPredicate() on a "
-                "TupleStorageStorageBlock that does not provide any non-scan "
-                "method for evaluating predicates.");
+    LOG(FATAL) << "Called TupleStorageSubBlock::getMatchesForPredicate() on a "
+               << "TupleStorageStorageBlock that does not provide any non-scan "
+               << "method for evaluating predicates.";
   }
 
   /**


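Note that getMatchesForPredicate() above returns a pointer but its LOG(FATAL) body has no return statement. That compiles cleanly because glog marks its fatal logging path noreturn on compilers that support the attribute. A condensed sketch of the same default-implementation pattern (class and method abbreviated):

    #include "glog/logging.h"

    class TupleIdSequence;  // opaque for this sketch

    class SubBlock {
     public:
      virtual ~SubBlock() {}

      // Default implementation for sub-blocks that advertise no non-scan
      // predicate evaluation; LOG(FATAL) never returns, so no return value
      // is needed.
      virtual TupleIdSequence* getMatchesForPredicate() const {
        LOG(FATAL) << "Called getMatchesForPredicate() on a sub-block that "
                   << "does not support it.";
      }
    };
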
[3/3] incubator-quickstep git commit: Minor Improvements in Storage.

Posted by zu...@apache.org.
Minor Improvements in Storage.


Project: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/commit/d894e43c
Tree: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/tree/d894e43c
Diff: http://git-wip-us.apache.org/repos/asf/incubator-quickstep/diff/d894e43c

Branch: refs/heads/storage-fix
Commit: d894e43ceb7b5fce5303b1208eef2ebe76b8831c
Parents: 2d39b8e
Author: Zuyu Zhang <zz...@pivotal.io>
Authored: Tue May 31 12:20:50 2016 -0700
Committer: Zuyu Zhang <zz...@pivotal.io>
Committed: Wed Jun 8 21:43:24 2016 -0700

----------------------------------------------------------------------
 .../BasicColumnStoreTupleStorageSubBlock.cpp    |  29 +-
 storage/BloomFilterIndexSubBlock.cpp            |  21 +-
 storage/CMakeLists.txt                          |  18 +-
 storage/CSBTreeIndexSubBlock.cpp                | 316 ++++++------
 storage/CSBTreeIndexSubBlock.hpp                |  14 +-
 storage/ColumnStoreUtil.cpp                     | 482 +++++++++----------
 storage/ColumnStoreUtil.hpp                     |   6 +-
 ...ompressedColumnStoreTupleStorageSubBlock.cpp |  73 ++-
 ...ressedPackedRowStoreTupleStorageSubBlock.cpp |  61 ++-
 storage/CompressedTupleStorageSubBlock.cpp      | 167 ++++---
 storage/CompressedTupleStorageSubBlock.hpp      |  44 +-
 storage/PackedRowStoreTupleStorageSubBlock.cpp  |  12 +-
 storage/SplitRowStoreTupleStorageSubBlock.cpp   |  12 +-
 storage/StorageBlock.cpp                        | 170 +++----
 storage/StorageBlock.hpp                        |   6 +-
 storage/StorageBlockLayout.cpp                  |  25 +-
 storage/TupleStorageSubBlock.cpp                |   8 +-
 storage/TupleStorageSubBlock.hpp                |  11 +-
 18 files changed, 719 insertions(+), 756 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/d894e43c/storage/BasicColumnStoreTupleStorageSubBlock.cpp
----------------------------------------------------------------------
diff --git a/storage/BasicColumnStoreTupleStorageSubBlock.cpp b/storage/BasicColumnStoreTupleStorageSubBlock.cpp
index 0c913ff..6b1262a 100644
--- a/storage/BasicColumnStoreTupleStorageSubBlock.cpp
+++ b/storage/BasicColumnStoreTupleStorageSubBlock.cpp
@@ -1,6 +1,6 @@
 /**
  *   Copyright 2011-2015 Quickstep Technologies LLC.
- *   Copyright 2015 Pivotal Software, Inc.
+ *   Copyright 2015-2016 Pivotal Software, Inc.
  *
  *   Licensed under the Apache License, Version 2.0 (the "License");
  *   you may not use this file except in compliance with the License.
@@ -52,6 +52,8 @@
 #include "utility/PtrVector.hpp"
 #include "utility/ScopedBuffer.hpp"
 
+#include "glog/logging.h"
+
 using std::memcpy;
 using std::memmove;
 using std::size_t;
@@ -106,9 +108,9 @@ BasicColumnStoreTupleStorageSubBlock::BasicColumnStoreTupleStorageSubBlock(
                            sub_block_memory_size),
       sorted_(true),
       header_(static_cast<BasicColumnStoreHeader*>(sub_block_memory)) {
-  if (!DescriptionIsValid(relation_, description_)) {
-    FATAL_ERROR("Attempted to construct a BasicColumnStoreTupleStorageSubBlock from an invalid description.");
-  }
+  CHECK(DescriptionIsValid(relation_, description_))
+      << "Attempted to construct a BasicColumnStoreTupleStorageSubBlock from an invalid description:\n"
+      << description_.DebugString();
 
   sort_column_id_ = description_.GetExtension(BasicColumnStoreTupleStorageSubBlockDescription::sort_attribute_id);
   sort_column_type_ = &(relation_.getAttributeById(sort_column_id_)->getType());
@@ -194,7 +196,7 @@ bool BasicColumnStoreTupleStorageSubBlock::DescriptionIsValid(
   }
 
   // Check that the specified sort attribute exists and can be ordered by LessComparison.
-  attribute_id sort_attribute_id = description.GetExtension(
+  const attribute_id sort_attribute_id = description.GetExtension(
       BasicColumnStoreTupleStorageSubBlockDescription::sort_attribute_id);
   if (!relation.hasAttributeWithId(sort_attribute_id)) {
     return false;
@@ -211,7 +213,7 @@ bool BasicColumnStoreTupleStorageSubBlock::DescriptionIsValid(
 std::size_t BasicColumnStoreTupleStorageSubBlock::EstimateBytesPerTuple(
     const CatalogRelationSchema &relation,
     const TupleStorageSubBlockDescription &description) {
-  DEBUG_ASSERT(DescriptionIsValid(relation, description));
+  CHECK(DescriptionIsValid(relation, description));
 
   // NOTE(chasseur): We round-up the number of bytes needed in the NULL bitmaps
   // to avoid estimating 0 bytes needed for a relation with less than 8
@@ -582,15 +584,14 @@ TupleIdSequence* BasicColumnStoreTupleStorageSubBlock::getMatchesForPredicate(
       column_stripes_[sort_column_id_],
       header_->num_tuples - header_->nulls_in_sort_column);
 
-  if (matches == nullptr) {
-    FATAL_ERROR("Called BasicColumnStoreTupleStorageSubBlock::getMatchesForPredicate() "
-                "with a predicate that can only be evaluated with a scan.");
-  } else {
-    if (filter != nullptr) {
-      matches->intersectWith(*filter);
-    }
-    return matches;
+  CHECK(matches != nullptr)
+      << "Called BasicColumnStoreTupleStorageSubBlock::getMatchesForPredicate() "
+      << "with a predicate that can only be evaluated with a scan.";
+
+  if (filter != nullptr) {
+    matches->intersectWith(*filter);
   }
+  return matches;
 }
 
 void BasicColumnStoreTupleStorageSubBlock::insertTupleAtPosition(

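The new constructor message appends description_.DebugString(), so an abort now shows exactly which protobuf description was rejected. A sketch of the idiom against the generic Message interface (ValidateOrDie is a hypothetical helper):

    #include "google/protobuf/message.h"

    #include "glog/logging.h"

    // 'valid' would be the result of DescriptionIsValid() in the real code.
    void ValidateOrDie(const bool valid,
                       const google::protobuf::Message &desc) {
      // DebugString() renders the message in protobuf text format; it is
      // only computed if the CHECK actually fails.
      CHECK(valid)
          << "Attempted to construct a sub-block from an invalid description:\n"
          << desc.DebugString();
    }
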
http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/d894e43c/storage/BloomFilterIndexSubBlock.cpp
----------------------------------------------------------------------
diff --git a/storage/BloomFilterIndexSubBlock.cpp b/storage/BloomFilterIndexSubBlock.cpp
index e806217..e23da90 100644
--- a/storage/BloomFilterIndexSubBlock.cpp
+++ b/storage/BloomFilterIndexSubBlock.cpp
@@ -59,7 +59,8 @@ BloomFilterIndexSubBlock::BloomFilterIndexSubBlock(const TupleStorageSubBlock &t
       bit_array_size_in_bytes_(description.GetExtension(
                                    BloomFilterIndexSubBlockDescription::bloom_filter_size)) {
   CHECK(DescriptionIsValid(relation_, description_))
-      << "Attempted to construct an BloomFilterIndexSubBlock from an invalid description.";
+      << "Attempted to construct an BloomFilterIndexSubBlock from an invalid description:\n"
+      << description_.DebugString();
 
   // Store the attribute ids that are being indexed.
   indexed_attribute_ids_.reserve(description.indexed_attribute_ids_size());
@@ -88,7 +89,7 @@ BloomFilterIndexSubBlock::~BloomFilterIndexSubBlock() {
 }
 
 bool BloomFilterIndexSubBlock::DescriptionIsValid(const CatalogRelationSchema &relation,
-                                         const IndexSubBlockDescription &description) {
+                                                  const IndexSubBlockDescription &description) {
   if (!description.IsInitialized()) {
     return false;
   }
@@ -110,7 +111,7 @@ bool BloomFilterIndexSubBlock::DescriptionIsValid(const CatalogRelationSchema &r
 
 std::size_t BloomFilterIndexSubBlock::EstimateBytesPerTuple(const CatalogRelationSchema &relation,
                                                             const IndexSubBlockDescription &description) {
-  DCHECK(DescriptionIsValid(relation, description));
+  CHECK(DescriptionIsValid(relation, description));
   // Note: Returning zero here causes EstimateBytesPerBlock() to be invoked for size computation.
   return kZeroSize;
 }
@@ -118,12 +119,12 @@ std::size_t BloomFilterIndexSubBlock::EstimateBytesPerTuple(const CatalogRelatio
 std::size_t BloomFilterIndexSubBlock::EstimateBytesPerBlock(const CatalogRelationSchema &relation,
                                                             const IndexSubBlockDescription &description) {
   // Note: This function is only invoked when EstimateBytesPerTuple() returns zero.
-  DCHECK(DescriptionIsValid(relation, description));
+  CHECK(DescriptionIsValid(relation, description));
   return description.GetExtension(BloomFilterIndexSubBlockDescription::bloom_filter_size);
 }
 
 bool BloomFilterIndexSubBlock::addEntry(const tuple_id tuple) {
-  DCHECK(is_initialized_);
+  CHECK(is_initialized_);
   if (!is_consistent_) {
     return false;
   }
@@ -143,7 +144,7 @@ bool BloomFilterIndexSubBlock::addEntry(const tuple_id tuple) {
 }
 
 bool BloomFilterIndexSubBlock::bulkAddEntries(const TupleIdSequence &tuples) {
-  DCHECK(is_initialized_);
+  CHECK(is_initialized_);
   if (!is_consistent_) {
     return false;
   }
@@ -164,7 +165,7 @@ void BloomFilterIndexSubBlock::bulkRemoveEntries(const TupleIdSequence &tuples)
 
 predicate_cost_t BloomFilterIndexSubBlock::estimatePredicateEvaluationCost(
     const ComparisonPredicate &predicate) const {
-  DCHECK(is_initialized_);
+  CHECK(is_initialized_);
   BloomFilterSelectivity selectivity = getSelectivityForPredicate(predicate);
   // Note: A Bloomfilter index is only useful when it gives a zero selectivity
   //       in which case a block can be skipped entirely.
@@ -180,7 +181,7 @@ predicate_cost_t BloomFilterIndexSubBlock::estimatePredicateEvaluationCost(
 TupleIdSequence* BloomFilterIndexSubBlock::getMatchesForPredicate(
     const ComparisonPredicate &predicate,
     const TupleIdSequence *filter) const {
-  DCHECK(is_initialized_);
+  CHECK(is_initialized_);
   if (filter != nullptr) {
     LOG(FATAL) << "BloomFilterIndex does not support filter evaluation with predicate.";
   }
@@ -217,7 +218,7 @@ TupleIdSequence* BloomFilterIndexSubBlock::getMatchesForPredicate(
 
 BloomFilterIndexSubBlock::BloomFilterSelectivity
     BloomFilterIndexSubBlock::getSelectivityForPredicate(const ComparisonPredicate &predicate) const {
-  DCHECK(is_initialized_);
+  CHECK(is_initialized_);
   if (!is_consistent_) {
     return BloomFilterSelectivity::kSelectivityUnknown;
   }
@@ -258,7 +259,7 @@ BloomFilterIndexSubBlock::BloomFilterSelectivity
 }
 
 bool BloomFilterIndexSubBlock::rebuild() {
-  DCHECK(is_initialized_);
+  CHECK(is_initialized_);
   bloom_filter_->reset();
   bool didSucceed = true;
   if (tuple_store_.isPacked()) {

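This file moves the other way from most of the diff: several DCHECKs are promoted to CHECKs, so the is_initialized_ precondition is enforced in release builds as well. A sketch of the resulting split between always-on and debug-only assertions (the Index class is invented):

    #include "glog/logging.h"

    class Index {
     public:
      void initialize() { is_initialized_ = true; }

      bool addEntry(const int tuple) {
        // Always enforced: calling addEntry() before initialize() is a
        // usage error worth catching even in release builds.
        CHECK(is_initialized_);
        // Debug-only sanity check on the argument.
        DCHECK_GE(tuple, 0);
        return true;
      }

     private:
      bool is_initialized_ = false;
    };
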
http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/d894e43c/storage/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/storage/CMakeLists.txt b/storage/CMakeLists.txt
index a77976a..caf3bae 100644
--- a/storage/CMakeLists.txt
+++ b/storage/CMakeLists.txt
@@ -284,6 +284,7 @@ target_link_libraries(quickstep_storage_AggregationOperationState_proto
                       quickstep_storage_HashTable_proto
                       ${PROTOBUF_LIBRARY})
 target_link_libraries(quickstep_storage_BasicColumnStoreTupleStorageSubBlock
+                      glog
                       quickstep_catalog_CatalogAttribute
                       quickstep_catalog_CatalogRelationSchema
                       quickstep_catalog_CatalogTypedefs
@@ -415,11 +416,11 @@ if(QUICKSTEP_HAVE_BITWEAVING)
 endif()
 # CMAKE_VALIDATE_IGNORE_END
 target_link_libraries(quickstep_storage_ColumnStoreUtil
+                      glog
                       quickstep_catalog_CatalogAttribute
                       quickstep_catalog_CatalogRelationSchema
                       quickstep_catalog_CatalogTypedefs
                       quickstep_expressions_predicate_ComparisonPredicate
-                      quickstep_expressions_predicate_Predicate
                       quickstep_expressions_scalar_Scalar
                       quickstep_expressions_scalar_ScalarAttribute
                       quickstep_storage_StorageBlockInfo
@@ -449,6 +450,7 @@ target_link_libraries(quickstep_storage_CompressedBlockBuilder
                       quickstep_utility_PtrMap
                       quickstep_utility_PtrVector)
 target_link_libraries(quickstep_storage_CompressedColumnStoreTupleStorageSubBlock
+                      glog
                       quickstep_catalog_CatalogAttribute
                       quickstep_catalog_CatalogRelationSchema
                       quickstep_catalog_CatalogTypedefs
@@ -484,6 +486,7 @@ target_link_libraries(quickstep_storage_CompressedColumnStoreValueAccessor
                       quickstep_utility_Macros
                       quickstep_utility_PtrMap)
 target_link_libraries(quickstep_storage_CompressedPackedRowStoreTupleStorageSubBlock
+                      glog
                       quickstep_catalog_CatalogAttribute
                       quickstep_catalog_CatalogRelationSchema
                       quickstep_catalog_CatalogTypedefs
@@ -536,24 +539,26 @@ target_link_libraries(quickstep_storage_CompressedStoreUtil
                       quickstep_types_operations_comparisons_ComparisonID
                       quickstep_utility_Macros)
 target_link_libraries(quickstep_storage_CompressedTupleStorageSubBlock
+                      glog
+                      quickstep_catalog_CatalogAttribute
                       quickstep_catalog_CatalogRelationSchema
                       quickstep_catalog_CatalogTypedefs
                       quickstep_compression_CompressionDictionary
                       quickstep_compression_CompressionDictionaryLite
                       quickstep_expressions_predicate_ComparisonPredicate
-                      quickstep_expressions_predicate_Predicate
                       quickstep_expressions_scalar_Scalar
                       quickstep_expressions_scalar_ScalarAttribute
                       quickstep_storage_CompressedBlockBuilder
                       quickstep_storage_CompressedStoreUtil
+                      quickstep_storage_StorageBlockInfo
                       quickstep_storage_StorageBlockLayout_proto
                       quickstep_storage_StorageErrors
                       quickstep_storage_TupleIdSequence
                       quickstep_storage_TupleStorageSubBlock
                       quickstep_storage_ValueAccessor
                       quickstep_storage_ValueAccessorUtil
-                      quickstep_types_TypedValue
-                      quickstep_types_containers_Tuple
+                      quickstep_types_Type
+                      quickstep_types_TypeID
                       quickstep_types_operations_comparisons_ComparisonID
                       quickstep_utility_Macros
                       quickstep_utility_PtrMap)
@@ -754,6 +759,7 @@ target_link_libraries(quickstep_storage_LinearOpenAddressingHashTable
                       quickstep_utility_Macros
                       quickstep_utility_PrimeNumber)
 target_link_libraries(quickstep_storage_PackedRowStoreTupleStorageSubBlock
+                      glog
                       quickstep_catalog_CatalogAttribute
                       quickstep_catalog_CatalogRelationSchema
                       quickstep_catalog_CatalogTypedefs
@@ -856,6 +862,7 @@ target_link_libraries(quickstep_storage_SimpleScalarSeparateChainingHashTable
                       quickstep_utility_Macros
                       quickstep_utility_PrimeNumber)
 target_link_libraries(quickstep_storage_SplitRowStoreTupleStorageSubBlock
+                      glog
                       quickstep_catalog_CatalogRelationSchema
                       quickstep_expressions_predicate_PredicateCost
                       quickstep_storage_SplitRowStoreValueAccessor
@@ -888,6 +895,7 @@ target_link_libraries(quickstep_storage_StorageBlob
                       quickstep_utility_Macros)
 target_link_libraries(quickstep_storage_StorageBlock
                       glog
+                      quickstep_catalog_CatalogAttribute
                       quickstep_catalog_CatalogRelationSchema
                       quickstep_catalog_CatalogTypedefs
                       quickstep_expressions_aggregation_AggregationHandle
@@ -1002,13 +1010,13 @@ target_link_libraries(quickstep_storage_TupleIdSequence
 target_link_libraries(quickstep_storage_TupleReference
                       quickstep_storage_StorageBlockInfo)
 target_link_libraries(quickstep_storage_TupleStorageSubBlock
+                      glog
                       quickstep_catalog_CatalogAttribute
                       quickstep_catalog_CatalogRelationSchema
                       quickstep_catalog_CatalogTypedefs
                       quickstep_expressions_predicate_PredicateCost
                       quickstep_storage_StorageBlockInfo
                       quickstep_storage_TupleIdSequence
-                      quickstep_storage_ValueAccessor
                       quickstep_types_Type
                       quickstep_types_TypedValue
                       quickstep_types_containers_Tuple

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/d894e43c/storage/CSBTreeIndexSubBlock.cpp
----------------------------------------------------------------------
diff --git a/storage/CSBTreeIndexSubBlock.cpp b/storage/CSBTreeIndexSubBlock.cpp
index 8535398..000ff70 100644
--- a/storage/CSBTreeIndexSubBlock.cpp
+++ b/storage/CSBTreeIndexSubBlock.cpp
@@ -53,6 +53,8 @@
 #include "utility/PtrVector.hpp"
 #include "utility/ScopedBuffer.hpp"
 
+#include "glog/logging.h"
+
 using std::memcpy;
 using std::memmove;
 using std::pair;
@@ -76,7 +78,7 @@ class CompressedCodeLessComparator : public UncheckedComparator {
   }
 
   bool compareTypedValues(const TypedValue &left, const TypedValue &right) const {
-    FATAL_ERROR("Can not use CompressedCodeLessComparator to compare TypedValue.");
+    LOG(FATAL) << "Can not use CompressedCodeLessComparator to compare TypedValue.";
   }
 
   inline bool compareDataPtrs(const void *left, const void *right) const {
@@ -88,11 +90,11 @@ class CompressedCodeLessComparator : public UncheckedComparator {
   }
 
   bool compareTypedValueWithDataPtr(const TypedValue &left, const void *right) const {
-    FATAL_ERROR("Can not use CompressedCodeLessComparator to compare TypedValue.");
+    LOG(FATAL) << "Can not use CompressedCodeLessComparator to compare TypedValue.";
   }
 
   bool compareDataPtrWithTypedValue(const void *left, const TypedValue &right) const {
-    FATAL_ERROR("Can not use CompressedCodeLessComparator to compare TypedValue.");
+    LOG(FATAL) << "Can not use CompressedCodeLessComparator to compare TypedValue.";
   }
 };
 
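One convention worth calling out before the remaining hunks: where the old asserts wrote DEBUG_ASSERT(actual == expected), the new DCHECK_EQ calls consistently put the expected value first, following the usual glog/gtest convention so the failure text reads "expected vs. actual" (glog itself does not enforce an order). For example:

    #include "glog/logging.h"

    int main() {
      const int max_keys_internal = 8;
      int num_keys = 8;
      // When the values diverge, this fails with output in the style of
      //   "Check failed: max_keys_internal == num_keys (8 vs. 7)".
      DCHECK_EQ(max_keys_internal, num_keys);
      return 0;
    }
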
@@ -194,7 +196,7 @@ CompositeKeyLessComparator::CompositeKeyLessComparator(
        it != owner_.indexed_attribute_ids_.end();
        ++it) {
     const Type &attribute_type = relation.getAttributeById(*it)->getType();
-    DEBUG_ASSERT(!attribute_type.isVariableLength());
+    DCHECK(!attribute_type.isVariableLength());
     attribute_comparators_.push_back(
         ComparisonFactory::GetComparison(ComparisonID::kLess).makeUncheckedComparatorForTypes(
             attribute_type,
@@ -203,7 +205,7 @@ CompositeKeyLessComparator::CompositeKeyLessComparator(
 }
 
 bool CompositeKeyLessComparator::compareDataPtrsInl(const void *left, const void *right) const {
-  DEBUG_ASSERT(attribute_comparators_.size() == owner_.indexed_attribute_offsets_.size());
+  DCHECK_EQ(owner_.indexed_attribute_offsets_.size(), attribute_comparators_.size());
   vector<size_t>::const_iterator offset_it = owner_.indexed_attribute_offsets_.begin();
   for (PtrVector<UncheckedComparator>::const_iterator comparator_it = attribute_comparators_.begin();
        comparator_it != attribute_comparators_.end();
@@ -245,9 +247,9 @@ CSBTreeIndexSubBlock::CSBTreeIndexSubBlock(const TupleStorageSubBlock &tuple_sto
       key_type_(nullptr),
       next_free_node_group_(kNodeGroupNone),
       num_free_node_groups_(0) {
-  if (!DescriptionIsValid(relation_, description_)) {
-    FATAL_ERROR("Attempted to construct a CSBTreeIndexSubBlock from an invalid description.");
-  }
+  DCHECK(DescriptionIsValid(relation_, description_))
+      << "Attempted to construct a CSBTreeIndexSubBlock from an invalid description:\n"
+      << description_.DebugString();
 
   const int num_indexed_attributes = description_.indexed_attribute_ids_size();
   if (num_indexed_attributes > 1) {
@@ -341,7 +343,7 @@ bool CSBTreeIndexSubBlock::DescriptionIsValid(const CatalogRelationSchema &relat
 std::size_t CSBTreeIndexSubBlock::EstimateBytesPerTuple(
     const CatalogRelationSchema &relation,
     const IndexSubBlockDescription &description) {
-  DEBUG_ASSERT(DescriptionIsValid(relation, description));
+  DCHECK(DescriptionIsValid(relation, description));
 
   size_t key_length = 0;
   for (int indexed_attribute_num = 0;
@@ -362,8 +364,8 @@ std::size_t CSBTreeIndexSubBlock::EstimateBytesPerBlock(
 }
 
 bool CSBTreeIndexSubBlock::addEntry(const tuple_id tuple) {
-  DEBUG_ASSERT(initialized_);
-  DEBUG_ASSERT(tuple_store_.hasTupleWithID(tuple));
+  DCHECK(initialized_);
+  DCHECK(tuple_store_.hasTupleWithID(tuple));
 
   InsertReturnValue retval;
 
@@ -375,7 +377,7 @@ bool CSBTreeIndexSubBlock::addEntry(const tuple_id tuple) {
         return true;
       }
     } else {
-      DEBUG_ASSERT(!composite_key_buffer.empty());
+      DCHECK(!composite_key_buffer.empty());
     }
 
     retval = rootInsertHelper(tuple,
@@ -406,8 +408,8 @@ bool CSBTreeIndexSubBlock::addEntry(const tuple_id tuple) {
         retval = compressedKeyAddEntryHelper<uint32_t>(tuple, code);
         break;
       default:
-        FATAL_ERROR("Unexpected compressed key byte-length (not 1, 2, or 4) encountered "
-                    "in CSBTreeIndexSubBlock::addEntry()");
+        LOG(FATAL) << "Unexpected compressed key byte-length (not 1, 2, or 4) encountered "
+                   << "in CSBTreeIndexSubBlock::addEntry()";
     }
   } else {
     TypedValue typed_key(tuple_store_.getAttributeValueTyped(tuple, indexed_attribute_ids_.front()));
@@ -417,7 +419,7 @@ bool CSBTreeIndexSubBlock::addEntry(const tuple_id tuple) {
         return true;
       }
     } else {
-      DEBUG_ASSERT(!typed_key.isNull());
+      DCHECK(!typed_key.isNull());
     }
 
     InvokeOnLessComparatorForTypeIgnoreNullability(
@@ -432,12 +434,12 @@ bool CSBTreeIndexSubBlock::addEntry(const tuple_id tuple) {
     return false;
   }
 
-  DEBUG_ASSERT(retval.new_node_group_id == kNodeGroupNone);
-  if (retval.split_node_least_key != NULL) {
+  DCHECK_EQ(kNodeGroupNone, retval.new_node_group_id);
+  if (retval.split_node_least_key != nullptr) {
     // The root was split, must create a new root.
     // Allocate the new root.
     int new_root_group_id = allocateNodeGroup();
-    DEBUG_ASSERT(new_root_group_id >= 0);
+    DCHECK_GE(new_root_group_id, 0);
     void *new_root = getNode(new_root_group_id, 0);
 
     // Set up the new root's header.
@@ -458,7 +460,7 @@ bool CSBTreeIndexSubBlock::addEntry(const tuple_id tuple) {
 }
 
 bool CSBTreeIndexSubBlock::bulkAddEntries(const TupleIdSequence &tuples) {
-  DEBUG_ASSERT(initialized_);
+  DCHECK(initialized_);
   // TODO(chasseur): Can possibly be more efficient in some cases if we sort
   // and insert groups where possible.
   for (TupleIdSequence::const_iterator it = tuples.begin();
@@ -478,7 +480,7 @@ bool CSBTreeIndexSubBlock::bulkAddEntries(const TupleIdSequence &tuples) {
 }
 
 void CSBTreeIndexSubBlock::removeEntry(const tuple_id tuple) {
-  DEBUG_ASSERT(initialized_);
+  DCHECK(initialized_);
   if (key_is_composite_) {
     ScopedBuffer composite_key_buffer(makeKeyCopy(tuple));
     if (key_is_nullable_) {
@@ -488,7 +490,7 @@ void CSBTreeIndexSubBlock::removeEntry(const tuple_id tuple) {
         return;
       }
     } else {
-      DEBUG_ASSERT(!composite_key_buffer.empty());
+      DCHECK(!composite_key_buffer.empty());
     }
 
     removeEntryFromLeaf(tuple,
@@ -524,8 +526,8 @@ void CSBTreeIndexSubBlock::removeEntry(const tuple_id tuple) {
         compressedKeyRemoveEntryHelper<uint32_t>(tuple, code);
         break;
       default:
-        FATAL_ERROR("Unexpected compressed key byte-length (not 1, 2, or 4) encountered "
-                    "in CSBTreeIndexSubBlock::removeEntry()");
+        LOG(FATAL) << "Unexpected compressed key byte-length (not 1, 2, or 4) encountered "
+                   << "in CSBTreeIndexSubBlock::removeEntry()";
     }
   } else {
     TypedValue typed_key(tuple_store_.getAttributeValueTyped(tuple, indexed_attribute_ids_.front()));
@@ -536,7 +538,7 @@ void CSBTreeIndexSubBlock::removeEntry(const tuple_id tuple) {
         return;
       }
     } else {
-      DEBUG_ASSERT(!typed_key.isNull());
+      DCHECK(!typed_key.isNull());
     }
 
     InvokeOnLessComparatorForTypeIgnoreNullability(
@@ -554,7 +556,7 @@ void CSBTreeIndexSubBlock::removeEntry(const tuple_id tuple) {
 }
 
 void CSBTreeIndexSubBlock::bulkRemoveEntries(const TupleIdSequence &tuples) {
-  DEBUG_ASSERT(initialized_);
+  DCHECK(initialized_);
   // TODO(chasseur): Can possibly be more efficient in some cases if we sort
   // and scan through leaves.
   for (TupleIdSequence::const_iterator it = tuples.begin();
@@ -577,34 +579,34 @@ predicate_cost_t CSBTreeIndexSubBlock::estimatePredicateEvaluationCost(
 
 TupleIdSequence* CSBTreeIndexSubBlock::getMatchesForPredicate(const ComparisonPredicate &predicate,
                                                               const TupleIdSequence *filter) const {
-  DEBUG_ASSERT(initialized_);
+  DCHECK(initialized_);
   if (key_is_composite_) {
     // TODO(chasseur): Evaluate predicates on composite keys.
-    FATAL_ERROR("CSBTreeIndexSubBlock::getMatchesForPredicate() is unimplemented for composite keys.");
+    LOG(FATAL) << "CSBTreeIndexSubBlock::getMatchesForPredicate() is unimplemented for composite keys.";
   }
 
   if (!predicate.isAttributeLiteralComparisonPredicate()) {
-    FATAL_ERROR("CSBTreeIndexSubBlock::getMatchesForPredicate() can not "
-                "evaluate predicates other than simple comparisons.");
+    LOG(FATAL) << "CSBTreeIndexSubBlock::getMatchesForPredicate() can not "
+               << "evaluate predicates other than simple comparisons.";
   }
 
-  const CatalogAttribute *comparison_attribute = NULL;
+  const CatalogAttribute *comparison_attribute = nullptr;
   bool left_literal = false;
   if (predicate.getLeftOperand().hasStaticValue()) {
-    DEBUG_ASSERT(predicate.getRightOperand().getDataSource() == Scalar::kAttribute);
+    DCHECK_EQ(Scalar::kAttribute, predicate.getRightOperand().getDataSource());
     comparison_attribute
         = &(static_cast<const ScalarAttribute&>(predicate.getRightOperand()).getAttribute());
     left_literal = true;
   } else {
-    DEBUG_ASSERT(predicate.getLeftOperand().getDataSource() == Scalar::kAttribute);
+    DCHECK_EQ(Scalar::kAttribute, predicate.getLeftOperand().getDataSource());
     comparison_attribute
         = &(static_cast<const ScalarAttribute&>(predicate.getLeftOperand()).getAttribute());
     left_literal = false;
   }
 
   if (comparison_attribute->getID() != indexed_attribute_ids_.front()) {
-    FATAL_ERROR("CSBTreeIndexSubBlock::getMatchesForPredicate() can not "
-                "evaluate predicates on non-indexed attributes.");
+    LOG(FATAL) << "CSBTreeIndexSubBlock::getMatchesForPredicate() can not "
+               << "evaluate predicates on non-indexed attributes.";
   }
 
   TypedValue comparison_literal;
@@ -683,9 +685,9 @@ bool CSBTreeIndexSubBlock::initialize(const bool new_block) {
     const CompressedTupleStorageSubBlock &compressed_tuple_store
         = static_cast<const CompressedTupleStorageSubBlock&>(tuple_store_);
     if (!compressed_tuple_store.compressedBlockIsBuilt()) {
-      FATAL_ERROR("CSBTreeIndexSubBlock::initialize() called with a key which "
-                  "may be compressed before the associated TupleStorageSubBlock "
-                  "was built.");
+      LOG(FATAL) << "CSBTreeIndexSubBlock::initialize() called with a key which "
+                 << "may be compressed before the associated TupleStorageSubBlock "
+                 << "was built.";
     }
 
     if (compressed_tuple_store.compressedAttributeIsDictionaryCompressed(indexed_attribute_ids_.front())
@@ -713,7 +715,7 @@ bool CSBTreeIndexSubBlock::initialize(const bool new_block) {
       key_length_bytes_ += attr_type.maximumByteLength();
     }
   }
-  DEBUG_ASSERT(key_length_bytes_ > 0);
+  DCHECK_GT(key_length_bytes_, 0u);
   key_tuple_id_pair_length_bytes_ = key_length_bytes_ + sizeof(tuple_id);
 
   // Compute the number of keys that can be stored in internal and leaf nodes.
@@ -780,7 +782,7 @@ bool CSBTreeIndexSubBlock::initialize(const bool new_block) {
     num_free_node_groups_ = num_node_groups - node_group_used_bitmap_->onesCount();
     if (num_free_node_groups_ > 0) {
       next_free_node_group_ = node_group_used_bitmap_->firstZero();
-      DEBUG_ASSERT(static_cast<size_t>(next_free_node_group_) < node_group_used_bitmap_->size());
+      DCHECK_LT(static_cast<size_t>(next_free_node_group_), node_group_used_bitmap_->size());
     }
   }
 
@@ -789,14 +791,14 @@ bool CSBTreeIndexSubBlock::initialize(const bool new_block) {
 
 void CSBTreeIndexSubBlock::clearIndex() {
   // Reset the free node group bitmap.
-  DEBUG_ASSERT(node_group_used_bitmap_->size() > 0);
+  DCHECK_GT(node_group_used_bitmap_->size(), 0u);
   node_group_used_bitmap_->clear();
   next_free_node_group_ = 0;
   num_free_node_groups_ = node_group_used_bitmap_->size();
 
   // Allocate the root node.
   setRootNodeGroupNumber(allocateNodeGroup());
-  DEBUG_ASSERT(getRootNodeGroupNumber() >= 0);
+  DCHECK_GE(getRootNodeGroupNumber(), 0);
 
   // Initialize the root node as an empty leaf node.
   NodeHeader *root_header = static_cast<NodeHeader*>(getRootNode());
@@ -806,8 +808,8 @@ void CSBTreeIndexSubBlock::clearIndex() {
 }
 
 void* CSBTreeIndexSubBlock::makeKeyCopy(const tuple_id tuple) const {
-  DEBUG_ASSERT(tuple_store_.hasTupleWithID(tuple));
-  DEBUG_ASSERT(indexed_attribute_ids_.size() == indexed_attribute_offsets_.size());
+  DCHECK(tuple_store_.hasTupleWithID(tuple));
+  DCHECK_EQ(indexed_attribute_ids_.size(), indexed_attribute_offsets_.size());
 
   ScopedBuffer key_copy(key_length_bytes_);
 
@@ -817,7 +819,7 @@ void* CSBTreeIndexSubBlock::makeKeyCopy(const tuple_id tuple) const {
        ++attr_it, ++offset_it) {
     TypedValue attr_value(tuple_store_.getAttributeValueTyped(tuple, *attr_it));
     if (attr_value.isNull()) {
-      return NULL;
+      return nullptr;
     }
     attr_value.copyInto(static_cast<char*>(key_copy.get()) + *offset_it);
   }
@@ -830,12 +832,12 @@ const void* CSBTreeIndexSubBlock::getLeastKey(const void *node) const {
     if (static_cast<const NodeHeader*>(node)->num_keys) {
       return static_cast<const char*>(node) + sizeof(NodeHeader);
     } else {
-      return NULL;
+      return nullptr;
     }
   } else {
-    DEBUG_ASSERT(static_cast<const NodeHeader*>(node)->num_keys);
+    DCHECK(static_cast<const NodeHeader*>(node)->num_keys);
     const void *least_key = getLeastKey(getNode(static_cast<const NodeHeader*>(node)->node_group_reference, 0));
-    if (least_key == NULL) {
+    if (least_key == nullptr) {
       // If the leftmost child leaf was empty, can just use the first key here.
       return static_cast<const char*>(node) + sizeof(NodeHeader);
     }
@@ -912,10 +914,10 @@ CSBTreeIndexSubBlock::InsertReturnValue CSBTreeIndexSubBlock::internalInsertHelp
     const ComparatorT &key_comparator,
     const NodeHeader *parent_node_header,
     void *node) {
-  DEBUG_ASSERT((node_group_allocation_requirement == 0) || (parent_node_header != NULL));
+  DCHECK((node_group_allocation_requirement == 0) || (parent_node_header != nullptr));
 
   NodeHeader *node_header = static_cast<NodeHeader*>(node);
-  DEBUG_ASSERT(!node_header->is_leaf);
+  DCHECK(!node_header->is_leaf);
 
   // Find the child to insert into.
   uint16_t key_num;
@@ -938,7 +940,7 @@ CSBTreeIndexSubBlock::InsertReturnValue CSBTreeIndexSubBlock::internalInsertHelp
     if (getRootNode() == node) {
       // If this node is the root, make sure there is additional space for a
       // new root.
-      DEBUG_ASSERT(node_group_allocation_requirement == 0);
+      DCHECK_EQ(0, node_group_allocation_requirement);
       child_node_group_allocation_requirement = 2;
     } else {
       child_node_group_allocation_requirement = node_group_allocation_requirement + 1;
@@ -974,14 +976,14 @@ CSBTreeIndexSubBlock::InsertReturnValue CSBTreeIndexSubBlock::internalInsertHelp
                                    && (key_num == small_half_num_children_);
   if (child_return_value.new_node_group_id != kNodeGroupNone) {
     // A new node group was allocated, and this node must be split.
-    DEBUG_ASSERT(child_return_value.split_node_least_key != NULL);
-    DEBUG_ASSERT(node_header->num_keys == max_keys_internal_);
+    DCHECK(child_return_value.split_node_least_key != nullptr);
+    DCHECK_EQ(max_keys_internal_, node_header->num_keys);
 
-    const void *group_end = NULL;
+    const void *group_end = nullptr;
     if (node_group_allocation_requirement) {
       // Parent node is full, must allocate new node group(s).
       // Should already be checked by the child:
-      DEBUG_ASSERT(num_free_node_groups_ >= node_group_allocation_requirement);
+      DCHECK_GE(num_free_node_groups_, node_group_allocation_requirement);
 
       // Split the node group.
       group_end = splitNodeGroupHelper(parent_node_header, &node, &retval);
@@ -989,7 +991,7 @@ CSBTreeIndexSubBlock::InsertReturnValue CSBTreeIndexSubBlock::internalInsertHelp
       group_end = getNode(parent_node_header->node_group_reference, parent_node_header->num_keys + 1);
     }
 
-    if (group_end == NULL) {
+    if (group_end == nullptr) {
       retval.split_node_least_key = splitNodeAcrossGroups(node,
                                                           retval.new_node_group_id,
                                                           child_return_value.new_node_group_id,
@@ -1009,10 +1011,10 @@ CSBTreeIndexSubBlock::InsertReturnValue CSBTreeIndexSubBlock::internalInsertHelp
     }
 
     if (!child_return_value.left_split_group_smaller) {
-      DEBUG_ASSERT(key_num >= large_half_num_children_);
+      DCHECK_GE(key_num, large_half_num_children_);
       key_num -= large_half_num_children_;
 
-      if (group_end == NULL) {
+      if (group_end == nullptr) {
         node = getNode(retval.new_node_group_id, 0);
       } else {
         node = static_cast<char*>(node) + kCSBTreeNodeSizeBytes;
@@ -1020,7 +1022,7 @@ CSBTreeIndexSubBlock::InsertReturnValue CSBTreeIndexSubBlock::internalInsertHelp
     }
   }
 
-  if (child_return_value.split_node_least_key != NULL) {
+  if (child_return_value.split_node_least_key != nullptr) {
     // If the child was split, insert the new key.
     node_header = static_cast<NodeHeader*>(node);
     void *key_location = static_cast<char*>(node)
@@ -1052,11 +1054,11 @@ CSBTreeIndexSubBlock::InsertReturnValue CSBTreeIndexSubBlock::leafInsertHelper(
   InsertReturnValue retval;
 
   NodeHeader *node_header = static_cast<NodeHeader*>(node);
-  DEBUG_ASSERT(node_header->is_leaf);
+  DCHECK(node_header->is_leaf);
 
   if (node_header->num_keys == max_keys_leaf_) {
     // '*node' is full and must be split.
-    const void *group_end = NULL;
+    const void *group_end = nullptr;
     if (node_group_allocation_requirement) {
       // Parent node is full, must allocate new node group(s).
       if (num_free_node_groups_ < node_group_allocation_requirement) {
@@ -1068,7 +1070,7 @@ CSBTreeIndexSubBlock::InsertReturnValue CSBTreeIndexSubBlock::leafInsertHelper(
 
       // Split the node group.
       group_end = splitNodeGroupHelper(parent_node_header, &node, &retval);
-      DEBUG_ASSERT(static_cast<const NodeHeader*>(node)->is_leaf);
+      DCHECK(static_cast<const NodeHeader*>(node)->is_leaf);
     } else {
       // If we are splitting the root node, make sure the caller can allocate a
       // new root.
@@ -1090,7 +1092,7 @@ CSBTreeIndexSubBlock::InsertReturnValue CSBTreeIndexSubBlock::leafInsertHelper(
                                               + sizeof(NodeHeader)
                                               + (small_half_num_keys_leaf_) * key_tuple_id_pair_length_bytes_)) {
       // Insert in the first half.
-      if (group_end == NULL) {
+      if (group_end == nullptr) {
         retval.split_node_least_key = splitNodeAcrossGroups(node,
                                                             retval.new_node_group_id,
                                                             kNodeGroupNone,
@@ -1105,7 +1107,7 @@ CSBTreeIndexSubBlock::InsertReturnValue CSBTreeIndexSubBlock::leafInsertHelper(
       // node. The pointer 'retval.split_node_least_key' will remain correct
       // if this is the case, as splitNodeInGroup() returns a pointer to the
       // first leaf key's location.
-      if (group_end == NULL) {
+      if (group_end == nullptr) {
         retval.split_node_least_key = splitNodeAcrossGroups(node,
                                                             retval.new_node_group_id,
                                                             kNodeGroupNone,
@@ -1165,7 +1167,7 @@ const void* CSBTreeIndexSubBlock::splitNodeGroupHelper(
     caller_return_value->left_split_group_smaller = false;
     if (*node == center_node) {
       caller_return_value->new_node_group_id = splitNodeGroup(parent_node_header, false, true);
-      return NULL;
+      return nullptr;
     } else {
       caller_return_value->new_node_group_id = splitNodeGroup(parent_node_header, false, false);
       // TODO(chasseur): Verify that this logic is correct.
@@ -1185,16 +1187,16 @@ const void* CSBTreeIndexSubBlock::splitNodeGroupHelper(
 int CSBTreeIndexSubBlock::splitNodeGroup(const NodeHeader *parent_node_header,
                                          const bool left_smaller,
                                          const bool will_split_node_across_groups) {
-  DEBUG_ASSERT(!parent_node_header->is_leaf);
-  DEBUG_ASSERT(parent_node_header->num_keys == max_keys_internal_);
-  DEBUG_ASSERT(num_free_node_groups_ > 0);
+  DCHECK(!parent_node_header->is_leaf);
+  DCHECK_EQ(max_keys_internal_, parent_node_header->num_keys);
+  DCHECK_GT(num_free_node_groups_, 0);
   if (will_split_node_across_groups) {
-    DEBUG_ASSERT(!left_smaller);
+    DCHECK(!left_smaller);
   }
 
   // Allocate a new node group.
   int new_node_group_id = allocateNodeGroup();
-  DEBUG_ASSERT(new_node_group_id >= 0);
+  DCHECK_GE(new_node_group_id, 0);
   void *copy_destination;
   if (will_split_node_across_groups) {
     copy_destination = getNode(new_node_group_id, 1);
@@ -1236,15 +1238,15 @@ const void* CSBTreeIndexSubBlock::splitNodeInGroup(void *node,
                                                    const bool child_was_split_across_groups) {
   NodeHeader *node_header = static_cast<NodeHeader*>(node);
   if (child_was_split_across_groups) {
-    DEBUG_ASSERT(!node_header->is_leaf);
-    DEBUG_ASSERT(!left_smaller);
+    DCHECK(!node_header->is_leaf);
+    DCHECK(!left_smaller);
   }
   if (node_header->is_leaf) {
-    DEBUG_ASSERT(right_child_node_group == kNodeGroupNone);
-    DEBUG_ASSERT(node_header->num_keys == max_keys_leaf_);
+    DCHECK_EQ(kNodeGroupNone, right_child_node_group);
+    DCHECK_EQ(max_keys_leaf_, node_header->num_keys);
   } else {
-    DEBUG_ASSERT(right_child_node_group >= 0);
-    DEBUG_ASSERT(node_header->num_keys == max_keys_internal_);
+    DCHECK_GE(right_child_node_group, 0);
+    DCHECK_EQ(max_keys_internal_, node_header->num_keys);
   }
 
   void *next_node = static_cast<char*>(node) + kCSBTreeNodeSizeBytes;
@@ -1328,22 +1330,22 @@ const void* CSBTreeIndexSubBlock::splitNodeAcrossGroups(void *node,
                                                         const int right_child_node_group,
                                                         const bool left_smaller,
                                                         const bool child_was_split_across_groups) {
-  DEBUG_ASSERT(destination_group_number >= 0);
-  DEBUG_ASSERT(static_cast<size_t>(destination_group_number) < node_group_used_bitmap_->size());
-  DEBUG_ASSERT(node_group_used_bitmap_->getBit(destination_group_number));
+  DCHECK_GE(destination_group_number, 0);
+  DCHECK_LT(static_cast<size_t>(destination_group_number), node_group_used_bitmap_->size());
+  DCHECK(node_group_used_bitmap_->getBit(destination_group_number));
 
   NodeHeader *node_header = static_cast<NodeHeader*>(node);
   if (child_was_split_across_groups) {
-    DEBUG_ASSERT(!node_header->is_leaf);
-    DEBUG_ASSERT(!left_smaller);
+    DCHECK(!node_header->is_leaf);
+    DCHECK(!left_smaller);
   }
   if (node_header->is_leaf) {
-    DEBUG_ASSERT(right_child_node_group == kNodeGroupNone);
-    DEBUG_ASSERT(node_header->num_keys == max_keys_leaf_);
-    DEBUG_ASSERT(node_header->node_group_reference == destination_group_number);
+    DCHECK_EQ(kNodeGroupNone, right_child_node_group);
+    DCHECK_EQ(max_keys_leaf_, node_header->num_keys);
+    DCHECK_EQ(destination_group_number, node_header->node_group_reference);
   } else {
-    DEBUG_ASSERT(right_child_node_group >= 0);
-    DEBUG_ASSERT(node_header->num_keys == max_keys_internal_);
+    DCHECK_GE(right_child_node_group, 0);
+    DCHECK_EQ(max_keys_internal_, node_header->num_keys);
   }
 
   // Do the split.
@@ -1419,10 +1421,10 @@ void CSBTreeIndexSubBlock::insertEntryInLeaf(const tuple_id tuple,
                                              const void *key,
                                              const ComparatorT &key_comparator,
                                              void *node) {
-  DEBUG_ASSERT(static_cast<NodeHeader*>(node)->is_leaf);
+  DCHECK(static_cast<NodeHeader*>(node)->is_leaf);
 
   const uint16_t num_keys = static_cast<NodeHeader*>(node)->num_keys;
-  DEBUG_ASSERT(num_keys < max_keys_leaf_);
+  DCHECK_LT(num_keys, max_keys_leaf_);
 
   char *current_key = static_cast<char*>(node) + sizeof(NodeHeader);
   for (uint16_t key_num = 0;
@@ -1463,18 +1465,18 @@ void CSBTreeIndexSubBlock::removeEntryFromLeaf(const tuple_id tuple,
                                                const void *key,
                                                const ComparatorT &key_comparator,
                                                void *node) {
-  DEBUG_ASSERT(static_cast<NodeHeader*>(node)->is_leaf);
+  DCHECK(static_cast<NodeHeader*>(node)->is_leaf);
 
   void *right_sibling;
   const uint16_t num_keys = static_cast<NodeHeader*>(node)->num_keys;
   // If node is totally empty, immediately chase the next sibling.
   if (num_keys == 0) {
     right_sibling = getRightSiblingOfLeafNode(node);
-    if (right_sibling != NULL) {
+    if (right_sibling != nullptr) {
       removeEntryFromLeaf(tuple, key, key_comparator, right_sibling);
       return;
     } else {
-      FATAL_ERROR("CSBTree: attempted to remove nonexistent entry.");
+      LOG(FATAL) << "CSBTree: attempted to remove nonexistent entry.";
     }
   }
 
@@ -1489,7 +1491,7 @@ void CSBTreeIndexSubBlock::removeEntryFromLeaf(const tuple_id tuple,
       continue;
     } else if (key_comparator.compareDataPtrsInl(key, existing_key_ptr)) {
       // Past the target key, but the target has not been found.
-      FATAL_ERROR("CSBTree: attempted to remove nonexistent entry.");
+      LOG(FATAL) << "CSBTree: attempted to remove nonexistent entry.";
     } else {
       // Key matches, so check tuple_id.
       if (tuple == *reinterpret_cast<const tuple_id*>(existing_key_ptr + key_length_bytes_)) {
@@ -1512,11 +1514,11 @@ void CSBTreeIndexSubBlock::removeEntryFromLeaf(const tuple_id tuple,
 
   // Proceed to next sibling.
   right_sibling = getRightSiblingOfLeafNode(node);
-  if (right_sibling != NULL) {
+  if (right_sibling != nullptr) {
     removeEntryFromLeaf(tuple, key, key_comparator, right_sibling);
     return;
   } else {
-    FATAL_ERROR("CSBTree: attempted to remove nonexistent entry.");
+    LOG(FATAL) << "CSBTree: attempted to remove nonexistent entry.";
   }
 }
 
@@ -1569,8 +1571,8 @@ TupleIdSequence* CSBTreeIndexSubBlock::evaluateComparisonPredicateOnUncompressed
     const ComparisonID comp,
     const TypedValue &right_literal,
     const Type &right_literal_type) const {
-  DEBUG_ASSERT(!key_is_compressed_);
-  DEBUG_ASSERT(!key_is_composite_);
+  DCHECK(!key_is_compressed_);
+  DCHECK(!key_is_composite_);
 
   csbtree_internal::PredicateEvaluationForwarder forwarder(*this, comp, right_literal);
 
@@ -1590,8 +1592,8 @@ TupleIdSequence* CSBTreeIndexSubBlock::evaluateComparisonPredicateOnCompressedKe
     ComparisonID comp,
     const TypedValue &right_literal,
     const Type &right_literal_type) const {
-  DEBUG_ASSERT(key_is_compressed_);
-  DEBUG_ASSERT(!key_is_composite_);
+  DCHECK(key_is_compressed_);
+  DCHECK(!key_is_composite_);
 
   // Stack variables to hold compressed codes as needed.
   uint8_t byte_code;
@@ -1633,9 +1635,9 @@ TupleIdSequence* CSBTreeIndexSubBlock::evaluateComparisonPredicateOnCompressedKe
           byte_code = short_code = word_code = limits.first;
           comp = ComparisonID::kGreaterOrEqual;
         } else {
-          FATAL_ERROR("CompressionDictionary::getLimitCodesForComparisonTyped() returned "
-                      "limits which did not extend to either the minimum or maximum code "
-                      "when called by CSBTreeIndexSubBlock::evaluateComparisonPredicateOnCompressedKey().");
+          LOG(FATAL) << "CompressionDictionary::getLimitCodesForComparisonTyped() returned "
+                     << "limits which did not extend to either the minimum or maximum code "
+                     << "when called by CSBTreeIndexSubBlock::evaluateComparisonPredicateOnCompressedKey().";
         }
         break;
       }
@@ -1712,8 +1714,8 @@ TupleIdSequence* CSBTreeIndexSubBlock::evaluateComparisonPredicateOnCompressedKe
       break;
     }
     default:
-      FATAL_ERROR("Unexpected compressed key byte-length (not 1, 2, or 4) encountered "
-                  "in CSBTreeIndexSubBlock::getMatchesForPredicate()");
+      LOG(FATAL) << "Unexpected compressed key byte-length (not 1, 2, or 4) encountered "
+                 << "in CSBTreeIndexSubBlock::getMatchesForPredicate()";
   }
 }
 
@@ -1758,7 +1760,7 @@ TupleIdSequence* CSBTreeIndexSubBlock::evaluatePredicate(
                                             literal_less_key_comparator,
                                             key_less_literal_comparator);
     default:
-      FATAL_ERROR("Unrecognized ComparisonID in CSBTreeIndexSubBlock::evaluatePredicate()");
+      LOG(FATAL) << "Unrecognized ComparisonID in CSBTreeIndexSubBlock::evaluatePredicate()";
   }
 }
 
@@ -1775,8 +1777,8 @@ TupleIdSequence* CSBTreeIndexSubBlock::evaluateEqualPredicate(
                                      literal,
                                      literal_less_key_comparator,
                                      key_less_literal_comparator);
-  while (search_node != NULL) {
-    DEBUG_ASSERT(static_cast<const NodeHeader*>(search_node)->is_leaf);
+  while (search_node != nullptr) {
+    DCHECK(static_cast<const NodeHeader*>(search_node)->is_leaf);
     const uint16_t num_keys = static_cast<const NodeHeader*>(search_node)->num_keys;
     const char *key_ptr = static_cast<const char*>(search_node) + sizeof(NodeHeader);
     for (uint16_t entry_num = 0; entry_num < num_keys; ++entry_num) {
@@ -1817,8 +1819,8 @@ TupleIdSequence* CSBTreeIndexSubBlock::evaluateNotEqualPredicate(
 
   // Fill in all tuples from leaves definitively less than the key.
   while (search_node != boundary_node) {
-    DEBUG_ASSERT(search_node != NULL);
-    DEBUG_ASSERT(static_cast<const NodeHeader*>(search_node)->is_leaf);
+    DCHECK(search_node != nullptr);
+    DCHECK(static_cast<const NodeHeader*>(search_node)->is_leaf);
     const uint16_t num_keys = static_cast<const NodeHeader*>(search_node)->num_keys;
     const char *tuple_id_ptr = static_cast<const char*>(search_node)
                                + sizeof(NodeHeader)
@@ -1833,8 +1835,8 @@ TupleIdSequence* CSBTreeIndexSubBlock::evaluateNotEqualPredicate(
   // Actually do comparisons in leaves that may contain the literal key.
   bool equal_found = false;
   bool past_equal = false;
-  while (search_node != NULL) {
-    DEBUG_ASSERT(static_cast<const NodeHeader*>(search_node)->is_leaf);
+  while (search_node != nullptr) {
+    DCHECK(static_cast<const NodeHeader*>(search_node)->is_leaf);
     const uint16_t num_keys = static_cast<const NodeHeader*>(search_node)->num_keys;
     const char *key_ptr = static_cast<const char*>(search_node) + sizeof(NodeHeader);
     for (uint16_t entry_num = 0; entry_num < num_keys; ++entry_num) {
@@ -1870,8 +1872,8 @@ TupleIdSequence* CSBTreeIndexSubBlock::evaluateNotEqualPredicate(
   }
 
   // Fill in all tuples from leaves definitively greater than the key.
-  while (search_node != NULL) {
-    DEBUG_ASSERT(static_cast<const NodeHeader*>(search_node)->is_leaf);
+  while (search_node != nullptr) {
+    DCHECK(static_cast<const NodeHeader*>(search_node)->is_leaf);
     uint16_t num_keys = static_cast<const NodeHeader*>(search_node)->num_keys;
     const char *tuple_id_ptr = static_cast<const char*>(search_node)
                                + sizeof(NodeHeader)
@@ -1903,8 +1905,8 @@ TupleIdSequence* CSBTreeIndexSubBlock::evaluateLessPredicate(
 
   // Fill in all tuples from leaves definitively less than the key.
   while (search_node != boundary_node) {
-    DEBUG_ASSERT(search_node != NULL);
-    DEBUG_ASSERT(static_cast<const NodeHeader*>(search_node)->is_leaf);
+    DCHECK(search_node != nullptr);
+    DCHECK(static_cast<const NodeHeader*>(search_node)->is_leaf);
     uint16_t num_keys = static_cast<const NodeHeader*>(search_node)->num_keys;
     const char *tuple_id_ptr = static_cast<const char*>(search_node)
                                + sizeof(NodeHeader)
@@ -1919,8 +1921,8 @@ TupleIdSequence* CSBTreeIndexSubBlock::evaluateLessPredicate(
   // Actually do comparisons in leaves that may contain the literal key.
   if (include_equal) {
     bool equal_found = false;
-    while (search_node != NULL) {
-      DEBUG_ASSERT(static_cast<const NodeHeader*>(search_node)->is_leaf);
+    while (search_node != nullptr) {
+      DCHECK(static_cast<const NodeHeader*>(search_node)->is_leaf);
       uint16_t num_keys = static_cast<const NodeHeader*>(search_node)->num_keys;
       const char *key_ptr = static_cast<const char*>(search_node) + sizeof(NodeHeader);
       for (uint16_t entry_num = 0; entry_num < num_keys; ++entry_num) {
@@ -1947,8 +1949,8 @@ TupleIdSequence* CSBTreeIndexSubBlock::evaluateLessPredicate(
       search_node = getRightSiblingOfLeafNode(search_node);
     }
   } else {
-    while (search_node != NULL) {
-      DEBUG_ASSERT(static_cast<const NodeHeader*>(search_node)->is_leaf);
+    while (search_node != nullptr) {
+      DCHECK(static_cast<const NodeHeader*>(search_node)->is_leaf);
       uint16_t num_keys = static_cast<const NodeHeader*>(search_node)->num_keys;
       const char *key_ptr = static_cast<const char*>(search_node) + sizeof(NodeHeader);
       for (uint16_t entry_num = 0; entry_num < num_keys; ++entry_num) {
@@ -1983,8 +1985,8 @@ TupleIdSequence* CSBTreeIndexSubBlock::evaluateGreaterPredicate(
 
   // Do comparisons in leaves that may contain the literal key.
   bool match_found = false;
-  while (search_node != NULL) {
-    DEBUG_ASSERT(static_cast<const NodeHeader*>(search_node)->is_leaf);
+  while (search_node != nullptr) {
+    DCHECK(static_cast<const NodeHeader*>(search_node)->is_leaf);
     uint16_t num_keys = static_cast<const NodeHeader*>(search_node)->num_keys;
     const char *key_ptr = static_cast<const char*>(search_node) + sizeof(NodeHeader);
     for (uint16_t entry_num = 0; entry_num < num_keys; ++entry_num) {
@@ -2017,8 +2019,8 @@ TupleIdSequence* CSBTreeIndexSubBlock::evaluateGreaterPredicate(
   }
 
   // Fill in all tuples from leaves definitively greater than the key.
-  while (search_node != NULL) {
-    DEBUG_ASSERT(static_cast<const NodeHeader*>(search_node)->is_leaf);
+  while (search_node != nullptr) {
+    DCHECK(static_cast<const NodeHeader*>(search_node)->is_leaf);
     uint16_t num_keys = static_cast<const NodeHeader*>(search_node)->num_keys;
     const char *tuple_id_ptr = static_cast<const char*>(search_node)
                                + sizeof(NodeHeader)
@@ -2034,7 +2036,7 @@ TupleIdSequence* CSBTreeIndexSubBlock::evaluateGreaterPredicate(
 }
 
 bool CSBTreeIndexSubBlock::rebuildSpaceCheck() const {
-  DEBUG_ASSERT(node_group_used_bitmap_->size() > 0);
+  DCHECK_GT(node_group_used_bitmap_->size(), 0u);
   if (tuple_store_.isEmpty()) {
     return true;
   }
@@ -2062,8 +2064,8 @@ bool CSBTreeIndexSubBlock::rebuildSpaceCheck() const {
 }
 
 uint16_t CSBTreeIndexSubBlock::rebuildLeaves(std::vector<int> *used_node_groups) {
-  DEBUG_ASSERT(static_cast<size_t>(num_free_node_groups_) == node_group_used_bitmap_->size() - 1);
-  DEBUG_ASSERT(rebuildSpaceCheck());
+  DCHECK_EQ(static_cast<size_t>(num_free_node_groups_), node_group_used_bitmap_->size() - 1);
+  DCHECK(rebuildSpaceCheck());
 
   if (key_is_compressed_) {
     vector<csbtree_internal::CompressedEntryReference> entries;
@@ -2107,7 +2109,7 @@ std::uint16_t CSBTreeIndexSubBlock::buildLeavesFromEntryReferences(
       if (current_node_number == max_keys_internal_) {
         // At the end of this node group, must allocate a new one.
         int next_node_group_number = allocateNodeGroup();
-        DEBUG_ASSERT(next_node_group_number >= 0);
+        DCHECK_GE(next_node_group_number, 0);
         used_node_groups->push_back(next_node_group_number);
         reinterpret_cast<NodeHeader*>(node_ptr)->node_group_reference = next_node_group_number;
         current_node_group_number = next_node_group_number;
@@ -2147,17 +2149,17 @@ std::uint16_t CSBTreeIndexSubBlock::buildLeavesFromEntryReferences(
 
 void CSBTreeIndexSubBlock::generateEntryReferencesFromCompressedCodes(
     std::vector<csbtree_internal::CompressedEntryReference> *entry_references) const {
-  DEBUG_ASSERT(key_is_compressed_);
+  DCHECK(key_is_compressed_);
   // TODO(chasseur): Handle NULL.
-  DEBUG_ASSERT(!key_is_nullable_);
-  DEBUG_ASSERT(entry_references->empty());
+  DCHECK(!key_is_nullable_);
+  DCHECK(entry_references->empty());
 
-  DEBUG_ASSERT(tuple_store_.isCompressed());
+  DCHECK(tuple_store_.isCompressed());
   const CompressedTupleStorageSubBlock &compressed_tuple_store
       = static_cast<const CompressedTupleStorageSubBlock&>(tuple_store_);
-  DEBUG_ASSERT(compressed_tuple_store.compressedBlockIsBuilt());
-  DEBUG_ASSERT(compressed_tuple_store.compressedAttributeIsDictionaryCompressed(indexed_attribute_ids_.front())
-               || compressed_tuple_store.compressedAttributeIsTruncationCompressed(indexed_attribute_ids_.front()));
+  DCHECK(compressed_tuple_store.compressedBlockIsBuilt());
+  DCHECK(compressed_tuple_store.compressedAttributeIsDictionaryCompressed(indexed_attribute_ids_.front()) ||
+         compressed_tuple_store.compressedAttributeIsTruncationCompressed(indexed_attribute_ids_.front()));
 
   if (tuple_store_.isPacked()) {
     for (tuple_id tid = 0; tid <= tuple_store_.getMaxTupleID(); ++tid) {
@@ -2179,14 +2181,14 @@ void CSBTreeIndexSubBlock::generateEntryReferencesFromCompressedCodes(
        entry_references->end(),
        csbtree_internal::CompressedEntryReferenceComparator());
 
-  DEBUG_ASSERT(static_cast<vector<csbtree_internal::CompressedEntryReference>::size_type>(
-                   tuple_store_.numTuples()) == entry_references->size());
+  DCHECK_EQ(static_cast<vector<csbtree_internal::CompressedEntryReference>::size_type>(tuple_store_.numTuples()),
+            entry_references->size());
 }
 
 void CSBTreeIndexSubBlock::generateEntryReferencesFromTypedValues(
     vector<csbtree_internal::EntryReference> *entry_references) const {
-  DEBUG_ASSERT(!key_is_composite_);
-  DEBUG_ASSERT(entry_references->empty());
+  DCHECK(!key_is_composite_);
+  DCHECK(entry_references->empty());
 
   tuple_id null_count = 0;
   if (tuple_store_.isPacked()) {
@@ -2218,21 +2220,21 @@ void CSBTreeIndexSubBlock::generateEntryReferencesFromTypedValues(
                                                                         entry_references->begin(),
                                                                         entry_references->end());
 
-  DEBUG_ASSERT(static_cast<vector<csbtree_internal::EntryReference>::size_type>(tuple_store_.numTuples())
-               == entry_references->size() + null_count);
+  DCHECK_EQ(static_cast<vector<csbtree_internal::EntryReference>::size_type>(tuple_store_.numTuples()),
+            entry_references->size() + null_count);
 }
 
 void CSBTreeIndexSubBlock::generateEntryReferencesFromCompositeKeys(
     vector<csbtree_internal::CompositeEntryReference> *entry_references) const {
-  DEBUG_ASSERT(key_is_composite_);
-  DEBUG_ASSERT(entry_references->empty());
+  DCHECK(key_is_composite_);
+  DCHECK(entry_references->empty());
 
   tuple_id null_count = 0;
   if (tuple_store_.isPacked()) {
     for (tuple_id tid = 0; tid <= tuple_store_.getMaxTupleID(); ++tid) {
       void *key_copy = makeKeyCopy(tid);
       // Don't insert a NULL key.
-      if (key_copy != NULL) {
+      if (key_copy != nullptr) {
         entry_references->emplace_back(key_copy, tid);
       } else {
         ++null_count;
@@ -2243,7 +2245,7 @@ void CSBTreeIndexSubBlock::generateEntryReferencesFromCompositeKeys(
       if (tuple_store_.hasTupleWithID(tid)) {
         void *key_copy = makeKeyCopy(tid);
         // Don't insert a NULL key.
-        if (key_copy != NULL) {
+        if (key_copy != nullptr) {
           entry_references->emplace_back(key_copy, tid);
         } else {
           ++null_count;
@@ -2256,15 +2258,15 @@ void CSBTreeIndexSubBlock::generateEntryReferencesFromCompositeKeys(
        entry_references->end(),
        csbtree_internal::CompositeEntryReferenceComparator(*composite_key_comparator_));
 
-  DEBUG_ASSERT(static_cast<vector<csbtree_internal::EntryReference>::size_type>(tuple_store_.numTuples())
-               == entry_references->size() + null_count);
+  DCHECK_EQ(static_cast<vector<csbtree_internal::EntryReference>::size_type>(tuple_store_.numTuples()),
+            entry_references->size() + null_count);
 }
 
 uint16_t CSBTreeIndexSubBlock::rebuildInternalLevel(const std::vector<int> &child_node_groups,
                                                     uint16_t last_child_num_nodes,
                                                     std::vector<int> *used_node_groups) {
-  DEBUG_ASSERT(last_child_num_nodes > 0);
-  DEBUG_ASSERT(!child_node_groups.empty());
+  DCHECK_GT(last_child_num_nodes, 0u);
+  DCHECK(!child_node_groups.empty());
 
   std::vector<int>::const_iterator last_it = child_node_groups.end() - 1;
   // Adjusted to proper value below.
@@ -2275,7 +2277,7 @@ uint16_t CSBTreeIndexSubBlock::rebuildInternalLevel(const std::vector<int> &chil
     next_to_last_it -= 2;
     if (last_child_num_nodes < large_half_num_children_) {
       // Rebalance last node groups as needed.
-      DEBUG_ASSERT(child_node_groups.size() > 1);
+      DCHECK_GT(child_node_groups.size(), 1u);
       next_to_last_child_num_nodes = rebalanceNodeGroups(*next_to_last_it,
                                                          child_node_groups.back(),
                                                          last_child_num_nodes);
@@ -2284,7 +2286,7 @@ uint16_t CSBTreeIndexSubBlock::rebuildInternalLevel(const std::vector<int> &chil
   }
 
   int current_node_group_number = allocateNodeGroup();
-  DEBUG_ASSERT(current_node_group_number >= 0);
+  DCHECK_GE(current_node_group_number, 0);
   used_node_groups->push_back(current_node_group_number);
 
   uint16_t current_node_number = 0;
@@ -2294,7 +2296,7 @@ uint16_t CSBTreeIndexSubBlock::rebuildInternalLevel(const std::vector<int> &chil
     if (current_node_number == max_keys_internal_ + 1) {
       // Advance to next node group.
       current_node_group_number = allocateNodeGroup();
-      DEBUG_ASSERT(current_node_group_number >= 0);
+      DCHECK_GE(current_node_group_number, 0);
       used_node_groups->push_back(current_node_group_number);
       current_node_number = 0;
     }
@@ -2321,7 +2323,7 @@ uint16_t CSBTreeIndexSubBlock::rebuildInternalLevel(const std::vector<int> &chil
 uint16_t CSBTreeIndexSubBlock::rebalanceNodeGroups(const int full_node_group_number,
                                                    const int underfull_node_group_number,
                                                    const uint16_t underfull_num_nodes) {
-  DEBUG_ASSERT(underfull_num_nodes < large_half_num_children_);
+  DCHECK_LT(underfull_num_nodes, large_half_num_children_);
 
   const uint16_t shift_nodes = large_half_num_children_ - underfull_num_nodes;
   const uint16_t full_group_remaining_nodes = max_keys_internal_ + 1 - shift_nodes;
@@ -2349,7 +2351,7 @@ uint16_t CSBTreeIndexSubBlock::rebalanceNodeGroups(const int full_node_group_num
 void CSBTreeIndexSubBlock::makeInternalNode(const int child_node_group_number,
                                             const uint16_t num_children,
                                             void *node) {
-  DEBUG_ASSERT(num_children > 1);
+  DCHECK_GT(num_children, 1u);
   // Setup header.
   static_cast<NodeHeader*>(node)->num_keys = num_children - 1;
   static_cast<NodeHeader*>(node)->is_leaf = false;
@@ -2358,7 +2360,7 @@ void CSBTreeIndexSubBlock::makeInternalNode(const int child_node_group_number,
   // Fill in keys.
   char *key_ptr = static_cast<char*>(node) + sizeof(NodeHeader);
   for (uint16_t child_num = 1; child_num < num_children; ++child_num) {
-    DEBUG_ASSERT(static_cast<const NodeHeader*>(getNode(child_node_group_number, child_num))->num_keys > 0);
+    DCHECK_GT(static_cast<const NodeHeader*>(getNode(child_node_group_number, child_num))->num_keys, 0);
     // TODO(chasseur): We could simply remember the least keys of all nodes
     // generated in the previous pass, but that is a time/space tradeoff
     // which may not be worth it.
@@ -2374,7 +2376,7 @@ int CSBTreeIndexSubBlock::allocateNodeGroup() {
     // No more node groups are available.
     return kNodeGroupNone;
   } else {
-    DEBUG_ASSERT(!node_group_used_bitmap_->getBit(next_free_node_group_));
+    DCHECK(!node_group_used_bitmap_->getBit(next_free_node_group_));
     // Return the next free node group.
     int retval = next_free_node_group_;
     // Mark this node group as used and decrement the count of free node
@@ -2384,7 +2386,7 @@ int CSBTreeIndexSubBlock::allocateNodeGroup() {
     // If there are still free node groups remaining, locate the next one.
     if (num_free_node_groups_) {
       next_free_node_group_ = node_group_used_bitmap_->firstZero(retval + 1);
-      DEBUG_ASSERT(static_cast<size_t>(next_free_node_group_) < node_group_used_bitmap_->size());
+      DCHECK_LT(static_cast<size_t>(next_free_node_group_), node_group_used_bitmap_->size());
       return retval;
     } else {
       next_free_node_group_ = kNodeGroupNone;
@@ -2394,9 +2396,9 @@ int CSBTreeIndexSubBlock::allocateNodeGroup() {
 }
 
 void CSBTreeIndexSubBlock::deallocateNodeGroup(const int node_group_number) {
-  DEBUG_ASSERT(node_group_number >= 0);
-  DEBUG_ASSERT(static_cast<size_t>(node_group_number) < node_group_used_bitmap_->size());
-  DEBUG_ASSERT(node_group_used_bitmap_->getBit(node_group_number));
+  DCHECK_GE(node_group_number, 0);
+  DCHECK_LT(static_cast<size_t>(node_group_number), node_group_used_bitmap_->size());
+  DCHECK(node_group_used_bitmap_->getBit(node_group_number));
 
   node_group_used_bitmap_->setBit(node_group_number, false);
   ++num_free_node_groups_;
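
A note on the macro migration above: the glog replacements are more
informative than the old Quickstep macros. The typed forms
DCHECK_EQ/GT/GE/LT print both operand values on failure and, like
DEBUG_ASSERT, compile away in NDEBUG builds, while LOG(FATAL) aborts in
every build mode and accepts operator<< streaming. A minimal standalone
sketch of these idioms (the function and variable names are hypothetical,
not taken from this commit):

    #include <cstdint>

    #include "glog/logging.h"

    void CheckedDeallocate(std::int64_t group_number,
                           std::uint64_t num_groups,
                           bool group_in_use) {
      // DEBUG_ASSERT(group_number >= 0) becomes a typed check that logs
      // both operand values if it fails; a no-op when NDEBUG is defined.
      DCHECK_GE(group_number, 0);
      DCHECK_LT(static_cast<std::uint64_t>(group_number), num_groups);
      if (!group_in_use) {
        // FATAL_ERROR("...") becomes a streaming fatal log.
        LOG(FATAL) << "Attempted to deallocate unused node group "
                   << group_number;
      }
      // ... actual bookkeeping would follow here ...
    }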

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/d894e43c/storage/CSBTreeIndexSubBlock.hpp
----------------------------------------------------------------------
diff --git a/storage/CSBTreeIndexSubBlock.hpp b/storage/CSBTreeIndexSubBlock.hpp
index 24e2ab5..fbe1753 100644
--- a/storage/CSBTreeIndexSubBlock.hpp
+++ b/storage/CSBTreeIndexSubBlock.hpp
@@ -38,6 +38,8 @@
 #include "utility/Macros.hpp"
 #include "utility/PtrVector.hpp"
 
+#include "glog/logging.h"
+
 namespace quickstep {
 
 class CSBTreeIndexSubBlock;
@@ -69,7 +71,7 @@ class CompositeKeyLessComparator : public UncheckedComparator {
   }
 
   bool compareTypedValues(const TypedValue &left, const TypedValue &right) const {
-    FATAL_ERROR("Can not use CompositeKeyLessComparator to compare TypedValue.");
+    LOG(FATAL) << "Can not use CompositeKeyLessComparator to compare TypedValue.";
   }
 
   inline bool compareDataPtrs(const void *left, const void *right) const {
@@ -79,11 +81,11 @@ class CompositeKeyLessComparator : public UncheckedComparator {
   bool compareDataPtrsInl(const void *left, const void *right) const;
 
   bool compareTypedValueWithDataPtr(const TypedValue &left, const void *right) const {
-    FATAL_ERROR("Can not use CompositeKeyLessComparator to compare TypedValue.");
+    LOG(FATAL) << "Can not use CompositeKeyLessComparator to compare TypedValue.";
   }
 
   bool compareDataPtrWithTypedValue(const void *left, const TypedValue &right) const {
-    FATAL_ERROR("Can not use CompositeKeyLessComparator to compare TypedValue.");
+    LOG(FATAL) << "Can not use CompositeKeyLessComparator to compare TypedValue.";
   }
 
  private:
@@ -263,7 +265,7 @@ class CSBTreeIndexSubBlock : public IndexSubBlock {
   // Get the location of the node designated by 'node_number' in the group
   // with 'node_group_number'.
   inline void* getNode(const int node_group_number, const std::uint16_t node_number) const {
-    DEBUG_ASSERT(node_group_number >= 0);
+    DCHECK_GE(node_group_number, 0);
     return static_cast<char*>(node_groups_start_)
            + node_group_number * node_group_size_bytes_
            + node_number * kCSBTreeNodeSizeBytes;
@@ -277,14 +279,14 @@ class CSBTreeIndexSubBlock : public IndexSubBlock {
   // Get the right-sibling of the leaf node '*node', which may be in another
   // group. If '*node' is the very right-most leaf, returns NULL.
   inline void* getRightSiblingOfLeafNode(const void *node) const {
-    DEBUG_ASSERT(static_cast<const NodeHeader*>(node)->is_leaf);
+    DCHECK(static_cast<const NodeHeader*>(node)->is_leaf);
     const int sibling_reference = static_cast<const NodeHeader*>(node)->node_group_reference;
     if (sibling_reference == kNodeGroupNextLeaf) {
       return const_cast<char*>(static_cast<const char*>(node) + kCSBTreeNodeSizeBytes);
     } else if (sibling_reference >= 0) {
       return getNode(sibling_reference, 0);
     } else {
-      DEBUG_ASSERT(sibling_reference == kNodeGroupNone);
+      DCHECK_EQ(kNodeGroupNone, sibling_reference);
       return NULL;
     }
   }
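
One detail worth noting in this header diff: getRightSiblingOfLeafNode()
still returns NULL at the right-most leaf, while its callers in
CSBTreeIndexSubBlock.cpp now compare against nullptr; the two are
interchangeable here because NULL converts to the null pointer value of
the returned pointer type. A simplified sketch of the leaf-chain walk
those callers perform (the types and names below are illustrative, not
the actual Quickstep classes):

    struct LeafNode {
      const LeafNode *right_sibling;  // nullptr at the right-most leaf.
      int num_keys;
    };

    int CountLeafEntries(const LeafNode *leaf) {
      int total = 0;
      // Mirrors the `while (search_node != nullptr)` loops in the .cpp.
      while (leaf != nullptr) {
        total += leaf->num_keys;
        leaf = leaf->right_sibling;  // Like getRightSiblingOfLeafNode().
      }
      return total;
    }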

http://git-wip-us.apache.org/repos/asf/incubator-quickstep/blob/d894e43c/storage/ColumnStoreUtil.cpp
----------------------------------------------------------------------
diff --git a/storage/ColumnStoreUtil.cpp b/storage/ColumnStoreUtil.cpp
index 1355376..1479036 100644
--- a/storage/ColumnStoreUtil.cpp
+++ b/storage/ColumnStoreUtil.cpp
@@ -1,6 +1,6 @@
 /**
  *   Copyright 2011-2015 Quickstep Technologies LLC.
- *   Copyright 2015 Pivotal Software, Inc.
+ *   Copyright 2015-2016 Pivotal Software, Inc.
  *
  *   Licensed under the Apache License, Version 2.0 (the "License");
  *   you may not use this file except in compliance with the License.
@@ -20,7 +20,6 @@
 #include "catalog/CatalogAttribute.hpp"
 #include "catalog/CatalogRelationSchema.hpp"
 #include "expressions/predicate/ComparisonPredicate.hpp"
-#include "expressions/predicate/Predicate.hpp"
 #include "expressions/scalar/Scalar.hpp"
 #include "expressions/scalar/ScalarAttribute.hpp"
 #include "storage/StorageBlockInfo.hpp"
@@ -30,7 +29,8 @@
 #include "types/operations/comparisons/Comparison.hpp"
 #include "types/operations/comparisons/ComparisonID.hpp"
 #include "types/operations/comparisons/ComparisonUtil.hpp"
-#include "utility/Macros.hpp"
+
+#include "glog/logging.h"
 
 namespace quickstep {
 namespace column_store_util {
@@ -42,256 +42,256 @@ TupleIdSequence* SortColumnPredicateEvaluator::EvaluatePredicateForUncompressedS
     void *sort_attribute_stripe,
     const tuple_id num_tuples) {
   // Determine if the predicate is a comparison of the sort column with a literal.
-  if (predicate.isAttributeLiteralComparisonPredicate()) {
-    const CatalogAttribute *comparison_attribute = NULL;
-    bool left_literal = false;
-    if (predicate.getLeftOperand().hasStaticValue()) {
-      DEBUG_ASSERT(predicate.getRightOperand().getDataSource() == Scalar::kAttribute);
-      comparison_attribute
-          = &(static_cast<const ScalarAttribute&>(predicate.getRightOperand()).getAttribute());
-      left_literal = true;
-    } else {
-      DEBUG_ASSERT(predicate.getLeftOperand().getDataSource() == Scalar::kAttribute);
-      comparison_attribute
-          = &(static_cast<const ScalarAttribute&>(predicate.getLeftOperand()).getAttribute());
-      left_literal = false;
-    }
+  if (!predicate.isAttributeLiteralComparisonPredicate()) {
+    // Cannot evaluate a non-comparison predicate, so pass through.
+    return nullptr;
+  }
+
+  const CatalogAttribute *comparison_attribute = nullptr;
+  bool left_literal = false;
+  if (predicate.getLeftOperand().hasStaticValue()) {
+    DCHECK_EQ(Scalar::kAttribute, predicate.getRightOperand().getDataSource());
+    comparison_attribute
+        = &(static_cast<const ScalarAttribute&>(predicate.getRightOperand()).getAttribute());
+    left_literal = true;
+  } else {
+    DCHECK_EQ(Scalar::kAttribute, predicate.getLeftOperand().getDataSource());
+    comparison_attribute
+        = &(static_cast<const ScalarAttribute&>(predicate.getLeftOperand()).getAttribute());
+    left_literal = false;
+  }
+
+  DCHECK_EQ(relation.getID(), comparison_attribute->getParent().getID());
+  if (comparison_attribute->getID() != sort_attribute_id) {
+    return nullptr;
+  }
 
-    DEBUG_ASSERT(comparison_attribute->getParent().getID() == relation.getID());
-    if (comparison_attribute->getID() == sort_attribute_id) {
-      const Type &attr_type = comparison_attribute->getType();
+  const Type &attr_type = comparison_attribute->getType();
 
-      TypedValue comparison_literal;
-      const Type *literal_type;
+  TypedValue comparison_literal;
+  const Type *literal_type;
+  if (left_literal) {
+    comparison_literal = predicate.getLeftOperand().getStaticValue().makeReferenceToThis();
+    literal_type = &predicate.getLeftOperand().getType();
+  } else {
+    comparison_literal = predicate.getRightOperand().getStaticValue().makeReferenceToThis();
+    literal_type = &predicate.getRightOperand().getType();
+  }
+  const bool same_types = literal_type->isSubsumedBy(attr_type);
+
+  // Find the bounds on the range of matching tuples.
+  tuple_id min_match = 0;
+  tuple_id max_match_bound = num_tuples;
+  ColumnStripeIterator begin_it(sort_attribute_stripe,
+                                attr_type.maximumByteLength(),
+                                0);
+  ColumnStripeIterator end_it(sort_attribute_stripe,
+                              attr_type.maximumByteLength(),
+                              num_tuples);
+
+  switch (predicate.getComparison().getComparisonID()) {
+    case ComparisonID::kEqual:
+    // Note: There is a special branch below for kNotEqual which takes the
+    // complement of the matched range.
+    case ComparisonID::kNotEqual: {
+      ColumnStripeIterator min_match_it;
+      if (same_types) {
+        min_match_it = GetBoundForUntypedValue<
+            ColumnStripeIterator,
+            LowerBoundFunctor>(attr_type,
+                               begin_it,
+                               end_it,
+                               comparison_literal.getDataPtr());
+        min_match = min_match_it.getTuplePosition();
+        max_match_bound = GetBoundForUntypedValue<
+            ColumnStripeIterator,
+            UpperBoundFunctor>(attr_type,
+                               min_match_it,
+                               end_it,
+                               comparison_literal.getDataPtr()).getTuplePosition();
+      } else {
+        min_match_it = GetBoundForDifferentTypedValue<
+            ColumnStripeIterator,
+            LowerBoundFunctor>(attr_type,
+                               begin_it,
+                               end_it,
+                               comparison_literal,
+                               *literal_type);
+        min_match = min_match_it.getTuplePosition();
+        max_match_bound = GetBoundForDifferentTypedValue<
+            ColumnStripeIterator,
+            UpperBoundFunctor>(attr_type,
+                               min_match_it,
+                               end_it,
+                               comparison_literal,
+                               *literal_type).getTuplePosition();
+      }
+      break;
+    }
+    case ComparisonID::kLess:
+      if (left_literal) {
+        if (same_types) {
+          min_match = GetBoundForUntypedValue<
+              ColumnStripeIterator,
+              UpperBoundFunctor>(attr_type,
+                                 begin_it,
+                                 end_it,
+                                 comparison_literal.getDataPtr()).getTuplePosition();
+        } else {
+          min_match = GetBoundForDifferentTypedValue<
+              ColumnStripeIterator,
+              UpperBoundFunctor>(attr_type,
+                                 begin_it,
+                                 end_it,
+                                 comparison_literal,
+                                 *literal_type).getTuplePosition();
+        }
+      } else {
+        if (same_types) {
+          max_match_bound = GetBoundForUntypedValue<
+              ColumnStripeIterator,
+              LowerBoundFunctor>(attr_type,
+                                 begin_it,
+                                 end_it,
+                                 comparison_literal.getDataPtr()).getTuplePosition();
+        } else {
+          max_match_bound = GetBoundForDifferentTypedValue<
+              ColumnStripeIterator,
+              LowerBoundFunctor>(attr_type,
+                                 begin_it,
+                                 end_it,
+                                 comparison_literal,
+                                 *literal_type).getTuplePosition();
+        }
+      }
+      break;
+    case ComparisonID::kLessOrEqual:
       if (left_literal) {
-        comparison_literal = predicate.getLeftOperand().getStaticValue().makeReferenceToThis();
-        literal_type = &predicate.getLeftOperand().getType();
+        if (same_types) {
+          min_match = GetBoundForUntypedValue<
+              ColumnStripeIterator,
+              LowerBoundFunctor>(attr_type,
+                                 begin_it,
+                                 end_it,
+                                 comparison_literal.getDataPtr()).getTuplePosition();
+        } else {
+          min_match = GetBoundForDifferentTypedValue<
+              ColumnStripeIterator,
+              LowerBoundFunctor>(attr_type,
+                                 begin_it,
+                                 end_it,
+                                 comparison_literal,
+                                 *literal_type).getTuplePosition();
+        }
       } else {
-        comparison_literal = predicate.getRightOperand().getStaticValue().makeReferenceToThis();
-        literal_type = &predicate.getRightOperand().getType();
+        if (same_types) {
+          max_match_bound = GetBoundForUntypedValue<
+              ColumnStripeIterator,
+              UpperBoundFunctor>(attr_type,
+                                 begin_it,
+                                 end_it,
+                                 comparison_literal.getDataPtr()).getTuplePosition();
+        } else {
+          max_match_bound = GetBoundForDifferentTypedValue<
+              ColumnStripeIterator,
+              UpperBoundFunctor>(attr_type,
+                                 begin_it,
+                                 end_it,
+                                 comparison_literal,
+                                 *literal_type).getTuplePosition();
+        }
       }
-      const bool same_types = literal_type->isSubsumedBy(attr_type);
-
-      // Find the bounds on the range of matching tuples.
-      tuple_id min_match = 0;
-      tuple_id max_match_bound = num_tuples;
-      ColumnStripeIterator begin_it(sort_attribute_stripe,
-                                    attr_type.maximumByteLength(),
-                                    0);
-      ColumnStripeIterator end_it(sort_attribute_stripe,
-                                  attr_type.maximumByteLength(),
-                                  num_tuples);
-
-      switch (predicate.getComparison().getComparisonID()) {
-        case ComparisonID::kEqual:
-        // Note: There is a special branch below for kNotEqual which takes the
-        // complement of the matched range.
-        case ComparisonID::kNotEqual: {
-          ColumnStripeIterator min_match_it;
-          if (same_types) {
-            min_match_it = GetBoundForUntypedValue<
-                ColumnStripeIterator,
-                LowerBoundFunctor>(attr_type,
-                                   begin_it,
-                                   end_it,
-                                   comparison_literal.getDataPtr());
-            min_match = min_match_it.getTuplePosition();
-            max_match_bound = GetBoundForUntypedValue<
-                ColumnStripeIterator,
-                UpperBoundFunctor>(attr_type,
-                                   min_match_it,
-                                   end_it,
-                                   comparison_literal.getDataPtr()).getTuplePosition();
-          } else {
-            min_match_it = GetBoundForDifferentTypedValue<
-                ColumnStripeIterator,
-                LowerBoundFunctor>(attr_type,
-                                   begin_it,
-                                   end_it,
-                                   comparison_literal,
-                                   *literal_type);
-            min_match = min_match_it.getTuplePosition();
-            max_match_bound = GetBoundForDifferentTypedValue<
-                ColumnStripeIterator,
-                UpperBoundFunctor>(attr_type,
-                                   min_match_it,
-                                   end_it,
-                                   comparison_literal,
-                                   *literal_type).getTuplePosition();
-          }
-          break;
+      break;
+    case ComparisonID::kGreater:
+      if (left_literal) {
+        if (same_types) {
+          max_match_bound = GetBoundForUntypedValue<
+              ColumnStripeIterator,
+              LowerBoundFunctor>(attr_type,
+                                 begin_it,
+                                 end_it,
+                                 comparison_literal.getDataPtr()).getTuplePosition();
+        } else {
+          max_match_bound = GetBoundForDifferentTypedValue<
+              ColumnStripeIterator,
+              LowerBoundFunctor>(attr_type,
+                                 begin_it,
+                                 end_it,
+                                 comparison_literal,
+                                 *literal_type).getTuplePosition();
+        }
+      } else {
+        if (same_types) {
+          min_match = GetBoundForUntypedValue<
+              ColumnStripeIterator,
+              UpperBoundFunctor>(attr_type,
+                                 begin_it,
+                                 end_it,
+                                 comparison_literal.getDataPtr()).getTuplePosition();
+        } else {
+          min_match = GetBoundForDifferentTypedValue<
+              ColumnStripeIterator,
+              UpperBoundFunctor>(attr_type,
+                                 begin_it,
+                                 end_it,
+                                 comparison_literal,
+                                 *literal_type).getTuplePosition();
         }
-        case ComparisonID::kLess:
-          if (left_literal) {
-            if (same_types) {
-              min_match = GetBoundForUntypedValue<
-                  ColumnStripeIterator,
-                  UpperBoundFunctor>(attr_type,
-                                     begin_it,
-                                     end_it,
-                                     comparison_literal.getDataPtr()).getTuplePosition();
-            } else {
-              min_match = GetBoundForDifferentTypedValue<
-                  ColumnStripeIterator,
-                  UpperBoundFunctor>(attr_type,
-                                     begin_it,
-                                     end_it,
-                                     comparison_literal,
-                                     *literal_type).getTuplePosition();
-            }
-          } else {
-            if (same_types) {
-              max_match_bound = GetBoundForUntypedValue<
-                  ColumnStripeIterator,
-                  LowerBoundFunctor>(attr_type,
-                                     begin_it,
-                                     end_it,
-                                     comparison_literal.getDataPtr()).getTuplePosition();
-            } else {
-              max_match_bound = GetBoundForDifferentTypedValue<
-                  ColumnStripeIterator,
-                  LowerBoundFunctor>(attr_type,
-                                     begin_it,
-                                     end_it,
-                                     comparison_literal,
-                                     *literal_type).getTuplePosition();
-            }
-          }
-          break;
-        case ComparisonID::kLessOrEqual:
-          if (left_literal) {
-            if (same_types) {
-              min_match = GetBoundForUntypedValue<
-                  ColumnStripeIterator,
-                  LowerBoundFunctor>(attr_type,
-                                     begin_it,
-                                     end_it,
-                                     comparison_literal.getDataPtr()).getTuplePosition();
-            } else {
-              min_match = GetBoundForDifferentTypedValue<
-                  ColumnStripeIterator,
-                  LowerBoundFunctor>(attr_type,
-                                     begin_it,
-                                     end_it,
-                                     comparison_literal,
-                                     *literal_type).getTuplePosition();
-            }
-          } else {
-            if (same_types) {
-              max_match_bound = GetBoundForUntypedValue<
-                  ColumnStripeIterator,
-                  UpperBoundFunctor>(attr_type,
-                                     begin_it,
-                                     end_it,
-                                     comparison_literal.getDataPtr()).getTuplePosition();
-            } else {
-              max_match_bound = GetBoundForDifferentTypedValue<
-                  ColumnStripeIterator,
-                  UpperBoundFunctor>(attr_type,
-                                     begin_it,
-                                     end_it,
-                                     comparison_literal,
-                                     *literal_type).getTuplePosition();
-            }
-          }
-          break;
-        case ComparisonID::kGreater:
-          if (left_literal) {
-            if (same_types) {
-              max_match_bound = GetBoundForUntypedValue<
-                  ColumnStripeIterator,
-                  LowerBoundFunctor>(attr_type,
-                                     begin_it,
-                                     end_it,
-                                     comparison_literal.getDataPtr()).getTuplePosition();
-            } else {
-              max_match_bound = GetBoundForDifferentTypedValue<
-                  ColumnStripeIterator,
-                  LowerBoundFunctor>(attr_type,
-                                     begin_it,
-                                     end_it,
-                                     comparison_literal,
-                                     *literal_type).getTuplePosition();
-            }
-          } else {
-            if (same_types) {
-              min_match = GetBoundForUntypedValue<
-                  ColumnStripeIterator,
-                  UpperBoundFunctor>(attr_type,
-                                     begin_it,
-                                     end_it,
-                                     comparison_literal.getDataPtr()).getTuplePosition();
-            } else {
-              min_match = GetBoundForDifferentTypedValue<
-                  ColumnStripeIterator,
-                  UpperBoundFunctor>(attr_type,
-                                     begin_it,
-                                     end_it,
-                                     comparison_literal,
-                                     *literal_type).getTuplePosition();
-            }
-          }
-          break;
-        case ComparisonID::kGreaterOrEqual:
-          if (left_literal) {
-            if (same_types) {
-              max_match_bound = GetBoundForUntypedValue<
-                  ColumnStripeIterator,
-                  UpperBoundFunctor>(attr_type,
-                                     begin_it,
-                                     end_it,
-                                     comparison_literal.getDataPtr()).getTuplePosition();
-            } else {
-              max_match_bound = GetBoundForDifferentTypedValue<
-                  ColumnStripeIterator,
-                  UpperBoundFunctor>(attr_type,
-                                     begin_it,
-                                     end_it,
-                                     comparison_literal,
-                                     *literal_type).getTuplePosition();
-            }
-          } else {
-            if (same_types) {
-              min_match = GetBoundForUntypedValue<
-                  ColumnStripeIterator,
-                  LowerBoundFunctor>(attr_type,
-                                     begin_it,
-                                     end_it,
-                                     comparison_literal.getDataPtr()).getTuplePosition();
-            } else {
-              min_match = GetBoundForDifferentTypedValue<
-                  ColumnStripeIterator,
-                  LowerBoundFunctor>(attr_type,
-                                     begin_it,
-                                     end_it,
-                                     comparison_literal,
-                                     *literal_type).getTuplePosition();
-            }
-          }
-          break;
-        default:
-          FATAL_ERROR("Unknown Comparison in SortColumnPredicateEvaluator::"
-                      "EvaluatePredicateForUncompressedSortColumn()");
       }
-
-      // Create and return the sequence of matches.
-      TupleIdSequence *matches = new TupleIdSequence(num_tuples);
-      if (predicate.getComparison().getComparisonID() == ComparisonID::kNotEqual) {
-        // Special case: return all tuples NOT in the range for kEqual.
-        matches->setRange(0, min_match, true);
-        matches->setRange(max_match_bound, num_tuples - max_match_bound, true);
+      break;
+    case ComparisonID::kGreaterOrEqual:
+      if (left_literal) {
+        if (same_types) {
+          max_match_bound = GetBoundForUntypedValue<
+              ColumnStripeIterator,
+              UpperBoundFunctor>(attr_type,
+                                 begin_it,
+                                 end_it,
+                                 comparison_literal.getDataPtr()).getTuplePosition();
+        } else {
+          max_match_bound = GetBoundForDifferentTypedValue<
+              ColumnStripeIterator,
+              UpperBoundFunctor>(attr_type,
+                                 begin_it,
+                                 end_it,
+                                 comparison_literal,
+                                 *literal_type).getTuplePosition();
+        }
       } else {
-        matches->setRange(min_match, max_match_bound - min_match, true);
+        if (same_types) {
+          min_match = GetBoundForUntypedValue<
+              ColumnStripeIterator,
+              LowerBoundFunctor>(attr_type,
+                                 begin_it,
+                                 end_it,
+                                 comparison_literal.getDataPtr()).getTuplePosition();
+        } else {
+          min_match = GetBoundForDifferentTypedValue<
+              ColumnStripeIterator,
+              LowerBoundFunctor>(attr_type,
+                                 begin_it,
+                                 end_it,
+                                 comparison_literal,
+                                 *literal_type).getTuplePosition();
+        }
       }
+      break;
+    default:
+      LOG(FATAL) << "Unknown Comparison in SortColumnPredicateEvaluator::"
+                 << "EvaluatePredicateForUncompressedSortColumn()";
+  }
 
-      return matches;
-    } else {
-      return NULL;
-    }
+  // Create and return the sequence of matches.
+  TupleIdSequence *matches = new TupleIdSequence(num_tuples);
+  if (predicate.getComparison().getComparisonID() == ComparisonID::kNotEqual) {
+    // Special case: return all tuples NOT in the range for kEqual.
+    matches->setRange(0, min_match, true);
+    matches->setRange(max_match_bound, num_tuples - max_match_bound, true);
   } else {
-    // Can not evaluate a non-comparison predicate, so pass through.
-    return NULL;
+    matches->setRange(min_match, max_match_bound - min_match, true);
   }
+
+  return matches;
 }
 
 }  // namespace column_store_util
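
The restructuring of EvaluatePredicateForUncompressedSortColumn() above is
behavior-preserving: the deeply nested if/else blocks become early returns
(guard clauses), and the binary-search logic is unchanged. Each ComparisonID
is mapped onto a half-open match range [min_match, max_match_bound) over the
sorted stripe, with kNotEqual taking the complement of the kEqual range. A
simplified analogue of that range computation, assuming a plain sorted
std::vector<int> in place of the ColumnStripeIterator machinery:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    struct MatchRange { std::size_t min_match, max_match_bound; };

    // kEqual: lower_bound/upper_bound bracket the run of equal keys.
    MatchRange EqualRange(const std::vector<int> &sorted, int literal) {
      auto lo = std::lower_bound(sorted.begin(), sorted.end(), literal);
      auto hi = std::upper_bound(lo, sorted.end(), literal);
      return {static_cast<std::size_t>(lo - sorted.begin()),
              static_cast<std::size_t>(hi - sorted.begin())};
    }

    // kLess / kLessOrEqual with the attribute on the left-hand side:
    // matches run from the start of the stripe up to the bound.
    MatchRange LessRange(const std::vector<int> &sorted, int literal,
                         bool include_equal) {
      auto bound = include_equal
          ? std::upper_bound(sorted.begin(), sorted.end(), literal)
          : std::lower_bound(sorted.begin(), sorted.end(), literal);
      return {0, static_cast<std::size_t>(bound - sorted.begin())};
    }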