Posted to commits@datasketches.apache.org by al...@apache.org on 2020/02/29 01:19:06 UTC

[incubator-datasketches-cpp] branch standard_operators created (now fe53cea)

This is an automated email from the ASF dual-hosted git repository.

alsay pushed a change to branch standard_operators
in repository https://gitbox.apache.org/repos/asf/incubator-datasketches-cpp.git.


      at fe53cea  use standard operators, some other minor cleanup

This branch includes the following new commits:

     new fe53cea  use standard operators, some other minor cleanup

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  Revisions
listed as "add" were already present in the repository and have only
been added to this reference.



---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@datasketches.apache.org
For additional commands, e-mail: commits-help@datasketches.apache.org


[incubator-datasketches-cpp] 01/01: use standard operators, some other minor cleanup

Posted by al...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

alsay pushed a commit to branch standard_operators
in repository https://gitbox.apache.org/repos/asf/incubator-datasketches-cpp.git

commit fe53cea2c50691ed77e204372158005b3b4949c2
Author: AlexanderSaydakov <Al...@users.noreply.github.com>
AuthorDate: Fri Feb 28 17:18:51 2020 -0800

    use standard operators, some other minor cleanup
---
 cpc/include/cpc_compressor_impl.hpp          |  2 +-
 cpc/include/cpc_sketch_impl.hpp              | 30 +++++++++----------
 cpc/include/cpc_union_impl.hpp               | 28 +++++++++---------
 cpc/include/u32_table_impl.hpp               | 14 ++++-----
 fi/include/frequent_items_sketch_impl.hpp    |  2 +-
 fi/include/reverse_purge_hash_map_impl.hpp   |  6 +---
 hll/include/HllArray-internal.hpp            |  2 +-
 hll/include/HllUnion-internal.hpp            | 10 +++----
 kll/include/kll_helper_impl.hpp              |  4 +--
 kll/include/kll_quantile_calculator_impl.hpp |  2 +-
 kll/include/kll_sketch_impl.hpp              | 44 +++++++++++++---------------
 theta/include/binomial_bounds.hpp            |  9 ++----
 theta/include/theta_a_not_b_impl.hpp         |  8 ++---
 theta/include/theta_intersection_impl.hpp    |  4 +--
 theta/include/theta_sketch_impl.hpp          | 26 +++++++---------
 theta/include/theta_union_impl.hpp           |  2 +-
 16 files changed, 88 insertions(+), 105 deletions(-)

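The diff below replaces the C++ alternative operator tokens (and, or) with the
conventional symbolic operators (&& and ||) and drops the MSVC-only
"#include <iso646.h>" workaround that the keyword spellings required. A minimal
sketch of the equivalence follows; it is not code from this repository, and
check_kappa is a hypothetical helper modeled on the kappa range checks in the diff:

    // Both spellings are equivalent in standard C++, but older MSVC accepted the
    // keyword forms (and, or, not) only after including <iso646.h> or <ciso646>,
    // which is why the code base carried the conditional include removed here.
    #include <stdexcept>

    inline void check_kappa(unsigned kappa) {
      // before: if (kappa < 1 or kappa > 3) ...
      // after, using the standard symbolic operators:
      if (kappa < 1 || kappa > 3) {
        throw std::invalid_argument("kappa must be 1, 2 or 3");
      }
    }

Using the symbolic operators everywhere keeps the code portable across compilers
without compiler-specific includes or flags.
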
diff --git a/cpc/include/cpc_compressor_impl.hpp b/cpc/include/cpc_compressor_impl.hpp
index d5d928d..fdafd3f 100644
--- a/cpc/include/cpc_compressor_impl.hpp
+++ b/cpc/include/cpc_compressor_impl.hpp
@@ -438,7 +438,7 @@ uint8_t cpc_compressor<A>::determine_pseudo_phase(uint8_t lg_k, uint64_t c) {
     if (lg_k < 4) throw std::logic_error("lgK < 4");
     const size_t tmp = c >> (lg_k - 4);
     const uint8_t phase = tmp & 15;
-    if (phase < 0 or phase >= 16) throw std::out_of_range("wrong phase");
+    if (phase < 0 || phase >= 16) throw std::out_of_range("wrong phase");
     return phase;
   }
 }
diff --git a/cpc/include/cpc_sketch_impl.hpp b/cpc/include/cpc_sketch_impl.hpp
index db7250e..449d9c9 100644
--- a/cpc/include/cpc_sketch_impl.hpp
+++ b/cpc/include/cpc_sketch_impl.hpp
@@ -51,7 +51,7 @@ first_interesting_column(0),
 kxp(1 << lg_k),
 hip_est_accum(0)
 {
-  if (lg_k < CPC_MIN_LG_K or lg_k > CPC_MAX_LG_K) {
+  if (lg_k < CPC_MIN_LG_K || lg_k > CPC_MAX_LG_K) {
     throw std::invalid_argument("lg_k must be >= " + std::to_string(CPC_MIN_LG_K) + " and <= " + std::to_string(CPC_MAX_LG_K) + ": " + std::to_string(lg_k));
   }
 }
@@ -84,7 +84,7 @@ double cpc_sketch_alloc<A>::get_icon_estimate() const {
 
 template<typename A>
 double cpc_sketch_alloc<A>::get_lower_bound(unsigned kappa) const {
-  if (kappa < 1 or kappa > 3) {
+  if (kappa < 1 || kappa > 3) {
     throw std::invalid_argument("kappa must be 1, 2 or 3");
   }
   if (!was_merged) return get_hip_confidence_lb<A>(*this, kappa);
@@ -93,7 +93,7 @@ double cpc_sketch_alloc<A>::get_lower_bound(unsigned kappa) const {
 
 template<typename A>
 double cpc_sketch_alloc<A>::get_upper_bound(unsigned kappa) const {
-  if (kappa < 1 or kappa > 3) {
+  if (kappa < 1 || kappa > 3) {
     throw std::invalid_argument("kappa must be 1, 2 or 3");
   }
   if (!was_merged) return get_hip_confidence_ub<A>(*this, kappa);
@@ -249,7 +249,7 @@ void cpc_sketch_alloc<A>::update_windowed(uint32_t row_col) {
     const uint64_t c8post = static_cast<uint64_t>(num_coupons) << 3;
     if (c8post >= (27 + w8pre) * k) {
       move_window();
-      if (window_offset < 1 or window_offset > 56) throw std::logic_error("wrong window offset");
+      if (window_offset < 1 || window_offset > 56) throw std::logic_error("wrong window offset");
       const uint64_t w8post = static_cast<uint64_t>(window_offset) << 3;
       if (c8post >= (27 + w8post) * k) throw std::logic_error("c8pre is wrong"); // C < (K * 27/8) + (K * window_offset)
     }
@@ -271,7 +271,7 @@ template<typename A>
 void cpc_sketch_alloc<A>::promote_sparse_to_windowed() {
   const uint64_t k = 1 << lg_k;
   const uint64_t c32 = static_cast<uint64_t>(num_coupons) << 5;
-  if (!(c32 == 3 * k or (lg_k == 4 and c32 > 3 * k))) throw std::logic_error("wrong c32");
+  if (!(c32 == 3 * k || (lg_k == 4 && c32 > 3 * k))) throw std::logic_error("wrong c32");
 
   sliding_window.resize(k, 0); // zero the memory (because we will be OR'ing into it)
 
@@ -424,7 +424,7 @@ void cpc_sketch_alloc<A>::serialize(std::ostream& os) const {
   os.write((char*)&seed_hash, sizeof(seed_hash));
   if (!is_empty()) {
     os.write((char*)&num_coupons, sizeof(num_coupons));
-    if (has_table and has_window) {
+    if (has_table && has_window) {
       // if there is no window it is the same as number of coupons
       os.write((char*)&compressed.table_num_entries, sizeof(compressed.table_num_entries));
       // HIP values can be in two different places in the sequence of fields
@@ -438,7 +438,7 @@ void cpc_sketch_alloc<A>::serialize(std::ostream& os) const {
       os.write((char*)&compressed.window_data_words, sizeof(compressed.window_data_words));
     }
     // this is the second HIP decision point
-    if (has_hip and !(has_table and has_window)) write_hip(os);
+    if (has_hip && !(has_table && has_window)) write_hip(os);
     if (has_window) {
       os.write((char*)compressed.window_data.data(), compressed.window_data_words * sizeof(uint32_t));
     }
@@ -480,7 +480,7 @@ vector_u8<A> cpc_sketch_alloc<A>::serialize(unsigned header_size_bytes) const {
   ptr += copy_to_mem(&seed_hash, ptr, sizeof(seed_hash));
   if (!is_empty()) {
     ptr += copy_to_mem(&num_coupons, ptr, sizeof(num_coupons));
-    if (has_table and has_window) {
+    if (has_table && has_window) {
       // if there is no window it is the same as number of coupons
       ptr += copy_to_mem(&compressed.table_num_entries, ptr, sizeof(compressed.table_num_entries));
       // HIP values can be in two different places in the sequence of fields
@@ -494,7 +494,7 @@ vector_u8<A> cpc_sketch_alloc<A>::serialize(unsigned header_size_bytes) const {
       ptr += copy_to_mem(&compressed.window_data_words, ptr, sizeof(compressed.window_data_words));
     }
     // this is the second HIP decision point
-    if (has_hip and !(has_table and has_window)) ptr += copy_hip_to_mem(ptr);
+    if (has_hip && !(has_table && has_window)) ptr += copy_hip_to_mem(ptr);
     if (has_window) {
       ptr += copy_to_mem(compressed.window_data.data(), ptr, compressed.window_data_words * sizeof(uint32_t));
     }
@@ -532,9 +532,9 @@ cpc_sketch_alloc<A> cpc_sketch_alloc<A>::deserialize(std::istream& is, uint64_t
   uint32_t num_coupons = 0;
   double kxp = 0;
   double hip_est_accum = 0;
-  if (has_table or has_window) {
+  if (has_table || has_window) {
     is.read((char*)&num_coupons, sizeof(num_coupons));
-    if (has_table and has_window) {
+    if (has_table && has_window) {
       is.read((char*)&compressed.table_num_entries, sizeof(compressed.table_num_entries));
       if (has_hip) {
         is.read((char*)&kxp, sizeof(kxp));
@@ -547,7 +547,7 @@ cpc_sketch_alloc<A> cpc_sketch_alloc<A>::deserialize(std::istream& is, uint64_t
     if (has_window) {
       is.read((char*)&compressed.window_data_words, sizeof(compressed.window_data_words));
     }
-    if (has_hip and !(has_table and has_window)) {
+    if (has_hip && !(has_table && has_window)) {
       is.read((char*)&kxp, sizeof(kxp));
       is.read((char*)&hip_est_accum, sizeof(hip_est_accum));
     }
@@ -612,9 +612,9 @@ cpc_sketch_alloc<A> cpc_sketch_alloc<A>::deserialize(const void* bytes, size_t s
   uint32_t num_coupons = 0;
   double kxp = 0;
   double hip_est_accum = 0;
-  if (has_table or has_window) {
+  if (has_table || has_window) {
     ptr += copy_from_mem(ptr, &num_coupons, sizeof(num_coupons));
-    if (has_table and has_window) {
+    if (has_table && has_window) {
       ptr += copy_from_mem(ptr, &compressed.table_num_entries, sizeof(compressed.table_num_entries));
       if (has_hip) {
         ptr += copy_from_mem(ptr, &kxp, sizeof(kxp));
@@ -627,7 +627,7 @@ cpc_sketch_alloc<A> cpc_sketch_alloc<A>::deserialize(const void* bytes, size_t s
     if (has_window) {
       ptr += copy_from_mem(ptr, &compressed.window_data_words, sizeof(compressed.window_data_words));
     }
-    if (has_hip and !(has_table and has_window)) {
+    if (has_hip && !(has_table && has_window)) {
       ptr += copy_from_mem(ptr, &kxp, sizeof(kxp));
       ptr += copy_from_mem(ptr, &hip_est_accum, sizeof(hip_est_accum));
     }
diff --git a/cpc/include/cpc_union_impl.hpp b/cpc/include/cpc_union_impl.hpp
index bd390dc..7676919 100644
--- a/cpc/include/cpc_union_impl.hpp
+++ b/cpc/include/cpc_union_impl.hpp
@@ -29,7 +29,7 @@ seed(seed),
 accumulator(new (AllocCpc().allocate(1)) cpc_sketch_alloc<A>(lg_k, seed)),
 bit_matrix()
 {
-  if (lg_k < CPC_MIN_LG_K or lg_k > CPC_MAX_LG_K) {
+  if (lg_k < CPC_MIN_LG_K || lg_k > CPC_MAX_LG_K) {
     throw std::invalid_argument("lg_k must be >= " + std::to_string(CPC_MIN_LG_K) + " and <= " + std::to_string(CPC_MAX_LG_K) + ": " + std::to_string(lg_k));
   }
 }
@@ -108,16 +108,16 @@ void cpc_union_alloc<A>::internal_update(S&& sketch) {
   if (sketch.get_lg_k() < lg_k) reduce_k(sketch.get_lg_k());
   if (sketch.get_lg_k() < lg_k) throw std::logic_error("sketch lg_k < union lg_k");
 
-  if (accumulator == nullptr and bit_matrix.size() == 0) throw std::logic_error("both accumulator and bit matrix are absent");
+  if (accumulator == nullptr && bit_matrix.size() == 0) throw std::logic_error("both accumulator and bit matrix are absent");
 
-  if (cpc_sketch_alloc<A>::flavor::SPARSE == src_flavor and accumulator != nullptr)  { // Case A
+  if (cpc_sketch_alloc<A>::flavor::SPARSE == src_flavor && accumulator != nullptr)  { // Case A
     if (bit_matrix.size() > 0) throw std::logic_error("union bit_matrix is not expected");
     const auto initial_dest_flavor = accumulator->determine_flavor();
     if (cpc_sketch_alloc<A>::flavor::EMPTY != initial_dest_flavor and
         cpc_sketch_alloc<A>::flavor::SPARSE != initial_dest_flavor) throw std::logic_error("wrong flavor");
 
     // The following partially fixes the snowplow problem provided that the K's are equal.
-    if (cpc_sketch_alloc<A>::flavor::EMPTY == initial_dest_flavor and lg_k == sketch.get_lg_k()) {
+    if (cpc_sketch_alloc<A>::flavor::EMPTY == initial_dest_flavor && lg_k == sketch.get_lg_k()) {
       *accumulator = std::forward<S>(sketch);
       return;
     }
@@ -125,33 +125,33 @@ void cpc_union_alloc<A>::internal_update(S&& sketch) {
     walk_table_updating_sketch(sketch.surprising_value_table);
     const auto final_dst_flavor = accumulator->determine_flavor();
     // if the accumulator has graduated beyond sparse, switch to a bit matrix representation
-    if (final_dst_flavor != cpc_sketch_alloc<A>::flavor::EMPTY and final_dst_flavor != cpc_sketch_alloc<A>::flavor::SPARSE) {
+    if (final_dst_flavor != cpc_sketch_alloc<A>::flavor::EMPTY && final_dst_flavor != cpc_sketch_alloc<A>::flavor::SPARSE) {
       switch_to_bit_matrix();
     }
     return;
   }
 
-  if (cpc_sketch_alloc<A>::flavor::SPARSE == src_flavor and bit_matrix.size() > 0)  { // Case B
+  if (cpc_sketch_alloc<A>::flavor::SPARSE == src_flavor && bit_matrix.size() > 0)  { // Case B
     if (accumulator != nullptr) throw std::logic_error("union accumulator != null");
     or_table_into_matrix(sketch.surprising_value_table);
     return;
   }
 
-  if (cpc_sketch_alloc<A>::flavor::HYBRID != src_flavor and cpc_sketch_alloc<A>::flavor::PINNED != src_flavor
-      and cpc_sketch_alloc<A>::flavor::SLIDING != src_flavor) throw std::logic_error("wrong flavor");
+  if (cpc_sketch_alloc<A>::flavor::HYBRID != src_flavor && cpc_sketch_alloc<A>::flavor::PINNED != src_flavor
+      && cpc_sketch_alloc<A>::flavor::SLIDING != src_flavor) throw std::logic_error("wrong flavor");
 
   // source is past SPARSE mode, so make sure that dest is a bit matrix
   if (accumulator != nullptr) {
     if (bit_matrix.size() > 0) throw std::logic_error("union bit matrix is not expected");
     const auto dst_flavor = accumulator->determine_flavor();
-    if (cpc_sketch_alloc<A>::flavor::EMPTY != dst_flavor and cpc_sketch_alloc<A>::flavor::SPARSE != dst_flavor) {
+    if (cpc_sketch_alloc<A>::flavor::EMPTY != dst_flavor && cpc_sketch_alloc<A>::flavor::SPARSE != dst_flavor) {
       throw std::logic_error("wrong flavor");
     }
     switch_to_bit_matrix();
   }
   if (bit_matrix.size() == 0) throw std::logic_error("union bit_matrix is expected");
 
-  if (cpc_sketch_alloc<A>::flavor::HYBRID == src_flavor or cpc_sketch_alloc<A>::flavor::PINNED == src_flavor) { // Case C
+  if (cpc_sketch_alloc<A>::flavor::HYBRID == src_flavor || cpc_sketch_alloc<A>::flavor::PINNED == src_flavor) { // Case C
     or_window_into_matrix(sketch.sliding_window, sketch.window_offset, sketch.get_lg_k());
     or_table_into_matrix(sketch.surprising_value_table);
     return;
@@ -192,8 +192,8 @@ cpc_sketch_alloc<A> cpc_union_alloc<A>::get_result_from_bit_matrix() const {
   const uint64_t num_coupons = count_bits_set_in_matrix(bit_matrix.data(), k);
 
   const auto flavor = cpc_sketch_alloc<A>::determine_flavor(lg_k, num_coupons);
-  if (flavor != cpc_sketch_alloc<A>::flavor::HYBRID and flavor != cpc_sketch_alloc<A>::flavor::PINNED
-      and flavor != cpc_sketch_alloc<A>::flavor::SLIDING) throw std::logic_error("wrong flavor");
+  if (flavor != cpc_sketch_alloc<A>::flavor::HYBRID && flavor != cpc_sketch_alloc<A>::flavor::PINNED
+      && flavor != cpc_sketch_alloc<A>::flavor::SLIDING) throw std::logic_error("wrong flavor");
 
   const uint8_t offset = cpc_sketch_alloc<A>::determine_correct_offset(lg_k, num_coupons);
 
@@ -255,7 +255,7 @@ void cpc_union_alloc<A>::walk_table_updating_sketch(const u32_table<A>& table) {
   size_t stride = static_cast<size_t>(golden * static_cast<double>(num_slots));
   if (stride < 2) throw std::logic_error("stride < 2");
   if (stride == ((stride >> 1) << 1)) stride += 1; // force the stride to be odd
-  if (stride < 3 or stride >= num_slots) throw std::out_of_range("stride out of range");
+  if (stride < 3 || stride >= num_slots) throw std::out_of_range("stride out of range");
 
   for (size_t i = 0, j = 0; i < num_slots; i++, j += stride) {
     j &= num_slots - 1;
@@ -304,7 +304,7 @@ void cpc_union_alloc<A>::or_matrix_into_matrix(const vector_u64<A>& src_matrix,
 template<typename A>
 void cpc_union_alloc<A>::reduce_k(uint8_t new_lg_k) {
   if (new_lg_k >= lg_k) throw std::logic_error("new LgK >= union lgK");
-  if (accumulator == nullptr and bit_matrix.size() == 0) throw std::logic_error("both accumulator and bit_matrix are absent");
+  if (accumulator == nullptr && bit_matrix.size() == 0) throw std::logic_error("both accumulator and bit_matrix are absent");
 
   if (bit_matrix.size() > 0) { // downsample the unioner's bit matrix
     if (accumulator != nullptr) throw std::logic_error("accumulator is not null");
diff --git a/cpc/include/u32_table_impl.hpp b/cpc/include/u32_table_impl.hpp
index 3b49459..aa44ba2 100644
--- a/cpc/include/u32_table_impl.hpp
+++ b/cpc/include/u32_table_impl.hpp
@@ -44,7 +44,7 @@ num_items(0),
 slots(1 << lg_size, UINT32_MAX)
 {
   if (lg_size < 2) throw std::invalid_argument("lg_size must be >= 2");
-  if (num_valid_bits < 1 or num_valid_bits > 32) throw std::invalid_argument("num_valid_bits must be between 1 and 32");
+  if (num_valid_bits < 1 || num_valid_bits > 32) throw std::invalid_argument("num_valid_bits must be between 1 and 32");
 }
 
 template<typename A>
@@ -102,7 +102,7 @@ bool u32_table<A>::maybe_delete(uint32_t item) {
     fetched = slots[probe];
   }
   // shrink if necessary
-  if (U32_TABLE_DOWNSIZE_DENOM * num_items < U32_TABLE_DOWNSIZE_NUMER * (1 << lg_size) and lg_size > 2) {
+  if (U32_TABLE_DOWNSIZE_DENOM * num_items < U32_TABLE_DOWNSIZE_NUMER * (1 << lg_size) && lg_size > 2) {
     rebuild(lg_size - 1);
   }
   return true;
@@ -130,7 +130,7 @@ size_t u32_table<A>::lookup(uint32_t item) const {
   const uint8_t shift = num_valid_bits - lg_size;
   size_t probe = item >> shift;
   if (probe > mask) throw std::logic_error("probe out of range");
-  while (slots[probe] != item and slots[probe] != UINT32_MAX) {
+  while (slots[probe] != item && slots[probe] != UINT32_MAX) {
     probe = (probe + 1) & mask;
   }
   return probe;
@@ -178,7 +178,7 @@ vector_u32<A> u32_table<A>::unwrapping_get_items() const {
 
   // special rules for the region before the first empty slot
   uint32_t hi_bit = 1 << (num_valid_bits - 1);
-  while (i < table_size and slots[i] != UINT32_MAX) {
+  while (i < table_size && slots[i] != UINT32_MAX) {
     const uint32_t item = slots[i++];
     if (item & hi_bit) { result[r--] = item; } // this item was probably wrapped, so move to end
     else               { result[l++] = item; }
@@ -213,7 +213,7 @@ void u32_table<A>::merge(
     else if (arr_a[a] < arr_b[b]) { arr_c[c] = arr_a[a++]; }
     else                          { arr_c[c] = arr_b[b++]; }
   }
-  if (a != lim_a or b != lim_b) throw std::logic_error("merging error");
+  if (a != lim_a || b != lim_b) throw std::logic_error("merging error");
 }
 
 // In applications where the input array is already nearly sorted,
@@ -231,7 +231,7 @@ void u32_table<A>::introspective_insertion_sort(uint32_t* a, size_t l, size_t r)
   for (size_t i = l + 1; i < r; i++) {
     size_t j = i;
     uint32_t v = a[i];
-    while (j >= l + 1 and v < a[j - 1]) {
+    while (j >= l + 1 && v < a[j - 1]) {
       a[j] = a[j - 1];
       j--;
     }
@@ -252,7 +252,7 @@ void u32_table<A>::knuth_shell_sort3(uint32_t* a, size_t l, size_t r) {
     for (size_t i = l + h; i < r; i++) {
       size_t j = i;
       const uint32_t v = a[i];
-      while (j >= l + h and v < a[j - h]) {
+      while (j >= l + h && v < a[j - h]) {
         a[j] = a[j - h];
         j -= h;
       }
diff --git a/fi/include/frequent_items_sketch_impl.hpp b/fi/include/frequent_items_sketch_impl.hpp
index 5446bc8..6e2db10 100644
--- a/fi/include/frequent_items_sketch_impl.hpp
+++ b/fi/include/frequent_items_sketch_impl.hpp
@@ -160,7 +160,7 @@ frequent_items_sketch<T, H, E, S, A>::get_frequent_items(frequent_items_error_ty
   for (auto &it: map) {
     const uint64_t lb = it.second;
     const uint64_t ub = it.second + offset;
-    if ((err_type == NO_FALSE_NEGATIVES and ub > threshold) or (err_type == NO_FALSE_POSITIVES and lb > threshold)) {
+    if ((err_type == NO_FALSE_NEGATIVES && ub > threshold) || (err_type == NO_FALSE_POSITIVES && lb > threshold)) {
       items.push_back(row(&it.first, it.second, offset));
     }
   }
diff --git a/fi/include/reverse_purge_hash_map_impl.hpp b/fi/include/reverse_purge_hash_map_impl.hpp
index 4fd8af7..efc3b80 100644
--- a/fi/include/reverse_purge_hash_map_impl.hpp
+++ b/fi/include/reverse_purge_hash_map_impl.hpp
@@ -25,10 +25,6 @@
 #include <iterator>
 #include <cmath>
 
-#if defined(_MSC_VER)
-#include <iso646.h> // for and/or keywords
-#endif // _MSC_VER
-
 namespace datasketches {
 
 // clang++ seems to require this declaration for CMAKE_BUILD_TYPE='Debug"
@@ -178,7 +174,7 @@ template<typename T, typename H, typename E, typename A>
 typename reverse_purge_hash_map<T, H, E, A>::iterator reverse_purge_hash_map<T, H, E, A>::begin() const {
   const uint32_t size = 1 << lg_cur_size;
   uint32_t i = 0;
-  while (i < size and !is_active(i)) i++;
+  while (i < size && !is_active(i)) i++;
   return reverse_purge_hash_map<T, H, E, A>::iterator(this, i, 0);
 }
 
diff --git a/hll/include/HllArray-internal.hpp b/hll/include/HllArray-internal.hpp
index f99180c..52fc467 100644
--- a/hll/include/HllArray-internal.hpp
+++ b/hll/include/HllArray-internal.hpp
@@ -26,11 +26,11 @@
 #include "CubicInterpolation.hpp"
 #include "CompositeInterpolationXTable.hpp"
 #include "CouponList.hpp"
+#include "inv_pow2_table.hpp"
 #include <cstring>
 #include <cmath>
 #include <stdexcept>
 #include <string>
-#include "../../common/include/inv_pow2_table.hpp"
 
 namespace datasketches {
 
diff --git a/hll/include/HllUnion-internal.hpp b/hll/include/HllUnion-internal.hpp
index 829af1d..0d039f2 100644
--- a/hll/include/HllUnion-internal.hpp
+++ b/hll/include/HllUnion-internal.hpp
@@ -84,8 +84,8 @@ void hll_union_alloc<A>::update(const hll_sketch_alloc<A>& sketch) {
 template<typename A>
 void hll_union_alloc<A>::update(hll_sketch_alloc<A>&& sketch) {
   if (sketch.is_empty()) return;
-  if (gadget.is_empty() and sketch.get_target_type() == HLL_8 and sketch.get_lg_config_k() <= lg_max_k) {
-    if (sketch.get_current_mode() == HLL or sketch.get_lg_config_k() == lg_max_k) {
+  if (gadget.is_empty() && sketch.get_target_type() == HLL_8 && sketch.get_lg_config_k() <= lg_max_k) {
+    if (sketch.get_current_mode() == HLL || sketch.get_lg_config_k() == lg_max_k) {
       gadget = std::move(sketch);
     }
   }
@@ -312,8 +312,8 @@ template<typename A>
 void hll_union_alloc<A>::union_impl(const hll_sketch_alloc<A>& sketch, const int lg_max_k) {
   const HllSketchImpl<A>* src_impl = sketch.sketch_impl; //default
   HllSketchImpl<A>* dst_impl = gadget.sketch_impl; //default
-  if (src_impl->getCurMode() == LIST or src_impl->getCurMode() == SET) {
-    if (dst_impl->isEmpty() and src_impl->getLgConfigK() == dst_impl->getLgConfigK()) {
+  if (src_impl->getCurMode() == LIST || src_impl->getCurMode() == SET) {
+    if (dst_impl->isEmpty() && src_impl->getLgConfigK() == dst_impl->getLgConfigK()) {
       dst_impl = src_impl->copyAs(HLL_8);
       gadget.sketch_impl->get_deleter()(gadget.sketch_impl); // gadget replaced
     } else {
@@ -323,7 +323,7 @@ void hll_union_alloc<A>::union_impl(const hll_sketch_alloc<A>& sketch, const int
       }
     }
   } else if (!dst_impl->isEmpty()) { // src is HLL
-    if (dst_impl->getCurMode() == LIST or dst_impl->getCurMode() == SET) {
+    if (dst_impl->getCurMode() == LIST || dst_impl->getCurMode() == SET) {
       // swap so that src is LIST or SET, tgt is HLL
       // use lg_max_k because LIST has effective K of 2^26
       const CouponList<A>* src = static_cast<const CouponList<A>*>(dst_impl);
diff --git a/kll/include/kll_helper_impl.hpp b/kll/include/kll_helper_impl.hpp
index bc8435f..011a78d 100644
--- a/kll/include/kll_helper_impl.hpp
+++ b/kll/include/kll_helper_impl.hpp
@@ -223,7 +223,7 @@ kll_helper::compress_result kll_helper::general_compress(uint16_t k, uint8_t m,
     const auto raw_lim = in_levels[current_level + 1];
     const auto raw_pop = raw_lim - raw_beg;
 
-    if ((current_item_count < target_item_count) or (raw_pop < level_capacity(k, current_num_levels, current_level, m))) {
+    if ((current_item_count < target_item_count) || (raw_pop < level_capacity(k, current_num_levels, current_level, m))) {
       // move level over as is
       // make sure we are not moving data upwards
       if (raw_beg < out_levels[current_level]) throw std::logic_error("wrong move");
@@ -247,7 +247,7 @@ kll_helper::compress_result kll_helper::general_compress(uint16_t k, uint8_t m,
       }
 
       // level zero might not be sorted, so we must sort it if we wish to compact it
-      if ((current_level == 0) and !is_level_zero_sorted) {
+      if ((current_level == 0) && !is_level_zero_sorted) {
         std::sort(&items[adj_beg], &items[adj_beg + adj_pop], C());
       }
 
diff --git a/kll/include/kll_quantile_calculator_impl.hpp b/kll/include/kll_quantile_calculator_impl.hpp
index 16dc06f..2e3a422 100644
--- a/kll/include/kll_quantile_calculator_impl.hpp
+++ b/kll/include/kll_quantile_calculator_impl.hpp
@@ -165,7 +165,7 @@ void kll_quantile_calculator<T, C, A>::tandem_merge(const T* items_src, const ui
   auto i_src_2 = from_index_2;
   auto i_dst = from_index_1;
 
-  while ((i_src_1 < to_index_1) and (i_src_2 < to_index_2)) {
+  while ((i_src_1 < to_index_1) && (i_src_2 < to_index_2)) {
     if (C()(items_src[i_src_1], items_src[i_src_2])) {
       items_dst[i_dst] = std::move(items_src[i_src_1]);
       weights_dst[i_dst] = weights_src[i_src_1];
diff --git a/kll/include/kll_sketch_impl.hpp b/kll/include/kll_sketch_impl.hpp
index 9ead31a..8ddad1d 100644
--- a/kll/include/kll_sketch_impl.hpp
+++ b/kll/include/kll_sketch_impl.hpp
@@ -23,10 +23,6 @@
 #include <iostream>
 #include <iomanip>
 
-#if defined(_MSC_VER)
-#include <iso646.h> // for and/or keywords
-#endif // _MSC_VER
-
 #include "kll_helper.hpp"
 
 namespace datasketches {
@@ -46,7 +42,7 @@ min_value_(nullptr),
 max_value_(nullptr),
 is_level_zero_sorted_(false)
 {
-  if (k < MIN_K or k > MAX_K) {
+  if (k < MIN_K || k > MAX_K) {
     throw std::invalid_argument("K must be >= " + std::to_string(MIN_K) + " and <= " + std::to_string(MAX_K) + ": " + std::to_string(k));
   }
   levels_ = new (AllocU32().allocate(2)) uint32_t[2] {k_, k_};
@@ -269,7 +265,7 @@ T kll_sketch<T, C, S, A>::get_quantile(double fraction) const {
   if (is_empty()) return get_invalid_value();
   if (fraction == 0.0) return *min_value_;
   if (fraction == 1.0) return *max_value_;
-  if ((fraction < 0.0) or (fraction > 1.0)) {
+  if ((fraction < 0.0) || (fraction > 1.0)) {
     throw std::invalid_argument("Fraction cannot be less than zero or greater than 1.0");
   }
   // has side effect of sorting level zero if needed
@@ -285,7 +281,7 @@ std::vector<T, A> kll_sketch<T, C, S, A>::get_quantiles(const double* fractions,
   quantiles.reserve(size);
   for (uint32_t i = 0; i < size; i++) {
     const double fraction = fractions[i];
-    if ((fraction < 0.0) or (fraction > 1.0)) {
+    if ((fraction < 0.0) || (fraction > 1.0)) {
       throw std::invalid_argument("Fraction cannot be less than zero or greater than 1.0");
     }
     if      (fraction == 0.0) quantiles.push_back(*min_value_);
@@ -313,7 +309,7 @@ double kll_sketch<T, C, S, A>::get_rank(const T& value) const {
     for (uint32_t i = from_index; i < to_index; i++) {
       if (C()(items_[i], value)) {
         total += weight;
-      } else if ((level > 0) or is_level_zero_sorted_) {
+      } else if ((level > 0) || is_level_zero_sorted_) {
         break; // levels above 0 are sorted, no point comparing further
       }
     }
@@ -343,7 +339,7 @@ template<typename T, typename C, typename S, typename A>
 template<typename TT, typename std::enable_if<std::is_arithmetic<TT>::value, int>::type>
 size_t kll_sketch<T, C, S, A>::get_serialized_size_bytes() const {
   if (is_empty()) { return EMPTY_SIZE_BYTES; }
-  if (num_levels_ == 1 and get_num_retained() == 1) {
+  if (num_levels_ == 1 && get_num_retained() == 1) {
     return DATA_START_SINGLE_ITEM + sizeof(TT);
   }
   // the last integer in the levels_ array is not serialized because it can be derived
@@ -355,7 +351,7 @@ template<typename T, typename C, typename S, typename A>
 template<typename TT, typename std::enable_if<!std::is_arithmetic<TT>::value, int>::type>
 size_t kll_sketch<T, C, S, A>::get_serialized_size_bytes() const {
   if (is_empty()) { return EMPTY_SIZE_BYTES; }
-  if (num_levels_ == 1 and get_num_retained() == 1) {
+  if (num_levels_ == 1 && get_num_retained() == 1) {
     return DATA_START_SINGLE_ITEM + S().size_of_item(items_[levels_[0]]);
   }
   // the last integer in the levels_ array is not serialized because it can be derived
@@ -369,7 +365,7 @@ size_t kll_sketch<T, C, S, A>::get_serialized_size_bytes() const {
 template<typename T, typename C, typename S, typename A>
 void kll_sketch<T, C, S, A>::serialize(std::ostream& os) const {
   const bool is_single_item = n_ == 1;
-  const uint8_t preamble_ints(is_empty() or is_single_item ? PREAMBLE_INTS_SHORT : PREAMBLE_INTS_FULL);
+  const uint8_t preamble_ints(is_empty() || is_single_item ? PREAMBLE_INTS_SHORT : PREAMBLE_INTS_FULL);
   os.write((char*)&preamble_ints, sizeof(preamble_ints));
   const uint8_t serial_version(is_single_item ? SERIAL_VERSION_2 : SERIAL_VERSION_1);
   os.write((char*)&serial_version, sizeof(serial_version));
@@ -404,7 +400,7 @@ vector_u8<A> kll_sketch<T, C, S, A>::serialize(unsigned header_size_bytes) const
   const size_t size = header_size_bytes + get_serialized_size_bytes();
   vector_u8<A> bytes(size);
   uint8_t* ptr = bytes.data() + header_size_bytes;
-  const uint8_t preamble_ints(is_empty() or is_single_item ? PREAMBLE_INTS_SHORT : PREAMBLE_INTS_FULL);
+  const uint8_t preamble_ints(is_empty() || is_single_item ? PREAMBLE_INTS_SHORT : PREAMBLE_INTS_FULL);
   ptr += copy_to_mem(&preamble_ints, ptr, sizeof(preamble_ints));
   const uint8_t serial_version(is_single_item ? SERIAL_VERSION_2 : SERIAL_VERSION_1);
   ptr += copy_to_mem(&serial_version, ptr, sizeof(serial_version));
@@ -620,7 +616,7 @@ void kll_sketch<T, C, S, A>::compress_while_updating(void) {
 
   // level zero might not be sorted, so we must sort it if we wish to compact it
   // sort_level_zero() is not used here because of the adjustment for odd number of items
-  if ((level == 0) and !is_level_zero_sorted_) {
+  if ((level == 0) && !is_level_zero_sorted_) {
     std::sort(&items_[adj_beg], &items_[adj_beg + adj_pop], C());
   }
   if (pop_above == 0) {
@@ -732,7 +728,7 @@ vector_d<A> kll_sketch<T, C, S, A>::get_PMF_or_CDF(const T* split_points, uint32
   while (level < num_levels_) {
     const auto from_index = levels_[level];
     const auto to_index = levels_[level + 1]; // exclusive
-    if ((level == 0) and !is_level_zero_sorted_) {
+    if ((level == 0) && !is_level_zero_sorted_) {
       increment_buckets_unsorted_level(from_index, to_index, weight, split_points, size, buckets.data());
     } else {
       increment_buckets_sorted_level(from_index, to_index, weight, split_points, size, buckets.data());
@@ -776,7 +772,7 @@ void kll_sketch<T, C, S, A>::increment_buckets_sorted_level(uint32_t from_index,
 {
   uint32_t i = from_index;
   uint32_t j = 0;
-  while ((i <  to_index) and (j < size)) {
+  while ((i <  to_index) && (j < size)) {
     if (C()(items_[i], split_points[j])) {
       buckets[j] += weight; // this sample goes into this bucket
       i++; // move on to next sample and see whether it also goes into this bucket
@@ -850,11 +846,11 @@ void kll_sketch<T, C, S, A>::populate_work_arrays(const kll_sketch& other, T* wo
     const uint32_t other_pop = other.safe_level_size(lvl);
     worklevels[lvl + 1] = worklevels[lvl] + self_pop + other_pop;
 
-    if ((self_pop > 0) and (other_pop == 0)) {
+    if ((self_pop > 0) && (other_pop == 0)) {
       kll_helper::move_construct<T>(items_, levels_[lvl], levels_[lvl] + self_pop, workbuf, worklevels[lvl], true);
-    } else if ((self_pop == 0) and (other_pop > 0)) {
+    } else if ((self_pop == 0) && (other_pop > 0)) {
       kll_helper::copy_construct<T>(other.items_, other.levels_[lvl], other.levels_[lvl] + other_pop, workbuf, worklevels[lvl]);
-    } else if ((self_pop > 0) and (other_pop > 0)) {
+    } else if ((self_pop > 0) && (other_pop > 0)) {
       kll_helper::merge_sorted_arrays<T, C>(items_, levels_[lvl], self_pop, other.items_, other.levels_[lvl], other_pop, workbuf, worklevels[lvl]);
     }
   }
@@ -875,11 +871,11 @@ void kll_sketch<T, C, S, A>::populate_work_arrays(kll_sketch&& other, T* workbuf
     const uint32_t other_pop = other.safe_level_size(lvl);
     worklevels[lvl + 1] = worklevels[lvl] + self_pop + other_pop;
 
-    if ((self_pop > 0) and (other_pop == 0)) {
+    if ((self_pop > 0) && (other_pop == 0)) {
       kll_helper::move_construct<T>(items_, levels_[lvl], levels_[lvl] + self_pop, workbuf, worklevels[lvl], true);
-    } else if ((self_pop == 0) and (other_pop > 0)) {
+    } else if ((self_pop == 0) && (other_pop > 0)) {
       kll_helper::move_construct<T>(other.items_, other.levels_[lvl], other.levels_[lvl] + other_pop, workbuf, worklevels[lvl], false);
-    } else if ((self_pop > 0) and (other_pop > 0)) {
+    } else if ((self_pop > 0) && (other_pop > 0)) {
       kll_helper::merge_sorted_arrays<T, C>(items_, levels_[lvl], self_pop, other.items_, other.levels_[lvl], other_pop, workbuf, worklevels[lvl]);
     }
   }
@@ -917,7 +913,7 @@ template<typename T, typename C, typename S, typename A>
 void kll_sketch<T, C, S, A>::check_preamble_ints(uint8_t preamble_ints, uint8_t flags_byte) {
   const bool is_empty(flags_byte & (1 << flags::IS_EMPTY));
   const bool is_single_item(flags_byte & (1 << flags::IS_SINGLE_ITEM));
-  if (is_empty or is_single_item) {
+  if (is_empty || is_single_item) {
     if (preamble_ints != PREAMBLE_INTS_SHORT) {
       throw std::invalid_argument("Possible corruption: preamble ints must be "
           + std::to_string(PREAMBLE_INTS_SHORT) + " for an empty or single item sketch: " + std::to_string(preamble_ints));
@@ -932,7 +928,7 @@ void kll_sketch<T, C, S, A>::check_preamble_ints(uint8_t preamble_ints, uint8_t
 
 template<typename T, typename C, typename S, typename A>
 void kll_sketch<T, C, S, A>::check_serial_version(uint8_t serial_version) {
-  if (serial_version != SERIAL_VERSION_1 and serial_version != SERIAL_VERSION_2) {
+  if (serial_version != SERIAL_VERSION_1 && serial_version != SERIAL_VERSION_2) {
     throw std::invalid_argument("Possible corruption: serial version mismatch: expected "
         + std::to_string(SERIAL_VERSION_1) + " or " + std::to_string(SERIAL_VERSION_2)
         + ", got " + std::to_string(serial_version));
@@ -1024,7 +1020,7 @@ typename kll_sketch<T, C, S, A>::const_iterator& kll_sketch<T, C, S, A>::const_i
     do {
       ++level;
       weight *= 2;
-    } while (level < num_levels and levels[level] == levels[level + 1]);
+    } while (level < num_levels && levels[level] == levels[level + 1]);
   }
   return *this;
 }
diff --git a/theta/include/binomial_bounds.hpp b/theta/include/binomial_bounds.hpp
index 7ce0e15..0f0222a 100644
--- a/theta/include/binomial_bounds.hpp
+++ b/theta/include/binomial_bounds.hpp
@@ -23,11 +23,6 @@
 #include <algorithm>
 #include <cmath>
 
-#if defined(_MSC_VER)
-#include <iso646.h> // for and/or keywords
-#endif // _MSC_VER
-
-
 /*
  * This class enables the estimation of error bounds given a sample set size, the sampling
  * probability theta, the number of standard deviations and a simple noDataSeen flag. This can
@@ -445,13 +440,13 @@ private:
   }
 
   static void check_theta(double theta) {
-    if (theta < 0 or theta > 1) {
+    if (theta < 0 || theta > 1) {
       throw std::invalid_argument("theta must be in [0, 1]");
     }
   }
 
   static void check_num_std_devs(unsigned num_std_devs) {
-    if (num_std_devs < 1 or num_std_devs > 3) {
+    if (num_std_devs < 1 || num_std_devs > 3) {
       throw std::invalid_argument("num_std_devs must be 1, 2 or 3");
     }
   }
diff --git a/theta/include/theta_a_not_b_impl.hpp b/theta/include/theta_a_not_b_impl.hpp
index 80fe0dc..0cb3d40 100644
--- a/theta/include/theta_a_not_b_impl.hpp
+++ b/theta/include/theta_a_not_b_impl.hpp
@@ -53,15 +53,15 @@ compact_theta_sketch_alloc<A> theta_a_not_b_alloc<A>::compute(const theta_sketch
     keys_size = count;
     keys = AllocU64().allocate(keys_size);
     std::copy_if(a.begin(), a.end(), keys, [theta](uint64_t key) { return key < theta; });
-    if (ordered and !a.is_ordered()) std::sort(keys, &keys[keys_size]);
-    if (count == 0 and theta == theta_sketch_alloc<A>::MAX_THETA) is_empty = true;
+    if (ordered && !a.is_ordered()) std::sort(keys, &keys[keys_size]);
+    if (count == 0 && theta == theta_sketch_alloc<A>::MAX_THETA) is_empty = true;
     return compact_theta_sketch_alloc<A>(is_empty, theta, keys, count, seed_hash_, a.is_ordered() or ordered);
   }
 
   keys_size = a.get_num_retained();
   keys = AllocU64().allocate(keys_size);
 
-  if (a.is_ordered() and b.is_ordered()) { // sort-based
+  if (a.is_ordered() && b.is_ordered()) { // sort-based
     const auto end = std::set_difference(a.begin(), a.end(), b.begin(), b.end(), keys);
     count = end - keys;
   } else { // hash-based
@@ -97,7 +97,7 @@ compact_theta_sketch_alloc<A> theta_a_not_b_alloc<A>::compute(const theta_sketch
     std::copy(keys, &keys[count], keys_copy);
     AllocU64().deallocate(keys, keys_size);
     keys = keys_copy;
-    if (ordered and !a.is_ordered()) std::sort(keys, &keys[count]);
+    if (ordered && !a.is_ordered()) std::sort(keys, &keys[count]);
   }
 
   return compact_theta_sketch_alloc<A>(is_empty, theta, keys, count, seed_hash_, a.is_ordered() or ordered);
diff --git a/theta/include/theta_intersection_impl.hpp b/theta/include/theta_intersection_impl.hpp
index 07d8b9c..f6c66db 100644
--- a/theta/include/theta_intersection_impl.hpp
+++ b/theta/include/theta_intersection_impl.hpp
@@ -109,7 +109,7 @@ void theta_intersection_alloc<A>::update(const theta_sketch_alloc<A>& sketch) {
   if (sketch.get_seed_hash() != seed_hash_) throw std::invalid_argument("seed hash mismatch");
   is_empty_ |= sketch.is_empty();
   theta_ = std::min(theta_, sketch.get_theta64());
-  if (is_valid_ and num_keys_ == 0) return;
+  if (is_valid_ && num_keys_ == 0) return;
   if (sketch.get_num_retained() == 0) {
     is_valid_ = true;
     if (keys_ != nullptr) {
@@ -150,7 +150,7 @@ void theta_intersection_alloc<A>::update(const theta_sketch_alloc<A>& sketch) {
     }
     if (count > sketch.get_num_retained()) {
       throw std::invalid_argument(" more keys then expected, possibly corrupted input sketch");
-    } else if (!sketch.is_ordered() and count < sketch.get_num_retained()) {
+    } else if (!sketch.is_ordered() && count < sketch.get_num_retained()) {
       throw std::invalid_argument(" fewer keys then expected, possibly corrupted input sketch");
     }
     if (match_count == 0) {
diff --git a/theta/include/theta_sketch_impl.hpp b/theta/include/theta_sketch_impl.hpp
index 0956fcf..54f0ba7 100644
--- a/theta/include/theta_sketch_impl.hpp
+++ b/theta/include/theta_sketch_impl.hpp
@@ -96,7 +96,7 @@ double theta_sketch_alloc<A>::get_upper_bound(uint8_t num_std_devs) const {
 
 template<typename A>
 bool theta_sketch_alloc<A>::is_estimation_mode() const {
-  return theta_ < MAX_THETA and !is_empty_;
+  return theta_ < MAX_THETA && !is_empty_;
 }
 
 template<typename A>
@@ -589,7 +589,7 @@ compact_theta_sketch_alloc<A> update_theta_sketch_alloc<A>::compact(bool ordered
 template<typename A>
 void update_theta_sketch_alloc<A>::internal_update(uint64_t hash) {
   this->is_empty_ = false;
-  if (hash >= this->theta_ or hash == 0) return; // hash == 0 is reserved to mark empty slots in the table
+  if (hash >= this->theta_ || hash == 0) return; // hash == 0 is reserved to mark empty slots in the table
   if (hash_search_or_insert(hash, keys_, lg_cur_size_)) {
     num_keys_++;
     if (num_keys_ > capacity_) {
@@ -637,7 +637,7 @@ void update_theta_sketch_alloc<A>::rebuild() {
   std::fill(new_keys, &new_keys[cur_size], 0);
   num_keys_ = 0;
   for (uint32_t i = 0; i < cur_size; i++) {
-    if (keys_[i] != 0 and keys_[i] < this->theta_) {
+    if (keys_[i] != 0 && keys_[i] < this->theta_) {
       hash_search_or_insert(keys_[i], new_keys, lg_cur_size_); // TODO hash_insert
       num_keys_++;
     }
@@ -676,8 +676,6 @@ bool update_theta_sketch_alloc<A>::hash_search_or_insert(uint64_t hash, uint64_t
     }
     cur_probe = (cur_probe + stride) & mask;
   } while (cur_probe != loop_index);
-  //std::cerr << "hash_search_or_insert: lg=" << (int)lg_size << ", hash=" << hash << std::endl;
-  //std::cerr << "cur_probe=" << cur_probe << ", stride=" << stride << std::endl;
   throw std::logic_error("key not found and no empty slots!");
 }
 
@@ -696,8 +694,6 @@ bool update_theta_sketch_alloc<A>::hash_search(uint64_t hash, const uint64_t* ta
     }
     cur_probe = (cur_probe + stride) & mask;
   } while (cur_probe != loop_index);
-  //std::cerr << "hash_search: lg=" << (int)lg_size << ", hash=" << hash << std::endl;
-  //std::cerr << "cur_probe=" << cur_probe << ", stride=" << stride << std::endl;
   throw std::logic_error("key not found and search wrapped");
 }
 
@@ -739,10 +735,10 @@ theta_sketch_alloc<A>(other),
 keys_(AllocU64().allocate(other.get_num_retained())),
 num_keys_(other.get_num_retained()),
 seed_hash_(other.get_seed_hash()),
-is_ordered_(other.is_ordered() or ordered)
+is_ordered_(other.is_ordered() || ordered)
 {
   std::copy(other.begin(), other.end(), keys_);
-  if (ordered and !other.is_ordered()) std::sort(keys_, &keys_[num_keys_]);
+  if (ordered && !other.is_ordered()) std::sort(keys_, &keys_[num_keys_]);
 }
 
 template<typename A>
@@ -823,8 +819,8 @@ void compact_theta_sketch_alloc<A>::to_stream(std::ostream& os, bool print_items
 
 template<typename A>
 void compact_theta_sketch_alloc<A>::serialize(std::ostream& os) const {
-  const bool is_single_item = num_keys_ == 1 and !this->is_estimation_mode();
-  const uint8_t preamble_longs = this->is_empty() or is_single_item ? 1 : this->is_estimation_mode() ? 3 : 2;
+  const bool is_single_item = num_keys_ == 1 && !this->is_estimation_mode();
+  const uint8_t preamble_longs = this->is_empty() || is_single_item ? 1 : this->is_estimation_mode() ? 3 : 2;
   os.write((char*)&preamble_longs, sizeof(preamble_longs));
   const uint8_t serial_version = theta_sketch_alloc<A>::SERIAL_VERSION;
   os.write((char*)&serial_version, sizeof(serial_version));
@@ -856,8 +852,8 @@ void compact_theta_sketch_alloc<A>::serialize(std::ostream& os) const {
 
 template<typename A>
 vector_u8<A> compact_theta_sketch_alloc<A>::serialize(unsigned header_size_bytes) const {
-  const bool is_single_item = num_keys_ == 1 and !this->is_estimation_mode();
-  const uint8_t preamble_longs = this->is_empty() or is_single_item ? 1 : this->is_estimation_mode() ? 3 : 2;
+  const bool is_single_item = num_keys_ == 1 && !this->is_estimation_mode();
+  const uint8_t preamble_longs = this->is_empty() || is_single_item ? 1 : this->is_estimation_mode() ? 3 : 2;
   const size_t size = header_size_bytes + sizeof(uint64_t) * preamble_longs + sizeof(uint64_t) * num_keys_;
   vector_u8<A> bytes(size);
   uint8_t* ptr = bytes.data() + header_size_bytes;
@@ -1053,14 +1049,14 @@ update_theta_sketch_alloc<A> update_theta_sketch_alloc<A>::builder::build() cons
 template<typename A>
 theta_sketch_alloc<A>::const_iterator::const_iterator(const uint64_t* keys, uint32_t size, uint32_t index):
 keys_(keys), size_(size), index_(index) {
-  while (index_ < size_ and keys_[index_] == 0) ++index_;
+  while (index_ < size_ && keys_[index_] == 0) ++index_;
 }
 
 template<typename A>
 typename theta_sketch_alloc<A>::const_iterator& theta_sketch_alloc<A>::const_iterator::operator++() {
   do {
     ++index_;
-  } while (index_ < size_ and keys_[index_] == 0);
+  } while (index_ < size_ && keys_[index_] == 0);
   return *this;
 }
 
diff --git a/theta/include/theta_union_impl.hpp b/theta/include/theta_union_impl.hpp
index fcd1847..59c0266 100644
--- a/theta/include/theta_union_impl.hpp
+++ b/theta/include/theta_union_impl.hpp
@@ -53,7 +53,7 @@ template<typename A>
 compact_theta_sketch_alloc<A> theta_union_alloc<A>::get_result(bool ordered) const {
   if (is_empty_) return state_.compact(ordered);
   const uint32_t nom_num_keys = 1 << state_.lg_nom_size_;
-  if (theta_ >= state_.theta_ and state_.get_num_retained() <= nom_num_keys) return state_.compact(ordered);
+  if (theta_ >= state_.theta_ && state_.get_num_retained() <= nom_num_keys) return state_.compact(ordered);
   uint64_t theta = std::min(theta_, state_.get_theta64());
   typedef typename std::allocator_traits<A>::template rebind_alloc<uint64_t> AllocU64;
   uint64_t* keys = AllocU64().allocate(state_.get_num_retained());


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@datasketches.apache.org
For additional commands, e-mail: commits-help@datasketches.apache.org