Posted to commits@datasketches.apache.org by al...@apache.org on 2021/03/29 22:38:36 UTC

[datasketches-cpp] branch cleanup_warnings updated: better types to avoid warnings

This is an automated email from the ASF dual-hosted git repository.

alsay pushed a commit to branch cleanup_warnings
in repository https://gitbox.apache.org/repos/asf/datasketches-cpp.git


The following commit(s) were added to refs/heads/cleanup_warnings by this push:
     new 00a6c91  better types to avoid warnings
00a6c91 is described below

commit 00a6c9110482d80b6768f84bcacb3ca0d80b48d5
Author: Alexander Saydakov <al...@apache.org>
AuthorDate: Mon Mar 29 14:04:31 2021 -0700

    better types to avoid warnings
---
 common/include/MurmurHash3.h        |  12 +--
 common/include/serde.hpp            |   2 +-
 cpc/include/cpc_compressor.hpp      |  30 ++++----
 cpc/include/cpc_compressor_impl.hpp | 146 ++++++++++++++++++------------------
 cpc/include/cpc_sketch.hpp          |   2 +-
 cpc/include/cpc_sketch_impl.hpp     |  20 ++---
 cpc/include/cpc_union_impl.hpp      |  26 +++----
 cpc/include/cpc_util.hpp            |  14 ++--
 cpc/include/icon_estimator.hpp      |   6 +-
 cpc/include/u32_table.hpp           |  10 +--
 cpc/include/u32_table_impl.hpp      |   6 +-
 cpc/test/compression_test.cpp       |  20 ++---
 cpc/test/cpc_union_test.cpp         |   2 +-
 13 files changed, 149 insertions(+), 147 deletions(-)
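
 Note (illustrative, not part of the commit): the pattern throughout this diff is replacing int counters and lengths
 with unsigned types of the appropriate width where they are compared with, or assigned from, unsigned values, which
 is what typically triggers -Wsign-compare / -Wconversion under -Wall -Wextra. A minimal sketch of that before/after
 pattern is below, assuming those are the diagnostics the cleanup_warnings branch targets; sum_before and sum_after
 are hypothetical names, not functions in the repository.

 #include <cstddef>
 #include <cstdint>

 // Hypothetical example: a signed counter compared against an unsigned length.
 uint64_t sum_before(const uint64_t* p, size_t len) {
   uint64_t s = 0;
   for (int i = 0; i < len; ++i) { // warning: comparison of integer expressions of different signedness (-Wsign-compare)
     s += p[i];
   }
   return s;
 }

 // Matching the counter type to the length type keeps the comparison within one
 // unsigned type and silences the warning without changing behavior.
 uint64_t sum_after(const uint64_t* p, size_t len) {
   uint64_t s = 0;
   for (size_t i = 0; i < len; ++i) {
     s += p[i];
   }
   return s;
 }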

diff --git a/common/include/MurmurHash3.h b/common/include/MurmurHash3.h
index c1cbeab..2ca72a6 100644
--- a/common/include/MurmurHash3.h
+++ b/common/include/MurmurHash3.h
@@ -76,7 +76,7 @@ typedef struct {
 // Block read - if your platform needs to do endian-swapping or can only
 // handle aligned reads, do the conversion here
 
-FORCE_INLINE uint64_t getblock64 ( const uint64_t * p, int i )
+FORCE_INLINE uint64_t getblock64 ( const uint64_t * p, size_t i )
 {
   return p[i];
 }
@@ -95,7 +95,7 @@ FORCE_INLINE uint64_t fmix64 ( uint64_t k )
   return k;
 }
 
-FORCE_INLINE void MurmurHash3_x64_128(const void* key, int lenBytes, uint64_t seed, HashState& out) {
+FORCE_INLINE void MurmurHash3_x64_128(const void* key, size_t lenBytes, uint64_t seed, HashState& out) {
   static const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
   static const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);
 
@@ -106,13 +106,13 @@ FORCE_INLINE void MurmurHash3_x64_128(const void* key, int lenBytes, uint64_t se
 
   // Number of full 128-bit blocks of 16 bytes.
   // Possible exclusion of a remainder of up to 15 bytes.
-  const int nblocks = lenBytes >> 4; // bytes / 16 
+  const size_t nblocks = lenBytes >> 4; // bytes / 16 
 
   // Process the 128-bit blocks (the body) into the hash
   const uint64_t* blocks = (const uint64_t*)(data);
-  for (int i = 0; i < nblocks; ++i) { // 16 bytes per block
-    uint64_t k1 = getblock64(blocks,i*2+0);
-    uint64_t k2 = getblock64(blocks,i*2+1);
+  for (size_t i = 0; i < nblocks; ++i) { // 16 bytes per block
+    uint64_t k1 = getblock64(blocks, i * 2 + 0);
+    uint64_t k2 = getblock64(blocks, i * 2 + 1);
 
     k1 *= c1; k1  = ROTL64(k1,31); k1 *= c2; out.h1 ^= k1;
     out.h1 = ROTL64(out.h1,27);
diff --git a/common/include/serde.hpp b/common/include/serde.hpp
index 05a8a4e..9b3349b 100644
--- a/common/include/serde.hpp
+++ b/common/include/serde.hpp
@@ -121,7 +121,7 @@ struct serde<std::string> {
         std::string str;
         str.reserve(length);
         for (uint32_t j = 0; j < length; j++) {
-          str.push_back(is.get());
+          str.push_back(static_cast<char>(is.get()));
         }
         if (!is.good()) { break; }
         new (&items[i]) std::string(std::move(str));
diff --git a/cpc/include/cpc_compressor.hpp b/cpc/include/cpc_compressor.hpp
index de87457..a8f426f 100644
--- a/cpc/include/cpc_compressor.hpp
+++ b/cpc/include/cpc_compressor.hpp
@@ -48,44 +48,44 @@ template<typename A>
 class cpc_compressor {
 public:
   void compress(const cpc_sketch_alloc<A>& source, compressed_state<A>& target) const;
-  void uncompress(const compressed_state<A>& source, uncompressed_state<A>& target, uint8_t lg_k, uint64_t num_coupons) const;
+  void uncompress(const compressed_state<A>& source, uncompressed_state<A>& target, uint8_t lg_k, uint32_t num_coupons) const;
 
   // methods below are public for testing
 
   // This returns the number of compressed words that were actually used. It is the caller's
   // responsibility to ensure that the compressed_words array is long enough to prevent over-run.
-  size_t low_level_compress_bytes(
+  uint32_t low_level_compress_bytes(
       const uint8_t* byte_array, // input
-      size_t num_bytes_to_encode,
+      uint32_t num_bytes_to_encode,
       const uint16_t* encoding_table,
       uint32_t* compressed_words  // output
   ) const;
 
   void low_level_uncompress_bytes(
       uint8_t* byte_array, // output
-      size_t num_bytes_to_decode,
+      uint32_t num_bytes_to_decode,
       const uint16_t* decoding_table,
       const uint32_t* compressed_words,
-      size_t num_compressed_words // input
+      uint32_t num_compressed_words // input
   ) const;
 
   // Here "pairs" refers to row-column pairs that specify
   // the positions of surprising values in the bit matrix.
 
   // returns the number of compressedWords actually used
-  size_t low_level_compress_pairs(
+  uint32_t low_level_compress_pairs(
       const uint32_t* pair_array, // input
-      size_t num_pairs_to_encode,
+      uint32_t num_pairs_to_encode,
       uint8_t num_base_bits,
       uint32_t* compressed_words // output
   ) const;
 
   void low_level_uncompress_pairs(
       uint32_t* pair_array, // output
-      size_t num_pairs_to_decode,
+      uint32_t num_pairs_to_decode,
       uint8_t num_base_bits,
       const uint32_t* compressed_words, // input
-      size_t num_compressed_words // input
+      uint32_t num_compressed_words // input
   ) const;
 
 private:
@@ -122,19 +122,19 @@ private:
   void uncompress_pinned_flavor(const compressed_state<A>& source, uncompressed_state<A>& target, uint8_t lg_k, uint32_t num_coupons) const;
   void uncompress_sliding_flavor(const compressed_state<A>& source, uncompressed_state<A>& target, uint8_t lg_k, uint32_t num_coupons) const;
 
-  uint8_t* make_inverse_permutation(const uint8_t* permu, int length);
-  uint16_t* make_decoding_table(const uint16_t* encoding_table, int num_byte_values);
+  uint8_t* make_inverse_permutation(const uint8_t* permu, unsigned length);
+  uint16_t* make_decoding_table(const uint16_t* encoding_table, unsigned num_byte_values);
   void validate_decoding_table(const uint16_t* decoding_table, const uint16_t* encoding_table) const;
 
   void compress_surprising_values(const vector_u32<A>& pairs, uint8_t lg_k, compressed_state<A>& result) const;
   void compress_sliding_window(const uint8_t* window, uint8_t lg_k, uint32_t num_coupons, compressed_state<A>& target) const;
 
-  vector_u32<A> uncompress_surprising_values(const uint32_t* data, size_t data_words, size_t num_pairs, uint8_t lg_k, const A& allocator) const;
-  void uncompress_sliding_window(const uint32_t* data, size_t data_words, vector_u8<A>& window, uint8_t lg_k, uint32_t num_coupons) const;
+  vector_u32<A> uncompress_surprising_values(const uint32_t* data, uint32_t data_words, uint32_t num_pairs, uint8_t lg_k, const A& allocator) const;
+  void uncompress_sliding_window(const uint32_t* data, uint32_t data_words, vector_u8<A>& window, uint8_t lg_k, uint32_t num_coupons) const;
 
-  static size_t safe_length_for_compressed_pair_buf(uint32_t k, size_t num_pairs, uint8_t num_base_bits);
+  static size_t safe_length_for_compressed_pair_buf(uint32_t k, uint32_t num_pairs, uint8_t num_base_bits);
   static size_t safe_length_for_compressed_window_buf(uint32_t k);
-  static uint8_t determine_pseudo_phase(uint8_t lg_k, uint64_t c);
+  static uint8_t determine_pseudo_phase(uint8_t lg_k, uint32_t c);
 
   static inline vector_u32<A> tricky_get_pairs_from_window(const uint8_t* window, uint32_t k, uint32_t num_pairs_to_get, uint32_t empty_space, const A& allocator);
   static inline uint8_t golomb_choose_number_of_base_bits(uint32_t k, uint64_t count);
diff --git a/cpc/include/cpc_compressor_impl.hpp b/cpc/include/cpc_compressor_impl.hpp
index bb73a71..f163db8 100644
--- a/cpc/include/cpc_compressor_impl.hpp
+++ b/cpc/include/cpc_compressor_impl.hpp
@@ -49,12 +49,12 @@ cpc_compressor<A>::~cpc_compressor() {
 }
 
 template<typename A>
-uint8_t* cpc_compressor<A>::make_inverse_permutation(const uint8_t* permu, int length) {
+uint8_t* cpc_compressor<A>::make_inverse_permutation(const uint8_t* permu, unsigned length) {
   uint8_t* inverse = new uint8_t[length]; // use new for global initialization
-  for (int i = 0; i < length; i++) {
+  for (unsigned i = 0; i < length; i++) {
     inverse[permu[i]] = static_cast<uint8_t>(i);
   }
-  for (int i = 0; i < length; i++) {
+  for (unsigned i = 0; i < length; i++) {
     if (permu[inverse[i]] != i) throw std::logic_error("inverse permutation error");
   }
   return inverse;
@@ -64,17 +64,17 @@ uint8_t* cpc_compressor<A>::make_inverse_permutation(const uint8_t* permu, int l
    of length at most 12, this builds a size-4096 decoding table */
 // The second argument is typically 256, but can be other values such as 65.
 template<typename A>
-uint16_t* cpc_compressor<A>::make_decoding_table(const uint16_t* encoding_table, int num_byte_values) {
+uint16_t* cpc_compressor<A>::make_decoding_table(const uint16_t* encoding_table, unsigned num_byte_values) {
   uint16_t* decoding_table = new uint16_t[4096]; // use new for global initialization
-  for (int byte_value = 0; byte_value < num_byte_values; byte_value++) {
-    const int encoding_entry = encoding_table[byte_value];
-    const int code_value = encoding_entry & 0xfff;
-    const int code_length = encoding_entry >> 12;
-    const uint16_t decoding_entry = (code_length << 8) | byte_value;
-    const int garbage_length = 12 - code_length;
-    const int num_copies = 1 << garbage_length;
-    for (int garbage_bits = 0; garbage_bits < num_copies; garbage_bits++) {
-      const int extended_code_value = code_value | (garbage_bits << code_length);
+  for (unsigned byte_value = 0; byte_value < num_byte_values; byte_value++) {
+    const uint16_t encoding_entry = encoding_table[byte_value];
+    const uint16_t code_value = encoding_entry & 0xfff;
+    const uint8_t code_length = encoding_entry >> 12;
+    const uint16_t decoding_entry = static_cast<uint16_t>((code_length << 8) | byte_value);
+    const uint8_t garbage_length = 12 - code_length;
+    const unsigned num_copies = 1 << garbage_length;
+    for (unsigned garbage_bits = 0; garbage_bits < num_copies; garbage_bits++) {
+      const uint16_t extended_code_value = static_cast<uint16_t>(code_value | (garbage_bits << code_length));
       decoding_table[extended_code_value & 0xfff] = decoding_entry;
     }
   }
@@ -157,7 +157,7 @@ void cpc_compressor<A>::compress(const cpc_sketch_alloc<A>& source, compressed_s
 }
 
 template<typename A>
-void cpc_compressor<A>::uncompress(const compressed_state<A>& source, uncompressed_state<A>& target, uint8_t lg_k, uint64_t num_coupons) const {
+void cpc_compressor<A>::uncompress(const compressed_state<A>& source, uncompressed_state<A>& target, uint8_t lg_k, uint32_t num_coupons) const {
   switch (cpc_sketch_alloc<A>::determine_flavor(lg_k, num_coupons)) {
     case cpc_sketch_alloc<A>::flavor::EMPTY:
       target.table = u32_table<A>(2, 6 + lg_k, source.table_data.get_allocator());
@@ -204,14 +204,15 @@ void cpc_compressor<A>::compress_hybrid_flavor(const cpc_sketch_alloc<A>& source
   if (source.window_offset != 0) throw std::logic_error("window_offset != 0");
   const uint32_t k = 1 << source.get_lg_k();
   vector_u32<A> pairs_from_table = source.surprising_value_table.unwrapping_get_items();
-  if (pairs_from_table.size() > 0) u32_table<A>::introspective_insertion_sort(pairs_from_table.data(), 0, pairs_from_table.size());
-  const size_t num_pairs_from_window = source.get_num_coupons() - pairs_from_table.size(); // because the window offset is zero
+  const uint32_t num_pairs_from_table = static_cast<uint32_t>(pairs_from_table.size());
+  if (num_pairs_from_table > 0) u32_table<A>::introspective_insertion_sort(pairs_from_table.data(), 0, num_pairs_from_table);
+  const uint32_t num_pairs_from_window = source.get_num_coupons() - num_pairs_from_table; // because the window offset is zero
 
-  vector_u32<A> all_pairs = tricky_get_pairs_from_window(source.sliding_window.data(), k, num_pairs_from_window, pairs_from_table.size(), source.get_allocator());
+  vector_u32<A> all_pairs = tricky_get_pairs_from_window(source.sliding_window.data(), k, num_pairs_from_window, num_pairs_from_table, source.get_allocator());
 
   u32_table<A>::merge(
       pairs_from_table.data(), 0, pairs_from_table.size(),
-      all_pairs.data(), pairs_from_table.size(), num_pairs_from_window,
+      all_pairs.data(), num_pairs_from_table, num_pairs_from_window,
       all_pairs.data(), 0
   );  // note the overlapping subarray trick
 
@@ -230,13 +231,13 @@ void cpc_compressor<A>::uncompress_hybrid_flavor(const compressed_state<A>& sour
   // moving the "true" pairs to the bottom of the array.
   const uint32_t k = 1 << lg_k;
   target.window.resize(k, 0); // important: zero the memory
-  size_t next_true_pair = 0;
-  for (size_t i = 0; i < source.table_num_entries; i++) {
+  uint32_t next_true_pair = 0;
+  for (uint32_t i = 0; i < source.table_num_entries; i++) {
     const uint32_t row_col = pairs[i];
     if (row_col == UINT32_MAX) throw std::logic_error("empty marker is not expected");
     const uint8_t col = row_col & 63;
     if (col < 8) {
-      const size_t row = row_col >> 6;
+      const uint32_t row = row_col >> 6;
       target.window[row] |= 1 << col; // set the window bit
     } else {
       pairs[next_true_pair++] = row_col; // move true pair down
@@ -270,7 +271,7 @@ void cpc_compressor<A>::uncompress_pinned_flavor(const compressed_state<A>& sour
     uint8_t lg_k, uint32_t num_coupons) const {
   if (source.window_data.size() == 0) throw std::logic_error("window is expected");
   uncompress_sliding_window(source.window_data.data(), source.window_data_words, target.window, lg_k, num_coupons);
-  const size_t num_pairs = source.table_num_entries;
+  const uint32_t num_pairs = source.table_num_entries;
   if (num_pairs == 0) {
     target.table = u32_table<A>(2, 6 + lg_k, source.table_data.get_allocator());
   } else {
@@ -278,7 +279,7 @@ void cpc_compressor<A>::uncompress_pinned_flavor(const compressed_state<A>& sour
     vector_u32<A> pairs = uncompress_surprising_values(source.table_data.data(), source.table_data_words, num_pairs,
         lg_k, source.table_data.get_allocator());
     // undo the compressor's 8-column shift
-    for (size_t i = 0; i < num_pairs; i++) {
+    for (uint32_t i = 0; i < num_pairs; i++) {
       if ((pairs[i] & 63) >= 56) throw std::logic_error("(pairs[i] & 63) >= 56");
       pairs[i] += 8;
     }
@@ -302,7 +303,7 @@ void cpc_compressor<A>::compress_sliding_flavor(const cpc_sketch_alloc<A>& sourc
 
     for (size_t i = 0; i < pairs.size(); i++) {
       const uint32_t row_col = pairs[i];
-      const size_t row = row_col >> 6;
+      const uint32_t row = row_col >> 6;
       uint8_t col = row_col & 63;
       // first rotate the columns into a canonical configuration: new = ((old - (offset+8)) + 64) mod 64
       col = (col + 56 - offset) & 63;
@@ -322,7 +323,7 @@ void cpc_compressor<A>::uncompress_sliding_flavor(const compressed_state<A>& sou
     uint8_t lg_k, uint32_t num_coupons) const {
   if (source.window_data.size() == 0) throw std::logic_error("window is expected");
   uncompress_sliding_window(source.window_data.data(), source.window_data_words, target.window, lg_k, num_coupons);
-  const size_t num_pairs = source.table_num_entries;
+  const uint32_t num_pairs = source.table_num_entries;
   if (num_pairs == 0) {
     target.table = u32_table<A>(2, 6 + lg_k, source.table_data.get_allocator());
   } else {
@@ -337,9 +338,9 @@ void cpc_compressor<A>::uncompress_sliding_flavor(const compressed_state<A>& sou
     uint8_t offset = cpc_sketch_alloc<A>::determine_correct_offset(lg_k, num_coupons);
     if (offset > 56) throw std::out_of_range("offset out of range");
 
-    for (size_t i = 0; i < num_pairs; i++) {
+    for (uint32_t i = 0; i < num_pairs; i++) {
       const uint32_t row_col = pairs[i];
-      const size_t row = row_col >> 6;
+      const uint32_t row = row_col >> 6;
       uint8_t col = row_col & 63;
       // first undo the permutation
       col = permutation[col];
@@ -355,22 +356,23 @@ void cpc_compressor<A>::uncompress_sliding_flavor(const compressed_state<A>& sou
 template<typename A>
 void cpc_compressor<A>::compress_surprising_values(const vector_u32<A>& pairs, uint8_t lg_k, compressed_state<A>& result) const {
   const uint32_t k = 1 << lg_k;
-  const uint8_t num_base_bits = golomb_choose_number_of_base_bits(k + pairs.size(), pairs.size());
-  const uint64_t table_len = safe_length_for_compressed_pair_buf(k, pairs.size(), num_base_bits);
+  const uint32_t num_pairs = static_cast<uint32_t>(pairs.size());
+  const uint8_t num_base_bits = golomb_choose_number_of_base_bits(k + num_pairs, num_pairs);
+  const uint64_t table_len = safe_length_for_compressed_pair_buf(k, num_pairs, num_base_bits);
   result.table_data.resize(table_len);
 
-  size_t csv_length = low_level_compress_pairs(pairs.data(), pairs.size(), num_base_bits, result.table_data.data());
+  uint32_t csv_length = low_level_compress_pairs(pairs.data(), static_cast<uint32_t>(pairs.size()), num_base_bits, result.table_data.data());
 
   // At this point we could free the unused portion of the compression output buffer,
   // but it is not necessary if it is temporary
   // Note: realloc caused strange timing spikes for lgK = 11 and 12.
 
   result.table_data_words = csv_length;
-  result.table_num_entries = pairs.size();
+  result.table_num_entries = num_pairs;
 }
 
 template<typename A>
-vector_u32<A> cpc_compressor<A>::uncompress_surprising_values(const uint32_t* data, size_t data_words, size_t num_pairs,
+vector_u32<A> cpc_compressor<A>::uncompress_surprising_values(const uint32_t* data, uint32_t data_words, uint32_t num_pairs,
     uint8_t lg_k, const A& allocator) const {
   const uint32_t k = 1 << lg_k;
   vector_u32<A> pairs(num_pairs, 0, allocator);
@@ -391,11 +393,11 @@ void cpc_compressor<A>::compress_sliding_window(const uint8_t* window, uint8_t l
   // but it is not necessary if it is temporary
   // Note: realloc caused strange timing spikes for lgK = 11 and 12.
 
-  target.window_data_words = data_words;
+  target.window_data_words = static_cast<uint32_t>(data_words);
 }
 
 template<typename A>
-void cpc_compressor<A>::uncompress_sliding_window(const uint32_t* data, size_t data_words, vector_u8<A>& window,
+void cpc_compressor<A>::uncompress_sliding_window(const uint32_t* data, uint32_t data_words, vector_u8<A>& window,
     uint8_t lg_k, uint32_t num_coupons) const {
   const uint32_t k = 1 << lg_k;
   window.resize(k); // zeroing not needed here (unlike the Hybrid Flavor)
@@ -404,7 +406,7 @@ void cpc_compressor<A>::uncompress_sliding_window(const uint32_t* data, size_t d
 }
 
 template<typename A>
-size_t cpc_compressor<A>::safe_length_for_compressed_pair_buf(uint32_t k, size_t num_pairs, uint8_t num_base_bits) {
+size_t cpc_compressor<A>::safe_length_for_compressed_pair_buf(uint32_t k, uint32_t num_pairs, uint8_t num_base_bits) {
   // Long ybits = k + numPairs; // simpler and safer UB
   // The following tighter UB on ybits is based on page 198
   // of the textbook "Managing Gigabytes" by Witten, Moffat, and Bell.
@@ -428,7 +430,7 @@ size_t cpc_compressor<A>::safe_length_for_compressed_window_buf(uint32_t k) { //
 }
 
 template<typename A>
-uint8_t cpc_compressor<A>::determine_pseudo_phase(uint8_t lg_k, uint64_t c) {
+uint8_t cpc_compressor<A>::determine_pseudo_phase(uint8_t lg_k, uint32_t c) {
   const uint32_t k = 1 << lg_k;
   // This mid-range logic produces pseudo-phases. They are used to select encoding tables.
   // The thresholds were chosen by hand after looking at plots of measured compression.
@@ -450,7 +452,7 @@ uint8_t cpc_compressor<A>::determine_pseudo_phase(uint8_t lg_k, uint64_t c) {
   }
 }
 
-static inline void maybe_flush_bitbuf(uint64_t& bitbuf, uint8_t& bufbits, uint32_t* wordarr, size_t& wordindex) {
+static inline void maybe_flush_bitbuf(uint64_t& bitbuf, uint8_t& bufbits, uint32_t* wordarr, uint32_t& wordindex) {
   if (bufbits >= 32) {
     wordarr[wordindex++] = bitbuf & 0xffffffff;
     bitbuf = bitbuf >> 32;
@@ -458,7 +460,7 @@ static inline void maybe_flush_bitbuf(uint64_t& bitbuf, uint8_t& bufbits, uint32
   }
 }
 
-static inline void maybe_fill_bitbuf(uint64_t& bitbuf, uint8_t& bufbits, const uint32_t* wordarr, size_t& wordindex, uint8_t minbits) {
+static inline void maybe_fill_bitbuf(uint64_t& bitbuf, uint8_t& bufbits, const uint32_t* wordarr, uint32_t& wordindex, uint8_t minbits) {
   if (bufbits < minbits) {
     bitbuf |= static_cast<uint64_t>(wordarr[wordindex++]) << bufbits;
     bufbits += 32;
@@ -468,20 +470,20 @@ static inline void maybe_fill_bitbuf(uint64_t& bitbuf, uint8_t& bufbits, const u
 // This returns the number of compressed words that were actually used.
 // It is the caller's responsibility to ensure that the compressed_words array is long enough.
 template<typename A>
-size_t cpc_compressor<A>::low_level_compress_bytes(
+uint32_t cpc_compressor<A>::low_level_compress_bytes(
     const uint8_t* byte_array, // input
-    size_t num_bytes_to_encode,
+    uint32_t num_bytes_to_encode,
     const uint16_t* encoding_table,
     uint32_t* compressed_words // output
 ) const {
   uint64_t bitbuf = 0; // bits are packed into this first, then are flushed to compressed_words
   uint8_t bufbits = 0; // number of bits currently in bitbuf; must be between 0 and 31
-  size_t next_word_index = 0;
+  uint32_t next_word_index = 0;
 
-  for (size_t byte_index = 0; byte_index < num_bytes_to_encode; byte_index++) {
-    const uint64_t code_info = encoding_table[byte_array[byte_index]];
+  for (uint32_t byte_index = 0; byte_index < num_bytes_to_encode; byte_index++) {
+    const uint16_t code_info = encoding_table[byte_array[byte_index]];
     const uint64_t code_val = code_info & 0xfff;
-    const int code_len = code_info >> 12;
+    const uint8_t code_len = code_info >> 12;
     bitbuf |= (code_val << bufbits);
     bufbits += code_len;
     maybe_flush_bitbuf(bitbuf, bufbits, compressed_words, next_word_index);
@@ -502,12 +504,12 @@ size_t cpc_compressor<A>::low_level_compress_bytes(
 template<typename A>
 void cpc_compressor<A>::low_level_uncompress_bytes(
     uint8_t* byte_array, // output
-    size_t num_bytes_to_decode,
+    uint32_t num_bytes_to_decode,
     const uint16_t* decoding_table,
     const uint32_t* compressed_words, // input
-    size_t num_compressed_words
+    uint32_t num_compressed_words
 ) const {
-  size_t word_index = 0;
+  uint32_t word_index = 0;
   uint64_t bitbuf = 0;
   uint8_t bufbits = 0;
 
@@ -515,7 +517,7 @@ void cpc_compressor<A>::low_level_uncompress_bytes(
   if (decoding_table == nullptr) throw std::logic_error("decoding_table == NULL");
   if (compressed_words == nullptr) throw std::logic_error("compressed_words == NULL");
 
-  for (size_t byte_index = 0; byte_index < num_bytes_to_decode; byte_index++) {
+  for (uint32_t byte_index = 0; byte_index < num_bytes_to_decode; byte_index++) {
     maybe_fill_bitbuf(bitbuf, bufbits, compressed_words, word_index, 12); // ensure 12 bits in bit buffer
 
     const size_t peek12 = bitbuf & 0xfff; // These 12 bits will include an entire Huffman codeword.
@@ -533,14 +535,14 @@ void cpc_compressor<A>::low_level_uncompress_bytes(
 
 static inline uint64_t read_unary(
     const uint32_t* compressed_words,
-    size_t& next_word_index,
+    uint32_t& next_word_index,
     uint64_t& bitbuf,
     uint8_t& bufbits
 );
 
 static inline void write_unary(
     uint32_t* compressed_words,
-    size_t& next_word_index_ptr,
+    uint32_t& next_word_index_ptr,
     uint64_t& bit_buf_ptr,
     uint8_t& buf_bits_ptr,
     uint64_t value
@@ -551,36 +553,36 @@ static inline void write_unary(
 
 // returns the number of compressed_words actually used
 template<typename A>
-size_t cpc_compressor<A>::low_level_compress_pairs(
+uint32_t cpc_compressor<A>::low_level_compress_pairs(
     const uint32_t* pair_array,  // input
-    size_t num_pairs_to_encode,
+    uint32_t num_pairs_to_encode,
     uint8_t num_base_bits,
     uint32_t* compressed_words // output
 ) const {
   uint64_t bitbuf = 0;
   uint8_t bufbits = 0;
-  size_t next_word_index = 0;
+  uint32_t next_word_index = 0;
   const uint64_t golomb_lo_mask = (1 << num_base_bits) - 1;
-  uint64_t predicted_row_index = 0;
-  uint16_t predicted_col_index = 0;
+  uint32_t predicted_row_index = 0;
+  uint8_t predicted_col_index = 0;
 
-  for (size_t pair_index = 0; pair_index < num_pairs_to_encode; pair_index++) {
+  for (uint32_t pair_index = 0; pair_index < num_pairs_to_encode; pair_index++) {
     const uint32_t row_col = pair_array[pair_index];
-    const uint64_t row_index = row_col >> 6;
-    const uint16_t col_index = row_col & 63;
+    const uint32_t row_index = row_col >> 6;
+    const uint8_t col_index = row_col & 63;
 
     if (row_index != predicted_row_index) predicted_col_index = 0;
 
     if (row_index < predicted_row_index) throw std::logic_error("row_index < predicted_row_index");
     if (col_index < predicted_col_index) throw std::logic_error("col_index < predicted_col_index");
 
-    const uint64_t y_delta = row_index - predicted_row_index;
-    const uint16_t x_delta = col_index - predicted_col_index;
+    const uint32_t y_delta = row_index - predicted_row_index;
+    const uint8_t x_delta = col_index - predicted_col_index;
 
     predicted_row_index = row_index;
     predicted_col_index = col_index + 1;
 
-    const uint64_t code_info = length_limited_unary_encoding_table65[x_delta];
+    const uint16_t code_info = length_limited_unary_encoding_table65[x_delta];
     const uint64_t code_val = code_info & 0xfff;
     const uint8_t code_len = static_cast<uint8_t>(code_info >> 12);
     bitbuf |= code_val << bufbits;
@@ -614,29 +616,29 @@ size_t cpc_compressor<A>::low_level_compress_pairs(
 template<typename A>
 void cpc_compressor<A>::low_level_uncompress_pairs(
     uint32_t* pair_array, // output
-    size_t num_pairs_to_decode,
+    uint32_t num_pairs_to_decode,
     uint8_t num_base_bits,
     const uint32_t* compressed_words, // input
-    size_t num_compressed_words
+    uint32_t num_compressed_words
 ) const {
-  size_t word_index = 0;
+  uint32_t word_index = 0;
   uint64_t bitbuf = 0;
   uint8_t bufbits = 0;
   const uint64_t golomb_lo_mask = (1 << num_base_bits) - 1;
-  uint64_t predicted_row_index = 0;
-  uint16_t predicted_col_index = 0;
+  uint32_t predicted_row_index = 0;
+  uint8_t predicted_col_index = 0;
 
   // for each pair we need to read:
   // x_delta (12-bit length-limited unary)
   // y_delta_hi (unary)
   // y_delta_lo (basebits)
 
-  for (size_t pair_index = 0; pair_index < num_pairs_to_decode; pair_index++) {
+  for (uint32_t pair_index = 0; pair_index < num_pairs_to_decode; pair_index++) {
     maybe_fill_bitbuf(bitbuf, bufbits, compressed_words, word_index, 12); // ensure 12 bits in bit buffer
     const size_t peek12 = bitbuf & 0xfff;
     const uint16_t lookup = length_limited_unary_decoding_table65[peek12];
     const uint8_t code_word_length = lookup >> 8;
-    const int16_t x_delta = lookup & 0xff;
+    const int8_t x_delta = lookup & 0xff;
     bitbuf >>= code_word_length;
     bufbits -= code_word_length;
 
@@ -650,8 +652,8 @@ void cpc_compressor<A>::low_level_uncompress_pairs(
 
     // Now that we have x_delta and y_delta, we can compute the pair's row and column
     if (y_delta > 0) predicted_col_index = 0;
-    const uint64_t row_index = predicted_row_index + y_delta;
-    const uint16_t col_index = predicted_col_index + x_delta;
+    const uint32_t row_index = static_cast<uint32_t>(predicted_row_index + y_delta);
+    const uint8_t col_index = predicted_col_index + x_delta;
     const uint32_t row_col = (row_index << 6) | col_index;
     pair_array[pair_index] = row_col;
     predicted_row_index = row_index;
@@ -662,7 +664,7 @@ void cpc_compressor<A>::low_level_uncompress_pairs(
 
 uint64_t read_unary(
     const uint32_t* compressed_words,
-    size_t& next_word_index,
+    uint32_t& next_word_index,
     uint64_t& bitbuf,
     uint8_t& bufbits
 ) {
@@ -689,7 +691,7 @@ uint64_t read_unary(
 
 void write_unary(
     uint32_t* compressed_words,
-    size_t& next_word_index,
+    uint32_t& next_word_index,
     uint64_t& bitbuf,
     uint8_t& bufbits,
     uint64_t value
diff --git a/cpc/include/cpc_sketch.hpp b/cpc/include/cpc_sketch.hpp
index a4bf8f6..651c254 100644
--- a/cpc/include/cpc_sketch.hpp
+++ b/cpc/include/cpc_sketch.hpp
@@ -192,7 +192,7 @@ public:
    * @param data pointer to the data
    * @param length of the data in bytes
    */
-  void update(const void* value, int size);
+  void update(const void* value, size_t size);
 
   /**
    * Returns a human-readable summary of this sketch
diff --git a/cpc/include/cpc_sketch_impl.hpp b/cpc/include/cpc_sketch_impl.hpp
index d653b9f..1bb1be1 100644
--- a/cpc/include/cpc_sketch_impl.hpp
+++ b/cpc/include/cpc_sketch_impl.hpp
@@ -188,7 +188,7 @@ static inline uint32_t row_col_from_two_hashes(uint64_t hash0, uint64_t hash1, u
 }
 
 template<typename A>
-void cpc_sketch_alloc<A>::update(const void* value, int size) {
+void cpc_sketch_alloc<A>::update(const void* value, size_t size) {
   HashState hashes;
   MurmurHash3_x64_128(value, size, seed, hashes);
   row_col_update(row_col_from_two_hashes(hashes.h1, hashes.h2, lg_k));
@@ -285,16 +285,16 @@ void cpc_sketch_alloc<A>::promote_sparse_to_windowed() {
   u32_table<A> new_table(2, 6 + lg_k, sliding_window.get_allocator());
 
   const uint32_t* old_slots = surprising_value_table.get_slots();
-  const size_t old_num_slots = 1 << surprising_value_table.get_lg_size();
+  const uint32_t old_num_slots = 1 << surprising_value_table.get_lg_size();
 
   if (window_offset != 0) throw std::logic_error("window_offset != 0");
 
-  for (size_t i = 0; i < old_num_slots; i++) {
+  for (uint32_t i = 0; i < old_num_slots; i++) {
     const uint32_t row_col = old_slots[i];
     if (row_col != UINT32_MAX) {
       const uint8_t col = row_col & 63;
       if (col < 8) {
-        const size_t row = row_col >> 6;
+        const uint32_t row = row_col >> 6;
         sliding_window[row] |= 1 << col;
       } else {
         // cannot use u32_table::must_insert(), because it doesn't provide for growth
@@ -314,7 +314,7 @@ void cpc_sketch_alloc<A>::move_window() {
   if (new_offset != determine_correct_offset(lg_k, num_coupons)) throw std::logic_error("new_offset is wrong");
 
   if (sliding_window.size() == 0) throw std::logic_error("no sliding window");
-  const uint64_t k = 1 << lg_k;
+  const uint32_t k = 1 << lg_k;
 
   // Construct the full-sized bit matrix that corresponds to the sketch
   vector_u64<A> bit_matrix = build_bit_matrix();
@@ -328,7 +328,7 @@ void cpc_sketch_alloc<A>::move_window() {
   const uint64_t mask_for_flipping_early_zone = (static_cast<uint64_t>(1) << new_offset) - 1;
   uint64_t all_surprises_ored = 0;
 
-  for (size_t i = 0; i < k; i++) {
+  for (uint32_t i = 0; i < k; i++) {
     uint64_t pattern = bit_matrix[i];
     sliding_window[i] = (pattern >> new_offset) & 0xff;
     pattern &= mask_for_clearing_window;
@@ -689,7 +689,7 @@ uint32_t cpc_sketch_alloc<A>::get_num_coupons() const {
 template<typename A>
 bool cpc_sketch_alloc<A>::validate() const {
   vector_u64<A> bit_matrix = build_bit_matrix();
-  const uint64_t num_bits_set = count_bits_set_in_matrix(bit_matrix.data(), 1 << lg_k);
+  const uint64_t num_bits_set = count_bits_set_in_matrix(bit_matrix.data(), 1ULL << lg_k);
   return num_bits_set == num_coupons;
 }
 
@@ -753,7 +753,7 @@ uint8_t cpc_sketch_alloc<A>::determine_correct_offset(uint8_t lg_k, uint64_t c)
   const uint32_t k = 1 << lg_k;
   const int64_t tmp = static_cast<int64_t>(c << 3) - static_cast<int64_t>(19 * k); // 8C - 19K
   if (tmp < 0) return 0;
-  return tmp >> (lg_k + 3); // tmp / 8K
+  return static_cast<uint8_t>(tmp >> (lg_k + 3)); // tmp / 8K
 }
 
 template<typename A>
@@ -775,12 +775,12 @@ vector_u64<A> cpc_sketch_alloc<A>::build_bit_matrix() const {
   }
 
   const uint32_t* slots = surprising_value_table.get_slots();
-  const size_t num_slots = 1 << surprising_value_table.get_lg_size();
+  const uint32_t num_slots = 1 << surprising_value_table.get_lg_size();
   for (size_t i = 0; i < num_slots; i++) {
     const uint32_t row_col = slots[i];
     if (row_col != UINT32_MAX) {
       const uint8_t col = row_col & 63;
-      const size_t row = row_col >> 6;
+      const uint32_t row = row_col >> 6;
       // Flip the specified matrix bit from its default value.
       // In the "early" zone the bit changes from 1 to 0.
       // In the "late" zone the bit changes from 0 to 1.
diff --git a/cpc/include/cpc_union_impl.hpp b/cpc/include/cpc_union_impl.hpp
index 3728bd0..e5a1e5d 100644
--- a/cpc/include/cpc_union_impl.hpp
+++ b/cpc/include/cpc_union_impl.hpp
@@ -192,7 +192,7 @@ cpc_sketch_alloc<A> cpc_union_alloc<A>::get_result_from_accumulator() const {
 template<typename A>
 cpc_sketch_alloc<A> cpc_union_alloc<A>::get_result_from_bit_matrix() const {
   const uint32_t k = 1 << lg_k;
-  const uint64_t num_coupons = count_bits_set_in_matrix(bit_matrix.data(), k);
+  const uint32_t num_coupons = count_bits_set_in_matrix(bit_matrix.data(), k);
 
   const auto flavor = cpc_sketch_alloc<A>::determine_flavor(lg_k, num_coupons);
   if (flavor != cpc_sketch_alloc<A>::flavor::HYBRID && flavor != cpc_sketch_alloc<A>::flavor::PINNED
@@ -215,7 +215,7 @@ cpc_sketch_alloc<A> cpc_union_alloc<A>::get_result_from_bit_matrix() const {
 
   // The snowplow effect was caused by processing the rows in order,
   // but we have fixed it by using a sufficiently large hash table.
-  for (unsigned i = 0; i < k; i++) {
+  for (uint32_t i = 0; i < k; i++) {
     uint64_t pattern = bit_matrix[i];
     sliding_window[i] = (pattern >> offset) & 0xff;
     pattern &= mask_for_clearing_window;
@@ -250,17 +250,17 @@ void cpc_union_alloc<A>::switch_to_bit_matrix() {
 template<typename A>
 void cpc_union_alloc<A>::walk_table_updating_sketch(const u32_table<A>& table) {
   const uint32_t* slots = table.get_slots();
-  const size_t num_slots = 1 << table.get_lg_size();
+  const uint32_t num_slots = 1 << table.get_lg_size();
   const uint64_t dst_mask = (((1 << accumulator->get_lg_k()) - 1) << 6) | 63; // downsamples when dst lgK < src LgK
 
   // Using a golden ratio stride fixes the snowplow effect.
   const double golden = 0.6180339887498949025;
-  size_t stride = static_cast<size_t>(golden * static_cast<double>(num_slots));
+  uint32_t stride = static_cast<uint32_t>(golden * static_cast<double>(num_slots));
   if (stride < 2) throw std::logic_error("stride < 2");
   if (stride == ((stride >> 1) << 1)) stride += 1; // force the stride to be odd
   if (stride < 3 || stride >= num_slots) throw std::out_of_range("stride out of range");
 
-  for (size_t i = 0, j = 0; i < num_slots; i++, j += stride) {
+  for (uint32_t i = 0, j = 0; i < num_slots; i++, j += stride) {
     j &= num_slots - 1;
     const uint32_t row_col = slots[j];
     if (row_col != UINT32_MAX) {
@@ -272,13 +272,13 @@ void cpc_union_alloc<A>::walk_table_updating_sketch(const u32_table<A>& table) {
 template<typename A>
 void cpc_union_alloc<A>::or_table_into_matrix(const u32_table<A>& table) {
   const uint32_t* slots = table.get_slots();
-  const size_t num_slots = 1 << table.get_lg_size();
+  const uint32_t num_slots = 1 << table.get_lg_size();
   const uint64_t dest_mask = (1 << lg_k) - 1;  // downsamples when dst lgK < sr LgK
-  for (size_t i = 0; i < num_slots; i++) {
+  for (uint32_t i = 0; i < num_slots; i++) {
     const uint32_t row_col = slots[i];
     if (row_col != UINT32_MAX) {
       const uint8_t col = row_col & 63;
-      const size_t row = row_col >> 6;
+      const uint32_t row = row_col >> 6;
       bit_matrix[row & dest_mask] |= static_cast<uint64_t>(1) << col; // set the bit
     }
   }
@@ -288,8 +288,8 @@ template<typename A>
 void cpc_union_alloc<A>::or_window_into_matrix(const vector_u8<A>& sliding_window, uint8_t offset, uint8_t src_lg_k) {
   if (lg_k > src_lg_k) throw std::logic_error("dst LgK > src LgK");
   const uint64_t dst_mask = (1 << lg_k) - 1; // downsamples when dst lgK < src LgK
-  const size_t src_k = 1 << src_lg_k;
-  for (size_t src_row = 0; src_row < src_k; src_row++) {
+  const uint32_t src_k = 1 << src_lg_k;
+  for (uint32_t src_row = 0; src_row < src_k; src_row++) {
     bit_matrix[src_row & dst_mask] |= static_cast<uint64_t>(sliding_window[src_row]) << offset;
   }
 }
@@ -298,8 +298,8 @@ template<typename A>
 void cpc_union_alloc<A>::or_matrix_into_matrix(const vector_u64<A>& src_matrix, uint8_t src_lg_k) {
   if (lg_k > src_lg_k) throw std::logic_error("dst LgK > src LgK");
   const uint64_t dst_mask = (1 << lg_k) - 1; // downsamples when dst lgK < src LgK
-  const size_t src_k = 1 << src_lg_k;
-  for (size_t src_row = 0; src_row < src_k; src_row++) {
+  const uint32_t src_k = 1 << src_lg_k;
+  for (uint32_t src_row = 0; src_row < src_k; src_row++) {
     bit_matrix[src_row & dst_mask] |= src_matrix[src_row];
   }
 }
@@ -313,7 +313,7 @@ void cpc_union_alloc<A>::reduce_k(uint8_t new_lg_k) {
     if (accumulator != nullptr) throw std::logic_error("accumulator is not null");
     vector_u64<A> old_matrix = std::move(bit_matrix);
     const uint8_t old_lg_k = lg_k;
-    const size_t new_k = 1 << new_lg_k;
+    const uint32_t new_k = 1 << new_lg_k;
     bit_matrix = vector_u64<A>(new_k, 0, old_matrix.get_allocator());
     lg_k = new_lg_k;
     or_matrix_into_matrix(old_matrix, old_lg_k);
diff --git a/cpc/include/cpc_util.hpp b/cpc/include/cpc_util.hpp
index 12cdb82..9bf8aa3 100644
--- a/cpc/include/cpc_util.hpp
+++ b/cpc/include/cpc_util.hpp
@@ -69,7 +69,7 @@ static inline uint64_t wegner_count_bits_set_in_matrix(const uint64_t* array, si
 // Note: this is an adaptation of the Java code,
 // which is apparently a variation of Figure 5-2 in "Hacker's Delight"
 // by Henry S. Warren.
-static inline uint64_t warren_bit_count(uint64_t i) {
+static inline uint32_t warren_bit_count(uint64_t i) {
   i = i - ((i >> 1) & 0x5555555555555555ULL);
   i = (i & 0x3333333333333333ULL) + ((i >> 2) & 0x3333333333333333ULL);
   i = (i + (i >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
@@ -79,9 +79,9 @@ static inline uint64_t warren_bit_count(uint64_t i) {
   return i & 0x7f;
 }
 
-static inline uint64_t warren_count_bits_set_in_matrix(const uint64_t* array, size_t length) {
-  uint64_t count = 0;
-  for (size_t i = 0; i < length; i++) {
+static inline uint32_t warren_count_bits_set_in_matrix(const uint64_t* array, uint32_t length) {
+  uint32_t count = 0;
+  for (uint32_t i = 0; i < length; i++) {
     count += warren_bit_count(array[i]);
   }
   return count;
@@ -91,13 +91,13 @@ static inline uint64_t warren_count_bits_set_in_matrix(const uint64_t* array, si
 
 #define CSA(h,l,a,b,c) {uint64_t u = a ^ b; uint64_t v = c; h = (a & b) | (u & v); l = u ^ v;}
 
-static inline uint64_t count_bits_set_in_matrix(const uint64_t* a, size_t length) {
+static inline uint32_t count_bits_set_in_matrix(const uint64_t* a, uint32_t length) {
   if ((length & 0x7) != 0) throw std::invalid_argument("the length of the array must be a multiple of 8");
-  uint64_t total = 0;
+  uint32_t total = 0;
   uint64_t ones, twos, twos_a, twos_b, fours, fours_a, fours_b, eights;
   fours = twos = ones = 0;
 
-  for (size_t i = 0; i <= length - 8; i = i + 8) {
+  for (uint32_t i = 0; i <= length - 8; i += 8) {
     CSA(twos_a, ones, ones, a[i+0], a[i+1]);
     CSA(twos_b, ones, ones, a[i+2], a[i+3]);
     CSA(fours_a, twos, twos, twos_a, twos_b);
diff --git a/cpc/include/icon_estimator.hpp b/cpc/include/icon_estimator.hpp
index 60b0099..fb3c0c6 100644
--- a/cpc/include/icon_estimator.hpp
+++ b/cpc/include/icon_estimator.hpp
@@ -245,12 +245,12 @@ static inline double icon_exponential_approximation(double k, double c) {
   return (0.7940236163830469 * k * pow(2.0, c / k));
 }
 
-static inline double compute_icon_estimate(uint8_t lg_k, uint64_t c) {
+static inline double compute_icon_estimate(uint8_t lg_k, uint32_t c) {
   if (lg_k < ICON_MIN_LOG_K || lg_k > ICON_MAX_LOG_K) throw std::out_of_range("lg_k out of range");
   if (c < 2) return ((c == 0) ? 0.0 : 1.0);
   const uint32_t k = 1 << lg_k;
-  const double double_k = k;
-  const double double_c = c;
+  const double double_k = static_cast<double>(k);
+  const double double_c = static_cast<double>(c);
   // Differing thresholds ensure that the approximated estimator is monotonically increasing.
   const double threshold_factor = ((lg_k < 14) ? 5.7 : 5.6);
   if (double_c > (threshold_factor * double_k)) return icon_exponential_approximation(double_k, double_c);
diff --git a/cpc/include/u32_table.hpp b/cpc/include/u32_table.hpp
index a000c39..a344a17 100644
--- a/cpc/include/u32_table.hpp
+++ b/cpc/include/u32_table.hpp
@@ -29,11 +29,11 @@
 
 namespace datasketches {
 
-static const uint64_t U32_TABLE_UPSIZE_NUMER = 3LL;
-static const uint64_t U32_TABLE_UPSIZE_DENOM = 4LL;
+static const uint32_t U32_TABLE_UPSIZE_NUMER = 3LL;
+static const uint32_t U32_TABLE_UPSIZE_DENOM = 4LL;
 
-static const uint64_t U32_TABLE_DOWNSIZE_NUMER = 1LL;
-static const uint64_t U32_TABLE_DOWNSIZE_DENOM = 4LL;
+static const uint32_t U32_TABLE_DOWNSIZE_NUMER = 1LL;
+static const uint32_t U32_TABLE_DOWNSIZE_DENOM = 4LL;
 
 template<typename A>
 class u32_table {
@@ -52,7 +52,7 @@ public:
   // returns true iff the item was present and was therefore removed from the table
   inline bool maybe_delete(uint32_t item);
 
-  static u32_table make_from_pairs(const uint32_t* pairs, size_t num_pairs, uint8_t lg_k, const A& allocator);
+  static u32_table make_from_pairs(const uint32_t* pairs, uint32_t num_pairs, uint8_t lg_k, const A& allocator);
 
   vector_u32<A> unwrapping_get_items() const;
 
diff --git a/cpc/include/u32_table_impl.hpp b/cpc/include/u32_table_impl.hpp
index c3c9501..a82e7de 100644
--- a/cpc/include/u32_table_impl.hpp
+++ b/cpc/include/u32_table_impl.hpp
@@ -41,7 +41,7 @@ u32_table<A>::u32_table(uint8_t lg_size, uint8_t num_valid_bits, const A& alloca
 lg_size(lg_size),
 num_valid_bits(num_valid_bits),
 num_items(0),
-slots(1 << lg_size, UINT32_MAX, allocator)
+slots(1ULL << lg_size, UINT32_MAX, allocator)
 {
   if (lg_size < 2) throw std::invalid_argument("lg_size must be >= 2");
   if (num_valid_bits < 1 || num_valid_bits > 32) throw std::invalid_argument("num_valid_bits must be between 1 and 32");
@@ -110,7 +110,7 @@ bool u32_table<A>::maybe_delete(uint32_t item) {
 
 // this one is specifically tailored to be a part of fm85 decompression scheme
 template<typename A>
-u32_table<A> u32_table<A>::make_from_pairs(const uint32_t* pairs, size_t num_pairs, uint8_t lg_k, const A& allocator) {
+u32_table<A> u32_table<A>::make_from_pairs(const uint32_t* pairs, uint32_t num_pairs, uint8_t lg_k, const A& allocator) {
   uint8_t lg_num_slots = 2;
   while (U32_TABLE_UPSIZE_DENOM * num_pairs > U32_TABLE_UPSIZE_NUMER * (1 << lg_num_slots)) lg_num_slots++;
   u32_table<A> table(lg_num_slots, 6 + lg_k, allocator);
@@ -170,7 +170,7 @@ void u32_table<A>::rebuild(uint8_t new_lg_size) {
 template<typename A>
 vector_u32<A> u32_table<A>::unwrapping_get_items() const {
   if (num_items == 0) return vector_u32<A>(slots.get_allocator());
-  const size_t table_size = 1 << lg_size;
+  const uint32_t table_size = 1 << lg_size;
   vector_u32<A> result(num_items, 0, slots.get_allocator());
   size_t i = 0;
   size_t l = 0;
diff --git a/cpc/test/compression_test.cpp b/cpc/test/compression_test.cpp
index ed98274..9ce3e06 100644
--- a/cpc/test/compression_test.cpp
+++ b/cpc/test/compression_test.cpp
@@ -27,38 +27,38 @@ namespace datasketches {
 typedef u32_table<std::allocator<void>> table;
 
 TEST_CASE("cpc sketch: compress and decompress pairs", "[cpc_sketch]") {
-  const int N = 200;
-  const int MAXWORDS = 1000;
+  const size_t N = 200;
+  const size_t MAXWORDS = 1000;
 
   HashState twoHashes;
   uint32_t pairArray[N];
   uint32_t pairArray2[N];
   uint64_t value = 35538947; // some arbitrary starting value
   const uint64_t golden64 = 0x9e3779b97f4a7c13ULL; // the golden ratio
-  for (int i = 0; i < N; i++) {
+  for (size_t i = 0; i < N; i++) {
     MurmurHash3_x64_128(&value, sizeof(value), 0, twoHashes);
     uint32_t rand = twoHashes.h1 & 0xffff;
     pairArray[i] = rand;
     value += golden64;
   }
   //table::knuth_shell_sort3(pairArray, 0, N - 1); // unsigned numerical sort
-  std::sort(pairArray, &pairArray[N]);
+  std::sort(pairArray, pairArray + N);
   uint32_t prev = UINT32_MAX;
-  int nxt = 0;
-  for (int i = 0; i < N; i++) { // uniquify
+  uint32_t nxt = 0;
+  for (size_t i = 0; i < N; i++) { // uniquify
     if (pairArray[i] != prev) {
       prev = pairArray[i];
       pairArray[nxt++] = pairArray[i];
     }
   }
-  int numPairs = nxt;
+  uint32_t numPairs = nxt;
 
   uint32_t compressedWords[MAXWORDS];
 
-  for (size_t numBaseBits = 0; numBaseBits <= 11; numBaseBits++) {
-    size_t numWordsWritten = get_compressor<std::allocator<void>>().low_level_compress_pairs(pairArray, numPairs, numBaseBits, compressedWords);
+  for (uint8_t numBaseBits = 0; numBaseBits <= 11; numBaseBits++) {
+    uint32_t numWordsWritten = get_compressor<std::allocator<void>>().low_level_compress_pairs(pairArray, numPairs, numBaseBits, compressedWords);
     get_compressor<std::allocator<void>>().low_level_uncompress_pairs(pairArray2, numPairs, numBaseBits, compressedWords, numWordsWritten);
-    for (int i = 0; i < numPairs; i++) {
+    for (size_t i = 0; i < numPairs; i++) {
       REQUIRE(pairArray[i] == pairArray2[i]);
     }
   }
diff --git a/cpc/test/cpc_union_test.cpp b/cpc/test/cpc_union_test.cpp
index e114cd0..688ea12 100644
--- a/cpc/test/cpc_union_test.cpp
+++ b/cpc/test/cpc_union_test.cpp
@@ -81,7 +81,7 @@ TEST_CASE("cpc union: large", "[cpc_union]") {
   cpc_union u(11);
   for (int i = 0; i < 1000; i++) {
     cpc_sketch tmp(11);
-    for (int i = 0; i < 10000; i++) {
+    for (int j = 0; j < 10000; j++) {
       s.update(key);
       tmp.update(key);
       key++;
