Posted to commits@cassandra.apache.org by br...@apache.org on 2020/02/12 17:22:01 UTC

[cassandra] branch trunk updated: Update hppc library to version 0.8.1

This is an automated email from the ASF dual-hosted git repository.

brandonwilliams pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/cassandra.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 3f2a248  Update hppc library to version 0.8.1
3f2a248 is described below

commit 3f2a248da11448ed9fd38467e4b353ed4df4888f
Author: Ekaterina Dimitrova <ek...@datastax.com>
AuthorDate: Tue Jan 28 12:19:11 2020 -0500

    Update hppc library to version 0.8.1
    
    Patch by Ekaterina Dimitrova, reviewed by brandonwilliams for CASSANDRA-12995
---
 CHANGES.txt                                        |   2 +-
 build.xml                                          |   2 +-
 lib/hppc-0.5.4.jar                                 | Bin 1305173 -> 0 bytes
 lib/hppc-0.8.1.jar                                 | Bin 0 -> 1159086 bytes
 .../org/apache/cassandra/db/ConsistencyLevel.java  |  10 ++++----
 .../index/sasi/analyzer/StandardAnalyzer.java      |   4 +--
 .../index/sasi/disk/DynamicTokenTreeBuilder.java   |   6 ++---
 .../cassandra/index/sasi/disk/TokenTree.java       |   4 +--
 .../index/sasi/memory/KeyRangeIterator.java        |   4 +--
 .../locator/AbstractReplicaCollection.java         |   8 +++---
 .../org/apache/cassandra/locator/ReplicaPlans.java |   8 +++---
 .../org/apache/cassandra/locator/Replicas.java     |  10 ++++----
 .../apache/cassandra/net/OutboundConnections.java  |   6 ++---
 .../reads/repair/RowIteratorMergeListener.java     |   6 ++---
 test/burn/org/apache/cassandra/net/Verifier.java   |  16 ++++++------
 .../apache/cassandra/index/sasi/SASIIndexTest.java |   2 +-
 .../cassandra/index/sasi/disk/TokenTreeTest.java   |  27 +++++++++++++--------
 .../cassandra/index/sasi/utils/LongIterator.java   |   4 +--
 .../sasi/utils/RangeIntersectionIteratorTest.java  |   4 +--
 19 files changed, 66 insertions(+), 57 deletions(-)

diff --git a/CHANGES.txt b/CHANGES.txt
index e6b3d6b..6235046 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,9 +1,9 @@
 4.0-alpha4
+ * Update hppc library to version 0.8.1 (CASSANDRA-12995)
  * Limit the dependencies used by UDFs/UDAs (CASSANDRA-14737)
  * Make native_transport_max_concurrent_requests_in_bytes updatable (CASSANDRA-15519)
  * Cleanup and improvements to IndexInfo/ColumnIndex (CASSANDRA-15469)
  * Potential Overflow in DatabaseDescriptor Functions That Convert Between KB/MB & Bytes (CASSANDRA-15470)
-
 4.0-alpha3
  * Restore monotonic read consistency guarantees for blocking read repair (CASSANDRA-14740)
  * Separate exceptions for CAS write timeout exceptions caused by contention and unkown result (CASSANDRA-15350)
diff --git a/build.xml b/build.xml
index ecc60f3..9c4f5b0 100644
--- a/build.xml
+++ b/build.xml
@@ -598,7 +598,7 @@
           <dependency groupId="org.fusesource" artifactId="sigar" version="1.6.4">
             <exclusion groupId="log4j" artifactId="log4j"/>
           </dependency>
-          <dependency groupId="com.carrotsearch" artifactId="hppc" version="0.5.4" />
+          <dependency groupId="com.carrotsearch" artifactId="hppc" version="0.8.1" />
           <dependency groupId="de.jflex" artifactId="jflex" version="1.6.0" />
           <dependency groupId="com.github.rholder" artifactId="snowball-stemmer" version="1.3.0.581.1" />
           <dependency groupId="com.googlecode.concurrent-trees" artifactId="concurrent-trees" version="2.4.0" />
diff --git a/lib/hppc-0.5.4.jar b/lib/hppc-0.5.4.jar
deleted file mode 100644
index d84b83b..0000000
Binary files a/lib/hppc-0.5.4.jar and /dev/null differ
diff --git a/lib/hppc-0.8.1.jar b/lib/hppc-0.8.1.jar
new file mode 100644
index 0000000..39a7c24
Binary files /dev/null and b/lib/hppc-0.8.1.jar differ
diff --git a/src/java/org/apache/cassandra/db/ConsistencyLevel.java b/src/java/org/apache/cassandra/db/ConsistencyLevel.java
index e685618..91e83a7 100644
--- a/src/java/org/apache/cassandra/db/ConsistencyLevel.java
+++ b/src/java/org/apache/cassandra/db/ConsistencyLevel.java
@@ -18,7 +18,7 @@
 package org.apache.cassandra.db;
 
 
-import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import com.carrotsearch.hppc.ObjectIntHashMap;
 import org.apache.cassandra.locator.Endpoints;
 import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.config.DatabaseDescriptor;
@@ -98,18 +98,18 @@ public enum ConsistencyLevel
         return localQuorumFor(keyspace, DatabaseDescriptor.getLocalDataCenter());
     }
 
-    public static ObjectIntOpenHashMap<String> eachQuorumForRead(Keyspace keyspace)
+    public static ObjectIntHashMap<String> eachQuorumForRead(Keyspace keyspace)
     {
         NetworkTopologyStrategy strategy = (NetworkTopologyStrategy) keyspace.getReplicationStrategy();
-        ObjectIntOpenHashMap<String> perDc = new ObjectIntOpenHashMap<>(((strategy.getDatacenters().size() + 1) * 4) / 3);
+        ObjectIntHashMap<String> perDc = new ObjectIntHashMap<>(((strategy.getDatacenters().size() + 1) * 4) / 3);
         for (String dc : strategy.getDatacenters())
             perDc.put(dc, ConsistencyLevel.localQuorumFor(keyspace, dc));
         return perDc;
     }
 
-    public static ObjectIntOpenHashMap<String> eachQuorumForWrite(Keyspace keyspace, Endpoints<?> pendingWithDown)
+    public static ObjectIntHashMap<String> eachQuorumForWrite(Keyspace keyspace, Endpoints<?> pendingWithDown)
     {
-        ObjectIntOpenHashMap<String> perDc = eachQuorumForRead(keyspace);
+        ObjectIntHashMap<String> perDc = eachQuorumForRead(keyspace);
         addToCountPerDc(perDc, pendingWithDown, 1);
         return perDc;
     }
diff --git a/src/java/org/apache/cassandra/index/sasi/analyzer/StandardAnalyzer.java b/src/java/org/apache/cassandra/index/sasi/analyzer/StandardAnalyzer.java
index e1a4a44..cd8feda 100644
--- a/src/java/org/apache/cassandra/index/sasi/analyzer/StandardAnalyzer.java
+++ b/src/java/org/apache/cassandra/index/sasi/analyzer/StandardAnalyzer.java
@@ -37,7 +37,7 @@ import org.apache.cassandra.utils.ByteBufferUtil;
 import com.google.common.annotations.VisibleForTesting;
 
 import com.carrotsearch.hppc.IntObjectMap;
-import com.carrotsearch.hppc.IntObjectOpenHashMap;
+import com.carrotsearch.hppc.IntObjectHashMap;
 
 public class StandardAnalyzer extends AbstractAnalyzer
 {
@@ -61,7 +61,7 @@ public class StandardAnalyzer extends AbstractAnalyzer
         KATAKANA(12),
         HANGUL(13);
 
-        private static final IntObjectMap<TokenType> TOKENS = new IntObjectOpenHashMap<>();
+        private static final IntObjectMap<TokenType> TOKENS = new IntObjectHashMap<>();
 
         static
         {
diff --git a/src/java/org/apache/cassandra/index/sasi/disk/DynamicTokenTreeBuilder.java b/src/java/org/apache/cassandra/index/sasi/disk/DynamicTokenTreeBuilder.java
index 2ddfd89..0e906e2 100644
--- a/src/java/org/apache/cassandra/index/sasi/disk/DynamicTokenTreeBuilder.java
+++ b/src/java/org/apache/cassandra/index/sasi/disk/DynamicTokenTreeBuilder.java
@@ -23,7 +23,7 @@ import java.util.*;
 import org.apache.cassandra.utils.AbstractIterator;
 import org.apache.cassandra.utils.Pair;
 
-import com.carrotsearch.hppc.LongOpenHashSet;
+import com.carrotsearch.hppc.LongHashSet;
 import com.carrotsearch.hppc.LongSet;
 import com.carrotsearch.hppc.cursors.LongCursor;
 
@@ -49,7 +49,7 @@ public class DynamicTokenTreeBuilder extends AbstractTokenTreeBuilder
     {
         LongSet found = tokens.get(token);
         if (found == null)
-            tokens.put(token, (found = new LongOpenHashSet(2)));
+            tokens.put(token, (found = new LongHashSet(2)));
 
         found.add(keyPosition);
     }
@@ -70,7 +70,7 @@ public class DynamicTokenTreeBuilder extends AbstractTokenTreeBuilder
         {
             LongSet found = tokens.get(newEntry.getKey());
             if (found == null)
-                tokens.put(newEntry.getKey(), (found = new LongOpenHashSet(4)));
+                tokens.put(newEntry.getKey(), (found = new LongHashSet(4)));
 
             for (LongCursor offset : newEntry.getValue())
                 found.add(offset.value);
diff --git a/src/java/org/apache/cassandra/index/sasi/disk/TokenTree.java b/src/java/org/apache/cassandra/index/sasi/disk/TokenTree.java
index c69ce00..e510cdd 100644
--- a/src/java/org/apache/cassandra/index/sasi/disk/TokenTree.java
+++ b/src/java/org/apache/cassandra/index/sasi/disk/TokenTree.java
@@ -27,7 +27,7 @@ import org.apache.cassandra.index.sasi.utils.MappedBuffer;
 import org.apache.cassandra.index.sasi.utils.RangeIterator;
 import org.apache.cassandra.utils.MergeIterator;
 
-import com.carrotsearch.hppc.LongOpenHashSet;
+import com.carrotsearch.hppc.LongHashSet;
 import com.carrotsearch.hppc.LongSet;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Function;
@@ -410,7 +410,7 @@ public class TokenTree
 
         public LongSet getOffsets()
         {
-            LongSet offsets = new LongOpenHashSet(4);
+            LongSet offsets = new LongHashSet(4);
             for (TokenInfo i : info)
             {
                 for (long offset : i.fetchOffsets())
diff --git a/src/java/org/apache/cassandra/index/sasi/memory/KeyRangeIterator.java b/src/java/org/apache/cassandra/index/sasi/memory/KeyRangeIterator.java
index a2f2c0e..59d1122 100644
--- a/src/java/org/apache/cassandra/index/sasi/memory/KeyRangeIterator.java
+++ b/src/java/org/apache/cassandra/index/sasi/memory/KeyRangeIterator.java
@@ -29,7 +29,7 @@ import org.apache.cassandra.index.sasi.utils.AbstractIterator;
 import org.apache.cassandra.index.sasi.utils.CombinedValue;
 import org.apache.cassandra.index.sasi.utils.RangeIterator;
 
-import com.carrotsearch.hppc.LongOpenHashSet;
+import com.carrotsearch.hppc.LongHashSet;
 import com.carrotsearch.hppc.LongSet;
 import com.google.common.collect.PeekingIterator;
 
@@ -95,7 +95,7 @@ public class KeyRangeIterator extends RangeIterator<Long, Token>
 
         public LongSet getOffsets()
         {
-            LongSet offsets = new LongOpenHashSet(4);
+            LongSet offsets = new LongHashSet(4);
             for (DecoratedKey key : keys)
                 offsets.add((long) key.getToken().getTokenValue());
 
diff --git a/src/java/org/apache/cassandra/locator/AbstractReplicaCollection.java b/src/java/org/apache/cassandra/locator/AbstractReplicaCollection.java
index 374afc6..2ec555c 100644
--- a/src/java/org/apache/cassandra/locator/AbstractReplicaCollection.java
+++ b/src/java/org/apache/cassandra/locator/AbstractReplicaCollection.java
@@ -18,7 +18,7 @@
 
 package org.apache.cassandra.locator;
 
-import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import com.carrotsearch.hppc.ObjectIntHashMap;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Iterables;
@@ -241,7 +241,7 @@ public abstract class AbstractReplicaCollection<C extends AbstractReplicaCollect
         // we maintain a map of key -> index in our list; this lets us share with subLists (or between Builder and snapshots)
         // since we only need to corroborate that the list index we find is within the bounds of our list
         // (if not, it's a shared map, and the key only occurs in one of our ancestors)
-        private final ObjectIntOpenHashMap<K> map;
+        private final ObjectIntHashMap<K> map;
         private Set<K> keySet;
         private Set<Entry<K, Replica>> entrySet;
 
@@ -285,7 +285,7 @@ public abstract class AbstractReplicaCollection<C extends AbstractReplicaCollect
             // 8*0.65 => RF=5; 16*0.65 ==> RF=10
             // use list capacity if empty, otherwise use actual list size
             this.toKey = toKey;
-            this.map = new ObjectIntOpenHashMap<>(list.size == 0 ? list.contents.length : list.size, 0.65f);
+            this.map = new ObjectIntHashMap<>(list.size == 0 ? list.contents.length : list.size, 0.65f);
             this.list = list;
             for (int i = list.begin ; i < list.begin + list.size ; ++i)
             {
@@ -294,7 +294,7 @@ public abstract class AbstractReplicaCollection<C extends AbstractReplicaCollect
             }
         }
 
-        public ReplicaMap(ReplicaList list, Function<Replica, K> toKey, ObjectIntOpenHashMap<K> map)
+        public ReplicaMap(ReplicaList list, Function<Replica, K> toKey, ObjectIntHashMap<K> map)
         {
             this.toKey = toKey;
             this.list = list;
diff --git a/src/java/org/apache/cassandra/locator/ReplicaPlans.java b/src/java/org/apache/cassandra/locator/ReplicaPlans.java
index 236706a..436a9ed 100644
--- a/src/java/org/apache/cassandra/locator/ReplicaPlans.java
+++ b/src/java/org/apache/cassandra/locator/ReplicaPlans.java
@@ -18,7 +18,7 @@
 
 package org.apache.cassandra.locator;
 
-import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import com.carrotsearch.hppc.ObjectIntHashMap;
 import com.carrotsearch.hppc.cursors.ObjectIntCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import com.google.common.annotations.VisibleForTesting;
@@ -406,7 +406,7 @@ public class ReplicaPlans
              * soft-ensure that we reach QUORUM in all DCs we are able to, by writing to every node;
              * even if we don't wait for ACK, we have in both cases sent sufficient messages.
               */
-            ObjectIntOpenHashMap<String> requiredPerDc = eachQuorumForWrite(keyspace, liveAndDown.pending());
+            ObjectIntHashMap<String> requiredPerDc = eachQuorumForWrite(keyspace, liveAndDown.pending());
             addToCountPerDc(requiredPerDc, live.natural().filter(Replica::isFull), -1);
             addToCountPerDc(requiredPerDc, live.pending(), -1);
 
@@ -462,7 +462,7 @@ public class ReplicaPlans
                 }
                 else
                 {
-                    ObjectIntOpenHashMap<String> requiredPerDc = eachQuorumForWrite(keyspace, liveAndDown.pending());
+                    ObjectIntHashMap<String> requiredPerDc = eachQuorumForWrite(keyspace, liveAndDown.pending());
                     addToCountPerDc(requiredPerDc, contacts.snapshot(), -1);
                     IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
                     for (Replica replica : filter(live.all(), r -> !contacts.contains(r)))
@@ -530,7 +530,7 @@ public class ReplicaPlans
     private static <E extends Endpoints<E>> E contactForEachQuorumRead(Keyspace keyspace, E candidates)
     {
         assert keyspace.getReplicationStrategy() instanceof NetworkTopologyStrategy;
-        ObjectIntOpenHashMap<String> perDc = eachQuorumForRead(keyspace);
+        ObjectIntHashMap<String> perDc = eachQuorumForRead(keyspace);
 
         final IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
         return candidates.filter(replica -> {
diff --git a/src/java/org/apache/cassandra/locator/Replicas.java b/src/java/org/apache/cassandra/locator/Replicas.java
index 9e6048a..1b299cf 100644
--- a/src/java/org/apache/cassandra/locator/Replicas.java
+++ b/src/java/org/apache/cassandra/locator/Replicas.java
@@ -23,8 +23,8 @@ import java.util.Collection;
 import java.util.List;
 import java.util.function.Predicate;
 
-import com.carrotsearch.hppc.ObjectIntOpenHashMap;
-import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
+import com.carrotsearch.hppc.ObjectIntHashMap;
+import com.carrotsearch.hppc.ObjectObjectHashMap;
 import com.google.common.collect.Iterables;
 import org.apache.cassandra.config.DatabaseDescriptor;
 
@@ -88,9 +88,9 @@ public class Replicas
     /**
      * count the number of full and transient replicas, separately, for each DC
      */
-    public static ObjectObjectOpenHashMap<String, ReplicaCount> countPerDc(Collection<String> dataCenters, Iterable<Replica> replicas)
+    public static ObjectObjectHashMap<String, ReplicaCount> countPerDc(Collection<String> dataCenters, Iterable<Replica> replicas)
     {
-        ObjectObjectOpenHashMap<String, ReplicaCount> perDc = new ObjectObjectOpenHashMap<>(dataCenters.size());
+        ObjectObjectHashMap<String, ReplicaCount> perDc = new ObjectObjectHashMap<>(dataCenters.size());
         for (String dc: dataCenters)
             perDc.put(dc, new ReplicaCount());
 
@@ -106,7 +106,7 @@ public class Replicas
     /**
      * increment each of the map's DC entries for each matching replica provided
      */
-    public static void addToCountPerDc(ObjectIntOpenHashMap<String> perDc, Iterable<Replica> replicas, int add)
+    public static void addToCountPerDc(ObjectIntHashMap<String> perDc, Iterable<Replica> replicas, int add)
     {
         IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
         for (Replica replica : replicas)
diff --git a/src/java/org/apache/cassandra/net/OutboundConnections.java b/src/java/org/apache/cassandra/net/OutboundConnections.java
index 5f9190b..c900908 100644
--- a/src/java/org/apache/cassandra/net/OutboundConnections.java
+++ b/src/java/org/apache/cassandra/net/OutboundConnections.java
@@ -27,7 +27,7 @@ import java.util.function.Function;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableList;
 
-import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
+import com.carrotsearch.hppc.ObjectObjectHashMap;
 import io.netty.util.concurrent.Future;
 import org.apache.cassandra.config.Config;
 import org.apache.cassandra.gms.Gossiper;
@@ -264,11 +264,11 @@ public class OutboundConnections
         }
 
         final MessagingService messagingService;
-        ObjectObjectOpenHashMap<InetAddressAndPort, Counts> prevEndpointToCounts = new ObjectObjectOpenHashMap<>();
+        ObjectObjectHashMap<InetAddressAndPort, Counts> prevEndpointToCounts = new ObjectObjectHashMap<>();
 
         private void closeUnusedSinceLastRun()
         {
-            ObjectObjectOpenHashMap<InetAddressAndPort, Counts> curEndpointToCounts = new ObjectObjectOpenHashMap<>();
+            ObjectObjectHashMap<InetAddressAndPort, Counts> curEndpointToCounts = new ObjectObjectHashMap<>();
             for (OutboundConnections connections : messagingService.channelManagers.values())
             {
                 Counts cur = new Counts(
diff --git a/src/java/org/apache/cassandra/service/reads/repair/RowIteratorMergeListener.java b/src/java/org/apache/cassandra/service/reads/repair/RowIteratorMergeListener.java
index fc4c351..ac019da 100644
--- a/src/java/org/apache/cassandra/service/reads/repair/RowIteratorMergeListener.java
+++ b/src/java/org/apache/cassandra/service/reads/repair/RowIteratorMergeListener.java
@@ -26,7 +26,7 @@ import java.util.function.Consumer;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Maps;
 
-import com.carrotsearch.hppc.ObjectIntOpenHashMap;
+import com.carrotsearch.hppc.ObjectIntHashMap;
 import net.nicoulaj.compilecommand.annotations.Inline;
 import org.apache.cassandra.db.Clustering;
 import org.apache.cassandra.db.ClusteringBound;
@@ -361,7 +361,7 @@ public class RowIteratorMergeListener<E extends Endpoints<E>>
             fullDiffRepair = repairs[repairs.length - 1].build();
 
         Map<Replica, Mutation> mutations = Maps.newHashMapWithExpectedSize(writePlan.contacts().size());
-        ObjectIntOpenHashMap<InetAddressAndPort> sourceIds = new ObjectIntOpenHashMap<>(((repairs.length + 1) * 4) / 3);
+        ObjectIntHashMap<InetAddressAndPort> sourceIds = new ObjectIntHashMap<>(((repairs.length + 1) * 4) / 3);
         for (int i = 0 ; i < readPlan.contacts().size() ; ++i)
             sourceIds.put(readPlan.contacts().get(i).endpoint(), 1 + i);
 
@@ -383,4 +383,4 @@ public class RowIteratorMergeListener<E extends Endpoints<E>>
 
         readRepair.repairPartition(partitionKey, mutations, writePlan);
     }
-}
\ No newline at end of file
+}
diff --git a/test/burn/org/apache/cassandra/net/Verifier.java b/test/burn/org/apache/cassandra/net/Verifier.java
index 8b48c9a..219e613 100644
--- a/test/burn/org/apache/cassandra/net/Verifier.java
+++ b/test/burn/org/apache/cassandra/net/Verifier.java
@@ -31,9 +31,11 @@ import java.util.function.Consumer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.carrotsearch.hppc.LongObjectOpenHashMap;
+import com.carrotsearch.hppc.LongObjectHashMap;
+import com.carrotsearch.hppc.predicates.LongObjectPredicate;
+import com.carrotsearch.hppc.procedures.LongObjectProcedure;
+import com.carrotsearch.hppc.procedures.LongProcedure;
 import org.apache.cassandra.net.Verifier.ExpiredMessageEvent.ExpirationType;
-import org.apache.cassandra.utils.ApproximateTime;
 import org.apache.cassandra.utils.concurrent.WaitQueue;
 
 import static java.util.concurrent.TimeUnit.*;
@@ -559,7 +561,7 @@ public class Verifier
         }
     }
 
-    private final LongObjectOpenHashMap<MessageState> messages = new LongObjectOpenHashMap<>();
+    private final LongObjectHashMap<MessageState> messages = new LongObjectHashMap<>();
 
     // messages start here, but may enter in a haphazard (non-sequential) fashion;
     // ENQUEUE_START, ENQUEUE_END both take place here, with the latter imposing bounds on the out-of-order appearance of messages.
@@ -667,7 +669,7 @@ public class Verifier
                             // TODO: even 2s or 5s are unreasonable periods of time without _any_ movement on a message waiting to arrive
                             //       this seems to happen regularly on MacOS, but we should confirm this does not happen on Linux
                             fail("Unreasonably long period spent waiting for sync (%dms)", NANOSECONDS.toMillis(now - lastEventAt));
-                            messages.forEach((k, v) -> {
+                            messages.<LongObjectProcedure<MessageState>>forEach((k, v) -> {
                                 failinfo("%s", v);
                                 controller.fail(v.message.serializedSize(v.messagingVersion == 0 ? current_version : v.messagingVersion));
                             });
@@ -1232,14 +1234,14 @@ public class Verifier
         }
     }
 
-    private static MessageState remove(long messageId, Queue<MessageState> queue, LongObjectOpenHashMap<MessageState> lookup)
+    private static MessageState remove(long messageId, Queue<MessageState> queue, LongObjectHashMap<MessageState> lookup)
     {
         MessageState m = lookup.remove(messageId);
         queue.remove(m);
         return m;
     }
 
-    private static void clearFirst(int count, Queue<MessageState> queue, LongObjectOpenHashMap<MessageState> lookup)
+    private static void clearFirst(int count, Queue<MessageState> queue, LongObjectHashMap<MessageState> lookup)
     {
         if (count > 0)
         {
@@ -1249,7 +1251,7 @@ public class Verifier
         }
     }
 
-    private static void clear(Queue<MessageState> queue, LongObjectOpenHashMap<MessageState> lookup)
+    private static void clear(Queue<MessageState> queue, LongObjectHashMap<MessageState> lookup)
     {
         if (!queue.isEmpty())
             clearFirst(queue.size(), queue, lookup);
diff --git a/test/unit/org/apache/cassandra/index/sasi/SASIIndexTest.java b/test/unit/org/apache/cassandra/index/sasi/SASIIndexTest.java
index e3ba45b..0be05b9 100644
--- a/test/unit/org/apache/cassandra/index/sasi/SASIIndexTest.java
+++ b/test/unit/org/apache/cassandra/index/sasi/SASIIndexTest.java
@@ -1113,7 +1113,7 @@ public class SASIIndexTest
         }
         while (updates.get() < writeCount);
 
-        // to make sure that after all of the right are done we can read all "count" worth of rows
+        // to make sure that after all of the writes are done we can read all "count" worth of rows
         Set<DecoratedKey> rows = getPaged(store, 100, buildExpression(firstName, Operator.LIKE_CONTAINS, UTF8Type.instance.decompose("a")),
                                                       buildExpression(age, Operator.EQ, Int32Type.instance.decompose(26)));
 
diff --git a/test/unit/org/apache/cassandra/index/sasi/disk/TokenTreeTest.java b/test/unit/org/apache/cassandra/index/sasi/disk/TokenTreeTest.java
index 72b9e0d..4339a62 100644
--- a/test/unit/org/apache/cassandra/index/sasi/disk/TokenTreeTest.java
+++ b/test/unit/org/apache/cassandra/index/sasi/disk/TokenTreeTest.java
@@ -47,7 +47,7 @@ import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.apache.commons.lang3.builder.HashCodeBuilder;
-import com.carrotsearch.hppc.LongOpenHashSet;
+import com.carrotsearch.hppc.LongHashSet;
 import com.carrotsearch.hppc.LongSet;
 import com.carrotsearch.hppc.cursors.LongCursor;
 import com.google.common.base.Function;
@@ -56,18 +56,26 @@ public class TokenTreeTest
 {
     private static final Function<Long, DecoratedKey> KEY_CONVERTER = new KeyConverter();
 
+    static LongSet singleOffset = new LongHashSet();
+    static LongSet bigSingleOffset = new LongHashSet();
+    static LongSet shortPackableCollision = new LongHashSet();
+    static LongSet intPackableCollision = new LongHashSet();
+    static LongSet multiCollision =  new LongHashSet();
+    static LongSet unpackableCollision = new LongHashSet();
+
     @BeforeClass
     public static void setupDD()
     {
         DatabaseDescriptor.daemonInitialization();
+        singleOffset.add(1);
+        bigSingleOffset.add(2147521562L);
+        shortPackableCollision.add(2L); shortPackableCollision.add(3L);
+        intPackableCollision.add(6L); intPackableCollision.add(((long) Short.MAX_VALUE) + 1);
+        multiCollision.add(3L); multiCollision.add(4L); multiCollision.add(5L);
+        unpackableCollision.add(((long) Short.MAX_VALUE) + 1); unpackableCollision.add(((long) Short.MAX_VALUE) + 2);
     }
 
-    static LongSet singleOffset = new LongOpenHashSet() {{ add(1); }};
-    static LongSet bigSingleOffset = new LongOpenHashSet() {{ add(2147521562L); }};
-    static LongSet shortPackableCollision = new LongOpenHashSet() {{ add(2L); add(3L); }}; // can pack two shorts
-    static LongSet intPackableCollision = new LongOpenHashSet() {{ add(6L); add(((long) Short.MAX_VALUE) + 1); }}; // can pack int & short
-    static LongSet multiCollision =  new LongOpenHashSet() {{ add(3L); add(4L); add(5L); }}; // can't pack
-    static LongSet unpackableCollision = new LongOpenHashSet() {{ add(((long) Short.MAX_VALUE) + 1); add(((long) Short.MAX_VALUE) + 2); }}; // can't pack
+
 
     final static SortedMap<Long, LongSet> simpleTokenMap = new TreeMap<Long, LongSet>()
     {{
@@ -422,7 +430,6 @@ public class TokenTreeTest
 
             LongSet found = result.getOffsets();
             Assert.assertEquals(entry.getValue(), found);
-
         }
     }
 
@@ -609,7 +616,7 @@ public class TokenTreeTest
 
     private static LongSet convert(long... values)
     {
-        LongSet result = new LongOpenHashSet(values.length);
+        LongSet result = new LongHashSet(values.length);
         for (long v : values)
             result.add(v);
 
@@ -640,7 +647,7 @@ public class TokenTreeTest
         {{
                 for (long i = minToken; i <= maxToken; i++)
                 {
-                    LongSet offsetSet = new LongOpenHashSet();
+                    LongSet offsetSet = new LongHashSet();
                     offsetSet.add(i);
                     put(i, offsetSet);
                 }
diff --git a/test/unit/org/apache/cassandra/index/sasi/utils/LongIterator.java b/test/unit/org/apache/cassandra/index/sasi/utils/LongIterator.java
index 205d28f..e7ff5b8 100644
--- a/test/unit/org/apache/cassandra/index/sasi/utils/LongIterator.java
+++ b/test/unit/org/apache/cassandra/index/sasi/utils/LongIterator.java
@@ -23,7 +23,7 @@ import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 
-import com.carrotsearch.hppc.LongOpenHashSet;
+import com.carrotsearch.hppc.LongHashSet;
 import com.carrotsearch.hppc.LongSet;
 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.index.sasi.disk.Token;
@@ -84,7 +84,7 @@ public class LongIterator extends RangeIterator<Long, Token>
         @Override
         public LongSet getOffsets()
         {
-            return new LongOpenHashSet(4);
+            return new LongHashSet(4);
         }
 
         @Override
diff --git a/test/unit/org/apache/cassandra/index/sasi/utils/RangeIntersectionIteratorTest.java b/test/unit/org/apache/cassandra/index/sasi/utils/RangeIntersectionIteratorTest.java
index 4dc9e3f..e796240 100644
--- a/test/unit/org/apache/cassandra/index/sasi/utils/RangeIntersectionIteratorTest.java
+++ b/test/unit/org/apache/cassandra/index/sasi/utils/RangeIntersectionIteratorTest.java
@@ -27,7 +27,7 @@ import org.apache.cassandra.index.sasi.utils.RangeIntersectionIterator.LookupInt
 import org.apache.cassandra.index.sasi.utils.RangeIntersectionIterator.BounceIntersectionIterator;
 import org.apache.cassandra.io.util.FileUtils;
 
-import com.carrotsearch.hppc.LongOpenHashSet;
+import com.carrotsearch.hppc.LongHashSet;
 import com.carrotsearch.hppc.LongSet;
 
 import org.junit.Assert;
@@ -387,7 +387,7 @@ public class RangeIntersectionIteratorTest
             for (int i = 0; i < ranges.length; i++)
             {
                 int rangeSize = random.nextInt(16, 512);
-                LongSet range = new LongOpenHashSet(rangeSize);
+                LongSet range = new LongHashSet(rangeSize);
 
                 for (int j = 0; j < rangeSize; j++)
                     range.add(random.nextLong(0, 100));

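The bulk of the patch above is a mechanical rename driven by the hppc 0.8 API: the "Open" infix was dropped from the container class names (ObjectIntOpenHashMap -> ObjectIntHashMap, LongOpenHashSet -> LongHashSet, and so on), and the call sites touched here pass the same constructor arguments to the renamed classes. A minimal before/after sketch follows; only the hppc class names and the put/add calls are taken from the diff above, the surrounding class and values are hypothetical and for illustration only, not part of the patch.

    import com.carrotsearch.hppc.LongHashSet;
    import com.carrotsearch.hppc.ObjectIntHashMap;

    public class HppcRenameSketch
    {
        public static void main(String[] args)
        {
            // hppc 0.5.x: new ObjectIntOpenHashMap<>(8, 0.65f)
            // hppc 0.8.x: same arguments, class renamed (no "Open" infix)
            ObjectIntHashMap<String> perDc = new ObjectIntHashMap<>(8, 0.65f);
            perDc.put("dc1", 2);

            // hppc 0.5.x: new LongOpenHashSet(4)
            // hppc 0.8.x: LongHashSet with the same expected-size hint
            LongHashSet offsets = new LongHashSet(4);
            offsets.add(42L);

            System.out.println(perDc + " " + offsets);
        }
    }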

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@cassandra.apache.org
For additional commands, e-mail: commits-help@cassandra.apache.org